Compare commits: colab-misc...lora-quant
1 commit: 1a22d16842

.github/workflows/preview-docs.yml (vendored, 6 changes)
@@ -4,12 +4,6 @@ on:
   pull_request:
     types: [opened, synchronize, reopened]
-
-    # Run the workflow only when one of these files changes
-    paths:
-      - '**/*.md' # any Markdown file
-      - '**/*.qmd' # any Quarto file
-      - '_quarto.yaml'
 
 permissions:
   checks: write
   contents: write
@@ -16,15 +16,8 @@ AXOLOTL_LOGO = """
      @@@@            @@@@@@@@@@@@@@@@
 """
 
-HAS_PRINTED_LOGO = False
-
-
 def print_axolotl_text_art():
     """Prints axolotl ASCII art."""
 
-    global HAS_PRINTED_LOGO  # pylint: disable=global-statement
-    if HAS_PRINTED_LOGO:
-        return
     if is_main_process():
-        HAS_PRINTED_LOGO = True
         print(AXOLOTL_LOGO)
@@ -15,7 +15,7 @@ from axolotl.cli.checks import check_accelerate_default_config, check_user_token
 from axolotl.cli.config import load_cfg
 from axolotl.common.datasets import load_datasets, load_preference_datasets
 from axolotl.evaluate import evaluate
-from axolotl.utils import patch_optimized_env
+from axolotl.utils import set_pytorch_cuda_alloc_conf
 from axolotl.utils.dict import DictDefault
 
 LOG = logging.getLogger(__name__)
@@ -32,7 +32,7 @@ def do_evaluate(cfg: DictDefault, cli_args: TrainerCliArgs) -> None:
         cli_args: CLI arguments.
     """
     # Enable expandable segments for cuda allocation to improve VRAM usage
-    patch_optimized_env()
+    set_pytorch_cuda_alloc_conf()
 
     # pylint: disable=duplicate-code
     print_axolotl_text_art()
@@ -29,7 +29,7 @@ from axolotl.cli.utils import (
     filter_none_kwargs,
 )
 from axolotl.integrations.lm_eval.cli import lm_eval
-from axolotl.utils import patch_optimized_env
+from axolotl.utils import set_pytorch_cuda_alloc_conf
 from axolotl.utils.schemas.config import AxolotlInputConfig
 
 
@@ -55,8 +55,6 @@ def preprocess(config: str, cloud: Optional[str] = None, **kwargs) -> None:
         kwargs: Additional keyword arguments which correspond to CLI args or `axolotl`
             config options.
     """
-    patch_optimized_env()
-
     if cloud:
         from axolotl.cli.cloud import do_cli_preprocess
 
@@ -102,7 +100,7 @@ def train(
             config options.
     """
     # Enable expandable segments for cuda allocation to improve VRAM usage
-    patch_optimized_env()
+    set_pytorch_cuda_alloc_conf()
 
     if "use_ray" in kwargs and kwargs["use_ray"]:
         accelerate = False
@@ -18,7 +18,7 @@ from axolotl.cli.config import load_cfg
 from axolotl.common.datasets import load_datasets, load_preference_datasets
 from axolotl.integrations.base import PluginManager
 from axolotl.train import train
-from axolotl.utils import patch_optimized_env
+from axolotl.utils import set_pytorch_cuda_alloc_conf
 from axolotl.utils.config import normalize_config, resolve_dtype
 from axolotl.utils.dict import DictDefault
 
@@ -36,7 +36,7 @@ def do_train(cfg: DictDefault, cli_args: TrainerCliArgs):
         cli_args: Training-specific CLI arguments.
     """
     # Enable expandable segments for cuda allocation to improve VRAM usage
-    patch_optimized_env()
+    set_pytorch_cuda_alloc_conf()
 
     print_axolotl_text_art()
     check_accelerate_default_config()
@@ -48,7 +48,6 @@ def load_datasets(
     *,
     cfg: DictDefault,
     cli_args: PreprocessCliArgs | TrainerCliArgs | None = None,
-    debug: bool = False,
 ) -> TrainDatasetMeta:
     """
     Loads one or more training or evaluation datasets, calling
@@ -57,7 +56,6 @@ def load_datasets(
     Args:
         cfg: Dictionary mapping `axolotl` config keys to values.
         cli_args: Command-specific CLI arguments.
-        debug: Whether to print out tokenization of sample
 
     Returns:
         Dataclass with fields for training and evaluation datasets and the computed
@@ -79,25 +77,20 @@ def load_datasets(
         preprocess_iterable=preprocess_iterable,
     )
 
-    if (  # pylint: disable=too-many-boolean-expressions
-        cli_args
-        and (
-            cli_args.debug
-            or cfg.debug
-            or cli_args.debug_text_only
-            or int(cli_args.debug_num_examples) > 0
-        )
-    ) or debug:
+    if cli_args and (
+        cli_args.debug
+        or cfg.debug
+        or cli_args.debug_text_only
+        or int(cli_args.debug_num_examples) > 0
+    ):
         LOG.info("check_dataset_labels...")
 
-        num_examples = cli_args.debug_num_examples if cli_args else 1
-        text_only = cli_args.debug_text_only if cli_args else False
-        train_samples = sample_dataset(train_dataset, num_examples)
+        train_samples = sample_dataset(train_dataset, cli_args.debug_num_examples)
         check_dataset_labels(
             train_samples,
             tokenizer,
-            num_examples=num_examples,
-            text_only=text_only,
+            num_examples=cli_args.debug_num_examples,
+            text_only=cli_args.debug_text_only,
         )
 
         LOG.info("printing prompters...")
@@ -21,7 +21,6 @@ import importlib.util
 import inspect
 import logging
 import math
-import os
 import sys
 from abc import abstractmethod
 from pathlib import Path
@@ -73,7 +72,6 @@ from axolotl.utils.callbacks import (
     SaveBetterTransformerModelCallback,
     bench_eval_callback_factory,
     causal_lm_bench_eval_callback_factory,
-    colab_inference_post_train_callback,
     log_prediction_callback_factory,
 )
 from axolotl.utils.callbacks.lisa import lisa_callback_factory
@@ -295,10 +293,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         if self.cfg.lisa_step_interval and self.cfg.lisa_n_layers:
             callbacks.append(lisa_callback_factory(trainer))
 
-        if any("COLAB_" in key for key in os.environ):
-            ColabCallback = colab_inference_post_train_callback(trainer)
-            callbacks.append(ColabCallback(self.cfg))
-
         callbacks.extend(super().get_post_trainer_create_callbacks(trainer=trainer))
         return callbacks
 
@@ -114,8 +114,6 @@ class AxolotlTrainer(
                 packing_efficiency_estimate=self.args.sample_packing_efficiency,
                 batch_max_len=batch_max_len,
                 batch_size=batch_size,
-                group_size=self.args.sample_packing_group_size,
-                bin_size=self.args.sample_packing_bin_size,
                 sequential=self.args.sample_packing_sequentially,
                 drop_last=True,
             )
@@ -177,8 +177,12 @@ class AxolotlDPOTrainer(RngLoaderMixin, SchedulerMixin, DPOTrainer):
         # dpo trainer may incorrectly prepend the bos_token_id to the dpo outputs
         if res["chosen_input_ids"][0] == processing_class.bos_token_id:
             res["chosen_input_ids"] = res["chosen_input_ids"][1:]
+            res["chosen_labels"] = res["chosen_labels"][1:]
+            res["chosen_attention_mask"] = res["chosen_attention_mask"][1:]
         if res["rejected_input_ids"][0] == processing_class.bos_token_id:
             res["rejected_input_ids"] = res["rejected_input_ids"][1:]
+            res["rejected_labels"] = res["rejected_labels"][1:]
+            res["rejected_attention_mask"] = res["rejected_attention_mask"][1:]
 
         return res
 
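Note on the AxolotlDPOTrainer hunk above: trimming only `chosen_input_ids`/`rejected_input_ids` left the parallel `labels` and `attention_mask` arrays one element longer than the inputs whenever a duplicated BOS token was stripped; the added lines keep all three aligned. A minimal, self-contained sketch of the invariant (hypothetical token lists, not the trainer's real tensors):

    # When a leading BOS is dropped from input_ids, labels and
    # attention_mask must be trimmed in lockstep to stay aligned.
    bos_token_id = 1  # hypothetical id
    res = {
        "chosen_input_ids": [1, 50, 60],
        "chosen_labels": [-100, 50, 60],
        "chosen_attention_mask": [1, 1, 1],
    }
    if res["chosen_input_ids"][0] == bos_token_id:
        for key in ("chosen_input_ids", "chosen_labels", "chosen_attention_mask"):
            res[key] = res[key][1:]
    assert len(set(map(len, res.values()))) == 1  # all three stay the same length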
@@ -72,7 +72,7 @@ class CutCrossEntropyPlugin(BasePlugin):
         if cfg.cut_cross_entropy:
             self._check_requirements()
 
-            from .monkeypatch.patch import (
+            from axolotl.integrations.cut_cross_entropy.monkeypatch.patch import (
                 cce_patch,
             )
 
@@ -151,30 +151,6 @@ class LigerPlugin(BasePlugin):
                 rms_norm=cfg.liger_rms_norm,
                 layer_norm=cfg.liger_layer_norm,
             )
-        elif cfg.model_config_type == "qwen3":
-            from axolotl.integrations.liger.models.qwen3 import (
-                apply_liger_kernel_to_qwen3,
-            )
-
-            apply_liger_kernel_to_qwen3(
-                cross_entropy=cfg.liger_cross_entropy,
-                fused_linear_cross_entropy=cfg.liger_fused_linear_cross_entropy,
-                glu_activation=cfg.liger_glu_activation,
-                rms_norm=cfg.liger_rms_norm,
-                layer_norm=cfg.liger_layer_norm,
-            )
-        elif cfg.model_config_type == "qwen3_moe":
-            from axolotl.integrations.liger.models.qwen3_moe import (
-                apply_liger_kernel_to_qwen3_moe,
-            )
-
-            apply_liger_kernel_to_qwen3_moe(
-                cross_entropy=cfg.liger_cross_entropy,
-                fused_linear_cross_entropy=cfg.liger_fused_linear_cross_entropy,
-                glu_activation=cfg.liger_glu_activation,
-                rms_norm=cfg.liger_rms_norm,
-                layer_norm=cfg.liger_layer_norm,
-            )
         else:
             logging.warning(
                 f"Unsupported model config type: {cfg.model_config_type}. Liger not applied."
@@ -1,160 +0,0 @@
-"""
-Liger FLCE for Qwen3. Based on transformers v4.51.3.
-"""
-
-import sys
-from typing import Optional, Tuple, Union
-
-import torch
-from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
-from transformers.cache_utils import Cache
-from transformers.modeling_outputs import CausalLMOutputWithPast
-
-
-def lce_forward(
-    self,
-    input_ids: Optional[torch.LongTensor] = None,
-    attention_mask: Optional[torch.Tensor] = None,
-    position_ids: Optional[torch.LongTensor] = None,
-    past_key_values: Optional[Cache] = None,
-    inputs_embeds: Optional[torch.FloatTensor] = None,
-    labels: Optional[torch.LongTensor] = None,
-    use_cache: Optional[bool] = None,
-    output_attentions: Optional[bool] = None,
-    output_hidden_states: Optional[bool] = None,
-    cache_position: Optional[torch.LongTensor] = None,
-    logits_to_keep: Union[int, torch.Tensor] = 0,
-    **kwargs,
-) -> Union[Tuple, CausalLMOutputWithPast]:
-    r"""
-    Args:
-        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
-            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
-            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
-            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
-
-        logits_to_keep (`int` or `torch.Tensor`, *optional*):
-            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
-            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
-            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
-            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
-            This is useful when using packed tensor format (single dimension for batch and sequence length).
-
-    Returns:
-    """
-
-    # pylint: disable=duplicate-code
-    output_attentions = (
-        output_attentions
-        if output_attentions is not None
-        else self.config.output_attentions
-    )
-    output_hidden_states = (
-        output_hidden_states
-        if output_hidden_states is not None
-        else self.config.output_hidden_states
-    )
-
-    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
-    outputs = self.model(
-        input_ids=input_ids,
-        attention_mask=attention_mask,
-        position_ids=position_ids,
-        past_key_values=past_key_values,
-        inputs_embeds=inputs_embeds,
-        use_cache=use_cache,
-        output_attentions=output_attentions,
-        output_hidden_states=output_hidden_states,
-        cache_position=cache_position,
-        **kwargs,
-    )
-
-    hidden_states = outputs[0]
-
-    logits = None
-    loss = None
-    # if in training mode, don't materialize logits
-    if self.training and (labels is not None):
-        loss = LigerForCausalLMLoss(
-            hidden_states=hidden_states,
-            lm_head_weight=self.lm_head.weight,
-            labels=labels,
-            hidden_size=self.config.hidden_size,
-            **kwargs,
-        )
-
-    else:  # if in inference mode materialize logits
-        slice_indices = (
-            slice(-logits_to_keep, None)
-            if isinstance(logits_to_keep, int)
-            else logits_to_keep
-        )
-        logits = self.lm_head(hidden_states[:, slice_indices, :])
-        if labels is not None:
-            loss = self.loss_function(
-                logits=logits,
-                labels=labels,
-                vocab_size=self.config.vocab_size,
-                **kwargs,
-            )
-
-    return CausalLMOutputWithPast(
-        loss=loss,
-        logits=logits,
-        past_key_values=outputs.past_key_values,
-        hidden_states=outputs.hidden_states,
-        attentions=outputs.attentions,
-    )
-
-
-def apply_liger_kernel_to_qwen3(
-    cross_entropy: bool = False,
-    fused_linear_cross_entropy: bool = False,
-    rms_norm: bool = False,
-    glu_activation: bool = False,
-    layer_norm: bool = False,
-    **kwargs,  # pylint: disable=unused-argument
-) -> None:
-    # pylint: disable=duplicate-code
-    """
-    Apply Liger kernels to replace original implementation in HuggingFace Llama models (2 and 3)
-
-    Args:
-        cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is False.
-        fused_linear_cross_entropy (bool):
-            Whether to apply Liger's fused linear cross entropy loss. Default is False.
-            `cross_entropy` and `fused_linear_cross_entropy` cannot both be False.
-            If `fused_linear_cross_entropy` is True, the logits will not be materialized but more memory efficient.
-        rms_norm (bool): Whether to apply Liger's RMSNorm. Default is False.
-        glu_activation (bool): Whether to apply Liger's SwiGLU MLP. Default is False.
-        layer_norm (bool): Whether to apply Liger's LayerNorm. Default is False.
-    """
-
-    import transformers.models.qwen3.modeling_qwen3  # noqa: F401 # pylint: disable=unused-import
-    from liger_kernel.transformers.functional import liger_cross_entropy
-    from liger_kernel.transformers.layer_norm import LigerLayerNorm
-    from liger_kernel.transformers.rms_norm import LigerRMSNorm
-    from liger_kernel.transformers.swiglu import LigerSwiGLUMLP
-
-    assert not (
-        cross_entropy and fused_linear_cross_entropy
-    ), "cross_entropy and fused_linear_cross_entropy cannot both be True."
-
-    modeling_qwen3 = sys.modules["transformers.models.qwen3.modeling_qwen3"]
-
-    if rms_norm:
-        modeling_qwen3.Qwen3RMSNorm = LigerRMSNorm
-
-    if glu_activation:
-        modeling_qwen3.Qwen3MLP = LigerSwiGLUMLP
-
-    if layer_norm:
-        modeling_qwen3.nn.LayerNorm = LigerLayerNorm
-
-    if cross_entropy:
-        from transformers.loss.loss_utils import nn
-
-        nn.functional.cross_entropy = liger_cross_entropy
-
-    if fused_linear_cross_entropy:
-        modeling_qwen3.Qwen3ForCausalLM.forward = lce_forward
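The deleted Qwen3 module above follows the standard Liger patching pattern: fetch the live `transformers.models.qwen3.modeling_qwen3` module object from `sys.modules` and reassign class attributes so every later reference resolves to the Liger kernel. A reduced sketch of that attribute-swap pattern on a synthetic module (stand-in names, not the real transformers module):

    import sys
    import types

    # Build a stand-in module with an original class attribute.
    mod = types.ModuleType("fake_modeling")

    class SlowNorm:  # stands in for Qwen3RMSNorm
        pass

    class FastNorm:  # stands in for LigerRMSNorm
        pass

    mod.Norm = SlowNorm
    sys.modules["fake_modeling"] = mod

    # The patch: swap the attribute on the live module object so all
    # importers that look it up by attribute now see the fast kernel.
    sys.modules["fake_modeling"].Norm = FastNorm
    assert mod.Norm is FastNorm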
@@ -1,191 +0,0 @@
-"""
-Liger FLCE for Qwen3 MoE. Based on transformers v4.51.3.
-"""
-
-import sys
-from copy import deepcopy
-from typing import List, Optional, Union
-
-import torch
-from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
-from transformers.modeling_outputs import MoeCausalLMOutputWithPast
-from transformers.models.qwen3_moe.modeling_qwen3_moe import load_balancing_loss_func
-
-
-def lce_forward(
-    self,
-    input_ids: Optional[torch.LongTensor] = None,
-    attention_mask: Optional[torch.Tensor] = None,
-    position_ids: Optional[torch.LongTensor] = None,
-    past_key_values: Optional[List[torch.FloatTensor]] = None,
-    inputs_embeds: Optional[torch.FloatTensor] = None,
-    labels: Optional[torch.LongTensor] = None,
-    use_cache: Optional[bool] = None,
-    output_attentions: Optional[bool] = None,
-    output_hidden_states: Optional[bool] = None,
-    output_router_logits: Optional[bool] = None,
-    cache_position: Optional[torch.LongTensor] = None,
-    logits_to_keep: Union[int, torch.Tensor] = 0,
-    **kwargs,
-) -> MoeCausalLMOutputWithPast:
-    r"""
-    Args:
-        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
-            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
-            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
-            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
-
-        logits_to_keep (`int` or `torch.Tensor`, *optional*):
-            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
-            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
-            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
-            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
-            This is useful when using packed tensor format (single dimension for batch and sequence length).
-
-    Returns:
-    """
-
-    # pylint: disable=duplicate-code
-    output_attentions = (
-        output_attentions
-        if output_attentions is not None
-        else self.config.output_attentions
-    )
-    output_router_logits = (
-        output_router_logits
-        if output_router_logits is not None
-        else self.config.output_router_logits
-    )
-    output_hidden_states = (
-        output_hidden_states
-        if output_hidden_states is not None
-        else self.config.output_hidden_states
-    )
-
-    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
-    outputs = self.model(
-        input_ids=input_ids,
-        attention_mask=attention_mask,
-        position_ids=position_ids,
-        past_key_values=past_key_values,
-        inputs_embeds=inputs_embeds,
-        use_cache=use_cache,
-        output_attentions=output_attentions,
-        output_hidden_states=output_hidden_states,
-        output_router_logits=output_router_logits,
-        cache_position=cache_position,
-        **kwargs,
-    )
-
-    hidden_states = outputs[0]
-
-    logits = None
-    loss = None
-    # if in training mode, don't materialize logits
-    if self.training and (labels is not None):
-        loss = LigerForCausalLMLoss(
-            hidden_states=hidden_states,
-            lm_head_weight=self.lm_head.weight,
-            labels=labels,
-            hidden_size=self.config.hidden_size,
-            **kwargs,
-        )
-
-    else:  # if in inference mode materialize logits
-        slice_indices = (
-            slice(-logits_to_keep, None)
-            if isinstance(logits_to_keep, int)
-            else logits_to_keep
-        )
-        logits = self.lm_head(hidden_states[:, slice_indices, :])
-        if labels is not None:
-            loss = self.loss_function(
-                logits=logits,
-                labels=labels,
-                vocab_size=self.config.vocab_size,
-                **kwargs,
-            )
-
-    aux_loss = None
-    if output_router_logits:
-        aux_loss = load_balancing_loss_func(
-            outputs.router_logits,
-            self.num_experts,
-            self.num_experts_per_tok,
-            attention_mask,
-        )
-        if labels is not None:
-            loss += self.router_aux_loss_coef * aux_loss.to(
-                loss.device
-            )  # make sure to reside in the same device
-
-    return MoeCausalLMOutputWithPast(
-        loss=loss,
-        aux_loss=aux_loss,
-        logits=logits,
-        past_key_values=outputs.past_key_values,
-        hidden_states=outputs.hidden_states,
-        attentions=outputs.attentions,
-    )
-
-
-def apply_liger_kernel_to_qwen3_moe(
-    cross_entropy: bool = False,
-    fused_linear_cross_entropy: bool = False,
-    rms_norm: bool = False,
-    glu_activation: bool = False,
-    layer_norm: bool = False,
-    **kwargs,  # pylint: disable=unused-argument
-) -> None:
-    # pylint: disable=duplicate-code
-    """
-    Apply Liger kernels to replace original implementation in HuggingFace Llama models (2 and 3)
-
-    Args:
-        cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is False.
-        fused_linear_cross_entropy (bool):
-            Whether to apply Liger's fused linear cross entropy loss. Default is False.
-            `cross_entropy` and `fused_linear_cross_entropy` cannot both be False.
-            If `fused_linear_cross_entropy` is True, the logits will not be materialized but more memory efficient.
-        rms_norm (bool): Whether to apply Liger's RMSNorm. Default is False.
-        glu_activation (bool): Whether to apply Liger's SwiGLU MLP. Default is False.
-        layer_norm (bool): Whether to apply Liger's LayerNorm. Default is False.
-    """
-
-    import transformers.models.qwen3_moe.modeling_qwen3_moe  # noqa: F401 # pylint: disable=unused-import
-    from liger_kernel.transformers.functional import liger_cross_entropy
-    from liger_kernel.transformers.layer_norm import LigerLayerNorm
-    from liger_kernel.transformers.rms_norm import LigerRMSNorm
-    from liger_kernel.transformers.swiglu import LigerSwiGLUMLP
-
-    assert not (
-        cross_entropy and fused_linear_cross_entropy
-    ), "cross_entropy and fused_linear_cross_entropy cannot both be True."
-
-    modeling_qwen3_moe = sys.modules["transformers.models.qwen3_moe.modeling_qwen3_moe"]
-
-    if rms_norm:
-        modeling_qwen3_moe.Qwen3MoeRMSNorm = LigerRMSNorm
-
-    if glu_activation:
-
-        def _liger_swiglu_mlp_wrapper(config, intermediate_size=None, **kwargs):
-            "Accepts intermediate_size to pass to LigerSwiGLUMLP"
-            # clone config to avoid modifying the original
-            config = deepcopy(config)
-            if intermediate_size:
-                setattr(config, "intermediate_size", intermediate_size)
-            return LigerSwiGLUMLP(config, **kwargs)
-
-        modeling_qwen3_moe.Qwen3MoeMLP = _liger_swiglu_mlp_wrapper
-
-    if layer_norm:
-        modeling_qwen3_moe.nn.LayerNorm = LigerLayerNorm
-
-    if cross_entropy:
-        from transformers.loss.loss_utils import nn
-
-        nn.functional.cross_entropy = liger_cross_entropy
-
-    if fused_linear_cross_entropy:
-        modeling_qwen3_moe.Qwen3MoeForCausalLM.forward = lce_forward
@@ -55,13 +55,16 @@ def dequantize(
     target_device = W.device
 
     # Extract quantization state
+    nested = False
     if not isinstance(quant_state, list):
         # New style quant_state class
        absmax = quant_state.absmax.to(target_device)
        shape = quant_state.shape
        dtype = quant_state.dtype
        blocksize = quant_state.blocksize
-        offset = quant_state.offset.to(target_device)
+        if quant_state.nested:
+            nested = True
+            offset = quant_state.offset.to(target_device)
        state2 = quant_state.state2
        absmax2 = state2.absmax.to(target_device)
        code2 = state2.code.to(target_device)
@@ -115,7 +118,8 @@ def dequantize(
         ctypes.c_int(n_elements_absmax),
     )
 
-    out_absmax += offset
+    if nested:
+        out_absmax += offset
 
     # Choose appropriate dequantization function
     fx = (
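Both `dequantize` hunks above make the absmax `offset` correction conditional on `quant_state.nested`, since only double-quantized (nested) bitsandbytes states carry an `offset` to re-add after decoding the second-level `state2`. A reduced sketch of the control flow (simplified stand-in objects, not the bitsandbytes API):

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class FakeQuantState:  # simplified stand-in, not bitsandbytes' QuantState
        nested: bool
        offset: Optional[float] = None

    def absmax_correction(out_absmax: float, qs: FakeQuantState) -> float:
        # only nested (double-quantized) states store an offset to re-add
        if qs.nested:
            return out_absmax + qs.offset
        return out_absmax

    assert absmax_correction(0.5, FakeQuantState(nested=False)) == 0.5
    assert absmax_correction(0.5, FakeQuantState(nested=True, offset=0.25)) == 0.75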
@@ -1,19 +0,0 @@
-"""
-attention module for attention monkeypatches
-"""
-
-from transformers.integrations.flash_attention import flash_attention_forward
-
-
-def patch_xformers_attn_over_fa2():
-    from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
-
-    from .xformers import xformers_attention_forward
-
-    ALL_ATTENTION_FUNCTIONS["flash_attention_2"] = xformers_attention_forward
-
-
-def unpatch_xformers_attn_over_fa2():
-    from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
-
-    ALL_ATTENTION_FUNCTIONS["flash_attention_2"] = flash_attention_forward()
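The deleted helpers above swap the `flash_attention_2` entry in transformers' `ALL_ATTENTION_FUNCTIONS` registry, a mapping from implementation name to forward callable. Note the unpatch path assigns `flash_attention_forward()` with call parentheses; restoring the registry presumably wants the function object itself, so the trailing call looks like a latent bug in the removed code. A reduced sketch of the registry-swap idea (toy dict, not the transformers registry):

    # Toy registry sketch: patching stores a different callable under the
    # same key; unpatching restores the original callable (the function
    # object, not the result of calling it).
    def flash_forward(*args, **kwargs):
        return "flash"

    def xformers_forward(*args, **kwargs):
        return "xformers"

    ATTENTION_FUNCTIONS = {"flash_attention_2": flash_forward}

    ATTENTION_FUNCTIONS["flash_attention_2"] = xformers_forward  # patch
    assert ATTENTION_FUNCTIONS["flash_attention_2"]() == "xformers"

    ATTENTION_FUNCTIONS["flash_attention_2"] = flash_forward  # unpatch
    assert ATTENTION_FUNCTIONS["flash_attention_2"]() == "flash"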
@@ -1,160 +0,0 @@
-"""
-xformers attention implementation for packing
-"""
-
-from typing import Optional
-
-import torch
-import xformers
-import xformers.ops.fmha
-from transformers.modeling_flash_attention_utils import (
-    _upad_input,
-)
-
-from axolotl.monkeypatch.utils import get_cu_seqlens_from_pos_ids
-
-xformers_attention = xformers.ops.fmha.memory_efficient_attention
-
-
-def xformers_attention_forward(
-    module: torch.nn.Module,
-    query: torch.Tensor,
-    key: torch.Tensor,
-    value: torch.Tensor,
-    attention_mask: Optional[torch.Tensor] = None,
-    position_ids: Optional[torch.LongTensor] = None,
-    dropout: float = 0.0,  # pylint: disable=unused-argument
-    scaling: Optional[float] = None,  # pylint: disable=unused-argument
-    sliding_window: Optional[int] = None,  # pylint: disable=unused-argument
-    softcap: Optional[float] = None,  # pylint: disable=unused-argument
-    cu_seq_lens_q: Optional[torch.LongTensor] = None,
-    cu_seq_lens_k: Optional[torch.LongTensor] = None,
-    max_length_q: Optional[int] = None,
-    max_length_k: Optional[int] = None,  # pylint: disable=unused-argument
-    **kwargs,  # pylint: disable=unused-argument
-):
-    # Get dimensions
-    # query: [batch, heads, seq_len, hidden_dim]
-    batch_size = query.size(0)
-    query_length = query.shape[2]
-    key_length = key.shape[2]
-
-    # Default causal mask
-    attn_bias = xformers.ops.LowerTriangularMask()
-
-    # Check if we have sliding window attention
-    has_sliding_window = sliding_window is not None and sliding_window < query_length
-
-    # Transpose dimensions for xformers (Q: [b, h, s, d] -> [b, s, h, d])
-    query = query.transpose(1, 2)
-    key = key.transpose(1, 2)
-    value = value.transpose(1, 2)
-
-    # Get GQA parameters
-    num_attention_heads = module.config.num_attention_heads
-    num_key_value_heads = module.config.num_key_value_heads
-    head_dim = query.size(-1)
-    is_gqa = num_attention_heads != num_key_value_heads
-    n_groups = num_attention_heads // num_key_value_heads if is_gqa else 1
-
-    # If position_ids is provided and check all examples do not contain only 1 sequence, If tensor in increasing
-    # then we probably have one sequence, otherwise it is packed. Additionally check we are in pre-fill/training stage.
-    # Use `flash_attn_varlen_func` to prevent cross-example attention and also allow padding free approach
-    if position_ids is not None and (
-        max_length_q is not None
-        or (query_length != 1 and not (torch.diff(position_ids, dim=-1) >= 0).all())
-    ):
-        if cu_seq_lens_q is None or cu_seq_lens_k is None:
-            cu_seq_lens_q = get_cu_seqlens_from_pos_ids(position_ids)[0]
-            cu_seq_lens_q = cu_seq_lens_q.squeeze()
-            seq_lengths = cu_seq_lens_q[1:] - cu_seq_lens_q[:-1]
-            attn_bias = (
-                xformers.ops.fmha.attn_bias.BlockDiagonalCausalMask.from_seqlens(
-                    q_seqlen=seq_lengths.tolist(),
-                )
-            )
-        else:
-            query = query.reshape(-1, query.size(-2), query.size(-1))
-            key = key.reshape(-1, key.size(-2), key.size(-1))
-            value = value.reshape(-1, value.size(-2), value.size(-1))
-
-        # Handle GQA
-        if is_gqa:
-            key = key.repeat_interleave(n_groups, dim=2)
-            value = value.repeat_interleave(n_groups, dim=2)
-
-    elif attention_mask is not None:
-        query, key, value, _, cu_seq_lens, _ = _upad_input(
-            query, key, value, attention_mask, query_length
-        )
-        cu_seq_lens_q, cu_seq_lens_k = cu_seq_lens
-        seq_lengths = []
-        for i in range(len(cu_seq_lens_q) - 1):
-            seq_lengths.append(cu_seq_lens_q[i + 1] - cu_seq_lens_q[i])
-        attn_bias = xformers.ops.fmha.attn_bias.BlockDiagonalCausalMask.from_seqlens(
-            q_seqlen=seq_lengths,
-            kv_seqlen=seq_lengths,
-        )
-
-        # Handle GQA
-        if is_gqa:
-            key = key.repeat_interleave(n_groups, dim=2)
-            value = value.repeat_interleave(n_groups, dim=2)
-    else:
-        # Handle Group Query Attention (GQA) using view/expand approach from reference
-        key = key.view(batch_size, key_length, num_key_value_heads, 1, head_dim)
-        value = value.view(batch_size, key_length, num_key_value_heads, 1, head_dim)
-        key = key.expand(
-            batch_size, key_length, num_key_value_heads, n_groups, head_dim
-        )
-        value = value.expand(
-            batch_size, key_length, num_key_value_heads, n_groups, head_dim
-        )
-
-        if module.training:
-            key = key.reshape(batch_size, key_length, num_attention_heads, head_dim)
-            value = value.reshape(batch_size, key_length, num_attention_heads, head_dim)
-
-            if has_sliding_window:
-                query = query.view(
-                    1, batch_size * query_length, num_attention_heads, head_dim
-                )
-                key = key.view(
-                    1, batch_size * key_length, num_attention_heads, head_dim
-                )
-                value = value.view(
-                    1, batch_size * key_length, num_attention_heads, head_dim
-                )
-        else:
-            query = query.view(
-                batch_size, query_length, num_key_value_heads, n_groups, head_dim
-            )
-
-            # If we need a sliding window attention
-            if has_sliding_window:
-                query = query.view(
-                    1,
-                    batch_size * query_length,
-                    num_key_value_heads,
-                    n_groups,
-                    head_dim,
-                )
-                key = key.view(
-                    1, batch_size * key_length, num_key_value_heads, n_groups, head_dim
-                )
-                value = value.view(
-                    1, batch_size * key_length, num_key_value_heads, n_groups, head_dim
-                )
-
-    # Run the xformers attention
-    attn_output = xformers_attention(
-        query,
-        key,
-        value,
-        attn_bias=attn_bias,
-    )
-
-    attn_output = attn_output.view(
-        batch_size, -1, attn_output.size(-2), attn_output.size(-1)
-    )
-    return attn_output, None
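The deleted forward above builds a block-diagonal causal bias from per-sequence lengths so packed examples cannot attend across sequence boundaries. A reduced sketch of deriving those lengths from cumulative sequence offsets (plain torch, no xformers dependency):

    import torch

    # cu_seqlens holds cumulative offsets of packed sequences:
    # three sequences of lengths 3, 2 and 4 packed into one row.
    cu_seqlens = torch.tensor([0, 3, 5, 9])
    seq_lengths = cu_seqlens[1:] - cu_seqlens[:-1]
    assert seq_lengths.tolist() == [3, 2, 4]

    # With xformers installed, these lengths parameterize the mask, e.g.:
    #   attn_bias = xformers.ops.fmha.attn_bias.BlockDiagonalCausalMask.from_seqlens(
    #       q_seqlen=seq_lengths.tolist(),
    #   )
    # so attention is causal within each block and masked across blocks.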
@@ -1,134 +0,0 @@
-"""
-chunked ce loss
-"""
-
-from typing import List, Optional
-
-import torch
-import torch.nn.functional as F
-
-
-# copied and modified from torchtune.modules.loss.CEWithChunkedOutputLoss
-class CEWithChunkedOutputLoss(torch.nn.Module):
-    """
-    Cross-entropy with chunked outputs that saves memory by only upcasting one chunk at a time.
-
-    For more details, please refer to: https://github.com/pytorch/torchtune/pull/1390
-    """
-
-    def __init__(self, num_output_chunks: int = 8, ignore_index: int = -100):
-        super().__init__()
-        self.num_output_chunks = num_output_chunks
-        self.ignore_index = ignore_index
-
-    def compute_cross_entropy(
-        self,
-        logits: torch.Tensor,
-        labels: torch.Tensor,
-        normalize: bool = True,  # pylint: disable=unused-argument
-    ) -> torch.Tensor:
-        """
-        Upcast logits to fp32 and compute cross entropy loss.
-        """
-        return F.cross_entropy(
-            logits.float(), labels, ignore_index=self.ignore_index, reduction="sum"
-        )
-
-    def forward(
-        self, logits: List[torch.Tensor], labels: torch.Tensor, reduction="sum"
-    ) -> torch.Tensor:
-        """
-        Args:
-            logits (List[torch.Tensor]): List of chunked logits of length
-                ``self.num_output_chunks``, where each chunk has shape
-                ``(batch_size, num_tokens / num_output_chunks, vocab_size)``.
-            labels (torch.Tensor): Ground truth labels of shape ``(batch_size, num_tokens)``.
-            reduction (str): The reduction to apply to the output.
-
-        Returns:
-            torch.Tensor: Cross entropy loss of shape (1,).
-        """
-
-        total_elements = (labels != self.ignore_index).sum()
-
-        # chunk and reshape labels (bsz, num_tokens, vocab) -> [(bsz*num_tokens/num_chunks, vocab)]
-        labels = [
-            target_chunk.reshape(-1)
-            for target_chunk in labels.chunk(self.num_output_chunks, dim=1)
-        ]
-        # reshape logits [(bsz, num_tokens/num_chunks, vocab)] -> [(bsz*num_tokens/num_chunks, vocab)]
-        logits = [
-            logit_chunk.reshape(-1, logit_chunk.size(-1)) for logit_chunk in logits
-        ]
-
-        # compute one chunk at a time
-        total_loss = 0.0
-        for logits_chunk, labels_chunk in zip(logits, labels):
-            total_loss += self.compute_cross_entropy(logits_chunk, labels_chunk)
-
-        if reduction == "sum":
-            return total_loss
-        return total_loss / total_elements
-
-
-def _build_chunked_ce_loss_fn(num_output_chunks: int = 8, ignore_index: int = -100):
-    loss_fn_ce = CEWithChunkedOutputLoss(num_output_chunks, ignore_index)
-    loss_fn_ce.compute_cross_entropy = torch.compile(
-        loss_fn_ce.compute_cross_entropy, backend="inductor"
-    )
-    return loss_fn_ce
-
-
-def get_causal_lm_loss(num_output_chunks: int = 8, ignore_index: int = -100):
-    loss_fn_ce = _build_chunked_ce_loss_fn(num_output_chunks, ignore_index)
-
-    def chunked_fix_cross_entropy(
-        source,
-        target,
-        num_items_in_batch: int = None,
-        ignore_index: int = -100,
-        **kwargs,
-    ):  # pylint: disable=unused-argument
-        reduction = "sum" if num_items_in_batch is not None else "mean"
-        logit_chunks = [  # pylint: disable=unnecessary-comprehension
-            chunk for chunk in source.chunk(loss_fn_ce.num_output_chunks, dim=1)
-        ]
-        loss = loss_fn_ce(logit_chunks, target, reduction=reduction)
-        if reduction == "sum":
-            loss = loss / num_items_in_batch
-        return loss
-
-    def for_causal_lm_chunked_loss(
-        logits,
-        labels,
-        vocab_size: int = None,  # pylint: disable=unused-argument
-        num_items_in_batch: Optional[int] = None,
-        ignore_index: int = -100,
-        shift_labels: Optional[torch.Tensor] = None,
-        **kwargs,
-    ) -> torch.Tensor:
-        # skip the upcast to float since we handle that in the chunking loss
-        if shift_labels is None:
-            # Shift so that tokens < n predict n
-            labels = F.pad(labels, (0, 1), value=ignore_index)
-            shift_labels = labels[..., 1:].contiguous()
-
-        # Skip Flattening the tokens
-        # Enable model parallelism
-        shift_labels = shift_labels.to(logits.device)
-        loss = chunked_fix_cross_entropy(
-            logits, shift_labels, num_items_in_batch, ignore_index, **kwargs
-        )
-        return loss
-
-    return for_causal_lm_chunked_loss
-
-
-def patch_chunked_ce_loss_fn(num_output_chunks: int = 8, ignore_index: int = -100):
-    import transformers.loss.loss_utils
-
-    for_causal_lm_chunked_loss = get_causal_lm_loss(num_output_chunks, ignore_index)
-    transformers.loss.loss_utils.ForCausalLMLoss = for_causal_lm_chunked_loss
-    transformers.loss.loss_utils.LOSS_MAPPING["ForCausalLM"] = (
-        for_causal_lm_chunked_loss
-    )
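The removed chunked loss trades peak memory for a loop: each logits chunk is upcast to fp32 and reduced with reduction="sum", and the summed loss is normalized once by the number of non-ignored labels. A small numerical sketch showing that chunked summation matches the unchunked cross entropy (plain torch, small shapes):

    import torch
    import torch.nn.functional as F

    torch.manual_seed(0)
    logits = torch.randn(2, 8, 16)          # (batch, tokens, vocab)
    labels = torch.randint(0, 16, (2, 8))
    labels[0, :2] = -100                    # some ignored positions

    # unchunked reference (mean over non-ignored targets)
    ref = F.cross_entropy(
        logits.reshape(-1, 16).float(), labels.reshape(-1), ignore_index=-100
    )

    # chunked: sum per chunk, normalize once at the end
    num_chunks = 4
    total = sum(
        F.cross_entropy(
            lc.reshape(-1, 16).float(), tc.reshape(-1),
            ignore_index=-100, reduction="sum",
        )
        for lc, tc in zip(logits.chunk(num_chunks, dim=1), labels.chunk(num_chunks, dim=1))
    )
    chunked = total / (labels != -100).sum()
    assert torch.allclose(ref, chunked)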
@@ -18,8 +18,6 @@ SUPPORTED_MULTIPACK_MODEL_TYPES = [
     "mixtral",
     "qwen2",
     "qwen2_moe",
-    "qwen3",
-    "qwen3_moe",
     "falcon",
     "phi",
     "phi3",
@@ -1,78 +0,0 @@
-"""
-Patch prepare_model_for_kbit_training to not upcast everything
-"""
-
-import inspect
-import logging
-
-import peft
-
-import axolotl
-from axolotl.monkeypatch.utils import detab_code
-
-LOG = logging.getLogger(__name__)
-
-ORIGINAL_PREPARE_CODE = """
-    for param in model.parameters():
-        if (
-            (param.dtype == torch.float16) or (param.dtype == torch.bfloat16)
-        ) and param.__class__.__name__ != "Params4bit":
-            param.data = param.data.to(torch.float32)
-"""
-
-PATCHED_PREPARE_CODE = """
-    for name, param in model.named_parameters():
-        if (
-            (param.dtype == torch.float16) or (param.dtype == torch.bfloat16)
-        ) and param.__class__.__name__ != "Params4bit" and "norm" in name:
-            param.data = param.data.to(torch.float32)
-"""
-
-
-def get_peft_prep_code() -> str:
-    prepare = inspect.getsource(peft.utils.other.prepare_model_for_kbit_training)
-    return prepare
-
-
-def check_peft_prep_code_is_patchable() -> bool:
-    prep_code = get_peft_prep_code()
-    prep_code, _ = detab_code(prep_code)
-    return ORIGINAL_PREPARE_CODE in prep_code
-
-
-def patch_peft_prep_code():
-    """
-    monkeypatch create_accelerator_and_postprocess so it checks for additional kwargs
-    """
-
-    try:
-        prep_code = get_peft_prep_code()
-    except OSError:
-        return
-    peft.utils.other._original_create_accelerator_and_postprocess = (  # pylint: disable=protected-access
-        prep_code
-    )
-    prep_code, _ = detab_code(prep_code)
-    if ORIGINAL_PREPARE_CODE not in prep_code:
-        return
-
-    prep_code = prep_code.replace(ORIGINAL_PREPARE_CODE, PATCHED_PREPARE_CODE)
-    prep_code = prep_code.replace(
-        "def prepare_model_for_kbit_training(",
-        "def fixed_prepare_model_for_kbit_training(",
-        1,
-    )
-
-    items_to_import = []
-    for item in dir(peft.utils.other):
-        if item in prep_code:
-            items_to_import.append(item)
-
-    exec(  # pylint: disable=exec-used  # nosec B102
-        "from peft.utils.other import (" + ", ".join(x for x in items_to_import) + ")",
-        globals(),
-    )
-    exec(prep_code, globals())  # pylint: disable=exec-used  # nosec B102
-    LOG.info("patching prepare_model_for_kbit_training to allow for overrides")
-    peft.utils.other.prepare_model_for_kbit_training = fixed_prepare_model_for_kbit_training  # pylint: disable=protected-access # pylint: disable=undefined-variable # noqa: F821
-    axolotl.utils.models.prepare_model_for_kbit_training = fixed_prepare_model_for_kbit_training  # pylint: disable=protected-access # pylint: disable=undefined-variable # noqa: F821
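The removed module above uses a source-rewriting monkeypatch: grab the target function's source with `inspect.getsource`, string-replace the offending block, `exec` the edited source, and bind the result back over the original. A reduced sketch of that pattern on a toy function (not the peft code; run as a script so `getsource` can find the file):

    import inspect

    def greet():
        return "hello"

    # source-rewrite pattern: getsource -> textual replace -> exec -> rebind
    src = inspect.getsource(greet)
    src = src.replace('"hello"', '"patched"')
    src = src.replace("def greet(", "def fixed_greet(", 1)

    namespace: dict = {}
    exec(src, namespace)  # nosec - sketch only
    greet = namespace["fixed_greet"]
    assert greet() == "patched"

The real patch is fragile in the same way this sketch is: it only works while the upstream source still contains the exact text being replaced, which is why the module guards with `ORIGINAL_PREPARE_CODE in prep_code` before rewriting.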
@@ -21,7 +21,6 @@ from transformers import PreTrainedModel, PreTrainedTokenizer, ProcessorMixin
 from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
 from transformers.trainer import Trainer
 
-from axolotl.cli.art import print_axolotl_text_art
 from axolotl.common.datasets import TrainDatasetMeta
 from axolotl.contribs.lgpl import (  # pylint: disable = no-name-in-module
     fix_untrained_tokens,
@@ -517,8 +516,6 @@ def train(
     Returns:
         Tuple of (model, tokenizer) after training
     """
-    print_axolotl_text_art()
-
     # Setup model, tokenizer, (causal or RLHF) trainer, etc.
     (
         trainer,
@@ -43,12 +43,3 @@ def set_pytorch_cuda_alloc_conf():
     os.environ["PYTORCH_CUDA_ALLOC_CONF"] = (
         "expandable_segments:True,roundup_power2_divisions:16"
     )
-
-
-def patch_optimized_env():
-    """
-    Patch environment variables to improve VRAM usage and increase download speed
-    """
-    if os.getenv("HF_HUB_ENABLE_HF_TRANSFER") is None:
-        os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
-    set_pytorch_cuda_alloc_conf()
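With `patch_optimized_env` removed, callers now invoke `set_pytorch_cuda_alloc_conf` directly; the `HF_HUB_ENABLE_HF_TRANSFER=1` default from the deleted wrapper is dropped along with it. A minimal usage sketch of what the surviving helper sets (values mirror the assignment shown above; note that PyTorch reads this variable when CUDA is first initialized, so it must run before any CUDA work):

    import os

    # Must run before torch initializes CUDA for the setting to take effect.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = (
        "expandable_segments:True,roundup_power2_divisions:16"
    )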
@@ -868,29 +868,3 @@ class GCCallback(TrainerCallback):
         ):
             torch.cuda.empty_cache()
             gc.collect()
-
-
-def colab_inference_post_train_callback(trainer: Trainer):
-    class ColabCallback(TrainerCallback):
-        """Callback to prep model for inference on Google Colab"""
-
-        def __init__(self, cfg):
-            self.gpu_name = torch.cuda.get_device_name(0)
-            self.cfg = cfg
-
-        def on_train_end(
-            self, args, state, control, **kwargs
-        ):  # pylint: disable=unused-argument
-            """
-            handle T4 gpu, we need to convert attention to eager for inference
-            """
-            if "Tesla T4" in self.gpu_name and self.cfg.xformers_attention:
-                trainer.model.eval()
-                trainer.model.config._attn_implementation = (  # pylint: disable=protected-access
-                    "eager"
-                )
-                trainer.model.gradient_checkpointing_disable()
-                trainer.model.config.use_cache = True
-                trainer.model.eval()
-
-    return ColabCallback
@@ -59,7 +59,7 @@ def choose_device(cfg):
 
 def resolve_dtype(cfg):
     if (
-        not cfg.fp16 and cfg.bf16 == "auto" and not cfg.use_ray
+        cfg.bf16 == "auto" and not cfg.use_ray
     ):  # if we use ray we want to defer this check to the worker node
         if is_torch_bf16_gpu_available():
             LOG.debug("bf16 support detected, enabling for this configuration.")
@@ -70,9 +70,6 @@ def resolve_dtype(cfg):
         if cfg.fp16 is None and not cfg.float16:
             cfg.fp16 = True
 
-    if cfg.fp16 and cfg.bf16 == "auto":
-        cfg.bf16 = False
-
     if cfg.device == "mps":
         cfg.load_in_8bit = False
         cfg.tf32 = False
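The two `resolve_dtype` hunks above drop the `not cfg.fp16` guard and the fp16-forces-`bf16 = False` override, so `bf16="auto"` now resolves from hardware support alone. A simplified, standalone sketch of the resulting rule (my condensation of the visible branches, not the full function):

    def resolve_bf16(bf16, bf16_gpu_available: bool):
        # after this change: "auto" follows hardware support;
        # explicit True/False values pass through untouched
        if bf16 == "auto":
            return bf16_gpu_available
        return bf16

    assert resolve_bf16("auto", True) is True
    assert resolve_bf16("auto", False) is False
    assert resolve_bf16(False, True) is False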
@@ -556,30 +556,11 @@ class ModelLoader:
         self.auto_model_loader = AutoModelForCausalLM  # pylint: disable=invalid-name
 
     def apply_patches(self) -> None:
-        if self.cfg.xformers_attention and self.cfg.sample_packing:
-            from axolotl.monkeypatch.attention import patch_xformers_attn_over_fa2
-
-            patch_xformers_attn_over_fa2()
-            self.cfg.flash_attention = True
-
-        if self.cfg.chunked_cross_entropy:
-            from axolotl.monkeypatch.loss.chunked import patch_chunked_ce_loss_fn
-
-            if self.cfg.chunked_cross_entropy_num_chunks:
-                patch_chunked_ce_loss_fn(self.cfg.chunked_cross_entropy_num_chunks)
-            else:
-                patch_chunked_ce_loss_fn()
-
         if self.cfg.fsdp_config and str(self.cfg.fsdp_config.fsdp_version) == "2":
             from axolotl.monkeypatch.accelerate.fsdp2 import patch_accelerate_fsdp_utils
 
             patch_accelerate_fsdp_utils()
 
-        if self.cfg.adapter:
-            from axolotl.monkeypatch.peft.utils import patch_peft_prep_code
-
-            patch_peft_prep_code()
-
         if self.cfg.flex_attention:
             from axolotl.monkeypatch.attention.flex_attn import (
                 patch_flex_make_mask,
@@ -1199,7 +1180,7 @@ class ModelLoader:
             ],
         )
 
-    def prepare_model(self, qlora_fsdp: bool) -> None:
+    def prepare_model(self, qlora_fsdp) -> None:
         skip_prepare_model_for_kbit_training = False
         if self.cfg.model_config_type == "qwen" and self.cfg.adapter == "lora":
             # Qwen doesn't play nicely with LoRA if this is enabled
@@ -1328,7 +1309,7 @@ class ModelLoader:
 
         # make sure these are fp32 per Ramesh et al. (2021)
         embedding_modules = get_linear_embedding_layers(self.cfg.model_config_type)
-        if self.cfg.fsdp:
+        if not self.cfg.fsdp:
             # FSDP doesn't like mixed Float and BFloat16
             self.convert_embedding_modules_dtype(
                 embedding_modules,
@@ -1,13 +1,10 @@
+# pylint: skip-file
 """
-Multipack Batch Sampler - An efficient batch sampler for packing variable-length sequences
-into fixed-capacity batches to optimize memory usage and training throughput.
+Multipack Batch Sampler
 """
 
 import logging
 import math
-from concurrent.futures import ProcessPoolExecutor
-from multiprocessing import cpu_count
-from typing import Iterable, List, Union
+from typing import Any, Iterable, List, Union
 
 import numba
 import numpy as np
@@ -16,39 +13,26 @@ from torch.utils.data import BatchSampler, Sampler, SequentialSampler
 from axolotl.utils.distributed import reduce_and_broadcast
 
 LOG = logging.getLogger(__name__)
 LOG.setLevel(logging.INFO)
 
 
 @numba.njit
-def ffd_check(sequence_lengths: np.ndarray, bin_capacity: int, num_bins: int):
-    """
-    First-fit-decreasing bin packing algorithm check
-
-    Checks if sequences with the given lengths could fit in the specified number of bins
-
-    Args:
-        sequence_lengths: Array of sequence lengths
-        bin_capacity: Maximum capacity of each bin
-        num_bins: Number of bins available
-
-    Returns:
-        True if all sequences can be packed, False otherwise
-    """
-    # Sort sequence lengths in descending order for optimal packing
-    sequence_lengths = np.sort(sequence_lengths)[::-1]
-    # Initialize all bins with full capacity
-    bins = np.full((num_bins,), bin_capacity, dtype=sequence_lengths.dtype)
-
-    # Try to place each sequence in the first bin it fits
-    for size in sequence_lengths:
+def ffd_check(a: np.ndarray, c: int, n: int):
+    # First-fit-decreasing bin packing
+    # Check if a[] could fit in n bins with capacity c
+    # https://en.wikipedia.org/wiki/First-fit-decreasing_bin_packing
+
+    a = np.sort(a)[::-1]
+    bins = np.full((n,), c, dtype=a.dtype)
+    for size in a:
         not_found = True
-        for idx in range(num_bins):
+        for idx in range(n):
             if bins[idx] >= size:
                 bins[idx] -= size
                 not_found = False
                 break
 
-        # If no bin could fit this sequence, packing failed
         if not_found:
             return False
 
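`ffd_check` is a feasibility test: sort lengths descending, give every bin capacity c, and greedily place each item in the first bin with room. A small worked example (pure NumPy, no numba, mirroring the checked-in logic):

    import numpy as np

    def ffd_check(a: np.ndarray, c: int, n: int) -> bool:
        # first-fit-decreasing feasibility: can lengths `a` fit in n bins of capacity c?
        a = np.sort(a)[::-1]
        bins = np.full((n,), c, dtype=a.dtype)
        for size in a:
            placed = False
            for idx in range(n):
                if bins[idx] >= size:
                    bins[idx] -= size
                    placed = True
                    break
            if not placed:
                return False
        return True

    lengths = np.array([7, 5, 4, 3, 1])
    assert ffd_check(lengths, c=10, n=2)       # {7, 3} and {5, 4, 1} both fit
    assert not ffd_check(lengths, c=10, n=1)   # 20 tokens cannot fit in one bin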
@@ -56,380 +40,240 @@ def ffd_check(sequence_lengths: np.ndarray, bin_capacity: int, num_bins: int):


 @numba.njit
-def pack_group(
-    sequence_lengths: np.ndarray,
-    group_offset: int,
-    bin_capacity: int,
-    max_bins: int,
-    bin_size: int,
-    safe_mode: bool = True,
-):
-    """
-    Pack a group of sequences into bins using First-Fit Decreasing algorithm
-
-    Args:
-        sequence_lengths: Array of sequence lengths
-        group_offset: Offset to apply to indices when returning results
-        bin_capacity: Maximum capacity of each bin
-        max_bins: Maximum number of bins to use
-        bin_size: Maximum number of sequences per bin
-        safe_mode: If True, use a more conservative packing approach
-
-    Returns:
-        List of bins, where each bin contains indices of sequences assigned to it
-    """
-    # Get sorting indices and sort lengths in descending order
-    indices = np.argsort(sequence_lengths)[::-1]
-    sorted_lengths = sequence_lengths[indices]
-
-    bins_remaining_space: list = []  # Tracks remaining capacity in each bin
-    bins_assigned_sequences: list = []  # Tracks sequence indices assigned to each bin
-
-    for seq_id, size in enumerate(sorted_lengths):
-        global_idx = indices[seq_id] + group_offset
-
-        # Try to place sequence in existing bins
-        add_new_bin = True
-        for bin_idx, _ in enumerate(bins_remaining_space):
-            if (
-                bins_remaining_space[bin_idx] >= size
-                and len(bins_assigned_sequences[bin_idx]) < bin_size
-            ):
-                bins_remaining_space[bin_idx] -= size
-                bins_assigned_sequences[bin_idx].append(global_idx)
-                add_new_bin = False
+def ffd_with_result(a: np.ndarray, c: int, start_index: int):
+    # First-fit-decreasing bin packing (with result return)
+
+    indices = np.argsort(a)[::-1]
+    a = a[indices]
+
+    bins: List[Any] = []
+    bins_result: List[Any] = []
+    for a_id, size in enumerate(a):
+        add_new = True
+        for idx in range(len(bins)):
+            if bins[idx] >= size:
+                bins[idx] -= size
+                bins_result[idx].append(indices[a_id] + start_index)
+                add_new = False
                 break

-        # Create a new bin if needed and if we haven't reached the limit
-        if add_new_bin:
-            if len(bins_remaining_space) >= max_bins and safe_mode:
-                # In safe mode, skip items that would exceed max_bins
-                continue
-            bins_remaining_space.append(bin_capacity - size)
-            bins_assigned_sequences.append([global_idx])
-
-        # Safety check to avoid infinite bins
-        if len(bins_remaining_space) > len(sequence_lengths):
-            break
-
-    return bins_assigned_sequences
-
-
-# Define a standalone function for multiprocessing
-def _process_group(args):
-    group_lengths, start_idx, bin_capacity, max_bins, bin_size, safe_mode = args
-    return pack_group(
-        group_lengths, start_idx, bin_capacity, max_bins, bin_size, safe_mode
-    )
-
-
-def pack_parallel(
-    sequence_lengths: np.ndarray,
-    bin_capacity: int,
-    group_size: int,
-    bin_size: int,
-    num_processes: int | None = None,
-    safe_mode: bool = True,
-):
-    """
-    Pack sequences into bins using parallel processing
-
-    Args:
-        sequence_lengths: Array of sequence lengths
-        bin_capacity: Maximum capacity of each bin as total number of tokens
-        group_size: Number of sequences to process in each group
-        bin_size: Maximum number of bins to use
-        num_processes: Number of parallel processes to use
-        safe_mode: If True, use a more conservative packing approach
-
-    Returns:
-        List of bins, where each bin contains indices of sequences assigned to it
-    """
-    num_items = len(sequence_lengths)
-    if num_processes is None:
-        num_processes = max(1, min(num_items // group_size, cpu_count()))
-
-    # Create tasks for parallel processing
-    tasks = []
-    for i in range(0, num_items, group_size):
-        group_lengths = sequence_lengths[i : i + group_size]
-        max_bins = len(group_lengths)  # Allow as many bins as items in the group
-        tasks.append((group_lengths, i, bin_capacity, max_bins, bin_size, safe_mode))
-
-    # Process groups in parallel
-    all_bins = []
-    with ProcessPoolExecutor(max_workers=num_processes) as executor:
-        for group_bins in executor.map(_process_group, tasks):
-            all_bins.extend(group_bins)
-
-    return all_bins
+        if add_new:
+            bins.append(c - size)
+            bins_result.append([indices[a_id] + start_index])
+
+    return bins_result


 @numba.njit
-def allocate_sequentially(
-    sequence_lengths: np.ndarray, rank: int, bin_capacity: int, num_ranks: int
+def allocate(
+    lengths: np.ndarray, lengths_cumsum: np.ndarray, rank: int, c: int, n: int
 ):
+    # Dynamic batch allocator, similar to Multifit
+    # https://en.wikipedia.org/wiki/Multifit_algorithm
+    # ~99.5% efficiency on OpenChat training set (12 * 2048 ctx len)
+
+    s = 0
+    start_index = 0
+    result = []
+
+    while True:
+        # binary search [l, r)
+        left = 1
+        right = 1 + np.searchsorted(lengths_cumsum[start_index:], s + c * n, "right")
+
+        while right - left > 1:
+            mid = (left + right) // 2
+            if ffd_check(lengths[start_index : start_index + mid], c, n):
+                left = mid
+            else:
+                right = mid
+
+        # use length l
+        batch = ffd_with_result(
+            lengths[start_index : start_index + left], c, start_index
+        )
+        assert len(batch) <= n
+        if len(batch) < n:
+            break
+
+        start_index += left
+        s = lengths_cumsum[start_index - 1]
+
+        # add local rank
+        result.append(batch[rank])
+
+    return result, s, len(result) * c * n
+
+
+@numba.njit
+def allocate_sequentially(lengths: np.ndarray, rank: int, c: int, n: int):
     """
     Sequential allocator that preserves example order

     Parameters:
-        sequence_lengths: The lengths of all examples
-        rank: The current rank (for distributed training)
-        bin_capacity: The capacity of each bin (maximum sequence length)
-        num_ranks: Number of ranks (processes/GPUs)
+    - lengths: The lengths of all examples
+    - rank: The current rank (for distributed training)
+    - c: The capacity of each bin (maximum sequence length)
+    - n: Number of ranks

     Returns:
-        rank_batches: List of batches for the current rank
-        total_tokens_used: Number of actual example tokens
-        total_token_slots: Maximum theoretical number of example tokens (number of bins * bin capacity)
+    - result: List of batches for the current rank
+    - total_used: Number of actual example tokens
+    - total_slots: Maximum theoretical number of example tokens (number of bins * bin capacity)
     """
-    rank_batches = []
-    total_tokens_used = 0
+    result = []
+    total_used = 0

     # First, do sequential packing into bins
     all_bins = []
-    current_bin = []
-    remaining_capacity = bin_capacity
+    current_bin = [0 for i in range(0)]  # numba hint
+    remaining_capacity = c

-    # Process each sequence in order
-    for idx, size in enumerate(sequence_lengths):
+    for idx, size in enumerate(lengths):
         if size <= remaining_capacity:
             # Example fits in current bin
             current_bin.append(idx)
             remaining_capacity -= size
-            total_tokens_used += size
+            total_used += size
         else:
             # Example doesn't fit, start a new bin
             if current_bin:  # Add non-empty bin to all_bins
                 all_bins.append(current_bin)
             current_bin = [idx]
-            remaining_capacity = bin_capacity - size
-            total_tokens_used += size
+            remaining_capacity = c - size
+            total_used += size

     # Add the last bin if not empty
     if current_bin:
         all_bins.append(current_bin)

-    # Assign bins to ranks - each rank gets every num_ranks-th bin
-    for bin_idx in range(rank, len(all_bins), num_ranks):
-        rank_batches.append(all_bins[bin_idx])
+    # Assign bins to ranks - each rank gets every n-th bin
+    for bin_idx in range(rank, len(all_bins), n):
+        result.append(all_bins[bin_idx])

-    return rank_batches, total_tokens_used, len(all_bins) * bin_capacity
+    return result, total_used, len(all_bins) * c


 class MultipackBatchSampler(BatchSampler):
-    """
-    Batch sampler class for efficient packing of variable-length sequences
-
-    This sampler packs sequences into fixed-capacity bins (batches) to maximize
-    GPU memory utilization and training throughput by reducing padding.
-
-    It supports both parallel packing (using FFD algorithm) and
-    sequential packing (preserving original sequence order).
-    """
+    """Batch sampler class for multipack"""

     def __init__(
         self,
         sampler: Union[Sampler[int], Iterable[int]],
-        batch_size: int,  # Number of bins per batch
-        batch_max_len: int,  # Maximum sequence length (bin capacity)
-        lengths: np.ndarray,  # Sequence lengths
-        packing_efficiency_estimate: float = 1.0,  # Initial efficiency estimate
-        drop_last: bool = False,  # Whether to drop incomplete batches
-        num_count_samples: int = 16,  # Number of samples to estimate batch count
-        sequential: bool = False,  # Whether to use sequential packing
-        group_size: int = 100_000,  # Size of groups for parallel packing
-        bin_size: int = 200,  # The max number of samples that can be packed in a single bin
-        num_processes: int | None = None,  # Number of processes for parallel packing
-        safe_mode: bool = True,  # Conservative packing to prevent training instability
-        **kwargs,  # pylint: disable=unused-argument
+        batch_size: int,
+        batch_max_len: int,
+        lengths: np.ndarray,
+        packing_efficiency_estimate: float = 1.0,
+        drop_last: bool = False,
+        num_count_samples: int = 16,
+        sequential: bool = False,
+        **kwargs,
     ):
         super().__init__(sampler, batch_size, drop_last)
         self.batch_size = batch_size
         self.batch_max_len = batch_max_len
-        self.lengths = np.array(lengths, dtype=np.int32)
+        self.lengths: np.ndarray = lengths
         self.packing_efficiency_estimate = packing_efficiency_estimate or 1.0
         self.sequential = sequential
-        self.group_size = group_size
-        self.bin_size = bin_size
-        self.num_processes = num_processes
-        self.safe_mode = safe_mode

         assert isinstance(self.lengths, np.ndarray)

         self.epoch = 0

-        # Efficiency statistics tracking
-        self.total_tokens_used = 0
-        self.total_token_slots = 0
+        # statistics
+        self.eff_total_used = 0
+        self.eff_total_slots = 0

-        # The number of times to calculate batches to determine minimum packed dataset length
+        # The number of times to calculate the batches to determine the minimum packed dataset length for the local rank
         self.num_count_samples = num_count_samples
-        # Minimum packed dataset length across all ranks (determined by gather/broadcast)
+        # the minimum packed dataset length across all ranks determined by a gather/broadcast
         self.len_across_ranks = None

-        # Cache for batches
-        self._batches = None
-
         if self.sequential and not isinstance(sampler, SequentialSampler):
-            LOG.warning(
+            LOG.warn(
                 "using sequential sample packing with non-sequential sampler, did you want to also enable curriculum_sampling?"
             )

     def set_epoch(self, epoch: int):
-        """Set the epoch number, used for reproducible shuffling across epochs"""
         self.epoch = epoch
-        self._batches = None  # Invalidate batch cache

     def generate_batches(self, set_stats=False):
-        """
-        Generate packed batches for training
-
-        Args:
-            set_stats: Whether to update efficiency statistics
-
-        Returns:
-            List of batches, where each batch contains multiple bins,
-            and each bin contains multiple sequence indices
-        """
-        if self._batches is not None:
-            return self._batches
-
-        # Get indices from the sampler
-        indices = [  # pylint: disable=unnecessary-comprehension
-            idx for idx in self.sampler
-        ]
-
-        # Get lengths of the selected sequences
+        indices = [idx for idx in self.sampler]
+
         lengths = self.lengths[indices]
+        lengths_cumsum = np.cumsum(lengths)

-        # Pack sequences into bins using either sequential or parallel packing
         if self.sequential:
-            bins, total_used, total_slots = allocate_sequentially(
-                lengths,
+            batches, total_used, total_slots = allocate_sequentially(
+                lengths=lengths,
                 rank=0,
-                bin_capacity=self.batch_max_len,
-                num_ranks=1,
+                c=self.batch_max_len,
+                n=1,
             )
         else:
-            # Use parallel packing
-            all_bins = pack_parallel(
-                lengths,
-                bin_capacity=self.batch_max_len,
-                group_size=self.group_size,
-                bin_size=self.bin_size,
-                num_processes=self.num_processes,
-                safe_mode=self.safe_mode,
+            batches, total_used, total_slots = allocate(
+                lengths=lengths,
+                lengths_cumsum=lengths_cumsum,
+                rank=0,
+                c=self.batch_max_len,
+                n=1,
             )

-            # Map bin indices back to original indices
-            bins = [
-                [indices[b_idx] for b_idx in bin_indices] for bin_indices in all_bins
-            ]
-
-            # Calculate efficiency statistics
-            total_used = lengths.sum()
-            total_slots = len(all_bins) * self.batch_max_len
-
-        # Group bins into batches (each batch contains batch_size bins)
         batches = [
-            bins[i : i + self.batch_size] for i in range(0, len(bins), self.batch_size)
+            [
+                [indices[b_idx] for b_idx in batch]
+                for batch in batches[i : i + self.batch_size]
+            ]
+            for i in range(0, len(batches), self.batch_size)
         ]

-        # Drop last batch if requested and it's incomplete
-        if self.drop_last and len(batches[-1]) < self.batch_size:
-            batches = batches[:-1]
-            # Adjust total_slots if we dropped a batch
-            if not self.sequential:
-                total_slots -= (self.batch_size - len(batches[-1])) * self.batch_max_len
-
-        # Update statistics if requested
+        # statistics
         if set_stats:
-            self.total_tokens_used += total_used
-            self.total_token_slots += total_slots
+            self.eff_total_used += total_used
+            self.eff_total_slots += total_slots

-        self._batches = batches
         return batches

     def __iter__(self):
-        """
-        Return an iterator over batches
-
-        The batches are truncated to match the minimum number of batches across all ranks
-        to ensure distributed training balance
-        """
         batches = self.generate_batches(set_stats=True)
         if self.len_across_ranks:
-            # Truncate batches to ensure all ranks have the same number of batches
+            # make sure the batches we iterate over is truncated to the same min length across all ranks
             batches = batches[: self.len_across_ranks]
         return iter(batches)

+    def num_batches(self):
+        batches = self.generate_batches(set_stats=True)
+        return len(batches)
+
     def efficiency(self):
-        """
-        Calculate the packing efficiency (ratio of tokens used to total token slots)
-        Higher is better - 1.0 would mean perfect packing with no wasted space
-        """
-        if self.total_token_slots == 0:
-            self.generate_batches(set_stats=True)
-        if self.total_token_slots == 0:
-            return 0.0
-        # Return a Python float instead of potentially a numpy float
-        return float(self.total_tokens_used / self.total_token_slots)
+        return self.eff_total_used / self.eff_total_slots

     def gather_efficiency(self):
-        """
-        Gather and synchronize packing efficiency estimates across all distributed ranks
-        Returns a conservative efficiency estimate based on the measurements
-        """
-
         def calc_sample_packing_eff_est(estimates: List[float]):
             LOG.debug(f"sample_packing_eff_est across ranks: {repr(estimates)}")
-            # Use 99.7% of max observed efficiency as a safe estimate
-            max_eff = max(float(eff) for eff in estimates)
-            return math.floor(0.997 * max_eff)
+            return math.floor(0.997 * max(estimates))

-        # Gather efficiency from all ranks and apply the calculation function
         sample_packing_actual_eff_all = reduce_and_broadcast(
-            lambda: float(self.efficiency()),  # pylint: disable=unnecessary-lambda
+            lambda: self.efficiency(),  # pylint: disable=unnecessary-lambda
             calc_sample_packing_eff_est,
         )
-        # Quantize to 0.5% intervals for stability
         sample_packing_eff_est = (
             math.ceil(sample_packing_actual_eff_all * 200.0) / 200.0
         )
         return sample_packing_eff_est

     def gather_len_batches(self, num):
-        """
-        Gather and synchronize batch counts across all distributed ranks
-        Returns the minimum number of batches available on any rank
-        """
-
         def calc_min_len(estimates: list[(int, float)]):
             LOG.info(f"gather_len_batches: {repr(estimates)}")
             return math.floor(min(estimates))

-        # Find minimum batch count across ranks to ensure balance
         min_len_batches = reduce_and_broadcast(lambda: num, calc_min_len)
         return min_len_batches

     def __len__(self):
-        """
-        Return the total number of batches that will be yielded by this sampler
-
-        This is calculated as the minimum number of batches available on any rank
-        to ensure balanced distributed training
-        """
-        if self._batches is None:
-            self._batches = self.generate_batches(set_stats=True)
-
-        if self.len_across_ranks is None:
-            # Sample multiple times to get stable estimate
-            len_batches = min(  # pylint: disable=consider-using-generator
-                [len(self._batches) for _ in range(self.num_count_samples)]
+        if not self.len_across_ranks:
+            len_batches = min(
+                [self.num_batches() for _ in range(self.num_count_samples)]
             )
-            # Gather minimum across all ranks
             self.len_across_ranks = self.gather_len_batches(len_batches)

         return self.len_across_ranks
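Note: on the incoming side, `allocate` binary-searches for the longest prefix of the (shuffled) lengths that `ffd_check` confirms fits into `n * c` token slots, materializes that prefix with `ffd_with_result`, and keeps only `batch[rank]` so each rank receives a disjoint slice of bins. A sketch of how the sampler might be wired into a `DataLoader` (`train_dataset` and `collate_packed` are assumptions for illustration, not names from the diff):

```python
import numpy as np
from torch.utils.data import DataLoader, RandomSampler

# assuming the module path of the file shown above
from axolotl.utils.samplers.multipack import MultipackBatchSampler

# precomputed token count per example; the column name is hypothetical
lengths = np.array([len(ex["input_ids"]) for ex in train_dataset], dtype=np.int32)

batch_sampler = MultipackBatchSampler(
    RandomSampler(train_dataset),
    batch_size=4,        # bins per batch
    batch_max_len=4096,  # token capacity of each bin
    lengths=lengths,
    drop_last=True,
)

# each yielded batch is a list of bins (each bin a list of example indices),
# so the dataset/collator pair must accept and pack nested index lists
loader = DataLoader(train_dataset, batch_sampler=batch_sampler, collate_fn=collate_packed)
```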
@@ -242,9 +242,6 @@ class AxolotlInputConfig(
     unsloth_rms_norm: bool | None = None
     unsloth_rope: bool | None = None

-    chunked_cross_entropy: bool | None = None
-    chunked_cross_entropy_num_chunks: int | None = None
-
     lora_mlp_kernel: bool | None = None
     lora_qkv_kernel: bool | None = None
     lora_o_kernel: bool | None = None
@@ -438,6 +435,16 @@ class AxolotlInputConfig(
         )
         return data

+    @model_validator(mode="before")
+    @classmethod
+    def check_sample_packing_w_xformers(cls, data):
+        if data.get("sample_packing") and data.get("xformers_attention"):
+            raise ValueError(
+                "sample_packing not compatible with xformers_attention. Use flash_attention"
+            )
+
+        return data
+
     @model_validator(mode="before")
     @classmethod
     # pylint: disable=duplicate-code
@@ -505,17 +512,10 @@ class AxolotlInputConfig(
     @model_validator(mode="before")
     @classmethod
     def hint_sample_packing_padding(cls, data):
-        if data.get("sample_packing"):
-            pad_to_sequence_len = data.get("pad_to_sequence_len")
-            if pad_to_sequence_len is False:
-                LOG.warning(
-                    "`pad_to_sequence_len: true` is recommended when using sample_packing"
-                )
-            elif pad_to_sequence_len is None:
-                LOG.info(
-                    "Setting `pad_to_sequence_len: true` to prevent memory leaks when sample_packing"
-                )
-                data["pad_to_sequence_len"] = True
+        if data.get("sample_packing") and not data.get("pad_to_sequence_len"):
+            LOG.warning(
+                "`pad_to_sequence_len: true` is recommended when using sample_packing"
+            )
         return data

     @model_validator(mode="before")
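Note: `check_sample_packing_w_xformers` follows the standard pydantic v2 pattern for cross-field checks: a `model_validator(mode="before")` sees the raw input mapping ahead of field validation, so it can reject incompatible option combinations early. A self-contained sketch of the same pattern (the model below is an illustrative stand-in, not `AxolotlInputConfig`):

```python
from pydantic import BaseModel, model_validator


class PackingConfig(BaseModel):
    sample_packing: bool | None = None
    xformers_attention: bool | None = None

    @model_validator(mode="before")
    @classmethod
    def check_sample_packing_w_xformers(cls, data):
        # `data` is the raw input dict here, since mode="before" runs
        # prior to field parsing
        if data.get("sample_packing") and data.get("xformers_attention"):
            raise ValueError(
                "sample_packing not compatible with xformers_attention. Use flash_attention"
            )
        return data


PackingConfig(sample_packing=True)  # ok
# PackingConfig(sample_packing=True, xformers_attention=True) -> ValidationError
```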
@@ -648,7 +648,7 @@ class TestValidation(BaseValidation):
             DictDefault(
                 {
                     "sample_packing": True,
-                    "pad_to_sequence_len": False,
+                    "pad_to_sequence_len": None,
                     "flash_attention": True,
                 }
             )
@@ -662,26 +662,6 @@ class TestValidation(BaseValidation):
                 for record in self._caplog.records
             )

-    def test_packing_autoset(self, minimal_cfg):
-        cfg = (
-            DictDefault(
-                {
-                    "sample_packing": True,
-                    "pad_to_sequence_len": None,
-                    "flash_attention": True,
-                }
-            )
-            | minimal_cfg
-        )
-        with self._caplog.at_level(logging.INFO):
-            cfg = validate_config(cfg)
-        assert any(
-            "Setting `pad_to_sequence_len: true` to prevent memory leaks when sample_packing"
-            in record.message
-            for record in self._caplog.records
-        )
-        assert cfg.pad_to_sequence_len is True
-
     def test_merge_lora_no_bf16_fail(self, minimal_cfg):
         """
         This is assumed to be run on a CPU machine, so bf16 is not supported.
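Note: with the simplified `hint_sample_packing_padding`, any falsy `pad_to_sequence_len` (both `False` and an unset `None`) now produces the warning, which is why the fixture above switches to `None` and the autoset test disappears. A minimal caplog-style sketch of testing that warning path (standalone pytest; `validate` is a stand-in for `validate_config`):

```python
import logging

import pytest

LOG = logging.getLogger("axolotl")


def validate(cfg: dict) -> dict:  # stand-in for validate_config
    if cfg.get("sample_packing") and not cfg.get("pad_to_sequence_len"):
        LOG.warning("`pad_to_sequence_len: true` is recommended when using sample_packing")
    return cfg


def test_hint_fires(caplog: pytest.LogCaptureFixture):
    with caplog.at_level(logging.WARNING):
        validate({"sample_packing": True, "pad_to_sequence_len": None})
    assert any("pad_to_sequence_len" in record.message for record in caplog.records)
```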
@@ -1,40 +0,0 @@
-"""
-test suite for chunked cross entropy
-"""
-
-import pytest
-import torch
-from torch import nn
-
-from axolotl.monkeypatch.loss.chunked import get_causal_lm_loss
-
-
-@pytest.fixture
-def chunked_fixtures():
-    model_dim = 512
-    vocab_size = 1024 * 256
-    seq_len = 2048
-    batch_size = 1
-
-    lm_head = nn.Linear(model_dim, vocab_size)
-    hidden_state = torch.randn(batch_size, seq_len, model_dim)
-    labels = torch.randint(low=0, high=vocab_size, size=(batch_size, seq_len))
-    return lm_head, hidden_state, labels, vocab_size
-
-
-def test_chunked_forward(chunked_fixtures):  # pylint: disable=redefined-outer-name
-    lm_head, hidden_state, labels, vocab_size = chunked_fixtures
-    lm_loss = get_causal_lm_loss()
-
-    logits = lm_head(hidden_state)
-
-    chunked_lm_loss = lm_loss(logits, labels)
-
-    logits_flattened = logits.view(-1, vocab_size)
-    labels_flattened = labels.view(-1)
-
-    loss = nn.functional.cross_entropy(
-        logits_flattened.float(), labels_flattened, reduction="mean"
-    )
-
-    assert torch.allclose(chunked_lm_loss, loss, atol=1e-2, rtol=1e-2)
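Note: the removed test exercised the key invariant of chunked cross entropy: splitting the sequence into chunks must not change the global mean loss, which holds as long as per-chunk sums (not per-chunk means) are accumulated and divided by the total token count. A minimal sketch of that equivalence, independent of the removed `get_causal_lm_loss` helper (real label masking via `ignore_index` would additionally require counting only non-ignored tokens):

```python
import torch
from torch import nn


def chunked_cross_entropy(logits: torch.Tensor, labels: torch.Tensor, num_chunks: int = 4):
    # accumulate per-chunk *sums* so the final division reproduces the global mean
    total = logits.new_zeros(())
    count = 0
    for logit_chunk, label_chunk in zip(
        logits.chunk(num_chunks, dim=1), labels.chunk(num_chunks, dim=1)
    ):
        total = total + nn.functional.cross_entropy(
            logit_chunk.reshape(-1, logit_chunk.size(-1)).float(),
            label_chunk.reshape(-1),
            reduction="sum",
        )
        count += label_chunk.numel()
    return total / count


logits = torch.randn(1, 64, 100)
labels = torch.randint(0, 100, (1, 64))
full = nn.functional.cross_entropy(logits.view(-1, 100).float(), labels.view(-1))
assert torch.allclose(chunked_cross_entropy(logits, labels), full, atol=1e-5)
```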