Compare commits
3 Commits: accelerato...liger-065

| Author | SHA1 | Date |
|---|---|---|
| | 2d13a06722 | |
| | ba27e830e8 | |
| | 8f7219e139 | |
@@ -2,10 +2,10 @@

 # START section of dependencies that don't install on Darwin/MacOS
 bitsandbytes==0.49.1
-triton>=3.0.0
+triton>=3.4.0
 mamba-ssm==1.2.0.post1
 xformers>=0.0.23.post1
-liger-kernel==0.6.4
+liger-kernel==0.6.5
 # END section

 packaging==26.0
@@ -258,11 +258,6 @@ class TrainerBuilderBase(abc.ABC):
         bf16 = bf16 if bf16 is not None else False
         training_args_kwargs["bf16"] = bf16

-        if self.cfg.fp8:
-            training_args_kwargs["fp8"] = True
-            if self.cfg.fp8_enable_fsdp_float8_all_gather:
-                training_args_kwargs["enable_fsdp_float8_all_gather:"] = True
-
     def _configure_scheduler(self, training_args_kwargs: dict):
         if self.cfg.lr_scheduler in ["one_cycle", "rex"]:
             training_args_kwargs["lr_scheduler_type"] = "cosine"
@@ -584,9 +584,11 @@ class AxolotlTrainer(

         super().create_accelerator_and_postprocess()

-    def build_fp8_accelerator_args(self) -> dict[str, Any]:
-        args = {}
-        if self.args.fp8:
+    def additional_accelerator_args(
+        self, fp8: bool = False, enable_fsdp_float8_all_gather: bool = False, **kwargs
+    ) -> dict[str, Any]:
+        ret_kwargs = {}
+        if fp8:
             from accelerate.utils import AORecipeKwargs
             from torchao.float8 import Float8LinearConfig

@@ -594,22 +596,15 @@ class AxolotlTrainer(
             # scaling strategy. See more details here:
             # https://github.com/pytorch/ao/tree/main/torchao/float8.
             config = Float8LinearConfig(
-                enable_fsdp_float8_all_gather=self.args.enable_fsdp_float8_all_gather,
-                force_recompute_fp8_weight_in_bwd=self.args.enable_fsdp_float8_all_gather
-                is True,
+                enable_fsdp_float8_all_gather=enable_fsdp_float8_all_gather,
+                force_recompute_fp8_weight_in_bwd=enable_fsdp_float8_all_gather is True,
             )

-            args["mixed_precision"] = "fp8"
-            args["kwargs_handlers"] = [AORecipeKwargs(config=config)]  # type: ignore
+            ret_kwargs["mixed_precision"] = "fp8"
+            ret_kwargs["kwargs_handlers"] = [AORecipeKwargs(config=config)]  # type: ignore
             os.environ["ACCELERATE_MIXED_PRECISION"] = "fp8"

-        return args
+        return ret_kwargs

-    def _build_accelerator_args(self, **kwargs) -> dict[str, Any]:
-        args = super().build_accelerator_args(**kwargs)
-        fp8_args = self.build_fp8_accelerator_args()
-        args.update(fp8_args)
-        return args
-
     def log(self, logs: dict[str, float], start_time: float | None = None) -> None:
         """
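For context on how these pieces fit together: the new `additional_accelerator_args` hook returns a plain kwargs dict, and the patched `create_accelerator_and_postprocess` (see the new module later in this diff) merges that dict into the arguments the `Accelerator` is built from. Below is a minimal, self-contained sketch of that contract only; the method body is stubbed (the real one builds a torchao `Float8LinearConfig` and an `AORecipeKwargs` handler), and the `args` contents are illustrative.

```python
from typing import Any


def additional_accelerator_args(
    fp8: bool = False, enable_fsdp_float8_all_gather: bool = False, **kwargs
) -> dict[str, Any]:
    # Stub of the trainer hook: the real method builds a torchao
    # Float8LinearConfig and an accelerate AORecipeKwargs handler here.
    ret_kwargs: dict[str, Any] = {}
    if fp8:
        ret_kwargs["mixed_precision"] = "fp8"
    return ret_kwargs


# Mirrors the logic spliced into create_accelerator_and_postprocess:
args: dict[str, Any] = {"log_with": None}  # kwargs the Trainer already assembled
additional_args = additional_accelerator_args(fp8=True, **args)
if additional_args:
    args.update(additional_args)
print(args)  # {'log_with': None, 'mixed_precision': 'fp8'}
```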
@@ -263,13 +263,3 @@ class AxolotlTrainingMixins:
     dion_rank_multiple_of: int | None = field(
         default=None,
     )
-
-    fp8: bool | None = field(
-        default=None,
-        metadata={"help": "Whether to use FP8 precision for training"},
-    )
-
-    enable_fsdp_float8_all_gather: bool | None = field(
-        default=None,
-        metadata={"help": "Whether to use FSDP with FP8 precision for all_gather"},
-    )
@@ -104,7 +104,7 @@ class CutCrossEntropyPlugin(BasePlugin):

     def patch_llama_like(
         self,
-        model_type_to_patch: str,
+        model_type: str,
     ) -> None:
         """
         Generic patch for model architectures with causal lm similar to llama
@@ -112,10 +112,7 @@ class CutCrossEntropyPlugin(BasePlugin):
         from cut_cross_entropy.transformers.patch import PATCH_FNS

         def patch_generic(
-            maybe_model,
-            patch_options,
-            remote_model_id: str | None,
-            model_type: str,
+            maybe_model, patch_options, model_type: str, remote_model_id: str | None
         ):
             import cut_cross_entropy.transformers.llama
             from cut_cross_entropy.transformers.llama import cce_forward
@@ -139,13 +136,11 @@ class CutCrossEntropyPlugin(BasePlugin):
                 f"Error: {str(e)}"
             ) from e

-        if model_type_to_patch not in PATCH_FNS:
+        if model_type not in PATCH_FNS:
             LOG.warning_once(
-                "Setting up generic cce patch for model type: %s", model_type_to_patch
+                "Setting up generic cce patch for model type: %s", model_type
             )
             LOG.warning_once(
-                f"Generic Cut Cross Entropy + {model_type_to_patch} support is experimental and may not work as expected."
+                f"Generic Cut Cross Entropy + {model_type} support is experimental and may not work as expected."
             )
-            PATCH_FNS[model_type_to_patch] = partial(
-                patch_generic, model_type=model_type_to_patch
-            )
+            PATCH_FNS[model_type] = partial(patch_generic, model_type=model_type)
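The registration at the end of this hunk follows a common registry pattern: a generic patch function is specialized with `functools.partial` so the callable stored under the model type already carries its `model_type` argument. A rough, self-contained illustration of that pattern is below; the `PATCH_FNS` dict and `patch_generic` here are stand-ins, not the `cut_cross_entropy` objects.

```python
from functools import partial
from typing import Callable

# Stand-in registry keyed by model type (the real one lives in cut_cross_entropy).
PATCH_FNS: dict[str, Callable[..., None]] = {}


def patch_generic(
    maybe_model, patch_options, model_type: str, remote_model_id: str | None = None
) -> None:
    # Placeholder body; the real function patches the model's forward.
    print(f"patching {model_type} (model={maybe_model!r})")


model_type = "my_custom_llama"  # hypothetical model type
if model_type not in PATCH_FNS:
    # Bind model_type now so callers only pass the model and options later.
    PATCH_FNS[model_type] = partial(patch_generic, model_type=model_type)

# Later, the plugin looks up and invokes the registered patch function:
PATCH_FNS[model_type]("model-object", patch_options=None)
```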
@@ -100,6 +100,7 @@ class PatchManager:
         self._apply_fsdp_patches()
         self._apply_adapter_patches()
         self._apply_model_specific_patches()
+        self._apply_fp8_patches()
         self._apply_flash_attention_peft_patches()
         self._apply_gradient_checkpointing_patches()
         self._patch_attention()
@@ -234,6 +235,17 @@ class PatchManager:

         patch_kimi_model()

+    def _apply_fp8_patches(self):
+        """Apply patches for FP8 support."""
+        if self.cfg.fp8:
+            from axolotl.monkeypatch.trainer_accelerator_args import (
+                patch_create_accelerate_code_for_fp8,
+            )
+
+            patch_create_accelerate_code_for_fp8(
+                self.cfg.fp8_enable_fsdp_float8_all_gather
+            )
+
     def _apply_flash_attention_peft_patches(self):
         """Apply patches for Flash Attention with PEFT."""
         if self.cfg.adapter:
src/axolotl/monkeypatch/trainer_accelerator_args.py (new file, 83 lines)
@@ -0,0 +1,83 @@
+"""
+allow adding additional kwargs to Accelerator init
+"""
+
+import inspect
+
+from transformers import Trainer
+
+from axolotl.monkeypatch.utils import detab_code
+from axolotl.utils.logging import get_logger
+
+LOG = get_logger(__name__)
+
+ORIGINAL_TRAINER_CODE = """
+    # create accelerator object
+    self.accelerator = Accelerator(**args)
+"""
+
+PATCHED_TRAINER_CODE = """
+    if hasattr(self, "additional_accelerator_args"):
+        additional_args = self.additional_accelerator_args(fp8=True, enable_fsdp_float8_all_gather={enable_fsdp_float8_all_gather}, **args)
+        if additional_args:
+            args.update(additional_args)
+
+    # create accelerator object
+    self.accelerator = Accelerator(**args)
+"""
+
+
+def get_create_accelerate_code() -> str:
+    training_loop = inspect.getsource(Trainer.create_accelerator_and_postprocess)
+    return training_loop
+
+
+def check_create_accelerate_code_is_patchable() -> bool:
+    create_code = get_create_accelerate_code()
+    create_code, _ = detab_code(create_code)
+    return ORIGINAL_TRAINER_CODE in create_code
+
+
+def patch_create_accelerate_code_for_fp8(enable_fsdp_float8_all_gather: bool):
+    """
+    Monkeypatch create_accelerator_and_postprocess so it checks for additional kwargs.
+    """
+    try:
+        create_code = get_create_accelerate_code()
+    except OSError:
+        return
+    Trainer._original_create_accelerator_and_postprocess = create_code
+    create_code, _ = detab_code(create_code)
+    if ORIGINAL_TRAINER_CODE not in create_code:
+        return
+
+    patched_trainer_code = PATCHED_TRAINER_CODE.format(
+        enable_fsdp_float8_all_gather=enable_fsdp_float8_all_gather
+    )
+    create_code = create_code.replace(ORIGINAL_TRAINER_CODE, patched_trainer_code)
+    create_code = create_code.replace(
+        "def create_accelerator_and_postprocess(",
+        "def fixed_create_accelerator_and_postprocess(",
+        1,
+    )
+
+    # load imports necessary
+    import transformers.trainer
+
+    items_to_import = []
+    for item in dir(transformers.trainer):
+        if item in create_code:
+            items_to_import.append(item)
+
+    exec(
+        "from transformers.trainer import ("
+        + ", ".join(x for x in items_to_import)
+        + ")",
+        globals(),
+    )
+    exec(create_code, globals())
+    LOG.info("patching create_accelerator_and_postprocess to allow for overrides")
+    Trainer.create_accelerator_and_postprocess = (
+        fixed_create_accelerator_and_postprocess
+    )
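The module above patches by rewriting source text rather than wrapping the method: it grabs the source of `Trainer.create_accelerator_and_postprocess`, splices in the `additional_accelerator_args` call, `exec`s the result under a new name, and reassigns it onto `Trainer`. Here is a self-contained toy sketch of that same technique using only the standard library; the `Greeter` class is made up for illustration and nothing in it is axolotl or transformers code.

```python
import inspect
import textwrap


class Greeter:
    def greet(self) -> str:
        return "hello"


# Grab the original source, rewrite one line, and rename the function so the
# rewritten copy can be exec'd without shadowing the original definition.
source = textwrap.dedent(inspect.getsource(Greeter.greet))
source = source.replace('return "hello"', 'return "hello, patched"')
source = source.replace("def greet(", "def patched_greet(", 1)

namespace: dict = {}
exec(source, namespace)  # compile the rewritten method
Greeter.greet = namespace["patched_greet"]  # swap it onto the class

print(Greeter().greet())  # -> hello, patched
```

The real patch additionally runs the source through `detab_code` to normalize indentation, and re-imports any `transformers.trainer` names referenced by the rewritten function before `exec`ing it, so the patched code resolves the same globals as the original.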
@@ -186,6 +186,7 @@ class TestFSDP1:

         verify_training_success(temp_dir)

+    @pytest.mark.skip(reason="slow test, deprecate fsdp1 asap")
     def test_dpo_fft(self, temp_dir):
         cfg = DictDefault(
             {