ci for fa3

Adds CI and runtime support for FlashAttention 3 (FA3): a Hopper entry in the multi-GPU e2e matrix, an FA3 wheel install step in the Docker image, an auto/forced FA3 switch in the model loader and config schema, a monkeypatch cleanup entry, and a parametrized e2e test.

.github/workflows/multi-gpu-e2e.yml (vendored, 11 changed lines)
@@ -32,21 +32,25 @@ jobs:
            pytorch: 2.6.0
            axolotl_extras: vllm
            num_gpus: 2
            nightly_build: "true"
+          - cuda: 126
+            cuda_version: 12.6.3
+            python_version: "3.11"
+            pytorch: 2.6.0
+            axolotl_extras:
+            suffix: "-hopper"
+            num_gpus: 2
          - cuda: 124
            cuda_version: 12.4.1
            python_version: "3.11"
            pytorch: 2.5.1
            axolotl_extras:
            num_gpus: 2
            nightly_build: "true"
          - cuda: 126
            cuda_version: 12.6.3
            python_version: "3.11"
            pytorch: 2.7.0
            axolotl_extras:
            num_gpus: 2
            nightly_build: "true"
    runs-on: [self-hosted, modal]
    timeout-minutes: 120
    steps:
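Note that the new "-hopper" entry is the only matrix combination matching the FA3 wheel condition added to the Dockerfile below (PyTorch 2.6.0 on CUDA 12.6). A minimal sketch, not part of the commit, that makes the relationship explicit:

# Hypothetical cross-check: only entries with cuda == 126 and pytorch == "2.6.0"
# get the FA3 wheel baked into their image by the Dockerfile change below.
matrix = [
    {"cuda": 126, "pytorch": "2.6.0", "suffix": "-hopper"},
    {"cuda": 124, "pytorch": "2.5.1"},
    {"cuda": 126, "pytorch": "2.7.0"},
]
fa3_entries = [m for m in matrix if m["cuda"] == 126 and m["pytorch"] == "2.6.0"]
print(fa3_entries)  # [{'cuda': 126, 'pytorch': '2.6.0', 'suffix': '-hopper'}]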
@@ -68,7 +72,6 @@ jobs:
          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
          echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
          echo "NIGHTLY_BUILD=${{ matrix.nightly_build }}" >> $GITHUB_ENV
          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        run: |
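These exports feed the Modal test runner. A hedged sketch of how the consuming script might read them; the variable names come from the workflow above, but the consumer shown here is an assumption:

import os

# Names match the $GITHUB_ENV exports above; how the Modal entrypoint
# actually consumes them is assumed here, not shown by the commit.
n_gpus = int(os.environ.get("N_GPUS", "1"))
nightly_build = os.environ.get("NIGHTLY_BUILD", "") == "true"
axolotl_extras = os.environ.get("AXOLOTL_EXTRAS", "")
cuda = os.environ.get("CUDA", "")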
@@ -32,6 +32,11 @@ RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
    fi

RUN pip install packaging==23.2 setuptools==75.8.0
+RUN if [ "$PYTORCH_VERSION" = "2.6.0" ] && [ "$CUDA" = "126" ] ; then \
+        curl -L -O https://d1dttdx32dkk5p.cloudfront.net/fa3/cu${CUDA}/torch-${PYTORCH_VERSION}/flash_attn_3-3.0.0b1-cp311-cp311-linux_x86_64.whl; \
+        pip3 install --no-cache-dir flash_attn_3-3.0.0b1-cp311-cp311-linux_x86_64.whl; \
+        rm flash_attn_3-3.0.0b1-cp311-cp311-linux_x86_64.whl; \
+    fi
RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
        pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
    else \
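A quick smoke test for the resulting image is to confirm the FA3 module resolves; a sketch, assuming the wheel installs the flash_attn_interface module that the model loader probes for below:

import importlib.util

# Same probe the loader change below uses to decide whether FA3 is usable.
if importlib.util.find_spec("flash_attn_interface") is not None:
    from flash_attn_interface import flash_attn_func  # noqa: F401
    print("FA3 kernels importable")
else:
    print("FA3 not installed; FA2 remains the fallback")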
@@ -629,6 +629,27 @@ class ModelLoader:
        )

+        if self.cfg.flash_attention:
+            use_fa3 = False
+            if self.cfg.use_flash_attention_3 is True:
+                use_fa3 = True
+            elif self.cfg.use_flash_attention_3 == "auto":
+                if int(self.cfg.capabilities.compute_capability.split("_")[-1]) >= 90:
+                    # FA3 is only available on Hopper GPUs and newer
+                    use_fa3 = True
+            if use_fa3 and importlib.util.find_spec("flash_attn_interface") is not None:
+                from flash_attn_interface import flash_attn_func as flash_attn_func_v3
+                from flash_attn_interface import (
+                    flash_attn_varlen_func as flash_attn_varlen_func_v3,
+                )
+
+                transformers.modeling_flash_attention_utils.flash_attn_func = (
+                    flash_attn_func_v3
+                )
+                transformers.modeling_flash_attention_utils.flash_attn_varlen_func = (
+                    flash_attn_varlen_func_v3
+                )
+                LOG.info("Switched to Flash Attention v3")

        self.patch_attention()

        if self.cfg.sample_packing and self.cfg.s2_attention:
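The "auto" path keys off the SM version encoded in compute_capability (e.g. "sm_90"); Hopper is SM90, so 90 or above qualifies. A standalone sketch of the same gate, querying torch directly rather than cfg.capabilities as the commit does:

import torch

def fa3_eligible() -> bool:
    """Mirror of the 'auto' gate: FA3 needs Hopper (SM90) or newer."""
    if not torch.cuda.is_available():
        return False
    major, minor = torch.cuda.get_device_capability()
    return major * 10 + minor >= 90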
@@ -699,6 +720,7 @@ class ModelLoader:

            patch_mllama()

+        # TODO deprecate soon
        if self.model_config.model_type == "btlm":
            from axolotl.monkeypatch.btlm_attn_hijack_flash import (
                replace_btlm_attn_with_flash_attn,

@@ -706,6 +728,7 @@ class ModelLoader:

            replace_btlm_attn_with_flash_attn(self.cfg.base_model)

+        # TODO deprecate soon
        if (
            self.model_config.model_type == "stablelm_epoch"
            and self.cfg.sample_packing
@@ -233,6 +233,7 @@ class AxolotlInputConfig(
    flash_attn_fuse_qkv: bool | None = None
    flash_attn_fuse_mlp: bool | None = None
    flash_optimum: bool | None = None
+    use_flash_attention_3: Literal["auto"] | bool | None = "auto"

    eager_attention: bool | None = None
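The field defaults to "auto", so FA3 is opportunistic rather than required; it can be forced with True or suppressed with False. A usage sketch in the DictDefault style the e2e tests below use:

from axolotl.utils.dict import DictDefault

# "auto" (the default) enables FA3 only when the GPU is SM90+ and the
# flash_attn_interface wheel is importable; True forces it, False disables it.
cfg = DictDefault(
    {
        "flash_attention": True,
        "use_flash_attention_3": "auto",
    }
)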
@@ -458,6 +458,7 @@ def cleanup_monkeypatches():
        ("transformers.trainer",),
        ("transformers", ["Trainer"]),
        ("transformers.loss.loss_utils",),
+        ("transformers.modeling_flash_attention_utils",),
    ]
    for module_name_tuple in modules_to_reset:
        module_name = module_name_tuple[0]
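Registering transformers.modeling_flash_attention_utils here matters because the loader rebinds its module-level flash_attn_func/flash_attn_varlen_func; resetting the module between runs restores the stock FA2 bindings. A generic illustration of the reload mechanism such a reset can rely on, using json as a stand-in for the patched module:

import importlib
import json

json.dumps = lambda obj: "patched"  # simulate a module-level monkeypatch
assert json.dumps({}) == "patched"

importlib.reload(json)  # re-executes the module, restoring original bindings
assert json.dumps({}) == "{}"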
@@ -4,8 +4,8 @@ E2E tests for packed training

import logging
import os
-import unittest

+import pytest
from transformers.utils import is_torch_bf16_gpu_available

from axolotl.cli.args import TrainerCliArgs

@@ -14,19 +14,22 @@ from axolotl.train import train
from axolotl.utils.config import normalize_config, validate_config
from axolotl.utils.dict import DictDefault

-from .utils import check_tensorboard, with_temp_dir
+from .utils import check_tensorboard

LOG = logging.getLogger("axolotl.tests.e2e")
os.environ["WANDB_DISABLED"] = "true"


-class TestPackedLlama(unittest.TestCase):
+class TestPackedLlama:
    """
    Test case for Packed training of llama models
    """

-    @with_temp_dir
-    def test_loss_packed(self, temp_dir):
+    @pytest.mark.parametrize(
+        "use_flash_attention_3",
+        [False, "auto"],
+    )
+    def test_loss_packed(self, temp_dir, use_flash_attention_3):
        # pylint: disable=duplicate-code
        cfg = DictDefault(
            {

@@ -54,6 +57,7 @@ class TestPackedLlama(unittest.TestCase):
                "lr_scheduler": "cosine",
                "max_steps": 5,
                "use_tensorboard": True,
+                "use_flash_attention_3": use_flash_attention_3,
            }
        )
        if is_torch_bf16_gpu_available():
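With the switch from unittest to pytest, test_loss_packed now runs twice, once with use_flash_attention_3=False and once with "auto"; the temp_dir argument is presumably supplied by a pytest fixture that replaces the removed with_temp_dir decorator. Assuming the file lives under tests/e2e, both variants can be run with something like: pytest tests/e2e -k test_loss_packed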