Compare commits: colab-misc...model-load
21 commits
| SHA1 |
|---|
| 103edc7211 |
| c7b6790614 |
| 47e0e71bc8 |
| 0f3587174d |
| 25e6c5f9bd |
| 32f51bca35 |
| 9daa04da90 |
| 0d71b0aa5f |
| 63aaccf85b |
| ff0fe767c8 |
| 8e4158cc0b |
| cd84325253 |
| 0b140fef83 |
| e4cfebe995 |
| a6cac5dd32 |
| b71c0e3447 |
| ddaebf8309 |
| 679743087a |
| f720b6e72d |
| a980618fd0 |
| 54960d4de0 |
.github/workflows/tests-nightly.yml (vendored, 87 changes)
@@ -18,9 +18,96 @@ jobs:
```diff
         env:
           SKIP: no-commit-to-branch
 
+  preload-cache:
+    name: Preload HF cache
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python_version: ["3.11"]
+        pytorch_version: ["2.6.0"]
+    timeout-minutes: 20
+
+    env:
+      AXOLOTL_IS_CI_CACHE_PRELOAD: "1"
+
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v4
+
+      - name: Restore HF cache
+        id: hf-cache-restore
+        uses: actions/cache/restore@v4
+        with:
+          path: |
+            /home/runner/.cache/huggingface/hub/datasets--*
+            /home/runner/.cache/huggingface/hub/models--*
+          key: ${{ runner.os }}-hf-hub-cache-v2
+
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python_version }}
+          cache: 'pip' # caching pip dependencies
+
+      - name: upgrade pip
+        run: |
+          pip3 install --upgrade pip
+          pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
+
+      - name: Install PyTorch
+        run: |
+          pip3 install torch==${{ matrix.pytorch_version }}
+
+      - name: Install dependencies
+        run: |
+          pip3 show torch
+          pip3 install --no-build-isolation -U -e .
+          python scripts/unsloth_install.py | sh
+          python scripts/cutcrossentropy_install.py | sh
+          pip3 install -r requirements-dev.txt -r requirements-tests.txt
+
+      - name: Make sure PyTorch version wasn't clobbered
+        run: |
+          python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"
+
+      - name: Ensure axolotl CLI was installed
+        run: |
+          axolotl --help
+
+      - name: Pre-Download dataset fixture
+        run: |
+          huggingface-cli download --repo-type=dataset axolotl-ai-internal/axolotl-oss-dataset-fixtures
+
+      - name: Run tests
+        run: |
+          pytest -v tests/conftest.py
+
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v5
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ./coverage.xml
+          flags: unittests,pytorch-${{ matrix.pytorch_version }}
+          fail_ci_if_error: false
+
+      - name: cleanup pip cache
+        run: |
+          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
+
+      - name: Save HF cache
+        id: hf-cache
+        uses: actions/cache/save@v4
+        with:
+          path: |
+            /home/runner/.cache/huggingface/hub/datasets--*
+            /home/runner/.cache/huggingface/hub/models--*
+          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
+
   pytest:
     name: PyTest
     runs-on: ubuntu-latest
+    needs: [preload-cache]
     strategy:
       fail-fast: false
       max-parallel: 2
```
.github/workflows/tests.yml (vendored, 158 changes)
@@ -44,12 +44,98 @@ jobs:
```diff
         env:
           SKIP: no-commit-to-branch
 
-  pytest:
-    name: PyTest
+  preload-cache:
+    name: Preload HF cache
     runs-on: ubuntu-latest
     strategy:
       fail-fast: false
       max-parallel: 2
       matrix:
         python_version: ["3.11"]
-        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
+        pytorch_version: ["2.6.0"]
+    timeout-minutes: 20
+
+    env:
+      AXOLOTL_IS_CI_CACHE_PRELOAD: "1"
+
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v4
+
+      - name: Restore HF cache
+        id: hf-cache-restore
+        uses: actions/cache/restore@v4
+        with:
+          path: |
+            /home/runner/.cache/huggingface/hub/datasets--*
+            /home/runner/.cache/huggingface/hub/models--*
+          key: ${{ runner.os }}-hf-hub-cache-v2
+
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python_version }}
+          cache: 'pip' # caching pip dependencies
+
+      - name: upgrade pip
+        run: |
+          pip3 install --upgrade pip
+          pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
+
+      - name: Install PyTorch
+        run: |
+          pip3 install torch==${{ matrix.pytorch_version }}
+
+      - name: Install dependencies
+        run: |
+          pip3 show torch
+          pip3 install --no-build-isolation -U -e .
+          python scripts/unsloth_install.py | sh
+          python scripts/cutcrossentropy_install.py | sh
+          pip3 install -r requirements-dev.txt -r requirements-tests.txt
+
+      - name: Make sure PyTorch version wasn't clobbered
+        run: |
+          python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"
+
+      - name: Ensure axolotl CLI was installed
+        run: |
+          axolotl --help
+
+      - name: Pre-Download dataset fixture
+        run: |
+          huggingface-cli download --repo-type=dataset axolotl-ai-internal/axolotl-oss-dataset-fixtures
+
+      - name: Run tests
+        run: |
+          pytest -v tests/conftest.py
+
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v5
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ./coverage.xml
+          flags: unittests,pytorch-${{ matrix.pytorch_version }}
+          fail_ci_if_error: false
+
+      - name: cleanup pip cache
+        run: |
+          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
+
+      - name: Save HF cache
+        id: hf-cache
+        uses: actions/cache/save@v4
+        with:
+          path: |
+            /home/runner/.cache/huggingface/hub/datasets--*
+            /home/runner/.cache/huggingface/hub/models--*
+          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
+
+  pytest:
+    name: PyTest
+    runs-on: ubuntu-latest
+    needs: [preload-cache]
+    strategy:
+      fail-fast: false
+      matrix:
+        python_version: ["3.11"]
+        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
```
@@ -121,21 +207,12 @@ jobs:
```diff
         run: |
           find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
 
-      - name: Save HF cache
-        id: hf-cache
-        uses: actions/cache/save@v4
-        with:
-          path: |
-            /home/runner/.cache/huggingface/hub/datasets--*
-            /home/runner/.cache/huggingface/hub/models--*
-          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
-
   pytest-sdist:
     name: PyTest from Source Dist
     runs-on: ubuntu-latest
+    needs: [preload-cache]
     strategy:
       fail-fast: false
       max-parallel: 1
       matrix:
         python_version: ["3.11"]
         pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
```
@@ -199,15 +276,6 @@ jobs:
```diff
         run: |
           find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
 
-      - name: Save HF cache
-        id: hf-cache
-        uses: actions/cache/save@v4
-        with:
-          path: |
-            /home/runner/.cache/huggingface/hub/datasets--*
-            /home/runner/.cache/huggingface/hub/models--*
-          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
-
   docker-e2e-tests-1st:
     if: ${{ ! contains(github.event.commits[0].message, '[skip e2e]') && github.repository_owner == 'axolotl-ai-cloud' }}
     # this job needs to be run on self-hosted GPU runners...
```
@@ -267,12 +335,6 @@ jobs:
```diff
             pytorch: 2.6.0
             num_gpus: 1
             axolotl_extras: llmcompressor
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            num_gpus: 1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
```
@@ -309,3 +371,43 @@ jobs:
```diff
       - name: Run tests job on Modal
         run: |
           modal run cicd.e2e_tests
+
+  docker-e2e-cleanup:
+    runs-on: [self-hosted, modal]
+    timeout-minutes: 90
+    needs: [docker-e2e-tests]
+
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - cuda: 124
+            cuda_version: 12.4.1
+            python_version: "3.11"
+            pytorch: 2.6.0
+            num_gpus: 1
+            axolotl_extras: vllm
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Install Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+      - name: Install Modal
+        run: |
+          python -m pip install --upgrade pip
+          pip install modal==0.71.8 jinja2
+      - name: Update env vars
+        run: |
+          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
+          echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
+          echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
+          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
+          echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
+          echo "MODAL_IMAGE_BUILDER_VERSION=2024.10" >> $GITHUB_ENV
+          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
+          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
+      - name: Run tests job on Modal
+        run: |
+          modal run cicd.cleanup
```
cicd/__init__.py (new empty file)
@@ -18,7 +18,7 @@ pytest -v --durations=10 \
```diff
     --cov-append
 
 # Run patched tests excluding lora kernels with coverage append
-pytest -v --durations=10 \
+pytest --full-trace -vvv --durations=10 \
     --ignore=tests/e2e/patched/lora_kernels \
     /workspace/axolotl/tests/e2e/patched \
     --cov=axolotl \
```
cicd/cleanup.py (new file, 19 lines)
@@ -0,0 +1,19 @@
```python
"""Modal app to run axolotl GPU cleanup"""

from .single_gpu import VOLUME_CONFIG, app, cicd_image, run_cmd


@app.function(
    image=cicd_image,
    timeout=60 * 60,
    cpu=8.0,
    memory=131072,
    volumes=VOLUME_CONFIG,
)
def cleanup():
    run_cmd("./cicd/cleanup.sh", "/workspace/axolotl")


@app.local_entrypoint()
def main():
    cleanup.remote()
```
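This mirrors the pattern used for the e2e tests: the `cleanup` function runs `cicd/cleanup.sh` inside the CI image on Modal, and the `docker-e2e-cleanup` workflow job above triggers it with `modal run cicd.cleanup`.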
cicd/cleanup.sh (new executable file, 6 lines)
@@ -0,0 +1,6 @@
```bash
#!/bin/bash
set -e

# cleanup old cache files for datasets processing and intermediate mappings
find /workspace/data/huggingface-cache/hub/datasets -name "cache-*" -type f -mtime +1 -exec rm {} \;
find /workspace/data/huggingface-cache/hub/datasets -name "*.lock" -type f -mtime +1 -exec rm {} \;
```
@@ -1,69 +1,6 @@
```diff
 """Modal app to run axolotl GPU tests"""
 
 # pylint: disable=duplicate-code
 
-import os
-import pathlib
-import tempfile
-
-import jinja2
-import modal
-from jinja2 import select_autoescape
-from modal import App, Image
-
-cicd_path = pathlib.Path(__file__).parent.resolve()
-
-template_loader = jinja2.FileSystemLoader(searchpath=cicd_path)
-template_env = jinja2.Environment(
-    loader=template_loader, autoescape=select_autoescape()
-)
-df_template = template_env.get_template("Dockerfile.jinja")
-
-df_args = {
-    "AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", ""),
-    "AXOLOTL_ARGS": os.environ.get("AXOLOTL_ARGS", ""),
-    "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.4.1"),
-    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.11-cu121-2.4.1"),
-    "CUDA": os.environ.get("CUDA", "121"),
-    "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
-    "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
-    "NIGHTLY_BUILD": os.environ.get("NIGHTLY_BUILD", ""),
-    "CODECOV_TOKEN": os.environ.get("CODECOV_TOKEN", ""),
-    "HF_HOME": "/workspace/data/huggingface-cache/hub",
-}
-
-dockerfile_contents = df_template.render(**df_args)
-
-temp_dir = tempfile.mkdtemp()
-with open(pathlib.Path(temp_dir) / "Dockerfile", "w", encoding="utf-8") as f:
-    f.write(dockerfile_contents)
-
-cicd_image = Image.from_dockerfile(
-    pathlib.Path(temp_dir) / "Dockerfile",
-    context_mount=None,
-    force_build=True,
-    gpu="A10G",
-).env(df_args)
-
-app = App("Axolotl CI/CD", secrets=[])
-
-hf_cache_volume = modal.Volume.from_name(
-    "axolotl-ci-hf-hub-cache", create_if_missing=True
-)
-VOLUME_CONFIG = {
-    "/workspace/data/huggingface-cache/hub": hf_cache_volume,
-}
-
-N_GPUS = int(os.environ.get("N_GPUS", 1))
-GPU_CONFIG = modal.gpu.L40S(count=N_GPUS)
-
-
-def run_cmd(cmd: str, run_folder: str):
-    import subprocess  # nosec
-
-    # Propagate errors from subprocess.
-    if exit_code := subprocess.call(cmd.split(), cwd=run_folder):  # nosec
-        exit(exit_code)  # pylint: disable=consider-using-sys-exit
+from .single_gpu import GPU_CONFIG, VOLUME_CONFIG, app, cicd_image, run_cmd
 
 
 @app.function(
```
cicd/single_gpu.py (new file, 66 lines)
@@ -0,0 +1,66 @@
```python
"""Modal app to run axolotl GPU tests"""

# pylint: disable=duplicate-code

import os
import pathlib
import tempfile

import jinja2
import modal
from jinja2 import select_autoescape
from modal import App, Image

cicd_path = pathlib.Path(__file__).parent.resolve()

template_loader = jinja2.FileSystemLoader(searchpath=cicd_path)
template_env = jinja2.Environment(
    loader=template_loader, autoescape=select_autoescape()
)
df_template = template_env.get_template("Dockerfile.jinja")

df_args = {
    "AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", ""),
    "AXOLOTL_ARGS": os.environ.get("AXOLOTL_ARGS", ""),
    "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.4.1"),
    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.11-cu121-2.4.1"),
    "CUDA": os.environ.get("CUDA", "121"),
    "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
    "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
    "NIGHTLY_BUILD": os.environ.get("NIGHTLY_BUILD", ""),
    "CODECOV_TOKEN": os.environ.get("CODECOV_TOKEN", ""),
    "HF_HOME": "/workspace/data/huggingface-cache/hub",
}

dockerfile_contents = df_template.render(**df_args)

temp_dir = tempfile.mkdtemp()
with open(pathlib.Path(temp_dir) / "Dockerfile", "w", encoding="utf-8") as f:
    f.write(dockerfile_contents)

cicd_image = Image.from_dockerfile(
    pathlib.Path(temp_dir) / "Dockerfile",
    context_mount=None,
    force_build=True,
    gpu="A10G",
).env(df_args)

app = App("Axolotl CI/CD", secrets=[])

hf_cache_volume = modal.Volume.from_name(
    "axolotl-ci-hf-hub-cache", create_if_missing=True
)
VOLUME_CONFIG = {
    "/workspace/data/huggingface-cache/hub": hf_cache_volume,
}

N_GPUS = int(os.environ.get("N_GPUS", 1))
GPU_CONFIG = modal.gpu.L40S(count=N_GPUS)


def run_cmd(cmd: str, run_folder: str):
    import subprocess  # nosec

    # Propagate errors from subprocess.
    if exit_code := subprocess.call(cmd.split(), cwd=run_folder):  # nosec
        exit(exit_code)  # pylint: disable=consider-using-sys-exit
```
@@ -32,6 +32,8 @@ tokenizer_legacy:
```diff
 resize_token_embeddings_to_32x:
+# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.
+shrink_embeddings:
 # Optional[bool] Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs
 embeddings_skip_upcast:
 # Whether to load the model with randomly initialized weights. Useful for
 # pre-training a model from scratch or debugging purposes.
 random_init_weights:
```
@@ -73,11 +75,12 @@ load_in_8bit: true
```diff
 load_in_4bit:
 
 # Use CUDA bf16
-bf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere
+bf16: true # bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection. require >=ampere
 # Use CUDA fp16
 fp16: true
 # Use CUDA tf32
 tf32: true # require >=ampere
+# Note: if bf16 is set to 'auto', and fp16 is set to true, we will prefer the explicit fp16 setting
 
 # No AMP (automatic mixed precision)
 bfloat16: true # require >=ampere
```
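To make the precedence note concrete, a combination like the following (an illustrative example, not part of this diff) would end up training in fp16:

```yaml
bf16: auto   # probe hardware support at runtime
fp16: true   # per the note above, the explicit fp16 setting wins
```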
@@ -184,8 +187,8 @@ datasets:
```diff
     # adding a system turn with empty content.
     drop_system_message:
 
-    # Optional[bool]. Whether to split the assistant turn based on a reasoning trace inside delimited tags
-    # defaults to False
+    # Optional[bool]. (for Qwen3 template only) Whether to split the assistant content based on a reasoning trace inside delimited tags
+    # See example at `docs/dataset-formats/conversation.qmd`
    split_thinking:
 
    # IMPORTANT: The following fields determine which parts of the conversation to train on.
```
@@ -547,7 +550,7 @@ gradient_checkpointing: false
```diff
 early_stopping_patience: 3
 
 # Specify a scheduler and kwargs to use with the optimizer
-lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine
+lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | 'linear' | 'cosine_with_restarts' | 'polynomial' | 'constant' | 'constant_with_warmup' | 'inverse_sqrt' | 'reduce_lr_on_plateau' | 'cosine_with_min_lr' | 'warmup_stable_decay' | empty for cosine
 lr_scheduler_kwargs:
 cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr
 cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)
```
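As an illustration, one of the newly documented schedulers could be configured like this; the `min_lr_rate` kwarg is what `transformers` expects for `cosine_with_min_lr`, but treat the exact key as an assumption to verify against your `transformers` version:

```yaml
lr_scheduler: cosine_with_min_lr
lr_scheduler_kwargs:
  min_lr_rate: 0.1  # assumed kwarg: decay to 10% of the peak learning rate
```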
@@ -609,6 +612,7 @@ lr_div_factor: # Learning rate div factor
```diff
 # - optimi_adamw
 # - ao_adamw_8bit
 # - ao_adamw_fp8
+# - came_pytorch
 optimizer:
 # Dictionary of arguments to pass to the optimizer
 optim_args:
```
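The new CAME option pairs with the `came_pytorch==0.1.3` dependency added to the `optimizers` extra in `setup.py` below; a minimal sketch of selecting it, assuming axolotl was installed with that extra:

```yaml
# assumes `pip install "axolotl[optimizers]"` so came_pytorch is available
optimizer: came_pytorch
```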
@@ -196,6 +196,34 @@ datasets:
````diff
 It is not necessary to set both `message_field_training` and `message_field_training_detail` at once.
 :::
 
+8. (For Qwen3 template only) Enable reasoning split, where the reasoning is split from the content and passed as a separate field into the template.
+
+```yaml
+datasets:
+  - path: ...
+    type: chat_template
+    chat_template: qwen3
+    split_thinking: true
+```
+
+For example, the content can look like:
+
+```json
+{
+    "content": "<think>Some thinking outputs</think>Output after thinking."
+}
+```
+
+After the split, it will look like:
+
+```json
+{
+    "reasoning_content": "Some thinking outputs",
+    "content": "Output after thinking."
+}
+```
+
 ## sharegpt
 
 ::: {.callout-important}
````
@@ -34,3 +34,5 @@ We provide a script to delinearize Llama 4 linearized models into regular Huggin
````diff
 ```bash
 axolotl delinearize-llama4 --model path/to/model_dir --output path/to/output_dir
 ```
+
+Note: This only works with the non-quantized linearized model. If you have an adapter, merge it with the *non-quantized linearized* model before delinearizing.
````
examples/orpheus/README.md (new file, 341 lines)
@@ -0,0 +1,341 @@
# Finetuning LLMs to output audio

In this example, we finetune canopylabs/orpheus-3b-0.1-pretrained (a Llama 3.2 3B model) to output audio.

The `finetune.yml` with the current settings will run on any Nvidia GPU with 45GB of VRAM or more. If you reduce the batch size, it can easily run on GPUs with less than 24GB.

## Dataset pre-processing for pre-training
If you are adding another voice in English, please jump ahead to finetuning pre-processing.

For this to work, we need to preprocess our dataset. Since we are expecting to output audio, we will need to add tokens to the tokenizer.

The following code downloads the SNAC model, adds the correct tokens, and uploads the final dataset.

```python
import os

import torch
import torchaudio.transforms as T
from datasets import load_dataset
from huggingface_hub import snapshot_download
from snac import SNAC
from transformers import AutoTokenizer

my_original_dataset_name = "<huggingface-id-of-dataset-that-we-want-to-preprocess>"
name_to_push_dataset_to = "<huggingface-id-of-where-to-save-dataset>"

dsn = my_original_dataset_name

snapshot_download(
    repo_id=dsn,
    repo_type="dataset",
    revision="main",
    max_workers=64,
)

ds = load_dataset(dsn, split="train")
ds_sample_rate = ds[0]["audio"]["sampling_rate"]

# keep the SNAC model and the waveforms on the same device
device = "cuda" if torch.cuda.is_available() else "cpu"
model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")
model = model.to(device)

def tokenise_audio(waveform):
    waveform = torch.from_numpy(waveform).unsqueeze(0)
    waveform = waveform.to(dtype=torch.float32)
    resample_transform = T.Resample(orig_freq=ds_sample_rate, new_freq=24000)
    waveform = resample_transform(waveform)

    waveform = waveform.unsqueeze(0).to(device)

    # generate the codes from snac
    with torch.inference_mode():
        codes = model.encode(waveform)

    # interleave the three SNAC codebooks into 7 tokens per frame,
    # each level offset into its own 4096-token block above the text vocab
    all_codes = []
    for i in range(codes[0].shape[1]):
        all_codes.append(codes[0][0][i].item()+128266)
        all_codes.append(codes[1][0][2*i].item()+128266+4096)
        all_codes.append(codes[2][0][4*i].item()+128266+(2*4096))
        all_codes.append(codes[2][0][(4*i)+1].item()+128266+(3*4096))
        all_codes.append(codes[1][0][(2*i)+1].item()+128266+(4*4096))
        all_codes.append(codes[2][0][(4*i)+2].item()+128266+(5*4096))
        all_codes.append(codes[2][0][(4*i)+3].item()+128266+(6*4096))

    return all_codes

def add_codes(example):
    # Always initialize codes_list to None
    codes_list = None

    try:
        answer_audio = example.get("audio")
        # If there's a valid audio array, tokenise it
        if answer_audio and "array" in answer_audio:
            audio_array = answer_audio["array"]
            codes_list = tokenise_audio(audio_array)
    except Exception as e:
        print(f"Skipping row due to error: {e}")
        # Keep codes_list as None if we fail
    example["codes_list"] = codes_list

    return example

ds = ds.map(add_codes, remove_columns=["audio"])

#@title Load Tokenizer
tokeniser_length = 128256
start_of_text = 128000
end_of_text = 128009

start_of_speech = tokeniser_length + 1
end_of_speech = tokeniser_length + 2

start_of_human = tokeniser_length + 3
end_of_human = tokeniser_length + 4

start_of_ai = tokeniser_length + 5
end_of_ai = tokeniser_length + 6
pad_token = tokeniser_length + 7

audio_tokens_start = tokeniser_length + 10

tokenizer_name = "canopylabs/orpheus-3b-0.1-pretrained"

tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
num_proc = os.cpu_count() - 2

ds = ds.filter(lambda x: x["codes_list"] is not None)
ds = ds.filter(lambda x: len(x["codes_list"]) > 0)

#@title Create Input Ids
def remove_duplicate_frames(example):
    vals = example["codes_list"]
    if len(vals) % 7 != 0:
        raise ValueError("Input list length must be divisible by 7")

    result = vals[:7]

    removed_frames = 0

    for i in range(7, len(vals), 7):
        current_first = vals[i]
        previous_first = result[-7]

        if current_first != previous_first:
            result.extend(vals[i:i+7])
        else:
            removed_frames += 1

    example["codes_list"] = result

    return example

ds = ds.map(remove_duplicate_frames, num_proc=num_proc)

def create_input_ids(example):
    text_ids = tokenizer.encode(example['text'], add_special_tokens=True)
    text_ids.append(end_of_text)
    example["text_tokens"] = text_ids
    input_ids = (
        [start_of_human]
        + example["text_tokens"]
        + [end_of_human]
        + [start_of_ai]
        + [start_of_speech]
        + example["codes_list"]
        + [end_of_speech]
        + [end_of_ai]
    )
    example["input_ids"] = input_ids
    example["labels"] = input_ids
    example["attention_mask"] = [1] * len(input_ids)

    return example

ds = ds.map(create_input_ids, num_proc=num_proc, remove_columns=["text", "codes_list"])

#@title Remove unnecessary columns
columns_to_keep = ["input_ids", "labels", "attention_mask"]
columns_to_remove = [col for col in ds.column_names if col not in columns_to_keep]

ds = ds.remove_columns(columns_to_remove)

ds.push_to_hub(name_to_push_dataset_to)
```
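A quick sanity check (our addition, not part of the original script) can confirm the token layout of a processed row before pushing, reusing the constants defined above:

```python
# hypothetical check, run before ds.push_to_hub(...)
row = ds[0]
ids = row["input_ids"]
assert ids[0] == start_of_human and ids[-1] == end_of_ai
# every audio frame is 7 interleaved SNAC codes between the speech markers
n_audio = ids.index(end_of_speech) - ids.index(start_of_speech) - 1
assert n_audio % 7 == 0, "audio span is not a whole number of frames"
print(f"{n_audio // 7} audio frames in example 0")
```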
## Finetune pre-processing
Use this code to add a new voice.

```python
import os

import torch
import torchaudio.transforms as T
from datasets import load_dataset
from huggingface_hub import snapshot_download
from snac import SNAC
from transformers import AutoTokenizer

my_original_dataset_name = "<huggingface-id-of-dataset-that-we-want-to-preprocess>"
name_to_push_dataset_to = "<huggingface-id-of-where-to-save-dataset>"

dsn = my_original_dataset_name

snapshot_download(
    repo_id=dsn,
    repo_type="dataset",
    revision="main",
    max_workers=64,
)

ds = load_dataset(dsn, split="train")
ds_sample_rate = ds[0]["audio"]["sampling_rate"]

# keep the SNAC model and the waveforms on the same device
device = "cuda" if torch.cuda.is_available() else "cpu"
model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")
model = model.to(device)

def tokenise_audio(waveform):
    waveform = torch.from_numpy(waveform).unsqueeze(0)
    waveform = waveform.to(dtype=torch.float32)
    resample_transform = T.Resample(orig_freq=ds_sample_rate, new_freq=24000)
    waveform = resample_transform(waveform)

    waveform = waveform.unsqueeze(0).to(device)

    # generate the codes from snac
    with torch.inference_mode():
        codes = model.encode(waveform)

    # interleave the three SNAC codebooks into 7 tokens per frame
    all_codes = []
    for i in range(codes[0].shape[1]):
        all_codes.append(codes[0][0][i].item()+128266)
        all_codes.append(codes[1][0][2*i].item()+128266+4096)
        all_codes.append(codes[2][0][4*i].item()+128266+(2*4096))
        all_codes.append(codes[2][0][(4*i)+1].item()+128266+(3*4096))
        all_codes.append(codes[1][0][(2*i)+1].item()+128266+(4*4096))
        all_codes.append(codes[2][0][(4*i)+2].item()+128266+(5*4096))
        all_codes.append(codes[2][0][(4*i)+3].item()+128266+(6*4096))

    return all_codes

def add_codes(example):
    # Always initialize codes_list to None
    codes_list = None

    try:
        answer_audio = example.get("audio")
        # If there's a valid audio array, tokenise it
        if answer_audio and "array" in answer_audio:
            audio_array = answer_audio["array"]
            codes_list = tokenise_audio(audio_array)
    except Exception as e:
        print(f"Skipping row due to error: {e}")
        # Keep codes_list as None if we fail
    example["codes_list"] = codes_list

    return example

ds = ds.map(add_codes, remove_columns=["audio"])

#@title Load Tokenizer
tokeniser_length = 128256
start_of_text = 128000
end_of_text = 128009

start_of_speech = tokeniser_length + 1
end_of_speech = tokeniser_length + 2

start_of_human = tokeniser_length + 3
end_of_human = tokeniser_length + 4

start_of_ai = tokeniser_length + 5
end_of_ai = tokeniser_length + 6
pad_token = tokeniser_length + 7

audio_tokens_start = tokeniser_length + 10

tokenizer_name = "canopylabs/orpheus-3b-0.1-pretrained"

tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
num_proc = os.cpu_count() - 2

ds = ds.filter(lambda x: x["codes_list"] is not None)
ds = ds.filter(lambda x: len(x["codes_list"]) > 0)

#@title Create Input Ids
def remove_duplicate_frames(example):
    vals = example["codes_list"]
    if len(vals) % 7 != 0:
        raise ValueError("Input list length must be divisible by 7")

    result = vals[:7]

    removed_frames = 0

    for i in range(7, len(vals), 7):
        current_first = vals[i]
        previous_first = result[-7]

        if current_first != previous_first:
            result.extend(vals[i:i+7])
        else:
            removed_frames += 1

    example["codes_list"] = result

    return example

ds = ds.map(remove_duplicate_frames, num_proc=num_proc)

tok_info = '''*** HERE you can modify the text prompt
i.e. if you wanted a multispeaker model like canopylabs/orpheus-3b-0.1-ft, you can pass:
f"{example["source"]}: {example["text"]}", as is passed.
'''
print(tok_info)

def create_input_ids(example):
    text_ids = tokenizer.encode(f"{example['speaker_id']}: {example['text']}", add_special_tokens=True)
    text_ids.append(end_of_text)
    example["text_tokens"] = text_ids
    input_ids = (
        [start_of_human]
        + example["text_tokens"]
        + [end_of_human]
        + [start_of_ai]
        + [start_of_speech]
        + example["codes_list"]
        + [end_of_speech]
        + [end_of_ai]
    )
    example["input_ids"] = input_ids
    example["labels"] = input_ids
    example["attention_mask"] = [1] * len(input_ids)

    return example

ds = ds.map(create_input_ids, num_proc=num_proc, remove_columns=["text", "codes_list"])

#@title Remove unnecessary columns
columns_to_keep = ["input_ids", "labels", "attention_mask"]
columns_to_remove = [col for col in ds.column_names if col not in columns_to_keep]

ds = ds.remove_columns(columns_to_remove)

ds.push_to_hub(name_to_push_dataset_to)
```
## Training
After preprocessing is done, fill out the blanks in `finetune.yml` and simply run `axolotl train finetune.yml`.

## Inference
For inference, please refer to the original [Orpheus GitHub repo](https://github.com/canopyai/Orpheus-TTS/tree/main).
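As a rough guide, decoding generated tokens back into a waveform inverts the 7-token interleaving from the preprocessing scripts above. The sketch below is ours (the function name is illustrative, not from the Orpheus repo), so verify the layout against the upstream inference code:

```python
import torch
from snac import SNAC

snac_model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")

def detokenise_audio(token_ids):
    """Invert the 7-token-per-frame interleaving used during preprocessing."""
    offset = 128266
    usable = len(token_ids) - len(token_ids) % 7
    c0, c1, c2 = [], [], []
    for i in range(0, usable, 7):
        f = token_ids[i : i + 7]
        c0.append(f[0] - offset)
        c1 += [f[1] - offset - 4096, f[4] - offset - 4 * 4096]
        c2 += [
            f[2] - offset - 2 * 4096,
            f[3] - offset - 3 * 4096,
            f[5] - offset - 5 * 4096,
            f[6] - offset - 6 * 4096,
        ]
    codes = [torch.tensor(c).unsqueeze(0) for c in (c0, c1, c2)]
    with torch.inference_mode():
        # returns a (1, 1, num_samples) waveform at 24 kHz
        return snac_model.decode(codes)
```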
examples/orpheus/finetune.yml (new file, 52 lines)
@@ -0,0 +1,52 @@
```yaml
base_model: canopylabs/orpheus-3b-0.1-pretrained

hub_model_id: <your-hub-model-id>

plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_glu_activation: true
liger_fused_linear_cross_entropy: true

datasets:
  - path: <your-hf-dataset-id>
    type: # leave empty to load pre-tokenized
dataset_prepared_path: last_run_prepared
val_set_size: 0.01
output_dir: ./outputs/out

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 8
micro_batch_size: 4
num_epochs: 3
optimizer: adamw_torch_fused
lr_scheduler: cosine
learning_rate: 2e-5

bf16: auto
tf32: false

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
flash_attention: true

warmup_steps: 20
evals_per_epoch: 5
saves_per_epoch: 5
weight_decay: 0.05

special_tokens:
  pad_token: <custom_token_7>
```
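The `pad_token: <custom_token_7>` here lines up with `pad_token = tokeniser_length + 7` in the preprocessing scripts, assuming the Orpheus tokenizer maps `<custom_token_0>` to id 128256. A quick check (our snippet, with that assumption baked in):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("canopylabs/orpheus-3b-0.1-pretrained")
# expected: 128256 + 7 = 128263, if the mapping assumption above holds
print(tok.convert_tokens_to_ids("<custom_token_7>"))
```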
@@ -6,16 +6,17 @@ triton>=3.0.0
```diff
 mamba-ssm==1.2.0.post1
 xformers>=0.0.23.post1
 autoawq==0.2.7.post3
-liger-kernel==0.5.8
+liger-kernel==0.5.9
 # END section
 
 packaging==23.2
 
 huggingface_hub==0.31.0
 peft==0.15.2
 transformers==4.51.3
 tokenizers>=0.21.1
 accelerate==1.6.0
-datasets==3.5.0
+datasets==3.5.1
 deepspeed>=0.15.4
 trl==0.17.0
+hf_xet==1.1.0
```
setup.py (5 changes)
@@ -67,13 +67,13 @@ def parse_requirements(extras_require_map):
```diff
     if (major, minor) >= (2, 7):
         _install_requires.pop(_install_requires.index(xformers_version))
         # _install_requires.append("xformers==0.0.29.post3")  # xformers seems to be hard pinned to 2.6.0
-        extras_require_map["vllm"] = ["vllm==0.8.5"]
+        extras_require_map["vllm"] = ["vllm==0.8.5.post1"]
     elif (major, minor) >= (2, 6):
         _install_requires.pop(_install_requires.index(xformers_version))
         _install_requires.append(
             "xformers==0.0.29.post2"
         )  # vllm needs post2 w torch 2.6
-        extras_require_map["vllm"] = ["vllm==0.8.5"]
+        extras_require_map["vllm"] = ["vllm==0.8.5.post1"]
     elif (major, minor) >= (2, 5):
         _install_requires.pop(_install_requires.index(xformers_version))
         if patch == 0:
```

@@ -142,6 +142,7 @@ extras_require = {
```diff
         "apollo-torch",
         "lomo-optim==0.1.1",
         "torch-optimi==0.2.1",
+        "came_pytorch==0.1.3",
     ],
     "ray": [
         "ray[train]",
```
@@ -16,8 +16,15 @@ AXOLOTL_LOGO = """
```diff
      @@@@     @@@@@@@@@@@@@@@@
 """
 
+HAS_PRINTED_LOGO = False
+
 
 def print_axolotl_text_art():
     """Prints axolotl ASCII art."""
+    global HAS_PRINTED_LOGO  # pylint: disable=global-statement
+    if HAS_PRINTED_LOGO:
+        return
     if is_main_process():
+        HAS_PRINTED_LOGO = True
         print(AXOLOTL_LOGO)
```
@@ -18,6 +18,7 @@ from axolotl.cli.checks import check_accelerate_default_config, check_user_token
```diff
 from axolotl.cli.config import load_cfg
 from axolotl.common.const import DEFAULT_DATASET_PREPARED_PATH
 from axolotl.common.datasets import load_datasets, load_preference_datasets
+from axolotl.integrations.base import PluginManager
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.trainer import disable_datasets_caching
```
@@ -47,7 +48,10 @@ def do_preprocess(cfg: DictDefault, cli_args: PreprocessCliArgs) -> None:
```diff
         cfg.dataset_prepared_path = DEFAULT_DATASET_PREPARED_PATH
 
     with disable_datasets_caching():
-        if cfg.rl:
+        plugin_manager = PluginManager.get_instance()
+        if plugin_manager.load_datasets(cfg, preprocess=True):
+            pass
+        elif cfg.rl:
             load_preference_datasets(cfg=cfg, cli_args=cli_args)
         else:
             load_datasets(cfg=cfg, cli_args=cli_args)
```
@@ -43,10 +43,13 @@ def do_train(cfg: DictDefault, cli_args: TrainerCliArgs):
```diff
     if int(os.getenv("LOCAL_RANK", "0")) == 0:
         check_user_token()
 
-    if cfg.rl:
-        dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
-    else:
-        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
+    plugin_manager = PluginManager.get_instance()
+    dataset_meta = plugin_manager.load_datasets(cfg, preprocess=False)
+    if not dataset_meta:
+        if cfg.rl:
+            dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
+        else:
+            dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
 
     model, tokenizer, trainer = train(cfg=cfg, dataset_meta=dataset_meta)
```
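These call sites imply a plugin can now take over dataset loading entirely, with axolotl falling back to its default loaders when every plugin returns a falsy value. A minimal sketch of the hook, with the method name and signature assumed from the `PluginManager.load_datasets` calls above:

```python
# hypothetical plugin; verify the exact base-class hook name in
# axolotl.integrations.base before relying on it
from axolotl.integrations.base import BasePlugin


class MyDatasetPlugin(BasePlugin):
    def load_datasets(self, cfg, preprocess=False):
        # Return a dataset-meta object to short-circuit axolotl's default
        # load_datasets()/load_preference_datasets(), or None to fall through.
        return None
```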
@@ -48,6 +48,7 @@ def load_datasets(
```diff
     *,
     cfg: DictDefault,
     cli_args: PreprocessCliArgs | TrainerCliArgs | None = None,
+    debug: bool = False,
 ) -> TrainDatasetMeta:
     """
     Loads one or more training or evaluation datasets, calling
```
@@ -56,6 +57,7 @@ def load_datasets(
```diff
     Args:
         cfg: Dictionary mapping `axolotl` config keys to values.
         cli_args: Command-specific CLI arguments.
+        debug: Whether to print out tokenization of sample
 
     Returns:
         Dataclass with fields for training and evaluation datasets and the computed
```
@@ -77,20 +79,25 @@ def load_datasets(
```diff
         preprocess_iterable=preprocess_iterable,
     )
 
-    if cli_args and (
-        cli_args.debug
-        or cfg.debug
-        or cli_args.debug_text_only
-        or int(cli_args.debug_num_examples) > 0
-    ):
+    if (  # pylint: disable=too-many-boolean-expressions
+        cli_args
+        and (
+            cli_args.debug
+            or cfg.debug
+            or cli_args.debug_text_only
+            or int(cli_args.debug_num_examples) > 0
+        )
+    ) or debug:
         LOG.info("check_dataset_labels...")
 
-        train_samples = sample_dataset(train_dataset, cli_args.debug_num_examples)
+        num_examples = cli_args.debug_num_examples if cli_args else 1
+        text_only = cli_args.debug_text_only if cli_args else False
+        train_samples = sample_dataset(train_dataset, num_examples)
         check_dataset_labels(
             train_samples,
             tokenizer,
-            num_examples=cli_args.debug_num_examples,
-            text_only=cli_args.debug_text_only,
+            num_examples=num_examples,
+            text_only=text_only,
         )
 
         LOG.info("printing prompters...")
```
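With the new keyword, programmatic callers can trigger the sample-label dump without constructing CLI args, e.g.:

```python
# prints labeled samples for one example, using the fallback defaults above
dataset_meta = load_datasets(cfg=cfg, debug=True)
```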
File diff suppressed because it is too large
@@ -114,6 +114,8 @@ class AxolotlTrainer(
```diff
             packing_efficiency_estimate=self.args.sample_packing_efficiency,
             batch_max_len=batch_max_len,
             batch_size=batch_size,
             group_size=self.args.sample_packing_group_size,
             bin_size=self.args.sample_packing_bin_size,
+            sequential=self.args.sample_packing_sequentially,
+            drop_last=True,
         )
```
src/axolotl/core/trainers/builders/__init__.py (new file, 21 lines)
@@ -0,0 +1,21 @@
```python
# Copyright 2024 Axolotl AI. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Init for axolotl.core.trainers.builders"""

# pylint: disable=unused-import
# flake8: noqa

from .causal import HFCausalTrainerBuilder
from .rl import HFRLTrainerBuilder
```
src/axolotl/core/trainers/builders/base.py (new file, 331 lines)
@@ -0,0 +1,331 @@
```python
"""Base class trainer / training args builder implementation"""

import abc
from typing import Any, Type

from transformers import TrainerCallback
from transformers.training_args import TrainingArguments

from axolotl.integrations.base import PluginManager
from axolotl.monkeypatch.trainer.lr import patch_trainer_get_lr
from axolotl.utils import is_comet_available, is_mlflow_available
from axolotl.utils.callbacks import GCCallback, SaveAxolotlConfigtoWandBCallback
from axolotl.utils.callbacks.profiler import PytorchProfilerCallback

PLUGIN_MANAGER = PluginManager.get_instance()


class TrainerBuilderBase(abc.ABC):
    """Base class for trainer builder."""

    _train_dataset = None
    _eval_dataset = None
    _model_ref = None
    _peft_config = None

    def __init__(self, cfg, model, tokenizer, processor=None):
        self.cfg = cfg
        self.model = model
        self.tokenizer = tokenizer
        self.processor = processor

        # If the model supports tagging, add the axolotl tag.
        # This makes sure the tag is correctly pushed even if a user calls
        # model.push_to_hub instead of trainer.push_to_hub.
        if hasattr(model, "add_model_tags"):
            model.add_model_tags(["axolotl"])

        patch_trainer_get_lr()

    @property
    def model_ref(self):
        return self._model_ref

    @model_ref.setter
    def model_ref(self, model):
        self._model_ref = model

    @property
    def train_dataset(self):
        return self._train_dataset

    @train_dataset.setter
    def train_dataset(self, dataset):
        self._train_dataset = dataset

    @property
    def eval_dataset(self):
        return self._eval_dataset

    @eval_dataset.setter
    def eval_dataset(self, dataset):
        self._eval_dataset = dataset

    @property
    def peft_config(self):
        return self._peft_config

    @peft_config.setter
    def peft_config(self, peft_config):
        self._peft_config = peft_config

    @abc.abstractmethod
    def build(self, total_num_steps):
        pass

    def get_common_training_args_kwargs(
        self, total_num_steps: int | None = None
    ) -> dict[str, Any]:
        """Get common training arguments kwargs used across different trainer types."""
        training_args_kwargs = {}

        # Common parameters
        for arg in [
            "adam_beta1",
            "adam_beta2",
            "adam_epsilon",
            "max_grad_norm",
            "dataloader_num_workers",
            "dataloader_pin_memory",
            "dataloader_prefetch_factor",
            "dataloader_drop_last",
            "remove_unused_columns",
        ]:
            if hasattr(self.cfg, arg) and getattr(self.cfg, arg) is not None:
                training_args_kwargs[arg] = getattr(self.cfg, arg)

        # Add Hub integration arguments if needed
        if self.cfg.hub_model_id:
            training_args_kwargs["hub_model_id"] = self.cfg.hub_model_id
            training_args_kwargs["push_to_hub"] = True
            training_args_kwargs["hub_private_repo"] = True
            training_args_kwargs["hub_always_push"] = True

            if self.cfg.hub_strategy:
                training_args_kwargs["hub_strategy"] = self.cfg.hub_strategy

        # BF16/FP16 settings
        if hasattr(self.cfg, "bf16") and self.cfg.bf16:
            if self.cfg.bf16 == "full":
                training_args_kwargs["bf16_full_eval"] = True
            else:
                training_args_kwargs["bf16"] = self.cfg.bf16
        elif hasattr(self.cfg, "bfloat16") and self.cfg.bfloat16:
            training_args_kwargs["bf16"] = True

        if hasattr(self.cfg, "fp16"):
            training_args_kwargs["fp16"] = (
                getattr(self.cfg, "fp16", False)
                and not getattr(self.cfg, "bf16", False)
            ) or False

        # Set save_strategy and save_steps
        if self.cfg.save_steps:
            training_args_kwargs["save_strategy"] = "steps"
            training_args_kwargs["save_steps"] = self.cfg.save_steps
        elif self.cfg.save_strategy:
            training_args_kwargs["save_strategy"] = self.cfg.save_strategy
        else:
            # default to saving each epoch if not defined
            training_args_kwargs["save_strategy"] = "epoch"

        # Handle safetensors
        if self.cfg.save_safetensors is not None:
            training_args_kwargs["save_safetensors"] = self.cfg.save_safetensors

        # Handle gradient checkpointing
        if self.cfg.gradient_checkpointing:
            training_args_kwargs["gradient_checkpointing"] = (
                self.cfg.gradient_checkpointing
            )
            if self.cfg.gradient_checkpointing_kwargs is not None:
                training_args_kwargs["gradient_checkpointing_kwargs"] = (
                    self.cfg.gradient_checkpointing_kwargs
                )

        # Common optimizer and LR scheduler settings
        training_args_kwargs["optim"] = self.cfg.optimizer
        if hasattr(self.cfg, "lr_scheduler") and self.cfg.lr_scheduler:
            training_args_kwargs["lr_scheduler_type"] = self.cfg.lr_scheduler
        else:
            training_args_kwargs["lr_scheduler_type"] = "cosine"

        if hasattr(self.cfg, "lr_scheduler_kwargs") and self.cfg.lr_scheduler_kwargs:
            training_args_kwargs["lr_scheduler_kwargs"] = self.cfg.lr_scheduler_kwargs
        else:
            training_args_kwargs["lr_scheduler_kwargs"] = {}

        # LoRA+ specific settings
        if hasattr(self.cfg, "loraplus_lr_ratio"):
            training_args_kwargs["loraplus_lr_ratio"] = self.cfg.loraplus_lr_ratio
        if hasattr(self.cfg, "loraplus_lr_embedding"):
            training_args_kwargs["loraplus_lr_embedding"] = (
                self.cfg.loraplus_lr_embedding
            )

        # Reporting tools
        report_to = []
        if self.cfg.use_wandb:
            report_to.append("wandb")
            if self.cfg.wandb_name:
                training_args_kwargs["run_name"] = self.cfg.wandb_name
        if self.cfg.use_mlflow:
            report_to.append("mlflow")
        if self.cfg.use_tensorboard:
            report_to.append("tensorboard")
        if self.cfg.use_comet:
            report_to.append("comet_ml")

        if report_to:
            training_args_kwargs["report_to"] = report_to

        # Basic training settings
        if hasattr(self.cfg, "sequence_len"):
            training_args_kwargs["max_length"] = self.cfg.sequence_len

        training_args_kwargs["save_only_model"] = getattr(
            self.cfg, "save_only_model", False
        )
        training_args_kwargs["save_total_limit"] = getattr(
            self.cfg, "save_total_limit", 5
        )

        # Compute warmup steps
        if hasattr(self.cfg, "warmup_steps") and self.cfg.warmup_steps is not None:
            training_args_kwargs["warmup_steps"] = self.cfg.warmup_steps
        elif (
            total_num_steps
            and hasattr(self.cfg, "warmup_ratio")
            and self.cfg.warmup_ratio is not None
        ):
            training_args_kwargs["warmup_steps"] = max(
                int(self.cfg.warmup_ratio * total_num_steps), 0
            )
        elif total_num_steps:
            training_args_kwargs["warmup_steps"] = min(int(0.03 * total_num_steps), 100)

        return training_args_kwargs

    def create_training_args(
        self,
        args_cls: Type[TrainingArguments],
        total_num_steps: int | None = None,
        **additional_kwargs,
    ) -> TrainingArguments:
        """Create training arguments with common logic."""
        # Get common training args and update with trainer-specific args
        training_args_kwargs = self.get_common_training_args_kwargs(total_num_steps)
        training_args_kwargs.update(additional_kwargs)

        # Create training args with pre- and post-creation hooks
        training_args_kwargs = self.hook_pre_create_training_args(training_args_kwargs)
        training_args = args_cls(**training_args_kwargs)
        training_args = self.hook_post_create_training_args(training_args)

        # Unset run_name so wandb sets up experiment names properly
        if self.cfg.use_wandb and training_args.run_name == training_args.output_dir:
            training_args.run_name = None

        return training_args

    def create_trainer(
        self, trainer_cls, training_args, trainer_args=None, trainer_kwargs=None
    ):
        """Create trainer with common logic."""
        if trainer_args is None:
            trainer_args = []
        if trainer_kwargs is None:
            trainer_kwargs = {}

        # Create trainer with pre- and post-creation hooks
        trainer_kwargs, trainer_cls = self.hook_pre_create_trainer(
            trainer_kwargs, trainer_cls
        )
        trainer = trainer_cls(
            *trainer_args,
            args=training_args,
            train_dataset=self.train_dataset,
            eval_dataset=self.eval_dataset,
            callbacks=self.get_callbacks(),
            **trainer_kwargs,
        )
        trainer = self.hook_post_create_trainer(trainer)

        # Add post-creation callbacks
        for callback in self.get_post_trainer_create_callbacks(trainer):
            trainer.add_callback(callback)

        return trainer

    def get_callbacks(self) -> list[TrainerCallback]:
        callbacks = []
        callbacks.extend(
            PLUGIN_MANAGER.add_callbacks_pre_trainer(cfg=self.cfg, model=self.model)
        )

        if self.cfg.profiler_steps:
            callbacks.append(
                PytorchProfilerCallback(
                    steps_to_profile=self.cfg.profiler_steps,
                )
            )

        if self.cfg.gc_steps:
            callbacks.append(GCCallback(gc_steps=self.cfg.gc_steps))

        if self.cfg.use_wandb:
            callbacks.append(
                SaveAxolotlConfigtoWandBCallback(self.cfg.axolotl_config_path)
            )
        if self.cfg.use_mlflow and is_mlflow_available():
            from axolotl.utils.callbacks.mlflow_ import (
                SaveAxolotlConfigtoMlflowCallback,
            )

            callbacks.extend(
                [
                    SaveAxolotlConfigtoMlflowCallback(self.cfg.axolotl_config_path),
                ]
            )
        if self.cfg.use_comet and is_comet_available():
            from axolotl.utils.callbacks.comet_ import SaveAxolotlConfigtoCometCallback

            callbacks.append(
                SaveAxolotlConfigtoCometCallback(self.cfg.axolotl_config_path)
            )

        return callbacks

    def get_post_trainer_create_callbacks(self, trainer):
        """Callbacks added after the trainer is created, usually because these need
        access to the trainer.
        """
        callbacks = []
        if self.cfg.plugins:
            callbacks.extend(
                [
                    cb
                    for cb in PLUGIN_MANAGER.add_callbacks_post_trainer(
                        self.cfg, trainer
                    )
                    if cb
                ]
            )
        return callbacks

    def hook_pre_create_training_args(self, training_arguments_kwargs):
        # TODO
        return training_arguments_kwargs

    def hook_post_create_training_args(self, training_arguments):
        # TODO
        return training_arguments

    def hook_pre_create_trainer(self, trainer_kwargs, trainer_cls):
        # TODO
        return trainer_kwargs, trainer_cls

    def hook_post_create_trainer(self, trainer):
        # TODO
        return trainer
```
src/axolotl/core/trainers/builders/causal.py (new file, 619 lines)
@@ -0,0 +1,619 @@
```python
"""Causal trainer / training args builder implementation"""

import importlib
import inspect
import logging
import math
import os
import sys
from pathlib import Path
from typing import Type

import transformers
from transformers import (
    DataCollatorWithFlattening,
    EarlyStoppingCallback,
)
from transformers.training_args import OptimizerNames
from trl.trainer.utils import RewardDataCollatorWithPadding

from axolotl.core.trainers.base import AxolotlTrainer
from axolotl.core.trainers.builders.base import TrainerBuilderBase
from axolotl.core.trainers.mamba import AxolotlMambaTrainer
from axolotl.core.trainers.relora import ReLoRATrainer
from axolotl.core.trainers.trl import AxolotlPRMTrainer, AxolotlRewardTrainer
from axolotl.core.training_args import (
    AxolotlPRMConfig,
    AxolotlRewardConfig,
    AxolotlTrainingArguments,
)
from axolotl.integrations.base import PluginManager
from axolotl.monkeypatch.multipack import SUPPORTED_MULTIPACK_MODEL_TYPES
from axolotl.monkeypatch.relora import ReLoRACallback
from axolotl.processing_strategies import get_processing_strategy
from axolotl.utils import is_comet_available, is_mlflow_available
from axolotl.utils.callbacks import (
    EvalFirstStepCallback,
    GPUStatsCallback,
    LossWatchDogCallback,
    SaveBetterTransformerModelCallback,
    bench_eval_callback_factory,
    causal_lm_bench_eval_callback_factory,
    colab_inference_post_train_callback,
    log_prediction_callback_factory,
)
from axolotl.utils.callbacks.lisa import lisa_callback_factory
from axolotl.utils.chat_templates import get_chat_template_from_config
from axolotl.utils.collators.batching import (
    BatchSamplerDataCollatorForSeq2Seq,
    DataCollatorForSeq2Seq,
    V2BatchSamplerDataCollatorForSeq2Seq,
)
from axolotl.utils.collators.mamba import MambaDataCollator
from axolotl.utils.collators.mm_chat import MultiModalChatDataCollator
from axolotl.utils.schemas.enums import CustomSupportedOptimizers

LOG = logging.getLogger(__name__)
PLUGIN_MANAGER = PluginManager.get_instance()


class HFCausalTrainerBuilder(TrainerBuilderBase):
    """Build the HuggingFace training args / trainer for causal models and reward
    modeling using TRL.
    """

    def get_callbacks(self):
        callbacks = super().get_callbacks()
        callbacks.append(GPUStatsCallback(self.cfg))
        callbacks.append(EvalFirstStepCallback())

        if self.cfg.relora_steps:
            callbacks.append(ReLoRACallback(self.cfg))

        if (
            hasattr(self.model, "use_bettertransformer")
            and self.model.use_bettertransformer is True
        ):
            callbacks.append(SaveBetterTransformerModelCallback())

        if self.cfg.loss_watchdog_threshold is not None:
            callbacks.append(LossWatchDogCallback(self.cfg))

        return callbacks

    def get_post_trainer_create_callbacks(self, trainer):
        callbacks = []
        if self.cfg.use_wandb and self.cfg.eval_table_size > 0:
            LogPredictionCallback = log_prediction_callback_factory(
                trainer, self.tokenizer, "wandb"
            )
            callbacks.append(LogPredictionCallback(self.cfg))
        if (
            self.cfg.use_mlflow
            and is_mlflow_available()
            and self.cfg.eval_table_size > 0
        ):
            LogPredictionCallback = log_prediction_callback_factory(
                trainer, self.tokenizer, "mlflow"
            )
            callbacks.append(LogPredictionCallback(self.cfg))
        if self.cfg.use_comet and is_comet_available() and self.cfg.eval_table_size > 0:
            LogPredictionCallback = log_prediction_callback_factory(
                trainer, self.tokenizer, "comet_ml"
            )
            callbacks.append(LogPredictionCallback(self.cfg))

        if self.cfg.do_bench_eval:
            callbacks.append(bench_eval_callback_factory(trainer, self.tokenizer))
        if self.cfg.do_causal_lm_eval:
            CausalLMBenchEvalCallback = causal_lm_bench_eval_callback_factory(
                trainer, self.tokenizer
            )
            callbacks.append(CausalLMBenchEvalCallback(self.cfg))

        if self.cfg.early_stopping_patience:
            early_stop_cb = EarlyStoppingCallback(
                self.cfg.early_stopping_patience,
            )
            callbacks.append(early_stop_cb)

        if self.cfg.lisa_step_interval and self.cfg.lisa_n_layers:
            callbacks.append(lisa_callback_factory(trainer))

        if any("COLAB_" in key for key in os.environ):
            ColabCallback = colab_inference_post_train_callback(trainer)
            callbacks.append(ColabCallback(self.cfg))

        callbacks.extend(super().get_post_trainer_create_callbacks(trainer=trainer))
        return callbacks

    def _get_trainer_cls(self):
        if self.cfg.plugins:
            trainer_cls = PLUGIN_MANAGER.get_trainer_cls(self.cfg)
            if trainer_cls:
                return trainer_cls
        if self.cfg.relora_steps:
            return ReLoRATrainer
        if self.cfg.model_config_type == "mamba":
            return AxolotlMambaTrainer
        if self.cfg.reward_model:
            return AxolotlRewardTrainer
        if self.cfg.process_reward_model:
            return AxolotlPRMTrainer

        return AxolotlTrainer

    def build(self, total_num_steps):
        """Build and return a causal trainer instance using the refactored base class."""
        # Get trainer class
        trainer_cls = self._get_trainer_cls()

        # Prepare training arguments
        training_args = self._prepare_training_args(total_num_steps)

        # Prepare data collators
        data_collator_kwargs = self._prepare_data_collator_kwargs()

        # Prepare trainer kwargs
        trainer_kwargs = self._prepare_trainer_kwargs(
            trainer_cls=trainer_cls,
            data_collator_kwargs=data_collator_kwargs,
            training_args=training_args,
        )

        # Create the trainer
        trainer = self.create_trainer(
            trainer_cls=trainer_cls,
            training_args=training_args,
            trainer_kwargs={
                "model": self.model,
                "data_collator": self.build_collator(
                    training_args, **data_collator_kwargs
                ),
                **trainer_kwargs,
            },
        )

        # Handle DeepSpeed config for sample packing if needed
        if self.cfg.deepspeed and self.cfg.sample_packing:
            trainer.accelerator.state.deepspeed_plugin.deepspeed_config[
                "train_micro_batch_size_per_gpu"
            ] = self.cfg.micro_batch_size

        return trainer

    def _prepare_training_args(self, total_num_steps):
        """Prepare and return training arguments."""
        # Base training arguments
        training_args_kwargs = self._get_base_training_args()

        # Add feature configurations
        self._add_feature_configs(training_args_kwargs)

        # Handle optimizer configuration
        self._configure_optimizer(training_args_kwargs)

        # Create training args using the base class method
        training_args_cls = self._get_training_args_cls()

        return self.create_training_args(
            args_cls=training_args_cls,
            total_num_steps=total_num_steps,
            **training_args_kwargs,
        )

    def _get_base_training_args(self):
        """Return the base training arguments."""
        return {
            "max_steps": self.cfg.max_steps if self.cfg.max_steps else -1,
            "max_seq_length": self.cfg.sequence_len,
            "per_device_train_batch_size": self.cfg.micro_batch_size,
            "gradient_accumulation_steps": self.cfg.gradient_accumulation_steps,
            "eval_accumulation_steps": self.cfg.gradient_accumulation_steps,
            "num_train_epochs": self.cfg.num_epochs,
            "learning_rate": self.cfg.learning_rate,
            "output_dir": self.cfg.output_dir,
            "weight_decay": (
                self.cfg.weight_decay if self.cfg.weight_decay is not None else 0.0
            ),
            "model_type": self.cfg.model_config_type,
            "pretraining": bool(self.cfg.pretraining_dataset),
            "sequence_parallel_degree": self.cfg.sequence_parallel_degree,
            "ring_attn_func": self.cfg.ring_attn_func,
            "embedding_lr": self.cfg.embedding_lr,
            "embedding_lr_scale": self.cfg.embedding_lr_scale,
            "loraplus_lr_ratio": self.cfg.loraplus_lr_ratio,
```
|
||||
"loraplus_lr_embedding": self.cfg.loraplus_lr_embedding,
|
||||
"lr_groups": self.cfg.lr_groups,
|
||||
}
|
||||
|
||||
def _add_feature_configs(self, training_args_kwargs):
|
||||
"""Add various feature configurations."""
|
||||
# Sample packing configurations
|
||||
self._add_sample_packing_configs(training_args_kwargs)
|
||||
|
||||
# Batch size configurations
|
||||
if self.cfg.eval_batch_size:
|
||||
training_args_kwargs["per_device_eval_batch_size"] = (
|
||||
self.cfg.eval_batch_size
|
||||
)
|
||||
if self.cfg.auto_find_batch_size is not None:
|
||||
training_args_kwargs["auto_find_batch_size"] = self.cfg.auto_find_batch_size
|
||||
|
||||
# Advanced training techniques (ReLoRA & Lisa)
|
||||
self._add_advanced_training_configs(training_args_kwargs)
|
||||
|
||||
# Model-specific configurations
|
||||
self._add_model_specific_configs(training_args_kwargs)
|
||||
|
||||
def _add_sample_packing_configs(self, training_args_kwargs):
|
||||
"""Add sample packing configurations if applicable."""
|
||||
if hasattr(self.cfg, "sample_packing") and self.cfg.sample_packing:
|
||||
training_args_kwargs.update(
|
||||
{
|
||||
"sample_packing": bool(self.cfg.sample_packing),
|
||||
"multipack_real_batches": not self.cfg.flash_attention
|
||||
or self.cfg.multipack_real_batches,
|
||||
"eval_sample_packing": bool(self.cfg.eval_sample_packing),
|
||||
}
|
||||
)
|
||||
|
||||
if self.cfg.sample_packing_bin_size is not None:
|
||||
training_args_kwargs["sample_packing_bin_size"] = (
|
||||
self.cfg.sample_packing_bin_size
|
||||
)
|
||||
|
||||
if self.cfg.sample_packing_group_size is not None:
|
||||
training_args_kwargs["sample_packing_group_size"] = (
|
||||
self.cfg.sample_packing_group_size
|
||||
)
|
||||
|
||||
if self.cfg.sample_packing_eff_est:
|
||||
training_args_kwargs["sample_packing_efficiency"] = (
|
||||
self.cfg.sample_packing_eff_est
|
||||
)
|
||||
|
||||
def _add_advanced_training_configs(self, training_args_kwargs):
|
||||
"""Add advanced training techniques configurations (ReLoRA & Lisa)."""
|
||||
# ReLoRA configurations
|
||||
if self.cfg.relora_steps:
|
||||
training_args_kwargs.update(
|
||||
{
|
||||
"relora_steps": self.cfg.relora_steps,
|
||||
"relora_warmup_steps": self.cfg.relora_warmup_steps,
|
||||
}
|
||||
)
|
||||
if self.cfg.relora_anneal_steps:
|
||||
training_args_kwargs["relora_anneal_steps"] = (
|
||||
self.cfg.relora_anneal_steps
|
||||
)
|
||||
if self.cfg.relora_prune_ratio:
|
||||
training_args_kwargs["relora_prune_ratio"] = self.cfg.relora_prune_ratio
|
||||
|
||||
# Lisa configurations
|
||||
if self.cfg.lisa_step_interval and self.cfg.lisa_n_layers:
|
||||
training_args_kwargs.update(
|
||||
{
|
||||
"lisa_n_layers": self.cfg.lisa_n_layers,
|
||||
"lisa_step_interval": self.cfg.lisa_step_interval,
|
||||
"lisa_layers_attribute": self.cfg.lisa_layers_attribute,
|
||||
}
|
||||
)
|
||||
|
||||
def _add_model_specific_configs(self, training_args_kwargs):
|
||||
"""Add model-specific configurations."""
|
||||
# Chat template
|
||||
if self.cfg.chat_template:
|
||||
training_args_kwargs["chat_template"] = get_chat_template_from_config(
|
||||
cfg=self.cfg,
|
||||
tokenizer=self.tokenizer,
|
||||
)
|
||||
|
||||
# NEFTune
|
||||
if self.cfg.neftune_noise_alpha is not None:
|
||||
training_args_kwargs["neftune_noise_alpha"] = self.cfg.neftune_noise_alpha
|
||||
|
||||
# Knowledge distillation configurations
|
||||
if self.cfg.kd_ce_alpha is not None:
|
||||
training_args_kwargs["kd_ce_alpha"] = self.cfg.kd_ce_alpha
|
||||
if self.cfg.kd_alpha is not None:
|
||||
training_args_kwargs["kd_alpha"] = self.cfg.kd_alpha
|
||||
if self.cfg.kd_temperature is not None:
|
||||
training_args_kwargs["kd_temperature"] = self.cfg.kd_temperature
|
||||
if self.cfg.kd_zscore_base_temp is not None:
|
||||
training_args_kwargs["kd_zscore_base_temp"] = self.cfg.kd_zscore_base_temp
|
||||
if self.cfg.kd_top_k_before_softmax is not None:
|
||||
training_args_kwargs["kd_top_k_before_softmax"] = (
|
||||
self.cfg.kd_top_k_before_softmax
|
||||
)
|
||||
|
||||
# Image configurations
|
||||
if self.cfg.image_size:
|
||||
training_args_kwargs["image_size"] = self.cfg.image_size
|
||||
if self.cfg.image_resize_algorithm:
|
||||
training_args_kwargs["image_resize_algorithm"] = (
|
||||
self.cfg.image_resize_algorithm
|
||||
)
|
||||
|
||||
# Accelerator configuration
|
||||
if self.cfg.accelerator_config:
|
||||
training_args_kwargs["accelerator_config"] = self.cfg.accelerator_config
|
||||
|
||||
def _configure_optimizer(self, training_args_kwargs):
|
||||
"""Configure optimizer settings."""
|
||||
custom_supported_optimizers = [opt.value for opt in CustomSupportedOptimizers]
|
||||
|
||||
if self.cfg.optimizer in custom_supported_optimizers:
|
||||
# Use custom optimizer implementation
|
||||
self._configure_custom_optimizer(training_args_kwargs)
|
||||
else:
|
||||
# Use transformers' optimizer
|
||||
training_args_kwargs["optim"] = self.cfg.optimizer
|
||||
self._add_optimizer_args(training_args_kwargs)
|
||||
|
||||
# Handle optimizer targeting specific modules
|
||||
if self.cfg.optim_target_modules:
|
||||
training_args_kwargs["optim_target_modules"] = self.cfg.optim_target_modules
|
||||
|
||||
# Special case for anyprecision optimizer
|
||||
if self.cfg.optimizer == "adamw_anyprecision":
|
||||
if Path(self.cfg.torchdistx_path).exists():
|
||||
sys.path.append(self.cfg.torchdistx_path)
|
||||
importlib.import_module("torchdistx")
|
||||
|
||||
def _configure_custom_optimizer(self, training_args_kwargs):
|
||||
"""Configure custom optimizer settings."""
|
||||
# Common optimizer kwargs
|
||||
optimizer_kwargs = {
|
||||
"lr": training_args_kwargs.get("learning_rate"),
|
||||
"weight_decay": training_args_kwargs.get("weight_decay"),
|
||||
}
|
||||
|
||||
# Add Adam-specific kwargs if available
|
||||
adam_kwargs = self._get_adam_kwargs(training_args_kwargs)
|
||||
|
||||
# Get optimizer class and update kwargs based on optimizer type
|
||||
optimizer_cls = self._get_optimizer_class(
|
||||
training_args_kwargs, optimizer_kwargs, adam_kwargs
|
||||
)
|
||||
|
||||
# Add any additional optimizer args from config
|
||||
self._update_optimizer_kwargs_from_config(optimizer_kwargs)
|
||||
|
||||
training_args_kwargs["optimizer_cls_and_kwargs"] = (
|
||||
optimizer_cls,
|
||||
optimizer_kwargs,
|
||||
)
|
||||
|
||||
def _get_adam_kwargs(self, training_args_kwargs):
|
||||
"""Get Adam-specific kwargs if available."""
|
||||
adam_kwargs = {}
|
||||
if training_args_kwargs.get("adam_beta1") and training_args_kwargs.get(
|
||||
"adam_beta2"
|
||||
):
|
||||
adam_kwargs["betas"] = (
|
||||
training_args_kwargs.get("adam_beta1"),
|
||||
training_args_kwargs.get("adam_beta2"),
|
||||
)
|
||||
if training_args_kwargs.get("adam_epsilon"):
|
||||
adam_kwargs["eps"] = training_args_kwargs.get("adam_epsilon")
|
||||
return adam_kwargs
|
||||
|
||||
def _get_optimizer_class(self, training_args_kwargs, optimizer_kwargs, adam_kwargs):
|
||||
"""Get optimizer class based on configuration."""
|
||||
if self.cfg.optimizer == "muon":
|
||||
from axolotl.contribs.mit.muon import MuonOptimizerFactory # pylint: disable=no-name-in-module
|
||||
|
||||
optimizer_cls = MuonOptimizerFactory
|
||||
optimizer_kwargs.update(adam_kwargs)
|
||||
elif self.cfg.optimizer == "optimi_adamw":
|
||||
from optimi import AdamW
|
||||
|
||||
optimizer_kwargs["foreach"] = False
|
||||
optimizer_cls = AdamW
|
||||
optimizer_kwargs.update(adam_kwargs)
|
||||
elif self.cfg.optimizer == "ao_adamw_4bit":
|
||||
from torchao.prototype.low_bit_optim import AdamW4bit
|
||||
|
||||
optimizer_cls = AdamW4bit
|
||||
optimizer_kwargs.update(adam_kwargs)
|
||||
LOG.warning(
|
||||
f"`ao_adamw_4bit` will be deprecated soon. Please use `{OptimizerNames.ADAMW_TORCH_4BIT}` instead."
|
||||
)
|
||||
elif self.cfg.optimizer == "ao_adamw_8bit":
|
||||
from torchao.prototype.low_bit_optim import AdamW8bit
|
||||
|
||||
optimizer_cls = AdamW8bit
|
||||
optimizer_kwargs.update(adam_kwargs)
|
||||
elif self.cfg.optimizer == "ao_adamw_fp8":
|
||||
from torchao.prototype.low_bit_optim import AdamWFp8
|
||||
|
||||
optimizer_cls = AdamWFp8
|
||||
optimizer_kwargs.update(adam_kwargs)
|
||||
elif self.cfg.optimizer == "adopt_adamw":
|
||||
from axolotl.utils.optimizers.adopt import ADOPT
|
||||
|
||||
optimizer_cls = ADOPT
|
||||
adam_kwargs["decouple"] = True
|
||||
optimizer_kwargs.update(adam_kwargs)
|
||||
elif self.cfg.optimizer == "came_pytorch":
|
||||
from came_pytorch import CAME
|
||||
|
||||
optimizer_cls = CAME
|
||||
|
||||
            beta1 = training_args_kwargs.get("adam_beta1", 0.9)
            beta2 = training_args_kwargs.get("adam_beta2", 0.999)
            # CAME's third moment decay; falls back to 0.9999 when unset
            beta3 = training_args_kwargs.get("adam_beta3", 0.9999)
            eps1 = training_args_kwargs.get("adam_epsilon", 1e-30)
            eps2 = training_args_kwargs.get("adam_epsilon2", 1e-16)

            adam_kwargs["betas"] = (beta1, beta2, beta3)
            adam_kwargs["eps"] = (eps1, eps2)
            optimizer_kwargs.update(adam_kwargs)
        else:
            # Default case or unsupported optimizer
            optimizer_cls = None

        return optimizer_cls

    def _update_optimizer_kwargs_from_config(self, optimizer_kwargs):
        """Update optimizer kwargs from config."""
        if self.cfg.optim_args:
            if isinstance(self.cfg.optim_args, dict):
                optimizer_kwargs.update(self.cfg.optim_args)
            else:
                # Parse string format "key1=value1,key2=value2"
                for mapping in self.cfg.optim_args.replace(" ", "").split(","):
                    key, value = mapping.split("=")
                    optimizer_kwargs[key] = value

    def _add_optimizer_args(self, training_args_kwargs):
        """Add optimizer arguments if available."""
        if self.cfg.optim_args:
            if isinstance(self.cfg.optim_args, dict):
                optim_args = ",".join(
                    [f"{key}={value}" for key, value in self.cfg.optim_args.items()]
                )
            else:
                optim_args = self.cfg.optim_args
            training_args_kwargs["optim_args"] = optim_args

    def _get_training_args_cls(self):
        """Get the appropriate training arguments class."""
        if self.cfg.reward_model:
            return AxolotlRewardConfig
        if self.cfg.process_reward_model:
            return AxolotlPRMConfig
        return AxolotlTrainingArguments

    def _prepare_data_collator_kwargs(self):
        """Prepare data collator kwargs."""
        data_collator_kwargs = {"padding": True}  # True/"longest" is the default

        if self.cfg.pad_to_sequence_len:
            data_collator_kwargs["pad_to_multiple_of"] = 64 * math.ceil(
                self.cfg.sequence_len / 64
            )
        else:
            data_collator_kwargs["pad_to_multiple_of"] = 64

        if self.cfg.reward_model:
            data_collator_kwargs["max_length"] = self.cfg.sequence_len

        return data_collator_kwargs

    def _prepare_trainer_kwargs(self, trainer_cls, data_collator_kwargs, training_args):
        """Prepare trainer kwargs."""
        trainer_kwargs = {}

        # Handle special data collators for evaluation
        if eval_data_collator := self.build_collator(
            training_args, is_eval=True, **data_collator_kwargs
        ):
            if not (self.cfg.reward_model or self.cfg.process_reward_model):
                trainer_kwargs["eval_data_collator"] = eval_data_collator

        # Add bench data collator if needed
        if not (self.cfg.reward_model or self.cfg.process_reward_model):
            trainer_kwargs["bench_data_collator"] = transformers.DataCollatorForSeq2Seq(
                self.tokenizer,
                return_tensors="pt",
                **data_collator_kwargs,
            )

        # Add tokenizer or processing class
        sig = inspect.signature(trainer_cls)
        if "processing_class" in sig.parameters.keys():
            trainer_kwargs["processing_class"] = self.tokenizer
        else:
            trainer_kwargs["tokenizer"] = self.tokenizer

        # Add dataset tags if available
        if (
            not (trainer_cls in [AxolotlRewardTrainer, AxolotlPRMTrainer])
            and self.cfg.datasets is not None
        ):
            trainer_kwargs["dataset_tags"] = [
                d["path"] for d in self.cfg.datasets if not Path(d["path"]).is_dir()
            ]

        return trainer_kwargs

    def build_collator(
        self, training_args: AxolotlTrainingArguments, is_eval=False, **kwargs
    ):
        if training_args.pretraining:
            if (
                self.cfg.pretraining_sample_concatenation is False
                or self.cfg.micro_batch_size > 1
            ):
                return DataCollatorForSeq2Seq(self.tokenizer, **kwargs)
            return None

        if self.cfg.model_config_type == "mamba":
            return MambaDataCollator(tokenizer=self.tokenizer)

        use_batch_sampler_collator = False
        if is_eval is False and training_args.sample_packing:
            use_batch_sampler_collator = True
        if is_eval and training_args.eval_sample_packing:
            use_batch_sampler_collator = True

        collator: Type[
            V2BatchSamplerDataCollatorForSeq2Seq
            | BatchSamplerDataCollatorForSeq2Seq
            | DataCollatorForSeq2Seq
            | DataCollatorWithFlattening
            | RewardDataCollatorWithPadding
        ]
        collator_args = [self.tokenizer]
        if self.cfg.reward_model:
            collator = RewardDataCollatorWithPadding
            if "max_length" in kwargs:
                kwargs.pop("max_length")
        elif use_batch_sampler_collator:
            if self.cfg.flex_attention:
                collator = V2BatchSamplerDataCollatorForSeq2Seq
            elif self.cfg.model_config_type in SUPPORTED_MULTIPACK_MODEL_TYPES:
                collator = V2BatchSamplerDataCollatorForSeq2Seq
            elif (
                self.cfg.model_config_type in ["llama"]
                and self.cfg.flash_attention is not True
            ):
                collator = V2BatchSamplerDataCollatorForSeq2Seq
            else:
                collator = BatchSamplerDataCollatorForSeq2Seq
        else:
            if self.cfg.processor_type and self.processor:
                collator = MultiModalChatDataCollator
                kwargs["processing_strategy"] = get_processing_strategy(
                    self.processor,
                    training_args.chat_template,
                    self.cfg.chat_template,
                    image_size=training_args.image_size,
                    image_resize_algorithm=training_args.image_resize_algorithm,
                )
            elif self.cfg.batch_flattening:
                collator = DataCollatorWithFlattening
                collator_args.pop(0)
                kwargs.pop("pad_to_multiple_of", None)
                kwargs.pop("padding", None)
            elif self.cfg.kd_trainer:
                from axolotl.integrations.kd.collator import (
                    DataCollatorForKD,
                    KDBatchSamplerDataCollatorForSeq2Seq,
                )

                if self.cfg.sample_packing:
                    collator = KDBatchSamplerDataCollatorForSeq2Seq
                else:
                    collator = DataCollatorForKD
            else:
                collator = DataCollatorForSeq2Seq

        kwargs["return_tensors"] = "pt"

        return collator(
            *collator_args,
            **kwargs,
        )
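A quick sanity check of the padding arithmetic in _prepare_data_collator_kwargs above: with pad_to_sequence_len set, batches are padded to the smallest multiple of 64 that covers sequence_len. A minimal sketch (plain Python, no axolotl imports; the numbers are illustrative):

import math

def pad_multiple(sequence_len: int, pad_to_sequence_len: bool) -> int:
    # mirrors _prepare_data_collator_kwargs: round sequence_len up to a
    # multiple of 64 when padding to full sequence length, else pad to 64
    if pad_to_sequence_len:
        return 64 * math.ceil(sequence_len / 64)
    return 64

assert pad_multiple(2048, True) == 2048   # already a multiple of 64
assert pad_multiple(2000, True) == 2048   # rounded up to the next multiple
assert pad_multiple(2000, False) == 64    # per-batch padding granularity only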
367	src/axolotl/core/trainers/builders/rl.py	Normal file
@@ -0,0 +1,367 @@
"""RL trainer / training args builder implementation"""

import inspect
from pathlib import Path

from axolotl.core.trainers.builders.base import TrainerBuilderBase
from axolotl.core.trainers.dpo import DPOStrategy
from axolotl.core.trainers.dpo.args import AxolotlDPOConfig
from axolotl.core.trainers.grpo import GRPOStrategy
from axolotl.core.trainers.trl import (
    AxolotlCPOTrainer,
    AxolotlKTOTrainer,
    AxolotlORPOTrainer,
)
from axolotl.core.training_args import (
    AxolotlCPOConfig,
    AxolotlKTOConfig,
    AxolotlORPOConfig,
)
from axolotl.utils.models import ensure_dtype


class HFRLTrainerBuilder(TrainerBuilderBase):
    """Trainer factory class for TRL-based RLHF trainers (e.g. DPO)"""

    def get_callbacks(self):
        callbacks = super().get_callbacks()

        return callbacks

    def get_post_trainer_create_callbacks(self, trainer):
        callbacks = super().get_post_trainer_create_callbacks(trainer=trainer)
        return callbacks

    def build_training_arguments(self, total_num_steps):
        training_args_kwargs = {}
        for arg in [
            "adam_beta1",
            "adam_beta2",
            "adam_epsilon",
            "dataloader_num_workers",
            "dataloader_pin_memory",
        ]:
            if hasattr(self.cfg, arg) and getattr(self.cfg, arg) is not None:
                training_args_kwargs[arg] = getattr(self.cfg, arg)

        if self.cfg.hub_model_id:
            training_args_kwargs["hub_model_id"] = self.cfg.hub_model_id
            training_args_kwargs["push_to_hub"] = True
            training_args_kwargs["hub_private_repo"] = True
            training_args_kwargs["hub_always_push"] = True

        if self.cfg.hub_strategy:
            training_args_kwargs["hub_strategy"] = self.cfg.hub_strategy

        if self.cfg.save_safetensors is not None:
            training_args_kwargs["save_safetensors"] = self.cfg.save_safetensors

        if self.eval_dataset:
            training_args_kwargs["eval_strategy"] = "steps"
            training_args_kwargs["eval_steps"] = self.cfg.eval_steps
        else:
            training_args_kwargs["eval_strategy"] = "no"

        if self.cfg.bf16 or self.cfg.bfloat16:
            training_args_kwargs["bf16"] = True

        training_args_kwargs["loraplus_lr_ratio"] = self.cfg.loraplus_lr_ratio
        training_args_kwargs["loraplus_lr_embedding"] = self.cfg.loraplus_lr_embedding
        training_args_kwargs["lr_scheduler_type"] = (
            self.cfg.lr_scheduler if self.cfg.lr_scheduler else "cosine"
        )
        training_args_kwargs["lr_scheduler_kwargs"] = (
            self.cfg.lr_scheduler_kwargs if self.cfg.lr_scheduler_kwargs else {}
        )
        if self.cfg.remove_unused_columns is not None:
            training_args_kwargs["remove_unused_columns"] = (
                self.cfg.remove_unused_columns
            )
        else:
            training_args_kwargs["remove_unused_columns"] = False

        if self.cfg.dataloader_pin_memory is not None:
            training_args_kwargs["dataloader_pin_memory"] = (
                self.cfg.dataloader_pin_memory
            )
        if self.cfg.dataloader_num_workers is not None:
            training_args_kwargs["dataloader_num_workers"] = (
                self.cfg.dataloader_num_workers
            )
        if self.cfg.dataloader_prefetch_factor is not None:
            training_args_kwargs["dataloader_prefetch_factor"] = (
                self.cfg.dataloader_prefetch_factor
            )
        if self.cfg.gradient_checkpointing:
            training_args_kwargs["gradient_checkpointing"] = (
                self.cfg.gradient_checkpointing
            )
            if self.cfg.gradient_checkpointing_kwargs is not None:
                training_args_kwargs["gradient_checkpointing_kwargs"] = (
                    self.cfg.gradient_checkpointing_kwargs
                )
            else:
                training_args_kwargs["gradient_checkpointing_kwargs"] = {
                    "use_reentrant": False
                }

        # set save_strategy and save_steps
        if self.cfg.save_steps:
            training_args_kwargs["save_strategy"] = "steps"
            training_args_kwargs["save_steps"] = self.cfg.save_steps
        elif self.cfg.save_strategy:
            training_args_kwargs["save_strategy"] = self.cfg.save_strategy
        else:
            # default to saving each epoch if not defined
            training_args_kwargs["save_strategy"] = "epoch"

        training_args_kwargs["save_only_model"] = self.cfg.save_only_model

        if self.cfg.dataset_processes:
            training_args_kwargs["dataset_num_proc"] = self.cfg.dataset_processes

        if self.cfg.trl and self.cfg.trl.beta is not None:
            training_args_kwargs["beta"] = self.cfg.trl.beta
        elif self.cfg.rl_beta is not None:
            training_args_kwargs["beta"] = self.cfg.rl_beta
        elif self.cfg.orpo_alpha is not None:
            # trl does some odd mapping of alpha to beta to reuse the beta parameter ???
            training_args_kwargs["beta"] = self.cfg.orpo_alpha

        if self.cfg.rpo_alpha is not None:
            training_args_kwargs["rpo_alpha"] = self.cfg.rpo_alpha

        if self.cfg.use_wandb:
            training_args_kwargs["run_name"] = self.cfg.wandb_name

        training_args_cls = None
        blocklist_args_kwargs = []
        if self.cfg.rl == "simpo":
            training_args_cls = AxolotlCPOConfig
            training_args_kwargs["loss_type"] = "simpo"
            training_args_kwargs["max_length"] = self.cfg.sequence_len
            training_args_kwargs["simpo_gamma"] = self.cfg.simpo_gamma
            if self.cfg.cpo_alpha is not None:
                training_args_kwargs["cpo_alpha"] = self.cfg.cpo_alpha

        elif self.cfg.rl == "orpo":
            training_args_cls = AxolotlORPOConfig
            training_args_kwargs["max_length"] = self.cfg.sequence_len
            if self.cfg.max_prompt_len:
                training_args_kwargs["max_prompt_length"] = self.cfg.max_prompt_len

        elif self.cfg.rl == "kto":
            training_args_cls = AxolotlKTOConfig

            training_args_kwargs["desirable_weight"] = (
                self.cfg.kto_desirable_weight or 1.0
            )
            training_args_kwargs["undesirable_weight"] = (
                self.cfg.kto_undesirable_weight or 1.0
            )

            training_args_kwargs["max_length"] = self.cfg.sequence_len
            if self.cfg.max_prompt_len:
                training_args_kwargs["max_prompt_length"] = self.cfg.max_prompt_len

        elif self.cfg.rl == "grpo":
            training_args_cls = GRPOStrategy.get_training_args_class()
            training_args_kwargs.update(GRPOStrategy.set_training_args_kwargs(self.cfg))
            blocklist_args_kwargs = GRPOStrategy.get_blocklist_args_kwargs()

        else:
            training_args_cls = AxolotlDPOConfig
            if self.cfg.rl == "ipo":
                training_args_kwargs["loss_type"] = "ipo"
            training_args_kwargs["max_length"] = self.cfg.sequence_len
            training_args_kwargs["max_completion_length"] = None
            training_args_kwargs["max_prompt_length"] = self.cfg.sequence_len
            training_args_kwargs["generate_during_eval"] = self.cfg.use_wandb
            if self.cfg.dpo_use_weighting is not None:
                training_args_kwargs["use_weighting"] = self.cfg.dpo_use_weighting
            if self.cfg.dpo_use_logits_to_keep is not None:
                training_args_kwargs["use_logits_to_keep"] = (
                    self.cfg.dpo_use_logits_to_keep
                )

        for blocklist_key in blocklist_args_kwargs:
            if blocklist_key in training_args_kwargs:
                del training_args_kwargs[blocklist_key]

        max_steps = self.cfg.max_steps or total_num_steps or -1
        training_args_kwargs["num_train_epochs"] = self.cfg.num_epochs
        training_args = training_args_cls(  # pylint: disable=unexpected-keyword-arg
            self.cfg.output_dir,
            per_device_train_batch_size=self.cfg.micro_batch_size,
            max_steps=max_steps,
            gradient_accumulation_steps=self.cfg.gradient_accumulation_steps,
            learning_rate=self.cfg.learning_rate,
            warmup_steps=self.cfg.warmup_steps,
            logging_first_step=True,
            logging_steps=1,
            optim=self.cfg.optimizer,
            save_total_limit=self.cfg.save_total_limit or 5,
            **training_args_kwargs,
        )

        # unset run_name so wandb sets up experiment names
        if self.cfg.use_wandb and training_args.run_name == training_args.output_dir:
            training_args.run_name = (  # pylint: disable=attribute-defined-outside-init
                None
            )

        return training_args

    def build(self, total_num_steps):
        """Build and return an RL trainer instance"""
        # Prepare RL-specific training args kwargs
        training_args_kwargs = {
            "per_device_train_batch_size": self.cfg.micro_batch_size,
            "max_steps": self.cfg.max_steps or total_num_steps or -1,
            "gradient_accumulation_steps": self.cfg.gradient_accumulation_steps,
            "learning_rate": self.cfg.learning_rate,
            "warmup_steps": self.cfg.warmup_steps,
            "logging_first_step": True,
            "logging_steps": 1,
            "output_dir": self.cfg.output_dir,
            "num_train_epochs": self.cfg.num_epochs,
        }

        # Handle dataset processes
        if self.cfg.dataset_processes:
            training_args_kwargs["dataset_num_proc"] = self.cfg.dataset_processes

        # Handle beta/alpha parameters for different RL algorithms
        if self.cfg.trl and self.cfg.trl.beta is not None:
            training_args_kwargs["beta"] = self.cfg.trl.beta
        elif self.cfg.rl_beta is not None:
            training_args_kwargs["beta"] = self.cfg.rl_beta
        elif self.cfg.orpo_alpha is not None:
            # trl does some odd mapping of alpha to beta to reuse the beta parameter
            training_args_kwargs["beta"] = self.cfg.orpo_alpha

        if self.cfg.rpo_alpha is not None:
            training_args_kwargs["rpo_alpha"] = self.cfg.rpo_alpha

        # Determine training args class and add RL-specific parameters
        training_args_cls = None
        blocklist_args_kwargs = []

        if self.cfg.rl == "simpo":
            training_args_cls = AxolotlCPOConfig
            training_args_kwargs["loss_type"] = "simpo"
            training_args_kwargs["simpo_gamma"] = self.cfg.simpo_gamma
            if self.cfg.cpo_alpha is not None:
                training_args_kwargs["cpo_alpha"] = self.cfg.cpo_alpha
        elif self.cfg.rl == "orpo":
            training_args_cls = AxolotlORPOConfig
            if self.cfg.max_prompt_len:
                training_args_kwargs["max_prompt_length"] = self.cfg.max_prompt_len
        elif self.cfg.rl == "kto":
            training_args_cls = AxolotlKTOConfig
            training_args_kwargs["desirable_weight"] = (
                self.cfg.kto_desirable_weight or 1.0
            )
            training_args_kwargs["undesirable_weight"] = (
                self.cfg.kto_undesirable_weight or 1.0
            )
            if self.cfg.max_prompt_len:
                training_args_kwargs["max_prompt_length"] = self.cfg.max_prompt_len
        elif self.cfg.rl == "grpo":
            training_args_cls = GRPOStrategy.get_training_args_class()
            training_args_kwargs.update(GRPOStrategy.set_training_args_kwargs(self.cfg))
            blocklist_args_kwargs = GRPOStrategy.get_blocklist_args_kwargs()
        else:  # Default to DPO
            training_args_cls = AxolotlDPOConfig
            if self.cfg.rl == "ipo":
                training_args_kwargs["loss_type"] = "ipo"
            training_args_kwargs["max_prompt_length"] = self.cfg.sequence_len
            training_args_kwargs["max_completion_length"] = None
            training_args_kwargs["generate_during_eval"] = self.cfg.use_wandb
            if self.cfg.dpo_use_weighting is not None:
                training_args_kwargs["use_weighting"] = self.cfg.dpo_use_weighting
            if self.cfg.dpo_use_logits_to_keep is not None:
                training_args_kwargs["use_logits_to_keep"] = (
                    self.cfg.dpo_use_logits_to_keep
                )

        # Remove any blocklisted arguments
        for blocklist_key in blocklist_args_kwargs:
            if blocklist_key in training_args_kwargs:
                del training_args_kwargs[blocklist_key]

        # Create training args using the base class method
        training_args = self.create_training_args(
            args_cls=training_args_cls,
            total_num_steps=total_num_steps,
            **training_args_kwargs,
        )

        # Prepare trainer kwargs
        trainer_kwargs = {}
        if self.cfg.rl == "ipo" and self.cfg.dpo_label_smoothing:
            trainer_kwargs["label_smoothing"] = self.cfg.dpo_label_smoothing
        if self.eval_dataset:
            trainer_kwargs["eval_dataset"] = self.eval_dataset
        if self.cfg.adapter and self.peft_config:
            trainer_kwargs["peft_config"] = self.peft_config
        if self.cfg.precompute_ref_log_probs is not None:
            trainer_kwargs["precompute_ref_log_probs"] = (
                self.cfg.precompute_ref_log_probs
            )

        # Determine trainer class and arguments
        if self.cfg.rl == "grpo":
            trainer_cls = GRPOStrategy.get_trainer_class()
            trainer_args = [self.model]
            trainer_args.extend(GRPOStrategy.set_trainer_args(self.cfg))
            trainer_kwargs.update(GRPOStrategy.set_trainer_kwargs(self.cfg))
        elif self.cfg.rl in ["dpo", "ipo"]:
            trainer_cls = DPOStrategy.get_trainer_class()
            trainer_args = [self.model, self.model_ref]
        elif self.cfg.rl == "orpo":
            trainer_cls = AxolotlORPOTrainer
            trainer_args = [self.model]
        elif self.cfg.rl in ["kto"]:
            trainer_cls = AxolotlKTOTrainer
            trainer_args = [self.model]
        elif self.cfg.rl in ["simpo"]:
            trainer_cls = AxolotlCPOTrainer
            trainer_args = [self.model]
        else:
            raise ValueError(f"Unsupported RL: {self.cfg.rl}")

        # Add tokenizer or processing class
        sig = inspect.signature(trainer_cls)
        if "tokenizer" in sig.parameters.keys():
            trainer_kwargs["tokenizer"] = self.tokenizer
        else:
            trainer_kwargs["processing_class"] = self.tokenizer

        # Add dataset tags if available
        if self.cfg.datasets is not None and (
            trainer_cls is DPOStrategy.get_trainer_class()
        ):
            trainer_kwargs["dataset_tags"] = [
                d["path"] for d in self.cfg.datasets if not Path(d["path"]).is_dir()
            ]

        # Create the trainer
        trainer = self.create_trainer(
            trainer_cls=trainer_cls,
            training_args=training_args,
            trainer_args=trainer_args,
            trainer_kwargs=trainer_kwargs,
        )

        # Handle FSDP specific settings
        if self.cfg.fsdp:
            ensure_dtype(trainer.model, dtype=self.cfg.torch_dtype)
            if (
                self.cfg.rl in ["dpo", "ipo"]
                and hasattr(trainer, "ref_model")
                and trainer.ref_model
            ):
                ensure_dtype(trainer.ref_model, dtype=self.cfg.torch_dtype)

        return trainer
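The beta resolution in the builder above is ordered: trl.beta wins over rl_beta, which wins over orpo_alpha (which TRL maps onto the same beta field). A standalone sketch of that precedence, assuming a simple namespace-style cfg:

from types import SimpleNamespace

def resolve_beta(cfg):
    # mirrors the precedence in build_training_arguments / build above
    if getattr(cfg, "trl", None) and cfg.trl.beta is not None:
        return cfg.trl.beta
    if cfg.rl_beta is not None:
        return cfg.rl_beta
    if cfg.orpo_alpha is not None:
        return cfg.orpo_alpha  # TRL reuses the beta parameter for ORPO's alpha
    return None

cfg = SimpleNamespace(trl=None, rl_beta=0.1, orpo_alpha=0.5)
assert resolve_beta(cfg) == 0.1  # rl_beta shadows orpo_alpha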
@@ -247,7 +247,9 @@ class AxolotlDPOTrainer(RngLoaderMixin, SchedulerMixin, DPOTrainer):
         )

         # Base evaluation
-        initial_output = super().evaluation_loop(
+        initial_output = super(  # pylint: disable=bad-super-call
+            DPOTrainer, self
+        ).evaluation_loop(
             dataloader,
             description,
             prediction_loss_only,
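The change above swaps an implicit super() for the two-argument form so the call skips DPOTrainer's own evaluation_loop and dispatches to the next class in the MRO. A minimal illustration of the pattern, with toy classes standing in for the trainer hierarchy:

class Base:
    def evaluation_loop(self):
        return "Base"

class Middle(Base):
    def evaluation_loop(self):
        return "Middle"

class Child(Middle):
    def via_parent(self):
        return super().evaluation_loop()  # search starts after Child -> "Middle"

    def via_grandparent(self):
        # two-argument super: start the MRO search *after* Middle
        return super(Middle, self).evaluation_loop()  # -> "Base"

c = Child()
assert c.via_parent() == "Middle"
assert c.via_grandparent() == "Base"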
@@ -26,6 +26,8 @@ from typing import OrderedDict

 import torch
+from torch.optim.lr_scheduler import LRScheduler
+
 from axolotl.utils.dict import DictDefault


 class BasePlugin:
     """
@@ -36,11 +38,13 @@ class BasePlugin:

     Methods:
         register(cfg): Registers the plugin with the given configuration.
+        load_datasets(cfg): Loads and preprocesses the dataset for training.
         pre_model_load(cfg): Performs actions before the model is loaded.
+        post_model_build(cfg, model): Performs actions after the model is loaded, but before LoRA adapters are applied.
         pre_lora_load(cfg, model): Performs actions before LoRA weights are loaded.
         post_lora_load(cfg, model): Performs actions after LoRA weights are loaded.
         post_model_load(cfg, model): Performs actions after the model is loaded, inclusive of any adapters.
         post_trainer_create(cfg, trainer): Performs actions after the trainer is created.
         create_optimizer(cfg, trainer): Creates and returns an optimizer for training.
         create_lr_scheduler(cfg, trainer, optimizer, num_training_steps): Creates and returns a learning rate scheduler.
         add_callbacks_pre_trainer(cfg, model): Adds callbacks to the trainer before training.
@@ -63,20 +67,32 @@ class BasePlugin:
             None
         """

-    def get_input_args(self):
+    def get_input_args(self) -> str | None:
         """
         Returns a pydantic model for the plugin's input arguments.
         """

+    def load_datasets(self, cfg: DictDefault, preprocess: bool = False):
+        """
+        Loads and preprocesses the dataset for training.
+
+        Args:
+            cfg: The configuration for the plugin.
+            preprocess: Whether this is the preprocess step of the datasets.
+
+        Returns:
+            dataset_meta: The metadata for the training dataset.
+        """
+
     def pre_model_load(self, cfg):  # pylint: disable=unused-argument
         """
         Performs actions before the model is loaded.

-        Parameters:
-            cfg (dict): The configuration for the plugin.
+        Args:
+            cfg (dict): The configuration for the plugin.

         Returns:
-        None
+            None
         """

     def post_model_build(self, cfg, model):  # pylint: disable=unused-argument
@@ -91,59 +107,71 @@ class BasePlugin:
         """
         Performs actions after the model is loaded.

-        Parameters:
-            cfg (dict): The configuration for the plugin.
-            model (object): The loaded model.
+        Args:
+            cfg (dict): The configuration for the plugin.
+            model (object): The loaded model.

         Returns:
-        None
+            None
         """

     def pre_lora_load(self, cfg, model):  # pylint: disable=unused-argument
         """
         Performs actions before LoRA weights are loaded.

-        Parameters:
-            cfg (dict): The configuration for the plugin.
-            model (object): The loaded model.
+        Args:
+            cfg (dict): The configuration for the plugin.
+            model (object): The loaded model.

         Returns:
-        None
+            None
         """

     def post_lora_load(self, cfg, model):  # pylint: disable=unused-argument
         """
         Performs actions after LoRA weights are loaded.

-        Parameters:
-            cfg (dict): The configuration for the plugin.
-            model (object): The loaded model.
+        Args:
+            cfg (dict): The configuration for the plugin.
+            model (object): The loaded model.

         Returns:
-        None
+            None
         """
     def get_trainer_cls(self, cfg):  # pylint: disable=unused-argument
"""
|
||||
Returns a custom class for the trainer.
|
||||
|
||||
Parameters:
|
||||
cfg (dict): The global axolotl configuration.
|
||||
Args:
|
||||
cfg (dict): The global axolotl configuration.
|
||||
|
||||
Returns:
|
||||
class: The class for the trainer.
|
||||
class: The class for the trainer.
|
||||
"""
|
||||
|
||||
def post_trainer_create(self, cfg, trainer): # pylint: disable=unused-argument
|
||||
"""
|
||||
Performs actions after the trainer is created.
|
||||
|
||||
Args:
|
||||
cfg (dict): The configuration for the plugin.
|
||||
trainer (object): The trainer object for training.
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
|
||||
def create_optimizer(self, cfg, trainer): # pylint: disable=unused-argument
|
||||
"""
|
||||
Creates and returns an optimizer for training.
|
||||
|
||||
Parameters:
|
||||
cfg (dict): The configuration for the plugin.
|
||||
trainer (object): The trainer object for training.
|
||||
Args:
|
||||
cfg (dict): The configuration for the plugin.
|
||||
trainer (object): The trainer object for training.
|
||||
|
||||
Returns:
|
||||
object: The created optimizer.
|
||||
object: The created optimizer.
|
||||
"""
|
||||
|
||||
def create_lr_scheduler(
|
||||
@@ -152,26 +180,26 @@ class BasePlugin:
|
||||
"""
|
||||
Creates and returns a learning rate scheduler.
|
||||
|
||||
Parameters:
|
||||
cfg (dict): The configuration for the plugin.
|
||||
trainer (object): The trainer object for training.
|
||||
optimizer (object): The optimizer for training.
|
||||
num_training_steps (int): Total number of training steps
|
||||
Args:
|
||||
cfg (dict): The configuration for the plugin.
|
||||
trainer (object): The trainer object for training.
|
||||
optimizer (object): The optimizer for training.
|
||||
num_training_steps (int): Total number of training steps
|
||||
|
||||
Returns:
|
||||
object (LRScheduler): The created learning rate scheduler.
|
||||
object (LRScheduler): The created learning rate scheduler.
|
||||
"""
|
||||
|
||||
def add_callbacks_pre_trainer(self, cfg, model): # pylint: disable=unused-argument
|
||||
"""
|
||||
setup callbacks before creating the trainer.
|
||||
|
||||
Parameters:
|
||||
cfg (dict): The configuration for the plugin.
|
||||
model (object): The loaded model.
|
||||
Args:
|
||||
cfg (dict): The configuration for the plugin.
|
||||
model (object): The loaded model.
|
||||
|
||||
Returns:
|
||||
List[callable]: A list of callback functions to be added to the TrainingArgs
|
||||
List[callable]: A list of callback functions to be added to the TrainingArgs
|
||||
"""
|
||||
return []
|
||||
|
||||
@@ -182,12 +210,12 @@ class BasePlugin:
|
||||
Adds callbacks to the trainer after creating the trainer.
|
||||
This is useful for callbacks that require access to the model or trainer.
|
||||
|
||||
Parameters:
|
||||
cfg (dict): The configuration for the plugin.
|
||||
trainer (object): The trainer object for training.
|
||||
Args:
|
||||
cfg (dict): The configuration for the plugin.
|
||||
trainer (object): The trainer object for training.
|
||||
|
||||
Returns:
|
||||
List[callable]: A list of callback functions to be added
|
||||
List[callable]: A list of callback functions to be added
|
||||
"""
|
||||
return []
|
||||
|
||||
@@ -195,23 +223,23 @@ class BasePlugin:
|
||||
"""
|
||||
Performs actions after training is complete.
|
||||
|
||||
Parameters:
|
||||
cfg (dict): The axolotl configuration
|
||||
model (object): The loaded model.
|
||||
Args:
|
||||
cfg (dict): The axolotl configuration
|
||||
model (object): The loaded model.
|
||||
|
||||
Returns:
|
||||
None
|
||||
None
|
||||
"""
|
||||
|
||||
def post_train_unload(self, cfg): # pylint: disable=unused-argument
|
||||
"""
|
||||
Performs actions after training is complete and the model is unloaded.
|
||||
|
||||
Parameters:
|
||||
cfg (dict): The configuration for the plugin.
|
||||
Args:
|
||||
cfg (dict): The configuration for the plugin.
|
||||
|
||||
Returns:
|
||||
None
|
||||
None
|
||||
"""
|
||||
|
||||
|
||||
@@ -338,6 +366,27 @@ class PluginManager:
|
||||
input_args.append(input_args_from_plugin)
|
||||
return input_args
|
||||
|
||||
def load_datasets(self, cfg, preprocess: bool = False):
|
||||
"""
|
||||
Calls the load_datasets method of each registered plugin.
|
||||
|
||||
Args:
|
||||
cfg: The configuration for the plugins.
|
||||
preprocess : Whether this is preprocess step of the datasets.
|
||||
|
||||
Returns:
|
||||
dataset_meta: The dataset metadata loaded from all registered plugins.
|
||||
"""
|
||||
return_ds_meta = None
|
||||
for plugin in self.plugins.values():
|
||||
dataset_meta = plugin.load_datasets(cfg, preprocess)
|
||||
if dataset_meta is not None:
|
||||
if return_ds_meta is None:
|
||||
return_ds_meta = dataset_meta
|
||||
else:
|
||||
raise RuntimeError("Multiple plugins loaded datasets")
|
||||
return return_ds_meta
|
||||
|
||||
def pre_model_load(self, cfg):
|
||||
"""
|
||||
Calls the pre_model_load method of all registered plugins.
|
||||
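For orientation, a hypothetical plugin exercising the new load_datasets hook could look like the sketch below; the class body and helper are illustrative, not part of this diff (BasePlugin's import path is assumed from axolotl's plugin module):

from axolotl.integrations.base import BasePlugin  # assumed import path

class CustomDatasetPlugin(BasePlugin):
    def load_datasets(self, cfg, preprocess: bool = False):
        # return dataset metadata; PluginManager raises RuntimeError if a
        # second plugin also returns a non-None value
        dataset_meta = build_my_dataset_meta(cfg)  # hypothetical helper
        return dataset_meta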
@@ -422,6 +471,20 @@ class PluginManager:
             return trainer_cls
         return None

+    def post_trainer_create(self, cfg, trainer):
+        """
+        Calls the post_trainer_create method of all registered plugins.
+
+        Parameters:
+            cfg (dict): The configuration for the plugins.
+            trainer (object): The trainer object for training.
+
+        Returns:
+            None
+        """
+        for plugin in self.plugins.values():
+            plugin.post_trainer_create(cfg, trainer)
+
     def create_optimizer(self, trainer):
         """
         Calls the create_optimizer method of all registered plugins and returns the first non-None optimizer.
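create_optimizer above follows the common first-non-None dispatch pattern across plugins; the idiom in isolation, with toy plugin classes:

def first_non_none(plugins, method_name, *args):
    # call method_name on each plugin, returning the first non-None result,
    # mirroring how PluginManager.create_optimizer is described above
    for plugin in plugins:
        result = getattr(plugin, method_name)(*args)
        if result is not None:
            return result
    return None

class NoOpPlugin:
    def create_optimizer(self, trainer):
        return None

class SgdPlugin:
    def create_optimizer(self, trainer):
        return "sgd-optimizer"  # stand-in for a real torch.optim.Optimizer

assert first_non_none([NoOpPlugin(), SgdPlugin()], "create_optimizer", None) == "sgd-optimizer"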
19	src/axolotl/monkeypatch/attention/__init__.py	Normal file
@@ -0,0 +1,19 @@
"""
attention module for attention monkeypatches
"""

from transformers.integrations.flash_attention import flash_attention_forward


def patch_xformers_attn_over_fa2():
    from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS

    from .xformers import xformers_attention_forward

    ALL_ATTENTION_FUNCTIONS["flash_attention_2"] = xformers_attention_forward


def unpatch_xformers_attn_over_fa2():
    from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
    # restore the original function object itself, without calling it
    ALL_ATTENTION_FUNCTIONS["flash_attention_2"] = flash_attention_forward
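Usage of the pair above is symmetric: patch before training with packed sequences, unpatch when the stock flash-attention path should be restored. A sketch, using the module path this diff introduces:

from axolotl.monkeypatch.attention import (
    patch_xformers_attn_over_fa2,
    unpatch_xformers_attn_over_fa2,
)

patch_xformers_attn_over_fa2()
try:
    # any model built with attn_implementation="flash_attention_2" now
    # routes through xformers_attention_forward
    ...
finally:
    unpatch_xformers_attn_over_fa2()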
160	src/axolotl/monkeypatch/attention/xformers.py	Normal file
@@ -0,0 +1,160 @@
"""
xformers attention implementation for packing
"""

from typing import Optional

import torch
import xformers
import xformers.ops.fmha
from transformers.modeling_flash_attention_utils import (
    _upad_input,
)

from axolotl.monkeypatch.utils import get_cu_seqlens_from_pos_ids

xformers_attention = xformers.ops.fmha.memory_efficient_attention


def xformers_attention_forward(
    module: torch.nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    dropout: float = 0.0,  # pylint: disable=unused-argument
    scaling: Optional[float] = None,  # pylint: disable=unused-argument
    sliding_window: Optional[int] = None,  # pylint: disable=unused-argument
    softcap: Optional[float] = None,  # pylint: disable=unused-argument
    cu_seq_lens_q: Optional[torch.LongTensor] = None,
    cu_seq_lens_k: Optional[torch.LongTensor] = None,
    max_length_q: Optional[int] = None,
    max_length_k: Optional[int] = None,  # pylint: disable=unused-argument
    **kwargs,  # pylint: disable=unused-argument
):
    # Get dimensions
    # query: [batch, heads, seq_len, hidden_dim]
    batch_size = query.size(0)
    query_length = query.shape[2]
    key_length = key.shape[2]

    # Default causal mask
    attn_bias = xformers.ops.LowerTriangularMask()

    # Check if we have sliding window attention
    has_sliding_window = sliding_window is not None and sliding_window < query_length

    # Transpose dimensions for xformers (Q: [b, h, s, d] -> [b, s, h, d])
    query = query.transpose(1, 2)
    key = key.transpose(1, 2)
    value = value.transpose(1, 2)

    # Get GQA parameters
    num_attention_heads = module.config.num_attention_heads
    num_key_value_heads = module.config.num_key_value_heads
    head_dim = query.size(-1)
    is_gqa = num_attention_heads != num_key_value_heads
    n_groups = num_attention_heads // num_key_value_heads if is_gqa else 1
    # If position_ids is given and any row packs more than one sequence
    # (monotonically increasing position_ids imply a single sequence), and we
    # are in the pre-fill/training stage, build a block-diagonal causal bias so
    # attention cannot cross example boundaries within a packed row.
    if position_ids is not None and (
        max_length_q is not None
        or (query_length != 1 and not (torch.diff(position_ids, dim=-1) >= 0).all())
    ):
        if cu_seq_lens_q is None or cu_seq_lens_k is None:
            cu_seq_lens_q = get_cu_seqlens_from_pos_ids(position_ids)[0]
            cu_seq_lens_q = cu_seq_lens_q.squeeze()
            seq_lengths = cu_seq_lens_q[1:] - cu_seq_lens_q[:-1]
            attn_bias = (
                xformers.ops.fmha.attn_bias.BlockDiagonalCausalMask.from_seqlens(
                    q_seqlen=seq_lengths.tolist(),
                )
            )
        else:
            query = query.reshape(-1, query.size(-2), query.size(-1))
            key = key.reshape(-1, key.size(-2), key.size(-1))
            value = value.reshape(-1, value.size(-2), value.size(-1))

        # Handle GQA
        if is_gqa:
            key = key.repeat_interleave(n_groups, dim=2)
            value = value.repeat_interleave(n_groups, dim=2)

    elif attention_mask is not None:
        query, key, value, _, cu_seq_lens, _ = _upad_input(
            query, key, value, attention_mask, query_length
        )
        cu_seq_lens_q, cu_seq_lens_k = cu_seq_lens
        seq_lengths = []
        for i in range(len(cu_seq_lens_q) - 1):
            seq_lengths.append(cu_seq_lens_q[i + 1] - cu_seq_lens_q[i])
        attn_bias = xformers.ops.fmha.attn_bias.BlockDiagonalCausalMask.from_seqlens(
            q_seqlen=seq_lengths,
            kv_seqlen=seq_lengths,
        )

        # Handle GQA
        if is_gqa:
            key = key.repeat_interleave(n_groups, dim=2)
            value = value.repeat_interleave(n_groups, dim=2)
    else:
        # Handle Group Query Attention (GQA) using view/expand approach from reference
        key = key.view(batch_size, key_length, num_key_value_heads, 1, head_dim)
        value = value.view(batch_size, key_length, num_key_value_heads, 1, head_dim)
        key = key.expand(
            batch_size, key_length, num_key_value_heads, n_groups, head_dim
        )
        value = value.expand(
            batch_size, key_length, num_key_value_heads, n_groups, head_dim
        )

        if module.training:
            key = key.reshape(batch_size, key_length, num_attention_heads, head_dim)
            value = value.reshape(batch_size, key_length, num_attention_heads, head_dim)

            if has_sliding_window:
                query = query.view(
                    1, batch_size * query_length, num_attention_heads, head_dim
                )
                key = key.view(
                    1, batch_size * key_length, num_attention_heads, head_dim
                )
                value = value.view(
                    1, batch_size * key_length, num_attention_heads, head_dim
                )
        else:
            query = query.view(
                batch_size, query_length, num_key_value_heads, n_groups, head_dim
            )

            # If we need a sliding window attention
            if has_sliding_window:
                query = query.view(
                    1,
                    batch_size * query_length,
                    num_key_value_heads,
                    n_groups,
                    head_dim,
                )
                key = key.view(
                    1, batch_size * key_length, num_key_value_heads, n_groups, head_dim
                )
                value = value.view(
                    1, batch_size * key_length, num_key_value_heads, n_groups, head_dim
                )

    # Run the xformers attention
    attn_output = xformers_attention(
        query,
        key,
        value,
        attn_bias=attn_bias,
    )

    attn_output = attn_output.view(
        batch_size, -1, attn_output.size(-2), attn_output.size(-1)
    )
    return attn_output, None
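The packing path above derives per-sequence lengths from cumulative sequence boundaries. A self-contained numeric sketch of that step (pure torch; get_cu_seqlens_from_pos_ids itself is axolotl's helper and is not reimplemented here):

import torch

# position_ids for one packed row holding a 3-token and a 2-token example
position_ids = torch.tensor([0, 1, 2, 0, 1])

# a new sequence starts wherever position_ids resets to 0
starts = torch.nonzero(position_ids == 0).flatten()
cu_seq_lens = torch.cat([starts, torch.tensor([position_ids.numel()])])
seq_lengths = cu_seq_lens[1:] - cu_seq_lens[:-1]

assert cu_seq_lens.tolist() == [0, 3, 5]
assert seq_lengths.tolist() == [3, 2]
# seq_lengths.tolist() is what BlockDiagonalCausalMask.from_seqlens consumes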
0	src/axolotl/monkeypatch/peft/__init__.py	Normal file
78	src/axolotl/monkeypatch/peft/utils.py	Normal file
@@ -0,0 +1,78 @@
"""
Patch prepare_model_for_kbit_training to not upcast everything
"""

import inspect
import logging

import peft

import axolotl
from axolotl.monkeypatch.utils import detab_code

LOG = logging.getLogger(__name__)

ORIGINAL_PREPARE_CODE = """
    for param in model.parameters():
        if (
            (param.dtype == torch.float16) or (param.dtype == torch.bfloat16)
        ) and param.__class__.__name__ != "Params4bit":
            param.data = param.data.to(torch.float32)
"""

PATCHED_PREPARE_CODE = """
    for name, param in model.named_parameters():
        if (
            (param.dtype == torch.float16) or (param.dtype == torch.bfloat16)
        ) and param.__class__.__name__ != "Params4bit" and all(embed_name not in name for embed_name in ["embed_tokens", "lm_head"]):
            param.data = param.data.to(torch.float32)
"""


def get_peft_prep_code() -> str:
    prepare = inspect.getsource(peft.utils.other.prepare_model_for_kbit_training)
    return prepare


def check_peft_prep_code_is_patchable() -> bool:
    prep_code = get_peft_prep_code()
    prep_code, _ = detab_code(prep_code)
    return ORIGINAL_PREPARE_CODE in prep_code

def patch_peft_prep_code():
    """
    monkeypatch prepare_model_for_kbit_training so embedding and lm_head
    parameters can be skipped when upcasting to float32
    """

    try:
        prep_code = get_peft_prep_code()
    except OSError:
        return
    peft.utils.other._original_prepare_model_for_kbit_training = (  # pylint: disable=protected-access
        prep_code
    )
    prep_code, _ = detab_code(prep_code)
    if ORIGINAL_PREPARE_CODE not in prep_code:
        return

    prep_code = prep_code.replace(ORIGINAL_PREPARE_CODE, PATCHED_PREPARE_CODE)
    prep_code = prep_code.replace(
        "def prepare_model_for_kbit_training(",
        "def fixed_prepare_model_for_kbit_training(",
        1,
    )

    items_to_import = []
    for item in dir(peft.utils.other):
        if item in prep_code:
            items_to_import.append(item)

    exec(  # pylint: disable=exec-used  # nosec B102
        "from peft.utils.other import (" + ", ".join(x for x in items_to_import) + ")",
        globals(),
    )
    exec(prep_code, globals())  # pylint: disable=exec-used  # nosec B102
    LOG.info("patching prepare_model_for_kbit_training to allow for overrides")
    peft.utils.other.prepare_model_for_kbit_training = fixed_prepare_model_for_kbit_training  # pylint: disable=undefined-variable  # noqa: F821
    axolotl.utils.models.prepare_model_for_kbit_training = fixed_prepare_model_for_kbit_training  # pylint: disable=undefined-variable  # noqa: F821
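The patched loop's name filter is the whole trick: it leaves embed_tokens and lm_head at their original dtype while upcasting other fp16/bf16 parameters. The predicate in isolation, with illustrative parameter names:

def should_upcast(name: str, dtype_is_half: bool, cls_name: str = "Parameter") -> bool:
    # mirrors PATCHED_PREPARE_CODE: upcast half-precision params unless they
    # are 4-bit quantized or belong to the embeddings / output head
    return (
        dtype_is_half
        and cls_name != "Params4bit"
        and all(embed_name not in name for embed_name in ["embed_tokens", "lm_head"])
    )

assert should_upcast("model.layers.0.mlp.gate_proj.weight", True)
assert not should_upcast("model.embed_tokens.weight", True)
assert not should_upcast("lm_head.weight", True)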
@@ -2,6 +2,7 @@

 import importlib
 import inspect
+import logging
 import os
 import signal
 import sys
@@ -12,7 +13,6 @@ from typing import Any, Dict

 import torch
 import transformers.modelcard
-from accelerate.logging import get_logger
 from accelerate.utils import save_fsdp_model
 from datasets import Dataset
 from huggingface_hub.errors import OfflineModeIsEnabled
@@ -21,11 +21,12 @@ from transformers import PreTrainedModel, PreTrainedTokenizer, ProcessorMixin
 from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
 from transformers.trainer import Trainer

 from axolotl.cli.art import print_axolotl_text_art
+from axolotl.common.datasets import TrainDatasetMeta
 from axolotl.contribs.lgpl import (  # pylint: disable = no-name-in-module
     fix_untrained_tokens,
 )
-from axolotl.core.trainer_builder import HFCausalTrainerBuilder, HFRLTrainerBuilder
+from axolotl.core.trainers.builders import HFCausalTrainerBuilder, HFRLTrainerBuilder
 from axolotl.core.trainers.mixins.sequence_parallel import (
     SequenceParallelContextManager,
 )
@@ -41,7 +42,7 @@ try:
 except ImportError:
     BetterTransformer = None

-LOG = get_logger(__name__)
+LOG = logging.getLogger(__name__)


 def setup_model_and_tokenizer(
@@ -62,7 +63,6 @@ def setup_model_and_tokenizer(
     # Load tokenizer
     LOG.debug(
         f"loading tokenizer... {cfg.tokenizer_config or cfg.base_model_config}",
-        main_process_only=True,
     )
     tokenizer = load_tokenizer(cfg)

@@ -516,6 +516,8 @@ def train(
     Returns:
         Tuple of (model, tokenizer) after training
     """
+    print_axolotl_text_art()
+
     # Setup model, tokenizer, (causal or RLHF) trainer, etc.
     (
         trainer,
@@ -525,6 +527,9 @@ def train(
         processor,
     ) = setup_model_and_trainer(cfg, dataset_meta)

+    plugin_manager = PluginManager.get_instance()
+    plugin_manager.post_trainer_create(cfg, trainer)
+
     # Handle untrained tokens if configured
     safe_serialization = cfg.save_safetensors is True
     train_dataset = dataset_meta.train_dataset
@@ -547,7 +552,6 @@ def train(
     if not cfg.use_ray:
         cleanup_distributed()

-    plugin_manager = PluginManager.get_instance()
     plugin_manager.post_train(cfg, model)

     return model, tokenizer, trainer
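One consequence of swapping accelerate's get_logger for the stdlib logger above: the main_process_only keyword no longer exists, which is why the LOG.debug call sheds it. A sketch of an equivalent manual guard with plain logging, assuming torch.distributed is the source of truth for rank:

import logging

import torch.distributed as dist

LOG = logging.getLogger(__name__)

def debug_main_process_only(msg: str) -> None:
    # stdlib logging has no main_process_only kwarg; gate on rank instead
    if not dist.is_initialized() or dist.get_rank() == 0:
        LOG.debug(msg)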
@@ -46,11 +46,11 @@ from axolotl.utils.distributed import (
 from axolotl.utils.schemas.config import AxolotlInputConfig

 if TYPE_CHECKING:
-    from axolotl.core.trainer_builder import AxolotlTrainingArguments
+    from axolotl.core.training_args import AxolotlTrainingArguments


 IGNORE_INDEX = -100
-LOG = logging.getLogger("axolotl.callbacks")
+LOG = logging.getLogger(__name__)


 class EvalFirstStepCallback(
@@ -868,3 +868,28 @@ class GCCallback(TrainerCallback):
     ):
         torch.cuda.empty_cache()
         gc.collect()
+
+
+def colab_inference_post_train_callback(trainer: Trainer):
+    class ColabCallback(TrainerCallback):
+        """Callback to prep model for inference on Google Colab"""
+
+        def __init__(self, cfg):
+            self.gpu_name = torch.cuda.get_device_name(0)
+            self.cfg = cfg
+
+        def on_train_end(
+            self, args, state, control, **kwargs
+        ):  # pylint: disable=unused-argument
+            """
+            Handle T4 GPUs: attention must be converted to eager for inference.
+            """
+            if "Tesla T4" in self.gpu_name and self.cfg.xformers_attention:
+                trainer.model.config._attn_implementation = (  # pylint: disable=protected-access
+                    "eager"
+                )
+            trainer.model.gradient_checkpointing_disable()
+            trainer.model.config.use_cache = True
+            trainer.model.eval()
+
+    return ColabCallback
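Since `colab_inference_post_train_callback` returns the callback class rather than an instance, wiring it up presumably looks like the following (a hedged sketch; the actual call site is not shown in this diff):

```python
# The factory closes over `trainer`; the returned class is then instantiated
# with the config and registered like any other HF TrainerCallback.
colab_callback_cls = colab_inference_post_train_callback(trainer)
trainer.add_callback(colab_callback_cls(cfg))
```
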
@@ -70,6 +70,9 @@ def resolve_dtype(cfg):
     if cfg.fp16 is None and not cfg.float16:
         cfg.fp16 = True

+    if cfg.fp16 and cfg.bf16 == "auto":
+        cfg.bf16 = False
+
     if cfg.device == "mps":
         cfg.load_in_8bit = False
         cfg.tf32 = False
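Concretely, the new guard means an explicit `fp16: true` now resolves `bf16: auto` to `False` instead of leaving it undecided. The rule in isolation (a plain-dict stand-in for the config object, for illustration only):

```python
# Stand-in for the relevant slice of resolve_dtype()
cfg = {"fp16": True, "bf16": "auto"}
if cfg["fp16"] and cfg["bf16"] == "auto":
    cfg["bf16"] = False
assert cfg == {"fp16": True, "bf16": False}
```
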
@@ -281,6 +281,10 @@ def load_dataset_w_config(
         **load_ds_kwargs,
     )
     if not ds:
-        raise ValueError("unhandled dataset load")
+        raise ValueError(
+            "The dataset could not be loaded. This could be due to a misconfigured dataset path "
+            f"({config_dataset.path}). Try double-checking your path / name / data_files. "
+            "This is not caused by the dataset type."
+        )

     return ds
@@ -1,15 +1,36 @@
 """custom checkpointing utils"""

+import importlib
 from functools import partial

+from packaging import version
+
 from axolotl.utils.gradient_checkpointing.unsloth import (
     Unsloth_Offloaded_Gradient_Checkpointer,
 )

+transformers_version = version.parse(importlib.metadata.version("transformers"))
+if transformers_version > version.parse("4.51.3"):
+    from transformers.modeling_layers import GradientCheckpointingLayer
+
+    def uses_gc_layers(decoder_layer):
+        return isinstance(decoder_layer.func.__self__, GradientCheckpointingLayer)
+
+else:
+
+    def uses_gc_layers(_):
+        return False
+
+
 def hf_grad_checkpoint_offload_wrapper(
     decoder_layer, *args, use_reentrant=None
 ):  # pylint: disable=unused-argument
+    if uses_gc_layers(decoder_layer):
+        return Unsloth_Offloaded_Gradient_Checkpointer.apply(
+            decoder_layer,
+            *args,
+        )
+
     return Unsloth_Offloaded_Gradient_Checkpointer.apply(
         (
             decoder_layer.func.__self__
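For intuition, the `Unsloth_Offloaded_Gradient_Checkpointer` referenced above combines activation checkpointing with CPU offload: the forward pass stashes the layer input on the CPU, and the backward pass moves it back and recomputes the layer. A self-contained sketch of that idea (my illustration of the technique, not the library's implementation):

```python
import torch


class OffloadedCheckpoint(torch.autograd.Function):
    """Sketch of offloaded gradient checkpointing: stash the layer input on
    CPU during forward, recompute the layer on GPU during backward."""

    @staticmethod
    def forward(ctx, forward_fn, hidden_states, *args):
        # Offload the input activation to CPU to free GPU memory.
        saved = hidden_states.to("cpu", non_blocking=True)
        with torch.no_grad():
            output = forward_fn(hidden_states, *args)
        ctx.forward_fn = forward_fn
        ctx.args = args
        ctx.save_for_backward(saved)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (saved,) = ctx.saved_tensors
        # Bring the activation back to the GPU and recompute with grad enabled.
        hidden_states = saved.to(grad_output.device, non_blocking=True).detach()
        hidden_states.requires_grad_(True)
        with torch.enable_grad():
            output = ctx.forward_fn(hidden_states, *ctx.args)
        torch.autograd.backward(output, grad_output)
        return (None, hidden_states.grad) + (None,) * len(ctx.args)
```
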
@@ -556,11 +556,21 @@ class ModelLoader:
         self.auto_model_loader = AutoModelForCausalLM  # pylint: disable=invalid-name

     def apply_patches(self) -> None:
+        if self.cfg.xformers_attention and self.cfg.sample_packing:
+            from axolotl.monkeypatch.attention import patch_xformers_attn_over_fa2
+
+            patch_xformers_attn_over_fa2()
+            self.cfg.flash_attention = True
         if self.cfg.fsdp_config and str(self.cfg.fsdp_config.fsdp_version) == "2":
             from axolotl.monkeypatch.accelerate.fsdp2 import patch_accelerate_fsdp_utils

             patch_accelerate_fsdp_utils()

+        if self.cfg.adapter and self.cfg.embeddings_skip_upcast:
+            from axolotl.monkeypatch.peft.utils import patch_peft_prep_code
+
+            patch_peft_prep_code()
+
         if self.cfg.flex_attention:
             from axolotl.monkeypatch.attention.flex_attn import (
                 patch_flex_make_mask,
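For context, a minimal config that would exercise the new xformers-over-FA2 patch path might look like this (a hypothetical sketch: `xformers_attention` together with `sample_packing` now triggers `patch_xformers_attn_over_fa2()` and flips `flash_attention` on, rather than being rejected by validation):

```python
from axolotl.utils.dict import DictDefault

# Hypothetical minimal config slice; model name borrowed from the tests below.
cfg = DictDefault(
    {
        "base_model": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
        "xformers_attention": True,
        "sample_packing": True,
        "sequence_len": 1024,
    }
)
```
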
@@ -1180,7 +1190,7 @@ class ModelLoader:
         ],
     )

-    def prepare_model(self, qlora_fsdp) -> None:
+    def prepare_model(self, qlora_fsdp: bool) -> None:
         skip_prepare_model_for_kbit_training = False
         if self.cfg.model_config_type == "qwen" and self.cfg.adapter == "lora":
             # Qwen doesn't play nicely with LoRA if this is enabled
@@ -1310,7 +1320,10 @@ class ModelLoader:
         # make sure these are fp32 per Ramesh et al. (2021)
         embedding_modules = get_linear_embedding_layers(self.cfg.model_config_type)
         if not self.cfg.fsdp:
-            # FSDP doesn't like mixed Float and BFloat16
+            # we don't run this during FSDP because this will leave mixed
+            # float and bfloat16 dtypes in the model which FSDP doesn't like
+            if self.cfg.load_in_4bit and self.cfg.embeddings_skip_upcast:
+                embedding_modules = []
             self.convert_embedding_modules_dtype(
                 embedding_modules,
                 dist_dtype=torch.float32,
@@ -1,10 +1,13 @@
 # pylint: skip-file
 """
-Multipack Batch Sampler
+Multipack Batch Sampler - An efficient batch sampler for packing variable-length sequences
+into fixed-capacity batches to optimize memory usage and training throughput.
 """

 import logging
 import math
-from typing import Any, Iterable, List, Union
+from concurrent.futures import ProcessPoolExecutor
+from multiprocessing import cpu_count, get_context
+from typing import Iterable, Union

 import numba
 import numpy as np
@@ -13,26 +16,39 @@ from torch.utils.data import BatchSampler, Sampler, SequentialSampler
 from axolotl.utils.distributed import reduce_and_broadcast

 LOG = logging.getLogger(__name__)

 LOG.setLevel(logging.INFO)


 @numba.njit
-def ffd_check(a: np.ndarray, c: int, n: int):
-    # First-fit-decreasing bin packing
-    # Check if a[] could fit in n bins with capacity c
-    # https://en.wikipedia.org/wiki/First-fit-decreasing_bin_packing
+def ffd_check(sequence_lengths: np.ndarray, bin_capacity: int, num_bins: int):
+    """
+    First-fit-decreasing bin packing algorithm check
+
+    Checks if sequences with the given lengths could fit in the specified number of bins
+
+    Args:
+        sequence_lengths: Array of sequence lengths
+        bin_capacity: Maximum capacity of each bin
+        num_bins: Number of bins available
+
+    Returns:
+        True if all sequences can be packed, False otherwise
+    """
+    # Sort sequence lengths in descending order for optimal packing
+    sequence_lengths = np.sort(sequence_lengths)[::-1]
+    # Initialize all bins with full capacity
+    bins = np.full((num_bins,), bin_capacity, dtype=sequence_lengths.dtype)

-    a = np.sort(a)[::-1]
-    bins = np.full((n,), c, dtype=a.dtype)
-    for size in a:
+    # Try to place each sequence in the first bin it fits
+    for size in sequence_lengths:
         not_found = True
-        for idx in range(n):
+        for idx in range(num_bins):
             if bins[idx] >= size:
                 bins[idx] -= size
                 not_found = False
                 break

+        # If no bin could fit this sequence, packing failed
         if not_found:
             return False
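As a quick illustration of the check (a pure-Python mirror of the numba kernel above, written for this document rather than taken from the library): lengths [7, 5, 4, 3, 1] fit into two bins of capacity 10 (7+3 and 5+4+1), but not into one.

```python
import numpy as np


def ffd_check_py(sequence_lengths, bin_capacity, num_bins):
    # Same first-fit-decreasing logic as above, without the @numba.njit wrapper.
    sequence_lengths = np.sort(sequence_lengths)[::-1]
    bins = np.full((num_bins,), bin_capacity, dtype=sequence_lengths.dtype)
    for size in sequence_lengths:
        for idx in range(num_bins):
            if bins[idx] >= size:
                bins[idx] -= size
                break
        else:
            return False  # no bin could hold this sequence
    return True


lengths = np.array([7, 5, 4, 3, 1])
assert ffd_check_py(lengths, bin_capacity=10, num_bins=2)      # 7+3 and 5+4+1
assert not ffd_check_py(lengths, bin_capacity=10, num_bins=1)  # 20 tokens > 10
```
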
@@ -40,86 +56,155 @@ def ffd_check(a: np.ndarray, c: int, n: int):


-@numba.njit
-def ffd_with_result(a: np.ndarray, c: int, start_index: int):
-    # First-fit-decreasing bin packing (with result return)
-
-    indices = np.argsort(a)[::-1]
-    a = a[indices]
-
-    bins: List[Any] = []
-    bins_result: List[Any] = []
-    for a_id, size in enumerate(a):
-        add_new = True
-        for idx in range(len(bins)):
-            if bins[idx] >= size:
-                bins[idx] -= size
-                bins_result[idx].append(indices[a_id] + start_index)
-                add_new = False
-                break
-
-        if add_new:
-            bins.append(c - size)
-            bins_result.append([indices[a_id] + start_index])
-
-    return bins_result
-
-
-@numba.njit
-def allocate(
-    lengths: np.ndarray, lengths_cumsum: np.ndarray, rank: int, c: int, n: int
-):
-    # Dynamic batch allocator, similar to Multifit
-    # https://en.wikipedia.org/wiki/Multifit_algorithm
-    # ~99.5% efficiency on OpenChat training set (12 * 2048 ctx len)
-
-    s = 0
-    start_index = 0
-    result = []
-
-    while True:
-        # binary search [l, r)
-        left = 1
-        right = 1 + np.searchsorted(lengths_cumsum[start_index:], s + c * n, "right")
-
-        while right - left > 1:
-            mid = (left + right) // 2
-            if ffd_check(lengths[start_index : start_index + mid], c, n):
-                left = mid
-            else:
-                right = mid
-
-        # use length l
-        batch = ffd_with_result(
-            lengths[start_index : start_index + left], c, start_index
-        )
-        assert len(batch) <= n
-        if len(batch) < n:
-            break
-
-        start_index += left
-        s = lengths_cumsum[start_index - 1]
-
-        # add local rank
-        result.append(batch[rank])
-
-    return result, s, len(result) * c * n
+def pack_group(
+    sequence_lengths: np.ndarray,
+    group_offset: int,
+    bin_capacity: int,
+    max_bins: int,
+    bin_size: int,
+    safe_mode: bool = True,
+):
+    """
+    Pack a group of sequences into bins using First-Fit Decreasing algorithm
+
+    Args:
+        sequence_lengths: Array of sequence lengths
+        group_offset: Offset to apply to indices when returning results
+        bin_capacity: Maximum capacity of each bin
+        max_bins: Maximum number of bins to use
+        bin_size: Maximum number of sequences per bin
+        safe_mode: If True, use a more conservative packing approach
+
+    Returns:
+        List of bins, where each bin contains indices of sequences assigned to it
+    """
+    bins_remaining_space: list = []  # Tracks remaining capacity in each bin
+    bins_assigned_sequences: list = []  # Tracks sequence indices assigned to each bin
+
+    for seq_id, size in enumerate(sequence_lengths):
+        global_idx = seq_id + group_offset
+
+        # Try to place sequence in existing bins
+        add_new_bin = True
+        for bin_idx, _ in enumerate(bins_remaining_space):
+            if (
+                bins_remaining_space[bin_idx] >= size
+                and len(bins_assigned_sequences[bin_idx]) < bin_size
+            ):
+                bins_remaining_space[bin_idx] -= size
+                bins_assigned_sequences[bin_idx].append(global_idx)
+                add_new_bin = False
+                break
+
+        # Create a new bin if needed and if we haven't reached the limit
+        if add_new_bin:
+            if len(bins_remaining_space) >= max_bins and safe_mode:
+                # In safe mode, skip items that would exceed max_bins
+                continue
+            bins_remaining_space.append(bin_capacity - size)
+            bins_assigned_sequences.append([global_idx])
+
+        # Safety check to avoid infinite bins
+        if len(bins_remaining_space) > len(sequence_lengths):
+            break
+
+    return bins_assigned_sequences
+
+
+# Define a standalone function for multiprocessing
+def _process_group(args):
+    group_lengths, start_idx, bin_capacity, max_bins, bin_size, safe_mode = args
+    return pack_group(
+        group_lengths, start_idx, bin_capacity, max_bins, bin_size, safe_mode
+    )
+
+
+def pack_parallel(
+    sequence_lengths: np.ndarray,
+    bin_capacity: int,
+    group_size: int,
+    bin_size: int,
+    num_processes: int | None = None,
+    safe_mode: bool = True,
+    mp_start_method: str | None = "spawn",
+):
+    """
+    Pack sequences into bins using parallel processing
+
+    Args:
+        sequence_lengths: Array of sequence lengths
+        bin_capacity: Maximum capacity of each bin as total number of tokens
+        group_size: Number of sequences to process in each group
+        bin_size: Maximum number of sequences per bin
+        num_processes: Number of parallel processes to use
+        safe_mode: If True, use a more conservative packing approach
+        mp_start_method: Multiprocessing start method ('fork', 'spawn', 'forkserver').
+            'spawn' is often safer with Numba/PyTorch.
+            Set to None to use system default.
+
+    Returns:
+        List of bins, where each bin contains indices of sequences assigned to it
+    """
+    num_items = len(sequence_lengths)
+    if num_processes is None:
+        num_processes = max(1, min(num_items // group_size, cpu_count()))
+
+    # Create tasks for parallel processing
+    tasks = []
+    for i in range(0, num_items, group_size):
+        group_lengths = sequence_lengths[i : i + group_size]
+        max_bins = len(group_lengths)  # Allow as many bins as items in the group
+        tasks.append((group_lengths, i, bin_capacity, max_bins, bin_size, safe_mode))
+
+    # Process groups in parallel
+    all_bins = []
+
+    mp_ctx = None
+    if mp_start_method:
+        try:
+            mp_ctx = get_context(mp_start_method)
+        except ValueError:
+            LOG.warning(
+                f"Failed to get multiprocessing context '{mp_start_method}'. "
+                f"Falling back to default. Available: {get_context().get_all_start_methods()}"
+            )
+            mp_ctx = (
+                None  # Fallback to default context if specified one is not available
+            )
+
+    if num_processes == 1:
+        LOG.debug("Using single process for pack_parallel, running sequentially.")
+        for task_args in tasks:
+            group_bins = _process_group(task_args)
+            all_bins.extend(group_bins)
+    else:
+        # Use ProcessPoolExecutor only if num_processes > 1
+        # Pass mp_context if available
+        with ProcessPoolExecutor(
+            max_workers=num_processes, mp_context=mp_ctx
+        ) as executor:
+            for group_bins in executor.map(_process_group, tasks):
+                all_bins.extend(group_bins)
+
+    return all_bins
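A hedged usage sketch of the new parallel packer (names mirror the code above; the numbers are illustrative): note that `bin_size` caps how many sequences share a bin, while `bin_capacity` caps their total token count.

```python
import numpy as np

# Pack 1,000 random sequence lengths into bins of 4,096 tokens,
# at most 64 sequences per bin, in a single process for determinism.
lengths = np.random.default_rng(0).integers(64, 2048, size=1_000)
bins = pack_parallel(
    lengths,
    bin_capacity=4096,
    group_size=500,
    bin_size=64,
    num_processes=1,
)
# Every bin holds indices into `lengths`, and no bin exceeds capacity.
assert all(lengths[idx_list].sum() <= 4096 for idx_list in bins)
```
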
 @numba.njit
-def allocate_sequentially(lengths: np.ndarray, rank: int, c: int, n: int):
+def allocate_sequentially(
+    sequence_lengths: np.ndarray, rank: int, bin_capacity: int, num_ranks: int
+):
     """
     Sequential allocator that preserves example order

-    Parameters:
-    - lengths: The lengths of all examples
-    - rank: The current rank (for distributed training)
-    - c: The capacity of each bin (maximum sequence length)
-    - n: Number of ranks
+    Args:
+        sequence_lengths: The lengths of all examples
+        rank: The current rank (for distributed training)
+        bin_capacity: The capacity of each bin (maximum sequence length)
+        num_ranks: Number of ranks (processes/GPUs)

     Returns:
-    - result: List of batches for the current rank
-    - total_used: Number of actual example tokens
-    - total_slots: Maximum theoretical number of example tokens (number of bins * bin capacity)
+        rank_batches: List of batches for the current rank
+        total_tokens_used: Number of actual example tokens
+        total_token_slots: Maximum theoretical number of example tokens (number of bins * bin capacity)
     """
     result = []
     total_used = 0
@@ -127,9 +212,9 @@ def allocate_sequentially(lengths: np.ndarray, rank: int, c: int, n: int):
     # First, do sequential packing into bins
     all_bins = []
     current_bin = [0 for i in range(0)]  # numba hint
-    remaining_capacity = c
+    remaining_capacity = bin_capacity

-    for idx, size in enumerate(lengths):
+    for idx, size in enumerate(sequence_lengths):
         if size <= remaining_capacity:
             # Example fits in current bin
             current_bin.append(idx)
@@ -140,7 +225,7 @@ def allocate_sequentially(lengths: np.ndarray, rank: int, c: int, n: int):
         if current_bin:  # Add non-empty bin to all_bins
             all_bins.append(current_bin)
         current_bin = [idx]
-        remaining_capacity = c - size
+        remaining_capacity = bin_capacity - size
         total_used += size

     # Add the last bin if not empty
@@ -148,132 +233,227 @@ def allocate_sequentially(lengths: np.ndarray, rank: int, c: int, n: int):
     all_bins.append(current_bin)

     # Assign bins to ranks - each rank gets every n-th bin
-    for bin_idx in range(rank, len(all_bins), n):
+    for bin_idx in range(rank, len(all_bins), num_ranks):
         result.append(all_bins[bin_idx])

-    return result, total_used, len(all_bins) * c
+    return result, total_used, len(all_bins) * bin_capacity
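A quick worked example of the sequential allocator's order-preserving packing and round-robin rank assignment (a pure-Python mirror written for illustration, with the same semantics as the numba kernel above):

```python
import numpy as np


def allocate_sequentially_py(lengths, rank, bin_capacity, num_ranks):
    # Pure-Python mirror of the numba kernel above.
    all_bins, current_bin, remaining = [], [], bin_capacity
    for idx, size in enumerate(lengths):
        if size <= remaining:
            current_bin.append(idx)
            remaining -= size
        else:
            if current_bin:
                all_bins.append(current_bin)
            current_bin, remaining = [idx], bin_capacity - size
    if current_bin:
        all_bins.append(current_bin)
    # Each rank takes every num_ranks-th bin, starting at its own offset.
    result = [all_bins[i] for i in range(rank, len(all_bins), num_ranks)]
    return result, int(np.sum(lengths)), len(all_bins) * bin_capacity


lengths = np.array([5, 3, 7, 2, 2, 6])
# Capacity 8 packs, in order: [0,1], [2], [3,4], [5] -> four bins.
assert allocate_sequentially_py(lengths, 0, 8, 2)[0] == [[0, 1], [3, 4]]
assert allocate_sequentially_py(lengths, 1, 8, 2)[0] == [[2], [5]]
```
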

 class MultipackBatchSampler(BatchSampler):
-    """Batch sampler class for multipack"""
+    """
+    Batch sampler class for efficient packing of variable-length sequences
+
+    This sampler packs sequences into fixed-capacity bins (batches) to maximize
+    GPU memory utilization and training throughput by reducing padding.
+
+    It supports both parallel packing (using the FFD algorithm) and
+    sequential packing (preserving original sequence order).
+    """

     def __init__(
         self,
         sampler: Union[Sampler[int], Iterable[int]],
-        batch_size: int,
-        batch_max_len: int,
-        lengths: np.ndarray,
-        packing_efficiency_estimate: float = 1.0,
-        drop_last: bool = False,
-        num_count_samples: int = 16,
-        sequential: bool = False,
-        **kwargs,
+        batch_size: int,  # Number of bins per batch
+        batch_max_len: int,  # Maximum sequence length (bin capacity)
+        lengths: np.ndarray,  # Sequence lengths
+        packing_efficiency_estimate: float = 1.0,  # Initial efficiency estimate
+        drop_last: bool = False,  # Whether to drop final batches (might be incomplete)
+        num_count_samples: int = 16,  # Number of times to estimate batch count
+        sequential: bool = False,  # Whether to use sequential packing
+        group_size: int = 100_000,  # Size of groups for parallel packing
+        bin_size: int = 200,  # The max number of samples that can be packed in a single bin
+        num_processes: int | None = None,  # Number of processes for parallel packing
+        safe_mode: bool = True,  # Conservative packing to prevent training instability
+        **kwargs,  # pylint: disable=unused-argument
     ):
         super().__init__(sampler, batch_size, drop_last)
         self.batch_size = batch_size
         self.batch_max_len = batch_max_len
-        self.lengths: np.ndarray = lengths
+        self.lengths = np.array(lengths, dtype=np.int32)
         self.packing_efficiency_estimate = packing_efficiency_estimate or 1.0
         self.sequential = sequential
+        self.group_size = group_size
+        self.bin_size = bin_size
+        self.num_processes = num_processes
+        self.safe_mode = safe_mode

         assert isinstance(self.lengths, np.ndarray)

         self.epoch = 0

-        # statistics
-        self.eff_total_used = 0
-        self.eff_total_slots = 0
+        # Efficiency statistics tracking
+        self.total_tokens_used = 0
+        self.total_token_slots = 0

-        # The number of times to calculate the batches to determine the minimum packed dataset length for the local rank
+        # The number of times to calculate batches to determine minimum packed dataset length
         self.num_count_samples = num_count_samples
-        # the minimum packed dataset length across all ranks determined by a gather/broadcast
+        # Minimum packed dataset length across all ranks (determined by gather/broadcast)
         self.len_across_ranks = None

+        # Cache for batches
+        self._batches = None
+
         if self.sequential and not isinstance(sampler, SequentialSampler):
-            LOG.warn(
+            LOG.warning(
                 "using sequential sample packing with non-sequential sampler, did you want to also enable curriculum_sampling?"
             )

     def set_epoch(self, epoch: int):
+        """Set the epoch number, used for reproducible shuffling across epochs"""
         self.epoch = epoch
+        self._batches = None  # Invalidate batch cache

     def generate_batches(self, set_stats=False):
-        indices = [idx for idx in self.sampler]
-
-        lengths = self.lengths[indices]
-        lengths_cumsum = np.cumsum(lengths)
-
-        if self.sequential:
-            batches, total_used, total_slots = allocate_sequentially(
-                lengths=lengths,
-                rank=0,
-                c=self.batch_max_len,
-                n=1,
-            )
-        else:
-            batches, total_used, total_slots = allocate(
-                lengths=lengths,
-                lengths_cumsum=lengths_cumsum,
-                rank=0,
-                c=self.batch_max_len,
-                n=1,
-            )
-
-        batches = [
-            [
-                [indices[b_idx] for b_idx in batch]
-                for batch in batches[i : i + self.batch_size]
-            ]
-            for i in range(0, len(batches), self.batch_size)
-        ]
-
-        # statistics
-        if set_stats:
-            self.eff_total_used += total_used
-            self.eff_total_slots += total_slots
+        """
+        Generate packed batches for training
+
+        Args:
+            set_stats: Whether to update efficiency statistics
+
+        Returns:
+            List of batches, where each batch contains multiple bins,
+            and each bin contains multiple sequence indices
+        """
+        if self._batches is not None:
+            return self._batches
+
+        # Get indices from the sampler
+        indices = [  # pylint: disable=unnecessary-comprehension
+            idx for idx in self.sampler
+        ]
+
+        # Get lengths of the selected sequences
+        lengths = self.lengths[indices]
+
+        # Pack sequences into bins using either sequential or parallel packing
+        if self.sequential:
+            bins, total_used, total_slots = allocate_sequentially(
+                lengths,
+                rank=0,
+                bin_capacity=self.batch_max_len,
+                num_ranks=1,
+            )
+            # Map bin indices back to original indices
+            bins = [[indices[b_idx] for b_idx in bin_indices] for bin_indices in bins]
+        else:
+            # Use parallel packing
+            all_bins = pack_parallel(
+                lengths,
+                bin_capacity=self.batch_max_len,
+                group_size=self.group_size,
+                bin_size=self.bin_size,
+                num_processes=self.num_processes,
+                safe_mode=self.safe_mode,
+            )
+
+            # Map bin indices back to original indices
+            bins = [
+                [indices[b_idx] for b_idx in bin_indices] for bin_indices in all_bins
+            ]
+
+            # Calculate efficiency statistics
+            total_used = lengths.sum()
+            total_slots = len(all_bins) * self.batch_max_len
+
+        # Group bins into batches (each batch contains batch_size bins)
+        batches = [
+            bins[i : i + self.batch_size] for i in range(0, len(bins), self.batch_size)
+        ]

+        # Drop last batch if requested and it's incomplete
         if self.drop_last and len(batches[-1]) < self.batch_size:
             batches = batches[:-1]
+            # Adjust total_slots if we dropped a batch
+            if not self.sequential:
+                total_slots -= (self.batch_size - len(batches[-1])) * self.batch_max_len

-        return batches
+        # Update statistics if requested
+        if set_stats:
+            self.total_tokens_used += total_used
+            self.total_token_slots += total_slots
+
+        self._batches = batches
+        return batches

     def __iter__(self):
+        """
+        Return an iterator over batches
+
+        The batches are truncated to match the minimum number of batches across all ranks
+        to ensure distributed training balance
+        """
         batches = self.generate_batches(set_stats=True)
         if self.len_across_ranks:
-            # make sure the batches we iterate over is truncated to the same min length across all ranks
+            # Truncate batches to ensure all ranks have the same number of batches
            batches = batches[: self.len_across_ranks]
         return iter(batches)

     def num_batches(self):
         batches = self.generate_batches(set_stats=True)
         return len(batches)

     def efficiency(self):
-        return self.eff_total_used / self.eff_total_slots
+        """
+        Calculate the packing efficiency (ratio of tokens used to total token slots)
+        Higher is better - 1.0 would mean perfect packing with no wasted space
+        """
+        if self.total_token_slots == 0:
+            self.generate_batches(set_stats=True)
+        if self.total_token_slots == 0:
+            return 0.0
+        # Return a Python float instead of potentially a numpy float
+        return float(self.total_tokens_used / self.total_token_slots)

     def gather_efficiency(self):
-        def calc_sample_packing_eff_est(estimates: List[float]):
-            LOG.debug(f"sample_packing_eff_est across ranks: {repr(estimates)}")
-            return math.floor(0.997 * max(estimates))
+        """
+        Gather and synchronize packing efficiency estimates across all distributed ranks
+        Returns a conservative efficiency estimate based on the measurements
+        """
+
+        def calc_sample_packing_eff_est(estimates: list[float]):
+            LOG.debug(f"sample_packing_eff_est across ranks: {repr(estimates)}")
+            # Use 99.7% of max observed efficiency as a safe estimate
+            max_eff = max(float(eff) for eff in estimates)
+            return math.floor(0.997 * max_eff)

+        # Gather efficiency from all ranks and apply the calculation function
         sample_packing_actual_eff_all = reduce_and_broadcast(
-            lambda: self.efficiency(),  # pylint: disable=unnecessary-lambda
+            lambda: float(self.efficiency()),  # pylint: disable=unnecessary-lambda
             calc_sample_packing_eff_est,
         )
+
+        # Quantize to 0.5% intervals for stability
         sample_packing_eff_est = (
             math.ceil(sample_packing_actual_eff_all * 200.0) / 200.0
         )
         return sample_packing_eff_est

     def gather_len_batches(self, num):
+        """
+        Gather and synchronize batch counts across all distributed ranks
+        Returns the minimum number of batches available on any rank
+        """
+
         def calc_min_len(estimates: list[(int, float)]):
             LOG.info(f"gather_len_batches: {repr(estimates)}")
             return math.floor(min(estimates))

+        # Find minimum batch count across ranks to ensure balance
         min_len_batches = reduce_and_broadcast(lambda: num, calc_min_len)
         return min_len_batches

     def __len__(self):
-        if not self.len_across_ranks:
-            len_batches = min(
-                [self.num_batches() for _ in range(self.num_count_samples)]
+        """
+        Return the total number of batches that will be yielded by this sampler
+
+        This is calculated as the minimum number of batches available on any rank
+        to ensure balanced distributed training
+        """
+        if self._batches is None:
+            self._batches = self.generate_batches(set_stats=True)
+
+        if self.len_across_ranks is None:
+            # Sample multiple times to get stable estimate
+            len_batches = min(  # pylint: disable=consider-using-generator
+                [len(self._batches) for _ in range(self.num_count_samples)]
             )
+            # Gather minimum across all ranks
             self.len_across_ranks = self.gather_len_batches(len_batches)

         return self.len_across_ranks
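Putting the pieces together, constructing the sampler over a set of tokenized lengths might look like this (a hedged sketch: in axolotl `batch_max_len` is typically derived from `sequence_len` and the micro-batch size, but treat these numbers as illustrative). `len(sampler)` is avoided here because it triggers a cross-rank gather that requires distributed initialization.

```python
import numpy as np
from torch.utils.data import SequentialSampler

lengths = np.random.default_rng(7).integers(32, 2048, size=10_000)

sampler = MultipackBatchSampler(
    SequentialSampler(range(len(lengths))),
    batch_size=4,        # four packed bins per step
    batch_max_len=4096,  # token capacity of each bin
    lengths=lengths,
    drop_last=True,
    sequential=False,    # parallel FFD packing
    group_size=1_000,
    bin_size=200,
    num_processes=1,
)

batches = sampler.generate_batches(set_stats=True)
print(len(batches), "batches;", f"packing efficiency ~{sampler.efficiency():.2%}")
```
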
@@ -82,6 +82,7 @@ class AxolotlInputConfig(
     mean_resizing_embeddings: bool | None = False
     # optionally shrink the embeddings when the tokenizer vocab size is smaller
     shrink_embeddings: bool | None = None
+    embeddings_skip_upcast: bool | None = None

     rl: RLType | None = None
     trl: TRLConfig | None = Field(
@@ -435,16 +436,6 @@ class AxolotlInputConfig(
         )
         return data

-    @model_validator(mode="before")
-    @classmethod
-    def check_sample_packing_w_xformers(cls, data):
-        if data.get("sample_packing") and data.get("xformers_attention"):
-            raise ValueError(
-                "sample_packing not compatible with xformers_attention. Use flash_attention"
-            )
-
-        return data
-
     @model_validator(mode="before")
     @classmethod
     # pylint: disable=duplicate-code
@@ -471,9 +462,10 @@ class AxolotlInputConfig(
             and not data.get("flash_attention")
             and not data.get("sdp_attention")
             and not data.get("flex_attention")
+            and not data.get("xformers_attention")
         ):
             LOG.warning(
-                "sample_packing without flash, sdp or flex attention does not handle cross sample decontamination."
+                "sample_packing without flash, sdp, xformers or flex attention does not handle cross sample decontamination."
             )

         return data
@@ -53,4 +53,5 @@ class CustomSupportedOptimizers(str, Enum):
     ao_adamw_8bit = "ao_adamw_8bit"  # pylint: disable=invalid-name
     ao_adamw_fp8 = "ao_adamw_fp8"  # pylint: disable=invalid-name
     adopt_adamw = "adopt_adamw"  # pylint: disable=invalid-name
+    came_pytorch = "came_pytorch"  # pylint: disable=invalid-name
     muon = "muon"  # pylint: disable=invalid-name

@@ -75,8 +75,10 @@ class HyperparametersConfig(BaseModel):
     lr_groups: list[LrGroup] | None = None

     adam_epsilon: float | None = None
+    adam_epsilon2: float | None = None
     adam_beta1: float | None = None
     adam_beta2: float | None = None
+    adam_beta3: float | None = None
     max_grad_norm: float | None = None
     num_epochs: float = Field(default=1.0)
@@ -16,7 +16,7 @@ from datasets import IterableDataset, disable_caching, enable_caching
 from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
 from transformers.utils import is_torch_bf16_gpu_available

-from axolotl.core.trainer_builder import HFCausalTrainerBuilder, HFRLTrainerBuilder
+from axolotl.core.trainers.builders import HFCausalTrainerBuilder, HFRLTrainerBuilder
 from axolotl.monkeypatch.trainer_eval_guard import patch_evaluation_loop_for_fsdp2
 from axolotl.utils.distributed import reduce_and_broadcast
 from axolotl.utils.environment import check_cuda_p2p_ib_support
@@ -633,8 +633,7 @@ def setup_trainer(
     peft_config: Optional PEFT (Parameter-Efficient Fine-Tuning) configuration. Default is None.

     Returns:
-        A trainer instance (either `HFRLTrainer` or `HFCausalTrainer`) configured based
-        on the provided parameters.
+        A trainer instance configured based on the provided parameters.
     """
     if (
         cfg.torch_compile
@@ -4,6 +4,7 @@ shared pytest fixtures

 import functools
+import importlib
 import os
 import shutil
 import sys
 import tempfile
@@ -529,31 +530,32 @@ def dataset_fozziethebeat_alpaca_messages_2k_dpo_test_rev_ea82cff(


-# # pylint: disable=redefined-outer-name,unused-argument
-# def test_load_fixtures(
-#     download_smollm2_135m_model,
-#     download_llama_68m_random_model,
-#     download_qwen_2_5_half_billion_model,
-#     download_tatsu_lab_alpaca_dataset,
-#     download_mhenrichsen_alpaca_2k_dataset,
-#     download_mhenrichsen_alpaca_2k_w_revision_dataset,
-#     download_mlabonne_finetome_100k_dataset,
-#     download_argilla_distilabel_capybara_dpo_7k_binarized_dataset,
-#     download_argilla_ultrafeedback_binarized_preferences_cleaned_dataset,
-#     download_fozzie_alpaca_dpo_dataset,
-#     download_arcee_ai_distilabel_intel_orca_dpo_pairs_dataset,
-#     download_argilla_dpo_pairs_dataset,
-#     download_tiny_shakespeare_dataset,
-#     download_deepseek_model_fixture,
-#     download_huggyllama_model_fixture,
-#     download_llama_1b_model_fixture,
-#     download_llama3_8b_model_fixture,
-#     download_llama3_8b_instruct_model_fixture,
-#     download_phi_35_mini_model_fixture,
-#     download_phi_3_medium_model_fixture,
-#     download_mistral_7b_model_fixture,
-#     download_gemma_2b_model_fixture,
-#     download_gemma2_9b_model_fixture,
-#     download_mlx_mistral_7b_model_fixture,
-#     download_llama2_model_fixture,
-# ):
-#     pass
+@pytest.mark.skipif(
+    os.environ.get("AXOLOTL_IS_CI_CACHE_PRELOAD", "-1") != "1",
+    reason="Not running in CI cache preload",
+)
+def test_load_fixtures(
+    download_smollm2_135m_model,
+    download_qwen_2_5_half_billion_model,
+    download_tatsu_lab_alpaca_dataset,
+    download_mhenrichsen_alpaca_2k_dataset,
+    download_mhenrichsen_alpaca_2k_w_revision_dataset,
+    download_mlabonne_finetome_100k_dataset,
+    download_argilla_distilabel_capybara_dpo_7k_binarized_dataset,
+    download_arcee_ai_distilabel_intel_orca_dpo_pairs_dataset,
+    download_argilla_dpo_pairs_dataset,
+    download_tiny_shakespeare_dataset,
+    download_deepseek_model_fixture,
+    download_huggyllama_model_fixture,
+    download_llama_1b_model_fixture,
+    download_llama3_8b_model_fixture,
+    download_llama3_8b_instruct_model_fixture,
+    download_phi_35_mini_model_fixture,
+    download_phi_3_medium_model_fixture,
+    download_mistral_7b_model_fixture,
+    download_gemma_2b_model_fixture,
+    download_gemma2_9b_model_fixture,
+    download_mlx_mistral_7b_model_fixture,
+    download_llama2_model_fixture,
+):
+    pass
@@ -1,10 +1,8 @@
-"""
-unit tests for axolotl.core.trainer_builder
-"""
+"""Unit tests for axolotl.core.trainers.builders"""

 import pytest

-from axolotl.core.trainer_builder import HFRLTrainerBuilder
+from axolotl.core.trainers.builders import HFRLTrainerBuilder
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.models import load_model, load_tokenizer
@@ -53,9 +51,7 @@ def fixture_model(cfg, tokenizer):


 class TestHFRLTrainerBuilder:
-    """
-    TestCase class for DPO trainer builder
-    """
+    """Test case class for RL trainer builder"""

     def test_build_training_arguments(self, cfg, model, tokenizer):
         builder = HFRLTrainerBuilder(cfg, model, tokenizer)
@@ -29,6 +29,12 @@ class LogHooksPlugin(BasePlugin):
         except FileNotFoundError:
             pass

+    def post_trainer_create(self, cfg, trainer):  # pylint: disable=unused-argument
+        with open(
+            self.base_dir.joinpath("plugin_hooks.log"), "a", encoding="utf-8"
+        ) as f:
+            f.write("post_trainer_create\n")
+
     def pre_model_load(self, cfg):  # pylint: disable=unused-argument
         with open(
             self.base_dir.joinpath("plugin_hooks.log"), "a", encoding="utf-8"
@@ -165,6 +171,7 @@ class TestPluginHooks:
         ) as f:
             file_contents = f.readlines()
             file_contents = "\n".join(file_contents)
+            assert "post_trainer_create" in file_contents
             assert "pre_model_load" in file_contents
             assert "post_model_build" in file_contents
             assert "pre_lora_load" in file_contents
@@ -90,7 +90,7 @@ class TestKnowledgeDistillation:
         train(cfg=cfg, dataset_meta=dataset_meta)
         assert (Path(temp_dir) / "model.safetensors").exists()
         check_tensorboard(
-            temp_dir + "/runs", "train/loss", 1.0, "Train Loss is too high"
+            temp_dir + "/runs", "train/loss", 1.2, "Train Loss (%s) is too high"
         )

     @pytest.mark.parametrize(
@@ -121,5 +121,5 @@ class TestKnowledgeDistillation:
         train(cfg=cfg, dataset_meta=dataset_meta)
         assert (Path(temp_dir) / "adapter_model.safetensors").exists()
         check_tensorboard(
-            temp_dir + "/runs", "train/loss", 1.0, "Train Loss is too high"
+            temp_dir + "/runs", "train/loss", 1.2, "Train Loss (%s) is too high"
         )
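The `%s` in the new assertion message implies the helper interpolates the observed loss into the failure text. A plausible reading of what `check_tensorboard` does, inferred from the call shape (a hedged sketch, not the project's actual test utility):

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator


def check_tensorboard_sketch(runs_dir: str, tag: str, threshold: float, assert_msg: str):
    # Read the scalar series written during training and assert the last
    # value for `tag` (e.g. "train/loss") stays below `threshold`.
    accumulator = EventAccumulator(runs_dir)
    accumulator.Reload()
    last_value = accumulator.Scalars(tag)[-1].value
    assert last_value < threshold, assert_msg % last_value
```
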
@@ -479,7 +479,7 @@ class TestMultiGPULlama:
                 "sample_packing": True,
                 "pad_to_sequence_len": True,
                 "sequence_len": 2048,
-                "val_set_size": 0.05,
+                "val_set_size": 0.1,
                 "special_tokens": {
                     "pad_token": "<|endoftext|>",
                 },
@@ -29,12 +29,12 @@ from axolotl.utils.dict import DictDefault

 MODEL_CONFIGS = [
     {
-        "name": "openaccess-ai-collective/tiny-mistral",
+        "name": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
         "expected_activation": apply_lora_mlp_swiglu,
         "dtype": torch.float16,
     },
     {
-        "name": "Qwen/Qwen2-7B",
+        "name": "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5",
         "expected_activation": apply_lora_mlp_swiglu,
         "dtype": torch.float16,
     },
@@ -44,7 +44,7 @@ MODEL_CONFIGS = [
         "dtype": torch.float32,
     },
     {
-        "name": "mhenrichsen/gemma-2b",
+        "name": "trl-internal-testing/tiny-Gemma2ForCausalLM",
         "expected_activation": apply_lora_mlp_geglu,
         "dtype": torch.float16,
     },
@@ -156,7 +156,9 @@ def test_swiglu_mlp_integration(small_llama_model):
 def test_geglu_model_integration():
     """Test GeGLU activation with Gemma model."""
     model = AutoModelForCausalLM.from_pretrained(
-        "mhenrichsen/gemma-2b", torch_dtype=torch.float16, device_map="cuda:0"
+        "trl-internal-testing/tiny-Gemma2ForCausalLM",
+        torch_dtype=torch.float16,
+        device_map="cuda:0",
     )
     peft_config = get_peft_config(
         {
@@ -57,9 +57,9 @@ class Test4dMultipackLlama(unittest.TestCase):
                 "learning_rate": 0.00001,
                 "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 5,
+                "save_steps": 3,
+                "eval_steps": 4,
                 "fp16": True,
             }
         )
@@ -105,9 +105,9 @@ class Test4dMultipackLlama(unittest.TestCase):
                 "learning_rate": 0.00001,
                 "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 5,
+                "save_steps": 3,
+                "eval_steps": 4,
                 "fp16": True,
             }
         )
@@ -6,6 +6,8 @@ import logging
 import os
 import unittest

+import pytest
+
 from axolotl.cli.args import TrainerCliArgs
 from axolotl.common.datasets import load_datasets
 from axolotl.train import train
@@ -23,6 +25,7 @@ class TestFalconPatched(unittest.TestCase):
     Test case for Falcon models
     """

+    @pytest.mark.skip(reason="no tiny models for testing with safetensors")
     @with_temp_dir
     def test_qlora(self, temp_dir):
         # pylint: disable=duplicate-code
@@ -71,6 +74,7 @@ class TestFalconPatched(unittest.TestCase):
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)

+    @pytest.mark.skip(reason="no tiny models for testing with safetensors")
     @with_temp_dir
     def test_ft(self, temp_dir):
         # pylint: disable=duplicate-code
@@ -28,7 +28,7 @@ class TestMistral(unittest.TestCase):
         # pylint: disable=duplicate-code
         cfg = DictDefault(
             {
-                "base_model": "openaccess-ai-collective/tiny-mistral",
+                "base_model": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
                 "flash_attention": True,
                 "sample_packing": True,
                 "sequence_len": 1024,
@@ -57,9 +57,9 @@ class TestMistral(unittest.TestCase):
                 "learning_rate": 0.00001,
                 "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 5,
+                "save_steps": 3,
+                "eval_steps": 4,
                 "bf16": "auto",
             }
         )
@@ -76,7 +76,7 @@ class TestMistral(unittest.TestCase):
         # pylint: disable=duplicate-code
         cfg = DictDefault(
             {
-                "base_model": "openaccess-ai-collective/tiny-mistral",
+                "base_model": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
                 "flash_attention": True,
                 "sample_packing": True,
                 "sequence_len": 1024,
@@ -99,9 +99,9 @@ class TestMistral(unittest.TestCase):
                 "learning_rate": 0.00001,
                 "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 5,
+                "save_steps": 3,
+                "eval_steps": 4,
                 "bf16": "auto",
             }
         )
@@ -54,9 +54,9 @@ class TestMixtral(unittest.TestCase):
                 "learning_rate": 0.00001,
                 "optimizer": "adamw_bnb_8bit",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 5,
+                "save_steps": 3,
+                "eval_steps": 4,
                 "bf16": "auto",
             }
         )
@@ -93,9 +93,9 @@ class TestMixtral(unittest.TestCase):
                 "learning_rate": 0.00001,
                 "optimizer": "adamw_bnb_8bit",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 5,
+                "save_steps": 3,
+                "eval_steps": 4,
                 "bf16": "auto",
             }
         )
@@ -56,7 +56,7 @@ class TestModelPatches(unittest.TestCase):
     def test_mistral_multipack(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "openaccess-ai-collective/tiny-mistral",
+                "base_model": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
                 "flash_attention": True,
                 "sample_packing": True,
                 "sequence_len": 2048,
63
tests/e2e/patched/test_peft_embeddings.py
Normal file
@@ -0,0 +1,63 @@
+"""
+Test case for handling embeddings when using peft
+"""
+
+import torch
+
+from axolotl.train import setup_model_and_tokenizer
+from axolotl.utils.config import normalize_config, validate_config
+from axolotl.utils.dict import DictDefault
+
+
+class TestLlamaPeftEmbeddings:
+    """
+    Test class for handling embeddings when using peft
+    """
+
+    def test_peft_embeddings_upcast(self, temp_dir):
+        # pylint: disable=duplicate-code
+        cfg = DictDefault(
+            {
+                "base_model": "HuggingFaceTB/SmolLM2-135M",
+                "load_in_4bit": True,
+                "adapter": "qlora",
+                "lora_r": 8,
+                "lora_alpha": 16,
+                "lora_target_linear": True,
+                "trust_remote_code": True,
+                "sequence_len": 512,
+                "val_set_size": 0.01,
+                "special_tokens": {
+                    "pad_token": "<|endoftext|>",
+                },
+                "datasets": [
+                    {
+                        "path": "mhenrichsen/alpaca_2k_test",
+                        "type": "alpaca",
+                    },
+                ],
+                "num_epochs": 1,
+                "max_steps": 2,
+                "micro_batch_size": 1,
+                "gradient_accumulation_steps": 1,
+                "output_dir": temp_dir,
+                "learning_rate": 0.00001,
+                "optimizer": "adamw_8bit",
+                "lr_scheduler": "cosine",
+                "flash_attention": True,
+                "sample_packing": False,
+                "bf16": "auto",
+                "save_safetensors": True,
+                "embeddings_skip_upcast": True,
+            }
+        )
+
+        cfg = validate_config(cfg)
+        normalize_config(cfg)
+
+        model, _, _, _ = setup_model_and_tokenizer(cfg)
+
+        # Check if the embeddings are upcast correctly
+        # only embed_tokens is a parameter that may be upcast
+        assert model.base_model.model.model.embed_tokens.weight.dtype == torch.bfloat16
+        assert model.base_model.model.lm_head.weight.dtype == torch.bfloat16
@@ -56,9 +56,9 @@ class TestPhiMultipack(unittest.TestCase):
                 "learning_rate": 0.00001,
                 "optimizer": "adamw_bnb_8bit",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "eval_steps": 10,
-                "save_steps": 10,
+                "max_steps": 5,
+                "eval_steps": 3,
+                "save_steps": 4,
                 "bf16": "auto",
             }
         )
@@ -108,9 +108,9 @@ class TestPhiMultipack(unittest.TestCase):
                 "learning_rate": 0.00001,
                 "optimizer": "adamw_bnb_8bit",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "eval_steps": 10,
-                "save_steps": 10,
+                "max_steps": 5,
+                "eval_steps": 3,
+                "save_steps": 4,
                 "bf16": "auto",
             }
         )
@@ -15,7 +15,7 @@ from axolotl.train import train
 from axolotl.utils.config import normalize_config, validate_config
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists, most_recent_subdir
+from ..utils import check_model_output_exists, most_recent_subdir, require_torch_2_6_0

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -26,6 +26,7 @@ class TestResumeLlama:
     Test case for resuming training of llama models
     """

+    @require_torch_2_6_0
     def test_resume_lora_packed(self, temp_dir):
         # pylint: disable=duplicate-code
         cfg = DictDefault(
@@ -62,6 +63,7 @@ class TestResumeLlama:
                 "save_total_limit": 5,
                 "max_steps": 15,
                 "use_tensorboard": True,
+                "save_safetensors": True,
             }
         )
         if is_torch_bf16_gpu_available():
@@ -19,14 +19,11 @@ class TestE2eEvaluate:
         # pylint: disable=duplicate-code
         cfg = DictDefault(
             {
-                "base_model": "JackFram/llama-68m",
-                "tokenizer_type": "LlamaTokenizer",
+                "base_model": "HuggingFaceTB/SmolLM2-135M",
                 "sequence_len": 1024,
                 "val_set_size": 0.02,
                 "special_tokens": {
-                    "unk_token": "<unk>",
-                    "bos_token": "<s>",
-                    "eos_token": "</s>",
+                    "pad_token": "<|endoftext|>",
                 },
                 "datasets": [
                     {
@@ -6,6 +6,8 @@ import logging
 import os
 import unittest

+import pytest
+
 from axolotl.cli.args import TrainerCliArgs
 from axolotl.common.datasets import load_datasets
 from axolotl.train import train
@@ -23,6 +25,7 @@ class TestFalcon(unittest.TestCase):
     Test case for falcon
     """

+    @pytest.mark.skip(reason="no tiny models for testing with safetensors")
     @with_temp_dir
     def test_lora(self, temp_dir):
         # pylint: disable=duplicate-code
@@ -74,6 +77,7 @@ class TestFalcon(unittest.TestCase):
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)

+    @pytest.mark.skip(reason="no tiny models for testing with safetensors")
     @with_temp_dir
     def test_lora_added_vocab(self, temp_dir):
         # pylint: disable=duplicate-code
@@ -129,6 +133,7 @@ class TestFalcon(unittest.TestCase):
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)

+    @pytest.mark.skip(reason="no tiny models for testing with safetensors")
     @with_temp_dir
     def test_ft(self, temp_dir):
         # pylint: disable=duplicate-code
@@ -1,21 +1,21 @@
-"""
-test module to import various submodules that have historically broken due to dependency issues
-"""
+"""Test module to import various submodules that have historically broken due to
+dependency issues.
+"""

 import unittest


 class TestImports(unittest.TestCase):
-    """
-    Test class to import various submodules that have historically broken due to dependency issues
-    """
+    """Test class to import various submodules that have historically broken due to
+    dependency issues.
+    """

     def test_import_causal_trainer(self):
-        from axolotl.core.trainer_builder import (  # pylint: disable=unused-import # noqa: F401
+        from axolotl.core.trainers.builders import (  # pylint: disable=unused-import # noqa: F401
             HFCausalTrainerBuilder,
         )

     def test_import_rl_trainer(self):
-        from axolotl.core.trainer_builder import (  # pylint: disable=unused-import # noqa: F401
+        from axolotl.core.trainers.builders import (  # pylint: disable=unused-import # noqa: F401
             HFRLTrainerBuilder,
         )
@@ -30,7 +30,7 @@ class TestMistral(unittest.TestCase):
         # pylint: disable=duplicate-code
         cfg = DictDefault(
             {
-                "base_model": "openaccess-ai-collective/tiny-mistral",
+                "base_model": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
                 "flash_attention": True,
                 "sequence_len": 1024,
                 "load_in_8bit": True,
@@ -77,7 +77,7 @@ class TestMistral(unittest.TestCase):
         # pylint: disable=duplicate-code
         cfg = DictDefault(
             {
-                "base_model": "openaccess-ai-collective/tiny-mistral",
+                "base_model": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
                 "flash_attention": True,
                 "sequence_len": 1024,
                 "val_set_size": 0.02,
@@ -199,3 +199,50 @@ class TestCustomOptimizers(unittest.TestCase):

         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+
+    @with_temp_dir
+    def test_came_pytorch(self, temp_dir):
+        # pylint: disable=duplicate-code
+        cfg = DictDefault(
+            {
+                "base_model": "JackFram/llama-68m",
+                "tokenizer_type": "LlamaTokenizer",
+                "sequence_len": 1024,
+                "load_in_8bit": True,
+                "adapter": "lora",
+                "lora_r": 8,
+                "lora_alpha": 16,
+                "lora_dropout": 0.05,
+                "lora_target_linear": True,
+                "val_set_size": 0.1,
+                "special_tokens": {
+                    "unk_token": "<unk>",
+                    "bos_token": "<s>",
+                    "eos_token": "</s>",
+                },
+                "datasets": [
+                    {
+                        "path": "mhenrichsen/alpaca_2k_test",
+                        "type": "alpaca",
+                    },
+                ],
+                "num_epochs": 1,
+                "micro_batch_size": 8,
+                "gradient_accumulation_steps": 1,
+                "output_dir": temp_dir,
+                "learning_rate": 0.00001,
+                "optimizer": "came_pytorch",
+                "adam_beta3": 0.9999,
+                "adam_epsilon2": 1e-16,
+                "max_steps": 5,
+                "lr_scheduler": "cosine",
+            }
+        )
+
+        cfg = validate_config(cfg)
+        normalize_config(cfg)
+        cli_args = TrainerCliArgs()
+        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
+
+        train(cfg=cfg, dataset_meta=dataset_meta)
+        check_model_output_exists(temp_dir, cfg)
@@ -414,7 +414,6 @@ class TestDatasetPreparation:
             snapshot_path = snapshot_download(
                 repo_id="mhenrichsen/alpaca_2k_test",
                 repo_type="dataset",
-                local_dir=tmp_ds_path,
             )
             shutil.copytree(snapshot_path, tmp_ds_path, dirs_exist_ok=True)
@@ -106,3 +106,4 @@ class TestBatchedSamplerPacking:

         original_idxs = set(range(len(train_dataset)))
         assert original_idxs == set(batch_idxs)
+        assert len(batch_idxs) == len(set(batch_idxs))