Compare commits: attn-imple...scattermoe (8 commits)
| SHA1 |
|---|
| 42922f8f8b |
| 7041592ca7 |
| fec0c3a99e |
| 31d8d068bb |
| 66fea258c7 |
| 07ff389be8 |
| 2dcca15f65 |
| c5db90aa3f |
.github/CONTRIBUTING.md (5 changed lines)

@@ -31,10 +31,7 @@ PRs are **greatly welcome**!
 Please run below to setup env
 ```bash
-# Install axolotl + dev and test dependencies from lockfile
-export UV_TORCH_BACKEND=cu128 # or cu130
-uv sync --extra flash-attn --extra deepspeed --group dev --group test
-source .venv/bin/activate
+pip3 install -r requirements-dev.txt -r requirements-tests.txt
 pre-commit install

 # test
.github/workflows/base.yml (16 changed lines)

@@ -30,6 +30,14 @@ jobs:
       fail-fast: false
       matrix:
         include:
+          - cuda: "128"
+            cuda_version: 12.8.1
+            cudnn_version: ""
+            python_version: "3.11"
+            pytorch: 2.8.0
+            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+            dockerfile: "Dockerfile-base"
+            platforms: "linux/amd64"
           - cuda: "128"
             cuda_version: 12.8.1
             cudnn_version: ""

@@ -152,6 +160,14 @@ jobs:
       fail-fast: false
       matrix:
         include:
+          - cuda: "128"
+            cuda_version: 12.8.1
+            cudnn_version: ""
+            python_version: "3.11"
+            pytorch: 2.8.0
+            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+            dockerfile: "Dockerfile-uv-base"
+            platforms: "linux/amd64"
           - cuda: "128"
             cuda_version: 12.8.1
             cudnn_version: ""
.github/workflows/lint.yml (2 changed lines)

@@ -6,7 +6,7 @@ on:
     types: [opened, synchronize, reopened, ready_for_review]
     paths:
       - '**.py'
-      - 'pyproject.toml'
+      - 'requirements.txt'
       - '.github/workflows/*.yml'
       - "*.[q]md"
       - "examples/**/*.y[a]?ml"
.github/workflows/main.yml (12 changed lines)

@@ -18,6 +18,12 @@ jobs:
       fail-fast: false
       matrix:
         include:
+          - cuda: 128
+            cuda_version: 12.8.1
+            python_version: "3.11"
+            pytorch: 2.8.0
+            axolotl_extras:
+            platforms: "linux/amd64"
           - cuda: 128
             cuda_version: 12.8.1
             python_version: "3.11"

@@ -180,6 +186,12 @@ jobs:
       fail-fast: false
       matrix:
         include:
+          - cuda: 128
+            cuda_version: 12.8.1
+            python_version: "3.11"
+            pytorch: 2.8.0
+            axolotl_extras:
+            platforms: "linux/amd64"
           - cuda: 128
             cuda_version: 12.8.1
             python_version: "3.11"
.github/workflows/multi-gpu-e2e.yml (41 changed lines)

@@ -3,15 +3,17 @@ name: docker-multigpu-tests-biweekly
 on:
   pull_request:
     paths:
-      - "tests/e2e/multigpu/**.py"
-      - "pyproject.toml"
-      - ".github/workflows/multi-gpu-e2e.yml"
-      - "scripts/cutcrossentropy_install.py"
-      - "src/axolotl/core/trainers/mixins/sequence_parallel.py"
-      - "src/axolotl/utils/distributed.py"
+      - 'tests/e2e/multigpu/**.py'
+      - 'requirements.txt'
+      - 'setup.py'
+      - 'pyproject.toml'
+      - '.github/workflows/multi-gpu-e2e.yml'
+      - 'scripts/cutcrossentropy_install.py'
+      - 'src/axolotl/core/trainers/mixins/sequence_parallel.py'
+      - 'src/axolotl/utils/distributed.py'
   workflow_dispatch:
   schedule:
-    - cron: "0 0 * * 1,4" # Runs at 00:00 UTC every monday & thursday
+    - cron: '0 0 * * 1,4' # Runs at 00:00 UTC every monday & thursday

 # Cancel jobs on the same ref if a new one is triggered
 concurrency:

@@ -31,19 +33,25 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          # - cuda: 129
-          #   cuda_version: 12.9.1
-          #   python_version: "3.12"
-          #   pytorch: 2.9.1
-          #   axolotl_extras: "fbgemm-gpu"
-          #   num_gpus: 2
-          #   dockerfile: "Dockerfile-uv.jinja"
+          - cuda: 128
+            cuda_version: 12.8.1
+            python_version: "3.11"
+            pytorch: 2.8.0
+            axolotl_extras: fbgemm-gpu
+            num_gpus: 2
+          # - cuda: 129
+          #   cuda_version: 12.9.1
+          #   python_version: "3.12"
+          #   pytorch: 2.9.1
+          #   axolotl_extras: "fbgemm-gpu"
+          #   num_gpus: 2
+          #   dockerfile: "Dockerfile-uv.jinja"
           - cuda: 130
             cuda_version: 13.0.0
             python_version: "3.11"
             pytorch: 2.9.1
             axolotl_extras:
-            # axolotl_extras: fbgemm-gpu
+            # axolotl_extras: fbgemm-gpu
             num_gpus: 2
           - cuda: 128
             cuda_version: 12.8.1

@@ -51,6 +59,7 @@ jobs:
             pytorch: 2.10.0
             axolotl_extras: "fbgemm-gpu"
             num_gpus: 2
+            dockerfile: "Dockerfile-uv.jinja"
     runs-on: [self-hosted, modal]
     timeout-minutes: 120
     steps:

@@ -72,7 +81,7 @@ jobs:
           echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
           echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
           echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
-          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile-uv.jinja'}}" >> $GITHUB_ENV
+          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile.jinja'}}" >> $GITHUB_ENV
       - name: Run tests job on Modal
         env:
           CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
.github/workflows/nightlies.yml (10 changed lines)

@@ -15,6 +15,11 @@ jobs:
       fail-fast: false
       matrix:
         include:
+          - cuda: 128
+            cuda_version: 12.8.1
+            python_version: "3.11"
+            pytorch: 2.8.0
+            axolotl_extras:
           - cuda: 128
             cuda_version: 12.8.1
             python_version: "3.11"

@@ -62,6 +67,11 @@ jobs:
     strategy:
       matrix:
         include:
+          - cuda: 128
+            cuda_version: 12.8.1
+            python_version: "3.11"
+            pytorch: 2.8.0
+            axolotl_extras:
           - cuda: 128
             cuda_version: 12.8.1
             python_version: "3.11"
.github/workflows/pypi.yml (13 changed lines)

@@ -8,9 +8,6 @@ on:

 permissions: {}

-env:
-  UV_SYSTEM_PYTHON: "1"
-
 jobs:
   setup_release:
     name: Create Release

@@ -44,15 +41,11 @@ jobs:
         with:
           python-version: "3.11"

-      - name: Install uv
-        uses: astral-sh/setup-uv@v7
-
       - name: Install dependencies
         run: |
-          uv pip install wheel packaging
-          uv pip install --no-build-isolation -e .
-          uv pip install black mypy pre-commit types-requests quartodoc jupyter blobfile tiktoken \
-            codecov codecov-cli pytest pytest-cov pytest-retry pytest-sugar pytest-xdist tbparse
+          pip3 install wheel packaging==26.0
+          pip3 install --no-build-isolation -e .
+          pip3 install -r requirements-dev.txt -r requirements-tests.txt

       - name: Extract tag name
         id: tag
.github/workflows/tests-nightly.yml (57 changed lines)

@@ -2,18 +2,15 @@ name: Tests Nightly against upstream main
 on:
   workflow_dispatch:
   schedule:
-    - cron: "0 0 * * *" # Runs at 00:00 UTC every day
+    - cron: '0 0 * * *' # Runs at 00:00 UTC every day
   pull_request:
     types: [opened, synchronize, reopened, ready_for_review]
     paths:
-      - ".github/workflows/tests-nightly.yml"
+      - '.github/workflows/tests-nightly.yml'

 permissions:
   contents: read

-env:
-  UV_SYSTEM_PYTHON: "1"
-
 jobs:
   pre-commit:
     name: pre-commit

@@ -23,7 +20,7 @@ jobs:
       - uses: actions/setup-python@v5
         with:
           python-version: "3.11"
-          cache: "pip" # caching pip dependencies
+          cache: 'pip' # caching pip dependencies
       - uses: pre-commit/action@v3.0.1
         env:
           SKIP: no-commit-to-branch

@@ -46,8 +43,8 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python_version: ["3.12"] # TODO include py3.14 once https://github.com/mistralai/mistral-common/pull/194 is merged
-        pytorch_version: ["2.9.1", "2.10.0"]
+        python_version: ["3.12"] # TODO include py3.14 once https://github.com/mistralai/mistral-common/pull/194 is merged
+        pytorch_version: ["2.8.0", "2.9.1", "2.10.0"]
     timeout-minutes: 20

     steps:

@@ -64,34 +61,36 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python_version }}
+          cache: 'pip' # caching pip dependencies

-      - name: Install uv
-        uses: astral-sh/setup-uv@v7
+      - name: upgrade pip
+        run: |
+          pip3 install --upgrade pip
+          pip3 install --upgrade packaging==26.0 setuptools==78.1.1 wheel

       - name: Install PyTorch
         run: |
-          uv pip install torch==${{ matrix.pytorch_version }} torchvision
-          uv pip freeze | grep -E "^(torch|torchvision)==" > /tmp/torch-pin.txt
+          pip3 install torch==${{ matrix.pytorch_version }} torchvision

+      - name: Update requirements.txt
+        run: |
+          sed -i 's#^transformers.*#transformers @ git+https://github.com/huggingface/transformers.git@main#' requirements.txt
+          sed -i 's#^peft.*#peft @ git+https://github.com/huggingface/peft.git@main#' requirements.txt
+          sed -i 's#^accelerate.*#accelerate @ git+https://github.com/huggingface/accelerate.git@main#' requirements.txt
+          sed -i 's#^trl.*#trl @ git+https://github.com/huggingface/trl.git@main#' requirements.txt
+          sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt

       - name: Install dependencies
         run: |
-          uv pip install --no-build-isolation -e . --override /tmp/torch-pin.txt
-          python scripts/cutcrossentropy_install.py --uv | sh
-          uv pip install black mypy pre-commit types-requests quartodoc jupyter blobfile tiktoken \
-            codecov codecov-cli pytest pytest-cov pytest-retry pytest-sugar pytest-xdist tbparse
-
-      - name: Override with nightly HF packages
-        run: |
-          uv pip install --no-deps \
-            "transformers @ git+https://github.com/huggingface/transformers.git@main" \
-            "peft @ git+https://github.com/huggingface/peft.git@main" \
-            "accelerate @ git+https://github.com/huggingface/accelerate.git@main" \
-            "trl @ git+https://github.com/huggingface/trl.git@main" \
-            "datasets @ git+https://github.com/huggingface/datasets.git@main"
+          pip3 show torch
+          pip3 install --no-build-isolation -U -e .
+          python scripts/unsloth_install.py | sh
+          python scripts/cutcrossentropy_install.py | sh
+          pip3 install -r requirements-dev.txt -r requirements-tests.txt

       - name: Make sure PyTorch version wasn't clobbered
         run: |
-          python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__, f'Expected torch ${{ matrix.pytorch_version }} but got {torch.__version__}'"
+          python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"

       - name: Ensure axolotl CLI was installed
         run: |

@@ -103,6 +102,9 @@ jobs:
           pytest -v --durations=10 tests/patched/
           pytest -v --durations=10 tests/cli/

+      - name: cleanup pip cache
+        run: |
+          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;

   docker-e2e-tests:
     if: github.repository_owner == 'axolotl-ai-cloud'

@@ -134,6 +136,7 @@ jobs:
             pytorch: 2.9.1
             num_gpus: 1
             axolotl_extras:
+            dockerfile: "Dockerfile-uv.jinja"
             nightly_build: "true"
     steps:
       - name: Checkout

@@ -154,7 +157,7 @@ jobs:
           echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
           echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
           echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
-          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile-uv.jinja'}}" >> $GITHUB_ENV
+          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile.jinja'}}" >> $GITHUB_ENV
           echo "NIGHTLY_BUILD=${{ matrix.nightly_build }}" >> $GITHUB_ENV
       - name: Run tests job on Modal
         env:
.github/workflows/tests.yml (126 changed lines)

@@ -6,19 +6,21 @@ on:
     branches:
       - "main"
     paths:
-      - "**.py"
-      - "pyproject.toml"
-      - ".github/workflows/*.yml"
-      - "cicd/cicd.sh"
-      - "cicd/Dockerfile-uv.jinja"
+      - '**.py'
+      - 'requirements.txt'
+      - '.github/workflows/*.yml'
+      - 'requirements-tests.txt'
+      - 'cicd/cicd.sh'
+      - 'cicd/Dockerfile.jinja'
   pull_request:
-    types: [opened, synchronize, reopened, ready_for_review]
-    paths:
-      - "**.py"
-      - "pyproject.toml"
-      - ".github/workflows/*.yml"
-      - "cicd/cicd.sh"
-      - "cicd/Dockerfile-uv.jinja"
+    types: [opened, synchronize, reopened, ready_for_review]
+    paths:
+      - '**.py'
+      - 'requirements.txt'
+      - '.github/workflows/*.yml'
+      - 'requirements-tests.txt'
+      - 'cicd/cicd.sh'
+      - 'cicd/Dockerfile.jinja'
   workflow_dispatch:

 # Cancel jobs on the same ref if a new one is triggered

@@ -31,7 +33,6 @@ permissions:

 env:
   TRANSFORMERS_IS_CI: "yes"
-  UV_SYSTEM_PYTHON: "1"

 jobs:
   pre-commit:

@@ -43,7 +44,7 @@ jobs:
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
-          cache: "pip" # caching pip dependencies
+          cache: 'pip' # caching pip dependencies
      - uses: pre-commit/action@v3.0.1
        env:
          SKIP: no-commit-to-branch

@@ -67,11 +68,13 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python_version: ["3.12", "3.14"]
-        pytorch_version: ["2.9.1", "2.10.0"]
-        exclude:
-          - python_version: "3.14"
-            pytorch_version: "2.9.1"
+        python_version: ["3.12"] # TODO include py3.14 once https://github.com/mistralai/mistral-common/pull/194 is merged
+        pytorch_version: ["2.8.0", "2.9.1", "2.10.0"]
+        # exclude:
+        #   - python_version: "3.14"
+        #     pytorch_version: "2.8.0"
+        #   - python_version: "3.14"
+        #     pytorch_version: "2.9.1"
     timeout-minutes: 20

     steps:

@@ -93,25 +96,32 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python_version }}
+          cache: 'pip' # caching pip dependencies

-      - name: Install uv
-        uses: astral-sh/setup-uv@v7
+      - name: upgrade pip
+        run: |
+          pip3 install --upgrade pip
+          pip3 install --upgrade packaging==26.0 setuptools==75.8.0 wheel

       - name: Install PyTorch
         run: |
-          uv pip install torch==${{ matrix.pytorch_version }} torchvision
-          uv pip freeze | grep -E "^(torch|torchvision)==" > /tmp/torch-pin.txt
+          pip3 install --no-cache-dir torch==${{ matrix.pytorch_version }} torchvision

       - name: Install dependencies
         run: |
-          uv pip install --no-build-isolation -e . --override /tmp/torch-pin.txt
-          python scripts/cutcrossentropy_install.py --uv | sh
-          uv pip install black mypy pre-commit types-requests quartodoc jupyter blobfile tiktoken \
-            codecov codecov-cli pytest pytest-cov pytest-retry pytest-sugar pytest-xdist tbparse
+          pip3 show torch
+          pip3 install --no-cache-dir --no-build-isolation -U -e .
+          python scripts/unsloth_install.py | sh
+          python scripts/cutcrossentropy_install.py | sh
+          pip3 install -r requirements-dev.txt -r requirements-tests.txt

+      - name: cleanup pip cache
+        run: |
+          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;

       - name: Make sure PyTorch version wasn't clobbered
         run: |
-          python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__, f'Expected torch ${{ matrix.pytorch_version }} but got {torch.__version__}'"
+          python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"

       - name: Ensure axolotl CLI was installed
         run: |

@@ -154,11 +164,13 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python_version: ["3.12", "3.14"]
-        pytorch_version: ["2.9.1", "2.10.0"]
-        exclude:
-          - python_version: "3.14"
-            pytorch_version: "2.9.1"
+        python_version: ["3.12"] # TODO include py3.14 once https://github.com/mistralai/mistral-common/pull/194 is merged
+        pytorch_version: ["2.8.0", "2.9.1", "2.10.0"]
+        # exclude:
+        #   - python_version: "3.14"
+        #     pytorch_version: "2.8.0"
+        #   - python_version: "3.14"
+        #     pytorch_version: "2.9.1"
     timeout-minutes: 30

     steps:

@@ -180,42 +192,38 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python_version }}
+          cache: 'pip' # caching pip dependencies

-      - name: Install uv
-        uses: astral-sh/setup-uv@v7
+      - name: upgrade pip
+        run: |
+          pip3 install --upgrade pip
+          pip3 install --upgrade packaging==26.0 setuptools==75.8.0 setuptools_scm build wheel psutil

       - name: Install PyTorch
         run: |
-          uv pip install torch==${{ matrix.pytorch_version }} torchvision
-          uv pip freeze | grep -E "^(torch|torchvision)==" > /tmp/torch-pin.txt
+          pip3 install --no-cache-dir torch==${{ matrix.pytorch_version }} torchvision

       - name: Install dependencies
         run: |
-          uv pip install packaging setuptools_scm build wheel psutil
+          pip3 show torch
           python -m build --no-isolation --sdist
-          uv pip install --no-build-isolation dist/axolotl*.tar.gz --override /tmp/torch-pin.txt
-          python scripts/cutcrossentropy_install.py --uv | sh
-          uv pip install black mypy pre-commit types-requests quartodoc jupyter blobfile tiktoken \
-            codecov codecov-cli pytest pytest-cov pytest-retry pytest-sugar pytest-xdist tbparse
+          pip3 install --no-cache-dir --no-build-isolation dist/axolotl*.tar.gz
+          python scripts/unsloth_install.py | sh
+          python scripts/cutcrossentropy_install.py | sh
+          pip3 install -r requirements-dev.txt -r requirements-tests.txt

+      - name: cleanup pip cache
+        run: |
+          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;

       - name: Make sure PyTorch version wasn't clobbered
         run: |
-          python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__, f'Expected torch ${{ matrix.pytorch_version }} but got {torch.__version__}'"
+          python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"

       - name: Ensure axolotl CLI was installed
         run: |
           axolotl --help

-      - name: Verify agent docs are discoverable
-        run: |
-          # Agent docs live in docs/agents/ (source of truth) and are resolved
-          # at runtime from the repo checkout or via `axolotl fetch docs`
-          axolotl agent-docs --list
-          axolotl agent-docs | grep -q "Fine-tuning framework"
-          axolotl agent-docs grpo | grep -q "GRPO"
-          axolotl agent-docs sft | grep -q "SFT"
-          python -c "from axolotl.cli.agent_docs import get_doc, list_topics; assert len(list_topics()) >= 5; assert 'GRPO' in get_doc('grpo')"

       - name: Show HF cache
         run: hf cache ls

@@ -277,6 +285,7 @@ jobs:
             pytorch: 2.9.1
             num_gpus: 1
             axolotl_extras:
+            dockerfile: "Dockerfile-uv.jinja"
     steps:
       - name: Checkout
         uses: actions/checkout@v4

@@ -297,7 +306,7 @@ jobs:
          echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
          echo "MODAL_IMAGE_BUILDER_VERSION=2024.10" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
-          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile-uv.jinja'}}" >> $GITHUB_ENV
+          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile.jinja'}}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

@@ -320,6 +329,13 @@ jobs:
      fail-fast: false
      matrix:
        include:
+          - cuda: 128
+            cuda_version: 12.8.1
+            python_version: "3.11"
+            pytorch: 2.8.0
+            num_gpus: 1
+            gpu_type: "B200"
+            axolotl_extras: fbgemm-gpu
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.11"

@@ -359,7 +375,7 @@ jobs:
          echo "MODAL_IMAGE_BUILDER_VERSION=2024.10" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
          echo "GPU_TYPE=${{ matrix.gpu_type || 'L40S'}}" >> $GITHUB_ENV
-          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile-uv.jinja'}}" >> $GITHUB_ENV
+          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile.jinja'}}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

@@ -11,7 +11,7 @@ repos:
      - id: no-commit-to-branch
        args: ['--branch', 'main']
  - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.15.8
+    rev: v0.15.4
    hooks:
      - id: ruff
        args: [--fix]
AGENTS.md (file removed, 99 lines)

@@ -1,99 +0,0 @@
# Axolotl

Fine-tuning framework for LLMs. Config-driven: every training run is defined by a single YAML file.

## Tech Stack

Python, PyTorch, HuggingFace Transformers, TRL, PEFT (LoRA/QLoRA), DeepSpeed, FSDP, vLLM (for GRPO generation).

## Commands

```bash
axolotl train config.yaml              # Train (single or multi-GPU, auto-detected)
axolotl preprocess config.yaml         # Tokenize dataset and validate config
axolotl preprocess config.yaml --debug # Inspect tokenized samples and label masking
axolotl inference config.yaml          # Interactive inference
axolotl merge-lora config.yaml         # Merge LoRA adapter into base model
axolotl vllm-serve config.yaml         # Start vLLM server for GRPO/EBFT training
axolotl fetch examples                 # Download example configs
axolotl agent-docs                     # Show agent-optimized docs (bundled with pip package)
axolotl agent-docs grpo                # Topic-specific agent reference
axolotl config-schema                  # Dump config JSON schema
```

## Training Methods

| Method | Config Key | When to Use |
|--------|-----------|-------------|
| SFT | *(default)* | Input-output pairs, instruction tuning |
| DPO/IPO | `rl: dpo` / `rl: dpo, dpo_loss_type: ["ipo"]` | Paired preference data (chosen vs rejected) |
| KTO | `rl: kto` | Unpaired binary preference labels |
| ORPO | `rl: orpo` | Single-stage alignment, no ref model |
| GRPO | `rl: grpo` | RL with verifiable reward functions (math, code) |
| EBFT | `rl: ebft` | Feature-matching rewards from internal representations |

Agent-specific references:

- [docs/agents/sft.md](docs/agents/sft.md) — supervised fine-tuning
- [docs/agents/preference_tuning.md](docs/agents/preference_tuning.md) — DPO, IPO, KTO, ORPO, SimPO
- [docs/agents/grpo.md](docs/agents/grpo.md) — GRPO online RL with reward functions
- [docs/agents/reward_modelling.md](docs/agents/reward_modelling.md) — outcome and process reward models
- [docs/agents/pretraining.md](docs/agents/pretraining.md) — continual pretraining
- [docs/agents/model_architectures.md](docs/agents/model_architectures.md) — model-specific quirks (Gemma4, Qwen3.5 MoE, etc.)
- [docs/agents/new_model_support.md](docs/agents/new_model_support.md) — debugging and adding support for new model architectures

## Config Pattern

All training is config-driven. A YAML file specifies model, adapter, dataset(s), and hyperparameters:

```yaml
base_model: meta-llama/Llama-3.1-8B-Instruct
adapter: lora # or qlora, or omit for full fine-tune
datasets:
  - path: my_dataset
    type: chat_template # prompt strategy (see docs/dataset-formats/)
output_dir: ./outputs/lora-out
```

Config schema: `src/axolotl/utils/schemas/config.py` (AxolotlInputConfig).

## Project Structure

```
src/axolotl/
  cli/               # CLI entry points (train, preprocess, inference, merge_lora, vllm_serve)
  core/
    builders/        # TrainerBuilder classes (causal.py for SFT, rl.py for RLHF)
    trainers/        # Trainer classes, mixins (optimizer, scheduler, packing)
      dpo/           # DPO trainer and config
      grpo/          # GRPO trainer and sampler
  loaders/           # Model, tokenizer, adapter, processor loading
  prompt_strategies/ # Dataset format handlers (chat_template, alpaca, dpo/, kto/, orpo/)
  utils/schemas/     # Pydantic config schemas (config, model, training, peft, trl, fsdp)
  integrations/      # Plugins (liger, cut_cross_entropy, swanlab, nemo_gym)
  monkeypatch/       # Runtime patches for HF transformers

examples/            # Example YAML configs by model (llama-3/, qwen2/, mistral/, ebft/)
deepspeed_configs/   # DeepSpeed JSON configs (zero2, zero3)
docs/                # Quarto documentation site
```

## Code Conventions

- Config-driven: features are toggled via YAML, not code changes
- Prompt strategies: `src/axolotl/prompt_strategies/` — each `type:` value maps to a function
- Plugin system: `plugins:` list in config loads integration modules
- Trainer mixins: `core/trainers/mixins/` for composable trainer behaviors
- Schemas: all config validation via Pydantic in `utils/schemas/`

## Key Documentation

- [Getting Started](docs/getting-started.qmd) — quickstart tutorial
- [Choosing a Method](docs/choosing_method.qmd) — SFT vs DPO vs GRPO decision guide
- [Config Reference](docs/config-reference.qmd) — all config options
- [Dataset Formats](docs/dataset-formats/) — chat_template, alpaca, input_output, completion
- [RLHF](docs/rlhf.qmd) — DPO, KTO, ORPO, GRPO, EBFT configs and dataset formats
- [GRPO Deep Dive](docs/grpo.qmd) — async training, custom rewards, scaling
- [vLLM Serving](docs/vllm_serving.qmd) — vLLM setup for GRPO/EBFT
- [Multi-GPU](docs/multi-gpu.qmd) — FSDP and DeepSpeed
- [Training Stability](docs/training_stability.qmd) — debugging loss, NaN, OOM
- [Debugging](docs/debugging.qmd) — VSCode setup, Docker debugging
@@ -1,142 +0,0 @@
# `attn-implementation-refactor` branch review

Review target: `attn-implementation-refactor` (5 commits ahead of main, merge base `69904781`).
Scope: 16 files, +682 / −96.

## 1. What the branch is trying to do

Collapse seven boolean attention flags (`flash_attention`, `sdp_attention`, `xformers_attention`, `flex_attention`, `sage_attention`, `s2_attention`, `eager_attention`) into a single `attn_implementation` field, with derived capability flags (`attn_supports_packing`, `attn_uses_flash_lib`, `attn_needs_dtype_cast`) for the gates that used to be ad-hoc OR-chains.

Mechanism: `normalize_attn_implementation` (a `@model_validator(mode="before")` on `AxolotlInputConfig`) maps bidirectionally between the new field and the legacy flags, with a priority list for legacy combos (`s2 + flash → s2`), and then computes the three capability flags from frozen sets in `enums.py`.

Adjacent changes: `xformers` and `sage` now register as their own entries in `ALL_ATTENTION_FUNCTIONS` (with FA2 mask behavior) instead of stomping the `flash_attention_2` slot. A new `fp8` backend wires `torchao.prototype.attention.apply_low_precision_attention` in `apply_post_model_load_patches`.

## 2. Target design

**`cfg.attn_implementation` is the single source of truth on the validated config.**

- Its type is `str | None`. Accepted values are **canonical names only** — no short-form aliases:
  - HF-native: `eager`, `sdpa`, `flash_attention_2`, `flash_attention_3`, `flex_attention`. (`flash_attention_3` is net-new to axolotl — the current branch only encodes `flash_attention_2` under the short name `flash`.)
  - Axolotl-owned (registered into `ALL_ATTENTION_FUNCTIONS` under exactly these names): `xformers`, `sage`, `s2`, `fp8`.
  - Hub-kernel paths: `kernels-community/sage-attention`, `kernels-community/flash-attn3`, etc. — passthrough. A known-kernel allowlist in `enums.py` classifies the common ones into the capability tables.

  Short forms like `flash`, `fa2`, `fa3`, `sdp`, `flex` are rejected (Pydantic validation error with a pointer to the canonical name).
- `model.py:_set_attention_config` passes `cfg.attn_implementation` to HF verbatim — no `_ATTN_IMPL_TO_HF` translation dict needed.
- Legacy booleans (`flash_attention: true`, `sdp_attention: true`, …) are the **only** input aliases, kept for backwards compatibility. The normalizer maps them to the canonical `attn_implementation` value (see the sketch after this list), emits a one-time `DeprecationWarning` per flag, and removes them from `data` so they're never readable on the validated `cfg`. `deprecated=True` on each Field surfaces this in JSON schema. The mapping is 1:1 with the current legacy-flag semantics (`flash_attention → flash_attention_2`, `sdp_attention → sdpa`, `flex_attention → flex_attention`, `xformers_attention → xformers`, `sage_attention → sage`, `s2_attention → s2`, `eager_attention → eager`).
- Capability flags (`attn_supports_packing`, `attn_uses_flash_lib`, `attn_needs_dtype_cast`) are **`@computed_field` on the model**, not settable inputs. Lookup is keyed by the canonical `attn_implementation` string.
- Unknown / user-supplied strings (custom hub kernels) pass through to HF but get **conservative capability defaults** (packing=False, flash-lib=False, dtype-cast=True). Known hub kernels axolotl can classify live in a small allowlist.
- Downstream consumers read *only* `cfg.attn_implementation` and the capability flags. No `cfg.flash_attention`, `cfg.xformers_attention`, etc. anywhere in `src/`.
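
A minimal sketch of the normalizer this design implies, assuming the mapping and priority order above; the constant name `LEGACY_TO_CANONICAL` and the exact messages are illustrative, not verbatim from the branch:

```python
import warnings

# Priority order from the current branch: s2 > sage > xformers > flex > flash > sdp > eager.
LEGACY_TO_CANONICAL = {
    "s2_attention": "s2",
    "sage_attention": "sage",
    "xformers_attention": "xformers",
    "flex_attention": "flex_attention",
    "flash_attention": "flash_attention_2",
    "sdp_attention": "sdpa",
    "eager_attention": "eager",
}


def normalize_attn_implementation(data: dict) -> dict:
    """Map legacy booleans to the canonical field, warn, and strip them."""
    legacy_seen = [flag for flag in LEGACY_TO_CANONICAL if data.get(flag)]
    if legacy_seen and data.get("attn_implementation"):
        # No silent precedence: mixing old and new spellings is an error.
        raise ValueError(f"attn_implementation conflicts with legacy flags: {legacy_seen}")
    if legacy_seen:
        # Dict order encodes the priority list, so the first hit wins.
        data["attn_implementation"] = LEGACY_TO_CANONICAL[legacy_seen[0]]
    for flag in legacy_seen:
        warnings.warn(f"`{flag}` is deprecated; set attn_implementation instead", DeprecationWarning)
        data.pop(flag, None)  # never readable on the validated cfg
    return data
```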

This is strictly what the branch is already *trying* to do — the gaps below are places it hasn't landed that goal yet.

## 3. Gaps and holes

### A. Legacy flags are still parallel state, not input-only

1. The normalizer *sets* the legacy flags on `data` (the `impl_to_flag[attn_impl]` branch). It does not delete them. So `cfg.flash_attention` is still truthy after validation, and downstream code still reads it (see G).
2. Short-form enum values (`flash`, `sdpa`, `fp8`) are persisted as-is on `cfg.attn_implementation`, which is why `model.py` needs `_ATTN_IMPL_TO_HF` to translate before passing to HF. Source-of-truth implies canonicalize at normalize-time, not translate at consume-time.
3. Legacy flag + `attn_implementation` (a consistent combo, e.g. `attn_implementation: flash` + `flash_attention: true`) emits no deprecation warning — only the legacy-only path warns.
4. Legacy Field descriptions (`xformers_attention`, `sdp_attention`, etc.) don't have `deprecated=True`, so JSON schema still advertises them as first-class.

### B. Validators that still only check the legacy flag

5. **`check_ebft_activation_offloading`** (`validation.py:1607-1619`) reads only `data.get("flex_attention")`. Users on `attn_implementation: flex_attention` bypass the incompatibility check.
6. **`check_sample_packing_without_attention`** (`validation.py:188-203`) early-returns when `attn_implementation` is set but never validates that the chosen backend actually supports packing. `attn_implementation: eager` + `sample_packing: true` silently passes; the old legacy-flag check warned.

### C. Non-enum strings fall through the capability tables

7. **HF-native `"flash_attention_2"`** is neither in `impl_to_flag` nor `FLASH_ATTN_LIB_IMPLS`. A user copy-pasting from HF docs gets `attn_uses_flash_lib=False`, silently disabling FA4 auto-apply, the LLaMA flash hijack, `_patch_attention` (btlm, stablelm_epoch, mistral3, llava), and `_apply_flash_attention_peft_patches`.
8. **Hub kernel strings** (`kernels-community/flash-attn3`, `kernels-community/sage-attention`) default to `attn_supports_packing=True` (silently entering multipack with varlen `position_ids` — correctness depends on the kernel honoring them) and `attn_uses_flash_lib=False` (so `context_parallel_size > 1` raises "requires flash attention" even for FA3 hub kernels).
9. **Conflict trap for hub-kernel + legacy flag** (`config.py:1414-1419`): `attn_implementation: kernels-community/flash-attn3` + `flash_attention: true` always raises, because `impl_to_flag.get(custom)` is `None` and the loop treats `flag != None` as a conflict. A common combo in existing YAMLs breaks hard on upgrade.

### D. Silent behaviour change for xformers

10. The old `_apply_flash_attention_patches` did `self.cfg.flash_attention = True` for `xformers + sample_packing`. The new version doesn't, and xformers is not in `FLASH_ATTN_LIB_IMPLS`. Consumers that keyed off `cfg.flash_attention` now see falsy for xformers, silently dropping `_patch_attention` (the btlm / stablelm_epoch+packing / mistral3 / llava model-type FA patches). Some of this is arguably correct cleanup (xformers has its own HF registry entry now), but the btlm/stablelm/mistral3 regression is not called out and not tested. Decide consciously, not by omission.

### E. Capability flags are writable Pydantic fields, not computed

11. `attn_supports_packing`, `attn_uses_flash_lib`, `attn_needs_dtype_cast` are declared `bool | None = Field(default=None)` on `AxolotlInputConfig`. YAML is not rejected — a user can set `attn_uses_flash_lib: true` and override the normalizer.

### F. Validator ordering (not covered by tests)

12. `AttentionValidationMixin.check_attention_fields` (inherited, `mode="before"`) and `normalize_attn_implementation` (subclass, `mode="before"`) both run during the `model_validator` phase. Pydantic MRO may run the inherited one first. For legacy-only `s2_attention: true` + `flash_attention: true` (the test `test_s2_plus_flash_maps_to_s2` asserts this maps to `s2`), the inherited check may raise "multiple attention implementations set" before the normalizer runs. The test calls the classmethod directly and does not build the model, so this is unverified either way (a minimal repro sketch follows).
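
Since the ordering is unverified, a repro is cheap to write: build a model with both validators and observe which one fires first. Everything below is illustrative scaffolding, not branch code:

```python
from pydantic import BaseModel, model_validator


class AttentionValidationMixin(BaseModel):
    @model_validator(mode="before")
    @classmethod
    def check_attention_fields(cls, data):
        print("inherited check ran")
        return data


class Cfg(AttentionValidationMixin):
    @model_validator(mode="before")
    @classmethod
    def normalize_attn_implementation(cls, data):
        print("subclass normalizer ran")
        return data


Cfg.model_validate({})  # the print order reveals which mode="before" validator runs first
```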

### G. Remaining legacy reads in `src/`

13. `src/axolotl/integrations/lm_eval/cli.py:120` reads `cfg.flash_attention`. Works for `attn_implementation=flash` only.
14. `tests/e2e/multigpu/test_llama.py:524-526` writes `cfg.flash_attention = True` / `cfg.flex_attention = True`. Stale pattern.
15. Dual-check idioms in `config.py` (lines 1464, 1478, 1570, 1586, 1774) and `validation.py` (198, 209, 221, 850, 1586, 1611) — `data.get("x_attention") or data.get("attn_implementation") == "x"` — are redundant once legacy flags are input-only; remove them.

### H. fp8 operational risk

16. The `fp8` docstring documents hard requirements (PyTorch ≥ 2.11, SM90+, flash-attn with FA3, torchao ≥ 0.17.0) and a runtime constraint (`config.use_cache = False`). None are validated — misconfiguration surfaces as a torchao runtime error. `xformers` and `sage` availability/compute-capability guards exist; `fp8` should match (a preflight sketch follows).
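
A hedged sketch of what that preflight could look like, mirroring the existing xformers/sage guards; the thresholds come from the fp8 docstring quoted in this item, and the function name is an assumption:

```python
import warnings

import torch
from packaging import version


def check_fp8_attention_requirements(use_cache: bool | None) -> None:
    """Fail fast on unmet fp8 requirements instead of surfacing a torchao runtime error."""
    if version.parse(torch.__version__.split("+")[0]) < version.parse("2.11"):
        raise ValueError("attn_implementation: fp8 requires PyTorch >= 2.11")
    major, _minor = torch.cuda.get_device_capability()
    if major < 9:
        raise ValueError("attn_implementation: fp8 requires SM90+ hardware")
    if use_cache is not False:
        warnings.warn("fp8 attention requires config.use_cache = False")
```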

### I. Test coverage gaps

17. `test_attn_implementation.py` exercises the classmethod in isolation plus the constant sets. It does **not**:
    - Build a full `AxolotlInputConfig(**data)` (so validator ordering — item 12 — is untested).
    - Verify capability flags can't be overridden from YAML (item 11).
    - Cover `check_sample_packing_without_attention` with `attn_implementation: eager` (item 6).
    - Cover `check_ebft_activation_offloading` with `attn_implementation: flex_attention` (item 5).
    - Cover the hub-kernel + legacy flag combo (item 9).
    - Cover `flash_attention_2` canonicalization (item 7).

## 4. Fix plan

Four phases, each commit-sized. Phases 1–2 are local and low-risk; phase 3 is the behaviour-changing cleanup; phase 4 is tests + docs.

### Phase 1 — Lock down the data model

1. Drop the `AttnImplementation` enum. `attn_implementation` becomes `str | None`, validated against a canonical allowlist (`eager`, `sdpa`, `flash_attention_2`, `flash_attention_3`, `flex_attention`, `xformers`, `sage`, `s2`, `fp8`) **or** a hub-kernel path (`startswith("kernels-")` / contains `/`). Reject short-form strings like `flash` / `fa2` / `sdp` / `flex` with an explicit error pointing at the canonical name.
2. Rewrite `normalize_attn_implementation` so its only job is mapping **legacy booleans → canonical `attn_implementation`** (for BC). The mapping is fixed:
   - `flash_attention → flash_attention_2`
   - `sdp_attention → sdpa`
   - `flex_attention → flex_attention`
   - `xformers_attention → xformers`
   - `sage_attention → sage`
   - `s2_attention → s2`
   - `eager_attention → eager`

   Priority for legacy combos stays as in the current branch (`s2 > sage > xformers > flex > flash > sdp > eager`). Emit a one-time `DeprecationWarning` per unique legacy flag seen. After mapping, delete the legacy flag keys from `data` so they never appear on the validated `cfg`. If both a canonical `attn_implementation` and any legacy flag are set, raise (no silent precedence).

   **Merge `AttentionValidationMixin.check_attention_fields` into this normalizer and delete the mixin method.** Pydantic v2 runs inherited `mode="before"` validators before subclass ones per MRO, so leaving them as siblings causes the inherited check to reject legacy combos like `s2 + flash` before the normalizer can map them. One validator, one source of conflict detection.

   **Fix the gemma4-hybrid path**: change `data["attn_implementation"] = "flash"` to `data["attn_implementation"] = "flash_attention_2"` (the short name no longer validates after step 1).
3. Convert `attn_supports_packing`, `attn_uses_flash_lib`, `attn_needs_dtype_cast` to `@computed_field` (see the sketch after this list). The three capability tables move to `enums.py` as module constants keyed by the canonical `attn_implementation` string (including `flash_attention_3` — missing from the current branch — and known hub kernels):
   - Packing-capable: `{flash_attention_2, flash_attention_3, flex_attention, xformers, sage, kernels-community/flash-attn3, kernels-community/sage-attention}`.
   - Flash-lib (axolotl's monkeypatch targets): `{flash_attention_2, flash_attention_3, s2, kernels-community/flash-attn3}`.
   - No-dtype-cast: `{eager, sdpa}`.

   Unknown strings get conservative defaults (`packing=False, flash_lib=False, dtype_cast=True`).
4. Delete `_ATTN_IMPL_TO_HF` from `model.py` and pass `cfg.attn_implementation` straight through. The gemma4-hybrid branch continues to override to `flash_attention_2` before passing to HF.
5. `deprecated=True` on each legacy boolean Field so JSON schema + Pydantic surface the deprecation.
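
A sketch of step 3, using Pydantic v2 `@computed_field` and assuming the capability tables live in `enums.py`; the constant names mirror the bullets above and are illustrative:

```python
from pydantic import BaseModel, computed_field

# Capability tables keyed by canonical attn_implementation strings (enums.py).
PACKING_CAPABLE = {
    "flash_attention_2", "flash_attention_3", "flex_attention", "xformers", "sage",
    "kernels-community/flash-attn3", "kernels-community/sage-attention",
}
FLASH_LIB_IMPLS = {"flash_attention_2", "flash_attention_3", "s2", "kernels-community/flash-attn3"}
NO_DTYPE_CAST = {"eager", "sdpa"}


class AxolotlInputConfig(BaseModel):
    attn_implementation: str | None = None

    @computed_field
    @property
    def attn_supports_packing(self) -> bool:
        return self.attn_implementation in PACKING_CAPABLE

    @computed_field
    @property
    def attn_uses_flash_lib(self) -> bool:
        return self.attn_implementation in FLASH_LIB_IMPLS

    @computed_field
    @property
    def attn_needs_dtype_cast(self) -> bool:
        # Conservative default: unknown backends keep the dtype cast.
        return self.attn_implementation not in NO_DTYPE_CAST
```

Since computed fields have no setter, pairing this with `extra="forbid"` on the model makes `attn_uses_flash_lib: true` in YAML a hard validation error rather than a silent override (closing item 11).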

### Phase 2 — Fix the validators

6. `check_sample_packing_without_attention`: drop the early return and gate on `attn_supports_packing` (see the sketch after this list). Warn (or raise — pick one and be consistent) if packing is enabled with a non-packing backend.
7. `check_ebft_activation_offloading`: replace `data.get("flex_attention")` with `attn_implementation == "flex_attention"`.
8. Sweep the dual-check idioms (item 15): remove every `data.get("x_attention") or data.get("attn_implementation") == "x"` dual-check — after phase 1 the legacy side is always `None`. This removes ~10 lines of noise and eliminates the "which side wins" class of bugs.
9. fp8 preflight (item 16): require `env_capabilities.compute_capability ≥ sm_90`, `torch_version ≥ 2.11`, and `torchao_version ≥ 0.17`. Warn if `use_cache` isn't explicitly `False`.
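
One possible shape for step 6, running after normalization so the computed capability flag exists; the raise-vs-warn choice is shown as a raise, and the signature is illustrative:

```python
def check_sample_packing_without_attention(cfg) -> None:
    """Gate packing on backend capability instead of early-returning."""
    if cfg.sample_packing and not cfg.attn_supports_packing:
        raise ValueError(
            "sample_packing requires a packing-capable attention backend; "
            f"got attn_implementation={cfg.attn_implementation!r}"
        )
```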

### Phase 3 — Migrate remaining consumers

10. `lm_eval/cli.py:120` → `flash_attention=cfg.attn_uses_flash_lib`.
11. `lm_eval/__init__.py:26` currently reads `(cfg.attn_implementation == "flash")` — after canonicalization `"flash"` is never stored, so this evaluates `False` for every backend. Change to `cfg.attn_uses_flash_lib`.
12. `validation.py:1137-1142` (the NPU check) currently iterates `["flash_attention", "sdp_attention", "s2_attention"]` as string keys. Replace with `cfg.attn_implementation in {"flash_attention_2", "flash_attention_3", "sdpa", "s2"}` or the equivalent canonical-string set.
13. `tests/e2e/multigpu/test_llama.py:524-526` → `cfg.attn_implementation = "flash_attention_2"` / `"flex_attention"`.
14. **Xformers decision** (item 10): the old `cfg.flash_attention = True` side-effect activated `_patch_attention` for btlm / stablelm_epoch+packing / mistral3 / llava. Two choices:
    - Add `xformers` to the set that gates `_patch_attention` (restores the old behaviour, keeps the patches live).
    - Document that those patches don't apply to xformers post-refactor and drop the paths if they're dead.

    Pick one explicitly and leave a commit note. Do not leave it as silent breakage.
15. Add a repo-level check (`tests/test_no_legacy_attn_reads.py` or a ruff/grep pre-commit) that fails if anything outside `config.py`'s normalizer reads `cfg.flash_attention` / `cfg.sdp_attention` / etc. This keeps the invariant from rotting; a possible shape is sketched after this list.
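
A possible shape for that check as a plain pytest run from the repo root; the regex and allowlist are illustrative:

```python
import pathlib
import re

# Any `cfg.<legacy>_attention` read outside the normalizer is a regression.
LEGACY_READ = re.compile(r"\bcfg\.(flash|sdp|xformers|flex|sage|s2|eager)_attention\b")
ALLOWED = {pathlib.Path("src/axolotl/utils/schemas/config.py")}


def test_no_legacy_attn_reads():
    offenders = [
        str(path)
        for path in pathlib.Path("src").rglob("*.py")
        if path not in ALLOWED and LEGACY_READ.search(path.read_text(encoding="utf-8"))
    ]
    assert not offenders, f"legacy attention flag reads found in: {offenders}"
```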

### Phase 4 — Tests + docs

16. Rewrite `test_attn_implementation.py` to build a full `AxolotlInputConfig(**data)`, not just the classmethod. This covers validator ordering and the Pydantic-field-override issue.
17. Add one test per gap closed above: `attn_implementation: eager` + `sample_packing`; `attn_implementation: flex_attention` + `activation_offloading`; short-form `flash` rejected; `flash_attention_2` passthrough; `kernels-community/flash-attn3` capability lookup; `attn_uses_flash_lib: true` in YAML rejected; a legacy boolean emits `DeprecationWarning` and is absent from the validated `cfg` (example shape after this list); fp8 preflight failures.
18. Update `docs/attention.qmd` for the single `attn_implementation` field + the deprecation table for legacy flags. One-paragraph migration note in the changelog.
19. `examples/` contains ~170 YAML files using legacy flags (`flash_attention: true` etc.). They still validate post-refactor (the normalizer maps them with a deprecation), but a follow-up sweep to convert them to `attn_implementation: flash_attention_2` is worth scheduling — call this out in the migration note so users know examples will be migrated on a later pass.
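
For instance, the legacy-boolean case from item 17 could look like the following, assuming the phase-1 normalizer has landed; the import path matches the schema location named earlier in this document, the rest is illustrative:

```python
import pytest

from axolotl.utils.schemas.config import AxolotlInputConfig  # path per the schema note above


def test_legacy_flash_attention_maps_and_warns():
    with pytest.warns(DeprecationWarning, match="flash_attention"):
        cfg = AxolotlInputConfig(flash_attention=True)
    assert cfg.attn_implementation == "flash_attention_2"
    # The legacy key was stripped from the input, so it never surfaces on cfg.
    assert getattr(cfg, "flash_attention", None) is None
```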

## 5. Ordering & risk

- Phase 1 is the keystone: it's the largest diff but each step is mechanical once the alias map is in place. There is no behaviour change for any consumer that was using `attn_implementation` correctly; behaviour changes only for consumers that were reading legacy flags (phase 3 item 14 is the explicit decision point).
- Phase 2 is independent of phase 1 and can land first as a quick safety net.
- Phase 3 item 14 (the xformers decision) is the only judgment call — flag it for review before choosing.
- Total: ~10-13 commits beyond what's on the branch, each scoped and individually revertable.
@@ -1,7 +1,6 @@
 include requirements.txt
 include README.md
 include LICENSE
 include VERSION
 include src/setuptools_axolotl_dynamic_dependencies.py
 include src/axolotl/utils/chat_templates/templates/*.jinja
-include AGENTS.md
-recursive-include docs/agents *.md
 recursive-include axolotl *.py
README.md (45 changed lines)

@@ -86,8 +86,8 @@ Features:
 **Requirements**:

 - NVIDIA GPU (Ampere or newer for `bf16` and Flash Attention) or AMD GPU
-- Python >=3.11 (3.12 recommended)
-- PyTorch ≥2.9.1
+- Python 3.11
+- PyTorch ≥2.8.0

 ### Google Colab

@@ -95,19 +95,11 @@ Features:

 ### Installation

 #### Using pip

 ```bash
-# install uv if you don't already have it installed (restart shell after)
-curl -LsSf https://astral.sh/uv/install.sh | sh
-
-# change depending on system
-export UV_TORCH_BACKEND=cu128
-
-# create a new virtual environment
-uv venv --python 3.12
-source .venv/bin/activate
-
-uv pip install torch==2.10.0 torchvision
-uv pip install --no-build-isolation axolotl[deepspeed]
+pip3 install -U packaging==26.0 setuptools==75.8.0 wheel ninja
+pip3 install --no-build-isolation axolotl[flash-attn,deepspeed]

 # Download example axolotl configs, deepspeed configs
 axolotl fetch examples

@@ -118,7 +110,7 @@ axolotl fetch deepspeed_configs # OPTIONAL

 Installing with Docker can be less error prone than installing in your own environment.
 ```bash
-docker run --gpus '"all"' --ipc=host --rm -it axolotlai/axolotl:main-latest
+docker run --gpus '"all"' --rm -it axolotlai/axolotl:main-latest
 ```

 Other installation approaches are described [here](https://docs.axolotl.ai/docs/installation.html).

@@ -165,29 +157,6 @@ That's it! Check out our [Getting Started Guide](https://docs.axolotl.ai/docs/ge
 - [API Reference](https://docs.axolotl.ai/docs/api/) - Auto-generated code documentation
 - [FAQ](https://docs.axolotl.ai/docs/faq.html) - Frequently asked questions

-## AI Agent Support
-
-Axolotl ships with built-in documentation optimized for AI coding agents (Claude Code, Cursor, Copilot, etc.). These docs are bundled with the pip package — no repo clone needed.
-
-```bash
-# Show overview and available training methods
-axolotl agent-docs
-
-# Topic-specific references
-axolotl agent-docs sft                # supervised fine-tuning
-axolotl agent-docs grpo               # GRPO online RL
-axolotl agent-docs preference_tuning  # DPO, KTO, ORPO, SimPO
-axolotl agent-docs reward_modelling   # outcome and process reward models
-axolotl agent-docs pretraining        # continual pretraining
-axolotl agent-docs --list             # list all topics
-
-# Dump config schema for programmatic use
-axolotl config-schema
-axolotl config-schema --field adapter
-```
-
-If you're working with the source repo, agent docs are also available at `docs/agents/` and the project overview is in `AGENTS.md`.

 ## 🤝 Getting Help

 - Join our [Discord community](https://discord.gg/HhrNrHJPRb) for support
@@ -128,12 +128,15 @@ quartodoc:
       - monkeypatch.mistral_attn_hijack_flash
       - monkeypatch.multipack
       - monkeypatch.relora
       - monkeypatch.llama_expand_mask
       - monkeypatch.lora_kernels
       - monkeypatch.utils
+      - monkeypatch.btlm_attn_hijack_flash
+      - monkeypatch.llama_patch_multipack
+      - monkeypatch.stablelm_attn_hijack_flash
       - monkeypatch.trainer_fsdp_optim
       - monkeypatch.transformers_fa_utils
       - monkeypatch.unsloth_
       - monkeypatch.data.batch_dataset_fetcher
       - monkeypatch.mixtral
       - monkeypatch.gradient_checkpointing.offload_cpu

@@ -237,7 +240,6 @@ website:
     - section: "Getting Started"
       contents:
         - docs/getting-started.qmd
-        - docs/choosing_method.qmd
         - docs/installation.qmd
         - docs/inference.qmd
     - section: "Model Guides"

@@ -302,9 +304,6 @@ website:
       contents:
         - docs/multimodal.qmd
        - docs/rlhf.qmd
-        - docs/grpo.qmd
-        - docs/ebft.qmd
-        - docs/vllm_serving.qmd
         - docs/reward_modelling.qmd
         - docs/lr_groups.qmd
         - docs/lora_optims.qmd

@@ -326,6 +325,7 @@ website:
     - section: "Advanced Features"
       contents:
         - docs/fsdp_qlora.qmd
+        - docs/unsloth.qmd
         - docs/torchao.qmd
         - docs/custom_integrations.qmd
         - docs/sequence_parallelism.qmd

@@ -336,7 +336,6 @@ website:
     - section: "Troubleshooting"
       contents:
         - docs/faq.qmd
-        - docs/training_stability.qmd
         - docs/debugging.qmd
         - docs/nccl.qmd
@@ -11,7 +11,7 @@ ENV NIGHTLY_BUILD="{{ NIGHTLY_BUILD }}"
 ENV HF_HOME="{{ HF_HOME }}"

 RUN apt-get update && \
-    apt-get install -y --allow-change-held-packages vim curl nano zstd libnccl2 libnccl-dev ibverbs-providers ibverbs-utils infiniband-diags librdmacm-dev librdmacm1 rdmacm-utils slurm-wlm
+    apt-get install -y --allow-change-held-packages vim curl nano libnccl2 libnccl-dev ibverbs-providers ibverbs-utils infiniband-diags librdmacm-dev librdmacm1 rdmacm-utils slurm-wlm

 WORKDIR /workspace

@@ -22,6 +22,15 @@ WORKDIR /workspace/axolotl
 RUN git fetch origin +$GITHUB_REF && \
     git checkout FETCH_HEAD

 # If AXOLOTL_EXTRAS is set, append it in brackets
+RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
+        sed -i 's#^transformers.*#transformers @ git+https://github.com/huggingface/transformers.git@main#' requirements.txt; \
+        sed -i 's#^peft.*#peft @ git+https://github.com/huggingface/peft.git@main#' requirements.txt; \
+        sed -i 's#^accelerate.*#accelerate @ git+https://github.com/huggingface/accelerate.git@main#' requirements.txt; \
+        sed -i 's#^trl.*#trl @ git+https://github.com/huggingface/trl.git@main#' requirements.txt; \
+        sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt; \
+    fi
+
 RUN uv pip install packaging==26.0 setuptools==78.1.1
 RUN uv pip install torchvision
 RUN uv pip uninstall causal_conv1d

@@ -31,21 +40,11 @@ RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
         uv pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray] $AXOLOTL_ARGS; \
     fi

-# Override with nightly HF packages for nightly builds
-RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
-        uv pip install --no-deps \
-            "transformers @ git+https://github.com/huggingface/transformers.git@main" \
-            "peft @ git+https://github.com/huggingface/peft.git@main" \
-            "accelerate @ git+https://github.com/huggingface/accelerate.git@main" \
-            "trl @ git+https://github.com/huggingface/trl.git@main" \
-            "datasets @ git+https://github.com/huggingface/datasets.git@main"; \
-    fi
-
+RUN python scripts/unsloth_install.py --uv | sh
 RUN python scripts/cutcrossentropy_install.py --uv | sh

 # So we can test the Docker image
-RUN uv pip install black mypy pre-commit types-requests quartodoc jupyter blobfile tiktoken \
-        codecov codecov-cli pytest pytest-cov pytest-retry pytest-sugar pytest-xdist tbparse
+RUN uv pip install -r requirements-dev.txt -r requirements-tests.txt

 # fix so that git fetch/pull from remote works
 RUN git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
cicd/Dockerfile.jinja (new file, 54 lines)

@@ -0,0 +1,54 @@
FROM axolotlai/axolotl-base:{{ BASE_TAG }}

ENV TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
ENV AXOLOTL_EXTRAS="{{ AXOLOTL_EXTRAS }}"
ENV AXOLOTL_ARGS="{{ AXOLOTL_ARGS }}"
ENV CUDA="{{ CUDA }}"
ENV PYTORCH_VERSION="{{ PYTORCH_VERSION }}"
ENV GITHUB_REF="{{ GITHUB_REF }}"
ENV GITHUB_SHA="{{ GITHUB_SHA }}"
ENV NIGHTLY_BUILD="{{ NIGHTLY_BUILD }}"
ENV HF_HOME="{{ HF_HOME }}"
ENV AXOLOTL_DATASET_NUM_PROC="8"

RUN apt-get update && \
    apt-get install -y --allow-change-held-packages vim curl nano libnccl2 libnccl-dev ibverbs-providers ibverbs-utils infiniband-diags librdmacm-dev librdmacm1 rdmacm-utils slurm-wlm

WORKDIR /workspace

RUN git clone --depth=1 https://github.com/axolotl-ai-cloud/axolotl.git

WORKDIR /workspace/axolotl

RUN git fetch origin +$GITHUB_REF && \
    git checkout FETCH_HEAD

# If AXOLOTL_EXTRAS is set, append it in brackets
RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
        sed -i 's#^transformers.*#transformers @ git+https://github.com/huggingface/transformers.git@main#' requirements.txt; \
        sed -i 's#^peft.*#peft @ git+https://github.com/huggingface/peft.git@main#' requirements.txt; \
        sed -i 's#^accelerate.*#accelerate @ git+https://github.com/huggingface/accelerate.git@main#' requirements.txt; \
        sed -i 's#^trl.*#trl @ git+https://github.com/huggingface/trl.git@main#' requirements.txt; \
        sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt; \
    fi

RUN pip install packaging==26.0 setuptools==78.1.1 psutil
RUN pip uninstall -y causal_conv1d
RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
        pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
    else \
        pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray] $AXOLOTL_ARGS; \
    fi

RUN python scripts/unsloth_install.py | sh
RUN python scripts/cutcrossentropy_install.py | sh

# So we can test the Docker image
RUN pip install -r requirements-dev.txt -r requirements-tests.txt

# fix so that git fetch/pull from remote works
RUN git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
    git config --get remote.origin.fetch

# helper for huggingface-login cli
RUN git config --global credential.helper store
cicd/cicd.sh (24 changed lines)

@@ -1,25 +1,13 @@
 #!/bin/bash
 set -e

-python -c "import torch; assert '$PYTORCH_VERSION' in torch.__version__, f'Expected torch $PYTORCH_VERSION but got {torch.__version__}'"
+python -c "import torch; assert '$PYTORCH_VERSION' in torch.__version__"

-set -o pipefail
-for i in 1 2 3; do
-  if curl --silent --show-error --fail -L \
-    https://axolotl-ci.b-cdn.net/hf-cache.tar.zst \
-    | tar -xpf - -C "${HF_HOME}/hub/" --use-compress-program unzstd --strip-components=1; then
-    echo "HF cache extracted successfully"
-    break
-  fi
-  echo "Attempt $i failed, cleaning up and retrying in 15s..."
-  rm -rf "${HF_HOME}/hub/"*
-  sleep 15
-done
-# hf download "NousResearch/Meta-Llama-3-8B"
-# hf download "NousResearch/Meta-Llama-3-8B-Instruct"
-# hf download "microsoft/Phi-4-reasoning"
-# hf download "microsoft/Phi-3.5-mini-instruct"
-# hf download "microsoft/Phi-3-medium-128k-instruct"
+# curl -L https://axolotl-ci.b-cdn.net/hf-cache.tar.zst | tar -xpf - -C "${HF_HOME}/hub/" --use-compress-program unzstd --strip-components=1
+hf download "NousResearch/Meta-Llama-3-8B"
+hf download "NousResearch/Meta-Llama-3-8B-Instruct"
+hf download "microsoft/Phi-4-reasoning"
+hf download "microsoft/Phi-3.5-mini-instruct"

 # Run unit tests with initial coverage report
 pytest -v --durations=10 -n8 \
@@ -17,7 +17,7 @@ template_loader = jinja2.FileSystemLoader(searchpath=cicd_path)
 template_env = jinja2.Environment(
     loader=template_loader, autoescape=select_autoescape()
 )
-dockerfile = os.environ.get("E2E_DOCKERFILE", "Dockerfile-uv.jinja")
+dockerfile = os.environ.get("E2E_DOCKERFILE", "Dockerfile.jinja")
 df_template = template_env.get_template(dockerfile)

 df_args = {

@@ -16,7 +16,7 @@ template_loader = jinja2.FileSystemLoader(searchpath=cicd_path)
 template_env = jinja2.Environment(
     loader=template_loader, autoescape=select_autoescape()
 )
-dockerfile = os.environ.get("E2E_DOCKERFILE", "Dockerfile-uv.jinja")
+dockerfile = os.environ.get("E2E_DOCKERFILE", "Dockerfile.jinja")
 df_template = template_env.get_template(dockerfile)

 df_args = {

@@ -68,6 +68,10 @@ def run_cmd(cmd: str, run_folder: str):
     sp_env["AXOLOTL_DATASET_NUM_PROC"] = "8"

     # Propagate errors from subprocess.
-    exit_code = subprocess.call(cmd.split(), cwd=run_folder, env=sp_env)  # nosec
-    if exit_code:
-        raise RuntimeError(f"Command '{cmd}' failed with exit code {exit_code}")
+    try:
+        exit_code = subprocess.call(cmd.split(), cwd=run_folder, env=sp_env)  # nosec
+        if exit_code:
+            print(f"Command '{cmd}' failed with exit code {exit_code}")
+        return exit_code
+    except Exception as e:  # pylint: disable=broad-except
+        print(f"Command '{cmd}' failed with exception {e}")
@@ -37,7 +37,6 @@ coverage:
      only_pulls: false
      flags: null
      paths: null
      informational: true

parsers:
  gcov:

@@ -32,7 +32,7 @@ RUN if [ "$TARGETARCH" = "arm64" ]; then \
        pip install --no-build-isolation -e .[$BASE_EXTRAS,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
    else \
        pip install --no-build-isolation -e .[$BASE_EXTRAS] $AXOLOTL_ARGS; \
    fi && \
    python scripts/unsloth_install.py | sh && \
    python scripts/cutcrossentropy_install.py | sh && \
    pip install pytest && \
    pip cache purge
@@ -22,7 +22,6 @@ RUN apt update && \
    chmod 700 ~/.ssh && \
    printf "\n[[ -z \"\$TMUX\" ]] && { tmux attach-session -t ssh_tmux || tmux new-session -s ssh_tmux; exit; }\n" >> ~/.bashrc && \
    printf "[ ! -z \"\$TERM\" -a -r /etc/motd ] && cat /etc/motd\n" >> ~/.bashrc && \
    printf "source /workspace/axolotl-venv/bin/activate\n" >> ~/.bashrc && \
    chmod +x /workspace/axolotl/scripts/cloud-entrypoint.sh && \
    chmod +x /root/cloud-entrypoint.sh && \
    echo 'set-option -g history-limit 5000' >> ~/.tmux.conf

@@ -33,6 +33,7 @@ RUN if [ "$TARGETARCH" = "arm64" ]; then \
    else \
        uv pip install --no-build-isolation -e .[$BASE_EXTRAS] $AXOLOTL_ARGS; \
    fi && \
    python scripts/unsloth_install.py --uv | sh && \
    python scripts/cutcrossentropy_install.py --uv | sh && \
    uv pip install pytest && \
    uv cache clean
@@ -36,22 +36,22 @@ RUN uv pip install packaging setuptools wheel psutil \
    && uv pip install awscli pydantic

RUN if [ "$TARGETARCH" = "amd64" ]; then \
        MAMBA_SKIP_CUDA_BUILD=TRUE CAUSAL_CONV1D_SKIP_CUDA_BUILD=TRUE uv pip install --no-build-isolation mamba_ssm causal_conv1d; \
        uv pip install --no-build-isolation "causal_conv1d @ git+https://github.com/Dao-AILab/causal-conv1d.git@main"; \
        uv pip install "mamba_ssm @ git+https://github.com/state-spaces/mamba.git@main"; \
    fi

# Map Python version (e.g., 3.12 -> cp312)
RUN PYTHON_CP="cp$(echo $PYTHON_VERSION | tr -d '.')" && \
    # Map PyTorch version (e.g., 2.9.1 -> torch2.9, 2.10.0 -> torch2.10)
    TORCH_TAG="torch$(echo $PYTORCH_VERSION | grep -oP '^\d+\.\d+')" && \
    LINUX_TAG="manylinux_" && \
    # Map architecture
    case "$TARGETARCH" in \
        amd64) ARCH_TAG="2_24_x86_64.manylinux_2_28_x86_64" ;; \
        arm64) ARCH_TAG="2_34_aarch64" ;; \
        amd64) ARCH_TAG="x86_64" ;; \
        arm64) ARCH_TAG="aarch64" ;; \
        *) echo "Unsupported architecture: $TARGETARCH"; exit 1 ;; \
    esac && \
    WHL_VERSION="v0.7.16" && \
    WHL_FILE="flash_attn-2.8.3+cu${CUDA}${TORCH_TAG}-${PYTHON_CP}-${PYTHON_CP}-${LINUX_TAG}${ARCH_TAG}.whl" && \
    WHL_FILE="flash_attn-2.8.3+cu${CUDA}${TORCH_TAG}-${PYTHON_CP}-${PYTHON_CP}-linux_${ARCH_TAG}.whl" && \
    wget -nv "https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/${WHL_VERSION}/${WHL_FILE}" && \
    uv pip install --no-cache-dir "${WHL_FILE}" && \
    rm "${WHL_FILE}"
@@ -1,71 +0,0 @@
# GRPO — Agent Reference

Online RL with verifiable reward functions. For full config reference, async features, and scaling, see [grpo.qmd](../grpo.qmd). For vLLM setup, see [vllm_serving.qmd](../vllm_serving.qmd).

## Architecture

```
Terminal 1 (GPU 0)                    Terminal 2 (GPU 1)
┌──────────────────────┐              ┌──────────────────────────────────┐
│ vLLM Server          │     HTTP     │ Trainer                          │
│ Serves base model    │◄────────────►│ 1. Send prompts to vLLM          │
│ + LoRA adapter       │  /generate   │ 2. Score completions (rewards)   │
│                      │  /set_lora   │ 3. Compute advantages            │
│ Punica kernels for   │              │ 4. PPO-clip gradient update      │
│ LoRA inference       │              │ 5. Sync LoRA weights to vLLM     │
└──────────────────────┘              └──────────────────────────────────┘
```

## Components Required

1. A YAML config with `rl: grpo`
2. A reward module (Python file with reward functions)
3. A running vLLM server (`axolotl vllm-serve config.yaml`) — see the launch sketch below
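A launch sketch matching the two-terminal layout above (GPU assignments illustrative):

```bash
# Terminal 1: vLLM server for generation (GPU 0)
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve config.yaml

# Terminal 2: GRPO trainer (GPU 1)
CUDA_VISIBLE_DEVICES=1 axolotl train config.yaml
```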
## Reward Function Signature

```python
def my_reward(completions, **kwargs) -> list[float]:
    # completions[i][0]["content"] = text of i-th completion
    # **kwargs contains dataset columns not removed by transform
    # illustrative scoring rule — replace with your task's check:
    return [1.0 if "answer" in c[0]["content"].lower() else 0.0 for c in completions]
```

Multiple rewards: `reward_funcs: [r1, r2]` with `reward_weights: [1.0, 0.5]`.
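A config sketch tying these together — a minimal sketch assuming a reward module `my_rewards.py` on the Python path (hypothetical name); the exact `trl:` field names live in `utils/schemas/trl.py`:

```yaml
rl: grpo
trl:
  reward_funcs:
    - my_rewards.my_reward   # module.function path (hypothetical module)
  reward_weights:
    - 1.0
```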
## Key Async Features

| Feature | Config | Purpose |
|---------|--------|---------|
| Async prefetch | `async_prefetch: true` | Overlap generation with training |
| LoRA sync | `vllm_lora_sync: true` | Fast adapter sync via filesystem |
| Streaming scoring | `streaming_partial_batch: true` | Score one group at a time |
| Zero-adv skip | `skip_zero_advantage_batches: true` | Skip batches with no learning signal |
| Replay buffer | `replay_buffer_size: 100` | Cache high-signal groups |
| IS correction | `vllm_importance_sampling_correction: true` | Fix off-policy distribution shift |

## Health Checks

- `rewards/*/mean` > 0.15 within 20 steps (else: test reward function standalone)
- `reward_std` > 0 on most steps (else: no learning signal)
- `entropy` 0.05-0.5 (< 0.01 = mode collapse)
- `grad_norm` 0.001-1.0 (> 10 = unstable, 0.0 = zero-advantage skip)

See [training_stability.qmd](../training_stability.qmd) for detailed diagnostics.

## File Map

```
src/axolotl/
  cli/train.py                 # Entry point
  cli/vllm_serve.py            # Entry point for vLLM server
  core/trainers/grpo/
    trainer.py                 # AxolotlGRPOTrainer
    sampler.py                 # Sampling utilities
  core/builders/rl.py          # HFRLTrainerBuilder — routes rl type → trainer
  scripts/vllm_serve_lora.py   # vLLM serve script with LoRA sync support
  utils/schemas/trl.py         # TRL config schema (all trl: options)

docs/grpo.qmd          # Full user docs: async, rewards, scaling, config reference
docs/vllm_serving.qmd  # vLLM server modes, LoRA sync, weight sync
```
@@ -1,198 +0,0 @@
# Model Architectures — Agent Reference

Model-specific quirks, required settings, and known issues. Check this before debugging training failures on specific model families.

## VLM (Vision Language Model) Quick Start

All VLM configs require these four lines:
```yaml
processor_type: AutoProcessor
skip_prepare_dataset: true
remove_unused_columns: false
sample_packing: false
```

Decision tree for VLM config:
```text
Is the model multimodal (has vision/audio encoder)?
├─ YES: Add `freeze_mm_modules: true` if training text only
│       Add `chat_template: <model_template>` (e.g. gemma4, qwen3_5, gemma3)
│       LoRA: use regex `lora_target_modules` to restrict to language model
└─ NO: Train as a regular text model

Is the model MoE (e.g. Gemma4 26B-A4B, Qwen3.5 35B-A3B)?
├─ YES: Add `lora_target_parameters` for expert LoRA
│       Consider ScatterMoE kernels (see Plugins section)
└─ NO: Standard LoRA config
```

## Plugins & Optimizations

### Cut Cross Entropy (CCE)

Computes loss from hidden states + lm_head weight without materializing the full logits tensor, saving significant VRAM. Install if not already present:

```bash
uv pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@main"
```

```yaml
plugins:
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
```

### ScatterMoE Kernels

Fuses expert + LoRA computation into a single kernel for MoE models. Significant speedup for models with many experts.

```yaml
plugins:
  - axolotl.integrations.kernels.KernelsPlugin
use_kernels: true
use_scattermoe: true
experts_implementation: scattermoe

# Expert LoRA targets (3D parameter tensors, not nn.Linear):
lora_target_parameters:
  - experts.gate_up_proj
  - experts.down_proj
```

Supported: Gemma4 (`gemma4_text`), Mixtral, Qwen MoE variants. The plugin auto-detects model type and routing function. Without ScatterMoE, expert LoRA still works but runs base expert matmul and LoRA as separate operations.

## Gemma 4

**Models**: `google/gemma-4-26B-A4B` (MoE), `google/gemma-4-31B` (dense), `google/gemma-4-E2B`, `google/gemma-4-E4B`

**Architecture**: Multimodal wrapper (`Gemma4ForConditionalGeneration`) over a text backbone (`Gemma4TextModel`), with optional vision/audio encoders. All Gemma4 HF repos have `model_type: "gemma4"` — even text-only variants load as multimodal with a vision tower.

### Required settings

```yaml
# Always needed for Gemma4:
freeze_mm_modules: true  # Freeze vision/audio encoders for text-only training
gradient_checkpointing_kwargs:
  use_reentrant: false   # Shared per-layer norms cause "marked ready twice" with reentrant

# LoRA target — restrict to language model only (DO NOT use lora_target_linear: true):
lora_target_modules: 'model.language_model.layers.[\d]+.(_checkpoint_wrapped_module.)?(mlp|self_attn).(up|down|gate|q|k|v|o)_proj'
```

### Auto-detection

Axolotl auto-detects Gemma4 and applies:
- `use_reentrant: false` for gradient checkpointing
- `ddp_find_unused_parameters: true` for DDP (skipped when `activation_offloading: true`)

### Multi-GPU

| Strategy | Works? | Notes |
|----------|--------|-------|
| DDP | Yes | Auto-sets `ddp_find_unused_parameters=True` |
| DDP + activation_offloading | Yes | `find_unused_parameters` is skipped (conflicts with checkpoint wrappers) |
| FSDP1 | No | OOM during dequantization/sharding with QLoRA |
| FSDP2 | Yes | Use `Gemma4TextDecoderLayer` (not `Gemma4DecoderLayer`) as wrap class |
| FSDP2 + activation_offloading | Yes | Lowest VRAM (~26 GiB/GPU for 26B-A4B) |

FSDP2 config:
```yaml
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_version: 2
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: Gemma4TextDecoderLayer
```

### MoE (26B-A4B)

- `enable_moe_block: true`, 256 experts, top-k routing
- No separate `SparseMoeBlock` — MoE is embedded in each decoder layer
- Expert LoRA targets 3D parameter tensors:
```yaml
lora_target_parameters:
  - experts.gate_up_proj
  - experts.down_proj
```
- ScatterMoE kernel acceleration:
```yaml
plugins:
  - axolotl.integrations.kernels.KernelsPlugin
use_kernels: true
use_scattermoe: true
experts_implementation: scattermoe
```

### VLM (Vision) Training

All Gemma4 models load as `Gemma4ForConditionalGeneration` with a vision tower. No custom `ProcessingStrategy` needed — the base class auto-detects the image token.

```yaml
base_model: google/gemma-4-E2B-it # or E4B-it, 26B-A4B
processor_type: AutoProcessor
freeze_mm_modules: true
chat_template: gemma4

skip_prepare_dataset: true
remove_unused_columns: false
sample_packing: false
```

A starting VLM loss of ~8-15 is typical. In most runs, loss converges below 1.0 within ~30-50 steps, though results may vary across configurations.

For the 26B-A4B MoE variant with ScatterMoE + expert LoRA + CCE, add:
```yaml
plugins:
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
  - axolotl.integrations.kernels.KernelsPlugin
use_kernels: true
use_scattermoe: true
experts_implementation: scattermoe
lora_target_parameters:
  - experts.gate_up_proj
  - experts.down_proj
```

### Common issues

| Symptom | Cause | Fix |
|---------|-------|-----|
| `mm_token_type_ids is required` in DDP | `model.config` not accessible through DDP wrapper | Already fixed — `unwrap_model()` in `compute_loss` and `prediction_step` |
| `marked a variable ready twice` in DDP | `ddp_find_unused_parameters=True` + activation_offloading checkpoint wrappers | Auto-handled — `find_unused_parameters` is skipped when `activation_offloading: true` |
| Loss ~12 instead of ~0.5 | Using `lora_target_linear: true` (applies LoRA to vision/audio modules) | Use the regex `lora_target_modules` pattern instead |
| FSDP2 `Could not find Gemma4AudioLayer` | Auto-wrap detects `_no_split_modules` including audio layers that don't exist | Explicitly set `fsdp_transformer_layer_cls_to_wrap: Gemma4TextDecoderLayer` |
| `Gemma4ClippableLinear not supported` by PEFT | Vision tower uses a non-standard linear wrapper | Axolotl patches this automatically via `_patch_peft_clippable_linear()` |

### E2B/E4B dense models

These have `hidden_size_per_layer_input: 256` (per-layer input embeddings) and `attention_k_eq_v: False`. Known issue: loss starts higher than expected (~12 vs ~0.5 for 26B). Root cause under investigation — may be related to the per-layer input mechanism or the `Gemma4ForConditionalGeneration` loss computation.

## Gemma 3

**Models**: `google/gemma-3-*`

- `ddp_find_unused_parameters: true` needed (multimodal unused params)
- `use_reentrant: false` recommended
- Attention mask must be dropped for sample packing (handled automatically)
- Multi-GPU test currently skipped (`tests/e2e/multigpu/test_gemma3.py`)

## Qwen 3.5 MoE

**Models**: `Qwen/Qwen3.5-35B-A3B`

- Hybrid architecture: DeltaNet linear attention (30 layers) + full attention (10 layers)
- 256 experts, 8 active per token
- Known weight scale drift in late DeltaNet layers (36-38) due to AdamW + rare expert interaction
- Fix: `normalize_weight_scales` config to detect and rescale outliers:
```yaml
normalize_weight_scales:
  - name_pattern: 'linear_attn\.conv1d\.weight'
    threshold: 1.3
```
## General MoE Notes

- `lora_target_linear: true` with multimodal MoE models will apply LoRA to ALL linear modules including vision/audio encoders — use regex `lora_target_modules` to restrict to the language model only
- Rare experts get a larger effective learning rate from AdamW (small second-moment estimates) — this can cause weight drift in recurrent/SSM components. Use `normalize_weight_scales` with `dry_run: true` to detect (see the sketch after this list).
- For ScatterMoE kernel support, set `experts_implementation: scattermoe` and add the KernelsPlugin
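A detect-only sketch combining the fields from the Qwen 3.5 section above. The per-pattern placement of `dry_run` is an assumption — verify the exact shape against the schema in `utils/schemas/config.py`:

```yaml
normalize_weight_scales:
  - name_pattern: 'linear_attn\.conv1d\.weight'  # modules to scan
    threshold: 1.3    # flag weight scales drifting beyond 1.3x
    dry_run: true     # assumed flag: report outliers without rescaling
```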
@@ -1,181 +0,0 @@
# New Model Support — Agent Reference

Guide for debugging and adding support for new model architectures in axolotl. Based on lessons learned from Gemma4, Gemma3, Qwen2-VL, and other multimodal/MoE models.

## Quick Validation Checklist

When testing a new model, run through these checks in order:

1. **Does the model load?** `axolotl preprocess config.yaml` — catches config schema errors
2. **Does LoRA apply?** Check for "Unsupported layer type" warnings from PEFT
3. **Is the initial loss sane?** First-step loss for a pretrained model should be 0.5–2.0 for SFT
4. **Does sample packing work?** Compare loss with `sample_packing: true` vs `false` — should be similar
5. **Is CCE active?** Check for "Applying Cut Cross Entropy" log and verify peak VRAM is lower

## Loss Debugging

### Expected initial loss

A pretrained model doing SFT should start with loss roughly in the 0.5–2.0 range. If loss starts above 3.0, something is wrong. If it's near `log(vocab_size)` (≈ 12 for 262K vocab), the model is predicting at random — attention masking or model weights are broken.

### Direct comparison technique

The fastest way to isolate a loss issue — bypass the trainer entirely:

```python
# Load model via axolotl's pipeline (applies all patches)
from axolotl.cli.config import load_cfg
from axolotl.utils.config import normalize_config, prepare_plugins
from axolotl.loaders.tokenizer import load_tokenizer
from axolotl.loaders.model import ModelLoader

cfg = load_cfg("your_config.yaml")
normalize_config(cfg)
prepare_plugins(cfg)
tokenizer = load_tokenizer(cfg)
model, _ = ModelLoader(cfg, tokenizer).load()

# Forward pass on preprocessed data
model.train()
out = model(input_ids, labels=labels)
print(f"Direct loss: {out.loss.item()}")  # Compare to trainer's reported loss
```

If direct loss is correct (~1.0) but trainer reports 3–4x higher, check `model_accepts_loss_kwargs` (see below).

### `model_accepts_loss_kwargs` inflation

HF Trainer checks if the model's `forward()` has `**kwargs` and sets `model_accepts_loss_kwargs=True`. This changes loss normalization: the trainer does NOT divide loss by `gradient_accumulation_steps` before logging. The gradient is correct — only the logged loss is inflated.

**Symptom**: Logged loss ≈ actual_loss × gradient_accumulation_steps.

**Which models are affected**: Any model with `**kwargs` in forward (common in multimodal models for extra inputs like `mm_token_type_ids`, `pixel_values`, etc.).

**Fix location**: `src/axolotl/core/trainers/base.py` `__init__()` — after `super().__init__()`, check if the unwrapped model actually has `num_items_in_batch` in its forward signature. If not, set `self.model_accepts_loss_kwargs = False`.
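A minimal sketch of that check, assuming HF Trainer's `model_accepts_loss_kwargs` attribute and standard `inspect` usage:

```python
import inspect

# In AxolotlTrainer.__init__, after super().__init__():
unwrapped = getattr(self.model, "module", self.model)  # unwrap DDP if present
forward_params = inspect.signature(unwrapped.forward).parameters
if "num_items_in_batch" not in forward_params:
    # forward() has **kwargs but never consumes num_items_in_batch, so the
    # trainer would log loss without dividing by gradient_accumulation_steps
    self.model_accepts_loss_kwargs = False
```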
## Multimodal Models (ForConditionalGeneration)

Many recent models use `ForConditionalGeneration` as the top-level class, not `ForCausalLM`:
- Gemma3 → `Gemma3ForConditionalGeneration`
- Gemma4 → `Gemma4ForConditionalGeneration`
- Qwen2-VL → `Qwen2VLForConditionalGeneration`
- LLaVA → `LlavaForConditionalGeneration`

### Why this matters

| Component | Targets `ForCausalLM` | Needs `ForConditionalGeneration` |
|-----------|----------------------|--------------------------------|
| CCE patches | ✅ (default) | ❌ silently inactive if not patched |
| PEFT LoRA | ✅ | May fail on custom layer types |
| HF Trainer label handling | ✅ | May need extra inputs |

### Required extra inputs

Multimodal models require special inputs during training even for text-only data:

| Model | Required Input | Value for Text-Only |
|-------|---------------|-------------------|
| Gemma4 | `mm_token_type_ids` | `torch.zeros_like(input_ids)` |
| Gemma3 | `token_type_ids` | `torch.zeros_like(input_ids)` |

Auto-inject in `compute_loss()` when not provided by the data collator. See `core/trainers/base.py`.
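A sketch of that injection, with `model_type` and `inputs` as they exist in the `compute_loss()` context (names assumed from context):

```python
import torch

# Text-only batches: all-zero token type ids mean "every token is text"
if model_type == "gemma4" and "mm_token_type_ids" not in inputs:
    inputs["mm_token_type_ids"] = torch.zeros_like(inputs["input_ids"])
elif model_type == "gemma3" and "token_type_ids" not in inputs:
    inputs["token_type_ids"] = torch.zeros_like(inputs["input_ids"])
```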
### Custom layer types and PEFT

Vision towers often use custom module wrappers that PEFT doesn't support:

| Model | Custom Layer | Wraps | Fix |
|-------|-------------|-------|-----|
| Gemma4 | `Gemma4ClippableLinear` | `nn.Linear` | Redirect to `.linear` child |

Fix location: `src/axolotl/loaders/adapter.py` `_patch_peft_clippable_linear()`.

## Sample Packing

### How packed sequence detection works (transformers ≥ 5.x)

`transformers.masking_utils._preprocess_mask_arguments()` detects packed sequences from `position_ids` resets. But **only when `attention_mask is None`**:

```python
# From masking_utils.py:
if position_ids is not None and attention_mask is None and past_key_values is None:
    packed_sequence_mask = find_packed_sequence_indices(position_ids)
```

If the collator provides an all-ones `attention_mask`, packing detection is **skipped** and the model builds a single causal mask spanning all packed sequences → cross-sequence attention leakage → very high loss.

### Fix for models using `create_causal_mask_mapping`

For Gemma3, Gemma4, and similar models that use the new transformers masking system, remove `attention_mask` from inputs when sample packing is active:

```python
# In compute_loss():
if (
    self.args.sample_packing
    and model_type in ("gemma4", "gemma3")
    and "attention_mask" in inputs
    and "position_ids" in inputs
):
    del inputs["attention_mask"]
```

Fix location: `src/axolotl/core/trainers/base.py` `compute_loss()`.

### Models that DON'T need this fix

Older models that use `_prepare_4d_causal_attention_mask` (Llama, Mistral, Qwen2, etc.) handle sample packing via axolotl's multipack attention monkeypatch instead. Only models using the new `create_causal_mask_mapping` / `create_causal_mask` masking system need the `attention_mask` removal.

## Attention Backend Selection

| Backend | Config | head_dim limit | torch_compile | Notes |
|---------|--------|---------------|---------------|-------|
| FA2 | `attn_implementation: flash_attention_2` | 256 | ✅ | Fastest when supported |
| FA4 | auto with `attn_implementation: flash_attention_2` | 256 (SM90+) | ✅ | Auto-detected on H100+ |
| SDPA | `attn_implementation: sdpa` | None | ✅ | Universal fallback |
| flex | `attn_implementation: flex_attention` | None | ⚠️ Triton OOM for large head_dim | Good for variable head dims |
| eager | `attn_implementation: eager` | None | ✅ | Slowest, always works |

**Check model support**: Look at `_supports_flash_attn_2`, `_supports_flex_attn`, `_supports_sdpa` attributes on the model class.

**head_dim gotcha**: The 256 limit is specific to flash-attn CUDA kernels, NOT PyTorch-level. SDPA and flex_attention both handle arbitrary head_dim. Models with `global_head_dim > 256` (Gemma4: 512) must use SDPA or flex.

**flex + compile gotcha**: `torch_compile` with flex_attention can hit Triton shared memory OOM for large head_dim. Falls back to eager per-function (not a crash, but slower). Unsloth disables flex for Gemma4 for this reason.

## Cut Cross Entropy (CCE)

### How CCE patches work

CCE replaces the model's `forward()` with a fused version that computes loss from hidden states + lm_head weight without materializing the full logits tensor. This saves ~`batch × seq_len × vocab_size × dtype_bytes` of VRAM.
### Adding CCE for a new model

1. Check if the model type is in `cut_cross_entropy.transformers.patch.PATCH_FNS` (see the sketch after this list)
2. If not, axolotl's generic fallback (`integrations/cut_cross_entropy/__init__.py` `patch_llama_like()`) patches `{Prefix}ForCausalLM.forward` with `cce_forward`
3. For multimodal models (`ForConditionalGeneration`), a model-specific patch is needed in `ml-cross-entropy` repo
4. The multimodal `cce_forward` must accept all extra kwargs (pixel_values, mm_token_type_ids, etc.) and pop any that would conflict before calling `self.model()`
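For step 1, a quick check — assuming the module path named above is importable in your environment:

```python
from cut_cross_entropy.transformers.patch import PATCH_FNS

# True → a model-specific CCE patch is registered; False → generic fallback or new patch needed
print("gemma4" in PATCH_FNS)
```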
### Common CCE pitfall

If CCE appears active (log says "Applying Cut Cross Entropy") but peak VRAM doesn't decrease, check which class was patched. If the model loads as `ForConditionalGeneration` but CCE patched `ForCausalLM`, the patch is silently inactive.

## MoE Models

### Dense MLP vs MoE experts

Some MoE models (e.g., Gemma4) have BOTH dense MLP layers and MoE expert layers at every decoder layer:
- `gate_proj/up_proj/down_proj` → targets the **dense MLP** (`Gemma4TextMLP`)
- `experts.gate_up_proj/experts.down_proj` → targets the **MoE experts** (`Gemma4TextExperts`)

LoRA on the dense MLP works normally. Expert LoRA via `lora_target_parameters` requires PEFT support for the specific expert module type (may warn "Unsupported layer type").

### ScatterMoE kernels

`use_scattermoe: true` with `experts_implementation: scattermoe` registers fused expert kernels via transformers' `ExpertsInterface`. Significant speedup for MoE models. Requires the kernels plugin:
```yaml
plugins:
  - axolotl.integrations.kernels.KernelsPlugin
use_kernels: true
use_scattermoe: true
experts_implementation: scattermoe
```

## Where to Add Model-Specific Fixes

| What | Where | Example |
|------|-------|---------|
| Missing forward inputs | `core/trainers/base.py` `compute_loss()` | mm_token_type_ids injection |
| Attention mask fixes | `core/trainers/base.py` `compute_loss()` | Sample packing mask removal |
| Loss logging fixes | `core/trainers/base.py` `__init__()` | model_accepts_loss_kwargs override |
| PEFT/LoRA patches | `loaders/adapter.py` | ClippableLinear redirect |
| Attention patches | `monkeypatch/attention/` | FA4 tuple fix |
| Model-specific patches | `loaders/patch_manager.py` `_apply_model_specific_patches()` | Llama4, Kimi, NemotronH |
| CCE patches | `ml-cross-entropy` repo `transformers/` | Per-model cce_forward |
| Example configs | `examples/<model>/` | Validated YAML |
| Config validation | `utils/schemas/validation.py` | Compatibility checks |
@@ -1,121 +0,0 @@
# Preference Learning (RLHF) — Agent Reference

Reference for DPO, IPO, KTO, ORPO, and SimPO. For config templates and dataset format examples, see [rlhf.qmd](../rlhf.qmd). For GRPO, see [grpo.qmd](../grpo.qmd). For EBFT, see [ebft.qmd](../ebft.qmd).

## Method Overview

| Method | Data Requirement | Key Idea | Best For |
|--------|-----------------|----------|----------|
| **DPO** | Paired (chosen + rejected) | Implicit reward via preference pairs | General alignment, most common |
| **IPO** | Paired (chosen + rejected) | DPO with different loss (avoids overfitting) | When DPO overfits |
| **KTO** | Unpaired (completion + binary label) | Kahneman-Tversky loss, no pairs needed | When you only have thumbs-up/down |
| **ORPO** | Paired (chosen + rejected) | Combined SFT + preference, no ref model | Single-stage alignment, saves VRAM |
| **SimPO** | Paired (chosen + rejected) | Length-normalized, no ref model | Simple setup, length-robust |

Default: start with DPO. All methods require `sample_packing: false`.

## Architecture

```
┌──────────────┐    ┌───────────────┐    ┌───────────────┐
│ Policy Model │    │ Reference     │    │ Preference    │
│ (trainable)  │    │ Model (frozen)│    │ Dataset       │
└──────┬───────┘    └──────┬────────┘    └──────┬────────┘
       └─────────┬─────────┘                    │
                 v                              │
  Forward pass on chosen + rejected <───────────┘
                 │
  Preference Loss (DPO/IPO/KTO/...)
                 │
        Backprop + Update

Exception: ORPO and SimPO do NOT use a reference model (~50% less VRAM).
```

No vLLM server needed (unlike GRPO). Offline RL with pre-collected preference data.

## Method Selection

1. Paired preference data (chosen + rejected)?
   - Default → `rl: dpo`
   - Overfitting → `rl: dpo, dpo_loss_type: ["ipo"]`
   - VRAM-limited → `rl: orpo` (no ref model)
   - Length-sensitive → `rl: simpo` (no ref model)
2. Only binary labels (good/bad)? → `rl: kto`
3. Single-stage training (no separate SFT)? → `rl: orpo`

| | DPO | IPO | KTO | ORPO | SimPO |
|---|---|---|---|---|---|
| **Reference model** | Yes | Yes | Yes | No | No |
| **VRAM overhead** | ~2x model | ~2x model | ~2x model | ~1x model | ~1x model |
| **TRL trainer class** | DPOTrainer | DPOTrainer | KTOTrainer | ORPOTrainer | CPOTrainer |

## Prompt Strategy Resolution

The `type` field resolves to a Python function:

```
type: "chatml.intel"
  → axolotl.prompt_strategies.dpo.chatml.intel(cfg, **kwargs)
  → returns transform_fn(sample) → {"prompt", "chosen", "rejected"}

type: "chat_template.default"
  → axolotl.prompt_strategies.dpo.chat_template.default(cfg, dataset_idx, **kwargs)

type: {"field_prompt": "prompt", ...} (dict)
  → axolotl.prompt_strategies.dpo.user_defined.default(...)
```

Module base: `axolotl.prompt_strategies.{rl_method}` — replace `dpo` with `kto` or `orpo`.
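For example, a dataset entry that resolves through the first mapping above (dataset path illustrative):

```yaml
rl: dpo
datasets:
  - path: argilla/distilabel-intel-orca-dpo-pairs
    type: chatml.intel
    split: train
```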
## Healthy Training Indicators

| Metric | Healthy Range | Problem |
|--------|--------------|---------|
| `train/loss` | Decreasing, 0.3-0.7 | Flat or increasing = broken data or too high LR |
| `rewards/chosen` | Increasing | Flat = model not learning preferences |
| `rewards/rejected` | Decreasing | Increasing = model prefers wrong responses |
| `rewards/margins` | Positive and increasing | Negative = prefers rejected over chosen |
| `rewards/accuracies` | > 0.5, toward 0.7+ | < 0.5 = worse than random |
| `logps/rejected` | Decreasing | Increasing = reward hacking |
| `grad_norm` | 0.01 - 10.0 | > 100 = exploding gradients |

Method-specific: DPO/IPO watch `rewards/margins`; KTO loss is noisier; ORPO monitor SFT + odds ratio components; SimPO check length-normalized reward separation.

## Known Issues

| Issue | Fix |
|-------|-----|
| Sample packing crash | Set `sample_packing: false` (required for all preference methods) |
| KTO `KeyError: 'label'` | Ensure dataset has boolean `label` column |
| ORPO/KTO `KeyError` during tokenization | Add `remove_unused_columns: false` |
| ORPO template not applied | ORPO requires explicit `chat_template` setting |
| OOM with ref model (DPO/IPO/KTO) | Use LoRA/QLoRA, or switch to ORPO/SimPO (no ref model) |
| IPO + label_smoothing | Do not set `dpo_label_smoothing` when `rl: ipo` |

Full troubleshooting: [training_stability.qmd](../training_stability.qmd)

## File Map

```
src/axolotl/
  core/trainers/dpo/              # DPO trainer, args, strategy
  core/builders/rl.py             # HFRLTrainerBuilder — routes rl type → trainer class
  core/training_args.py           # AxolotlKTOConfig, AxolotlORPOConfig, AxolotlCPOConfig
  prompt_strategies/
    dpo/                          # DPO/IPO/SimPO dataset strategies
      chat_template.py            # chat_template.default, chat_template.argilla_chat
      chatml.py                   # chatml.default/intel/icr/argilla_chat/prompt_pairs/ultra
      llama3.py                   # llama3 variants (same subtypes as chatml)
      user_defined.py             # Custom field mapping
      passthrough.py              # No transform
    kto/                          # KTO dataset strategies (chatml, llama3, user_defined)
    orpo/                         # ORPO dataset strategies (chat_template.argilla)
  utils/schemas/enums.py          # RLType enum (dpo, ipo, kto, orpo, simpo, grpo, gdpo, ebft)
  utils/schemas/config.py         # All rl/dpo/kto/orpo/simpo config fields

docs/rlhf.qmd                     # Full user docs: all dataset formats, config templates
docs/choosing_method.qmd          # SFT vs DPO vs GRPO decision guide
examples/qwen2/dpo.yaml           # DPO example
examples/llama-3/qlora-1b-kto.yaml  # KTO example
```
@@ -1,75 +0,0 @@
# Pretraining / Continual Pretraining — Agent Reference

Train on raw text with no input masking. Two approaches depending on dataset size.

## When to Use

- Continual pretraining on domain-specific corpora
- Adapting a base model to a new language or domain before fine-tuning
- Pretraining-style data where the entire text is the training signal

## Choosing an Approach

| | Non-streaming (`type: completion`) | Streaming (`pretraining_dataset`) |
|---|---|---|
| **Dataset size** | Fits in memory | Too large to fit in memory |
| **Tokenization** | Pre-tokenized before training | On-demand during training |
| **Config key** | `datasets:` | `pretraining_dataset:` |
| **Long text handling** | Splits texts exceeding `sequence_len` | Concatenates into fixed-length sequences |
| **Benefit** | Can preprocess on CPU, transfer to GPU | Start training immediately, no preprocessing |

## Non-Streaming: `type: completion`

For smaller datasets that fit in memory. Pre-tokenizes the entire dataset.

```yaml
datasets:
  - path: my_corpus
    type: completion
    # field: text  # Column name (default: "text")
```

## Streaming: `pretraining_dataset`

For large corpora. Streams data on-demand without loading everything into memory.

```yaml
pretraining_dataset:
  - path: HuggingFaceFW/fineweb-edu
    type: pretrain
    text_column: text
    split: train

max_steps: 1000  # Required — axolotl can't infer dataset size
streaming_multipack_buffer_size: 10000  # Buffer for sample packing
pretrain_multipack_attn: true  # Prevent cross-attention between packed samples
```

`max_steps` is required for streaming — one step = `sequence_len * micro_batch_size * gradient_accumulation_steps * num_gpus` tokens. For example, with `sequence_len: 4096`, `micro_batch_size: 2`, `gradient_accumulation_steps: 4`, and 8 GPUs, one step covers 4096 × 2 × 4 × 8 = 262,144 tokens, so `max_steps: 1000` trains on roughly 262M tokens.

Full streaming docs: [streaming.qmd](../streaming.qmd)

## Dataset Format

```json
{"text": "The complete document text goes here."}
```

## Key Settings

- `sample_packing: true` + `pad_to_sequence_len: true` — pack documents into fixed-length sequences
- `flash_attention: true` — required for sample packing
- No adapter — typically full fine-tune for pretraining
- `train_on_inputs: true` — default for completion (all tokens trained on)

## File Map

```
src/axolotl/
  prompt_strategies/completion.py  # Non-streaming: completion prompt strategy (no masking)
  utils/data/sft.py                # Non-streaming: dataset loading and processing
  utils/data/streaming.py          # Streaming: encode_streaming(), wrap_streaming_dataset()
  utils/schemas/config.py          # Config fields: pretraining_dataset, pretrain_multipack_attn, etc.

examples/streaming/pretrain.yaml   # Full streaming pretraining example config
```
@@ -1,48 +0,0 @@
# Reward Modelling — Agent Reference

Train models to score responses for use as reward signals in RL. For full docs, see [reward_modelling.qmd](../reward_modelling.qmd).

## Types

### Outcome Reward Models (ORM)

Train a classifier to predict preference over entire interactions. Uses `AutoModelForSequenceClassification`.

```yaml
base_model: google/gemma-2-2b
model_type: AutoModelForSequenceClassification
num_labels: 1
reward_model: true
chat_template: gemma
datasets:
  - path: argilla/distilabel-intel-orca-dpo-pairs
    type: bradley_terry.chat_template
```

Dataset format: `{"system": "...", "input": "...", "chosen": "...", "rejected": "..."}`

### Process Reward Models (PRM)

Train a token classifier to score each reasoning step. Uses `AutoModelForTokenClassification`.

```yaml
base_model: Qwen/Qwen2.5-3B
model_type: AutoModelForTokenClassification
num_labels: 2
process_reward_model: true
datasets:
  - path: trl-lib/math_shepherd
    type: stepwise_supervised
```

Dataset format: see [stepwise_supervised.qmd](../dataset-formats/stepwise_supervised.qmd).

## File Map

```
src/axolotl/
  core/builders/causal.py                   # Handles reward_model flag in trainer builder
  prompt_strategies/bradley_terry/          # Bradley-Terry prompt strategies
  prompt_strategies/stepwise_supervised.py  # PRM dataset strategy
  utils/schemas/config.py                   # reward_model, process_reward_model config fields
```
@@ -1,139 +0,0 @@
# SFT — Agent Reference

Supervised fine-tuning pipeline reference. For config templates and dataset format examples, see [getting-started.qmd](../getting-started.qmd) and [dataset-formats/](../dataset-formats/).

## Architecture

```
YAML Config → axolotl train config.yaml

1. Load base model (+ quantization if QLoRA/8-bit)
2. Apply adapter layers (LoRA/QLoRA) if configured
3. Load + tokenize dataset(s)
   - Apply prompt template (chat_template / alpaca / custom)
   - Mask inputs (train_on_inputs: false)
   - Pack samples into sequences (sample_packing: true)
4. Training loop (HuggingFace Trainer)
   - forward → loss → backward → optimizer step → lr scheduler step
5. Save model / adapter weights + tokenizer

Multi-GPU: FSDP or DeepSpeed shards model across GPUs automatically.
```

## Components Required

1. A YAML config — model, dataset(s), adapter settings, hyperparameters
2. A dataset — HuggingFace Hub, local JSONL/JSON/Parquet, or S3/GCS path
3. (Optional) A custom prompt strategy — for non-standard dataset formats

No external server processes needed (unlike GRPO which requires vLLM).
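A minimal LoRA SFT sketch combining these pieces (model, dataset, and output paths illustrative; values follow the hyperparameter ranges below):

```yaml
base_model: NousResearch/Meta-Llama-3-8B
datasets:
  - path: teknium/GPT4-LLM-Cleaned   # any alpaca-format dataset
    type: alpaca
adapter: lora
lora_r: 32
lora_alpha: 64
lora_target_linear: true
sequence_len: 4096
sample_packing: true
flash_attention: true
micro_batch_size: 2
gradient_accumulation_steps: 4
num_epochs: 1
learning_rate: 2e-4
optimizer: adamw_8bit
bf16: auto
output_dir: ./outputs/llama3-8b-lora
```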
## Dataset Format Decision Tree

```
Is your data in chat/message format?
├─ YES: OpenAI message format (role/content)?
│   ├─ YES ──────────────────────> type: chat_template (recommended)
│   └─ NO (custom field names) ──> type: chat_template + message_property_mappings
└─ NO: Instruction/response pairs?
    ├─ YES ──> type: alpaca (instruction, input, output)
    └─ NO: Raw text?
        ├─ YES with segments ─────> type: input_output (template-free masking)
        └─ YES continuous ────────> type: completion (pretraining-style)
```

Full format specs: [dataset-formats/](../dataset-formats/)

## Model Size to Adapter Choice

| Model Size | LoRA | QLoRA (4-bit) | Full Fine-Tune | VRAM (approx) |
|-----------|------|---------------|----------------|---------------|
| 1-3B | Preferred | Low-budget option | Single GPU OK | 8-16 GB (LoRA) |
| 7-8B | Preferred | Good balance | Needs multi-GPU | 16-24 GB (LoRA) |
| 13-14B | Preferred | Good balance | Multi-GPU required | 24-40 GB (LoRA) |
| 30-70B | LoRA or QLoRA | Preferred for single GPU | Multi-node | 40-80 GB (QLoRA) |

## Hyperparameter Ranges

| Parameter | LoRA | QLoRA | Full FT |
|-----------|------|-------|---------|
| `learning_rate` | 1e-4 to 3e-4 | 1e-4 to 3e-4 | 1e-5 to 5e-5 |
| `lora_r` | 16-64 | 16-64 | N/A |
| `lora_alpha` | 1-2x `lora_r` | 1-2x `lora_r` | N/A |
| `micro_batch_size` | 2-8 | 2-4 | 1-2 |
| `gradient_accumulation_steps` | 2-8 | 4-16 | 4-16 |
| `num_epochs` | 1-3 | 1-3 | 1-3 |
| `optimizer` | `adamw_8bit` | `adamw_bnb_8bit` | `adamw_torch_fused` |

Effective batch = micro_batch * grad_accum * num_gpus. Lower LR for larger models.

## Healthy Training Indicators

| Metric | Healthy | Problem |
|--------|---------|---------|
| `train_loss` | Decreasing, starting ~2-4 for chat models | Flat or increasing from step 1 — data or LR issue |
| `eval_loss` | Decreasing, tracks train_loss | Increasing while train_loss decreases — overfitting |
| `grad_norm` | 0.1-10, relatively stable | Spikes >100 — instability. 0.0 — frozen weights |
| `learning_rate` | Follows scheduler curve | Flat or NaN — config issue |

Watch for: loss never decreasing (check `train_on_inputs`, dataset, LR), loss goes to 0 quickly (overfitting), eval_loss diverging (reduce epochs, add regularization). See [training_stability.qmd](../training_stability.qmd).

## Known Issues

| Issue | Fix |
|-------|-----|
| OOM during training | Reduce `micro_batch_size`, enable `gradient_checkpointing`, reduce `sequence_len` |
| `sample_packing` + SDPA + bf16 = 0.0 loss | Use `attn_implementation: flash_attention_2` or disable `sample_packing` |
| Missing chat template error | Set `chat_template: chatml` explicitly |
| Label masking wrong | Run `axolotl preprocess config.yaml --debug` and inspect labels |
| Loss NaN | Use `bf16: auto`, lower LR, check data for empty samples |
| Tokenizer pad token / infinite loss | Set `special_tokens: pad_token: "<\|end_of_text\|>"` |
| FSDP save hangs | Use `fsdp_state_dict_type: FULL_STATE_DICT` |
| DeepSpeed CheckpointError | Set `use_reentrant: true` in `gradient_checkpointing_kwargs` |

## Profiling

To profile training and identify optimization opportunities:

```yaml
# Profile steps 3-7 (after warmup/autotuning settles)
profiler_steps_start: 3
profiler_steps: 5
```

This produces `profiler_trace.json` (Chrome trace) and `snapshot.pickle` (memory snapshot) in `output_dir`.
View the Chrome trace at `chrome://tracing`.

To programmatically inspect the trace:
```bash
python scripts/analyze_profile.py output_dir/
```

The trace shows per-kernel CUDA times, memory allocations, and operator-level breakdown. Look for:
- **Large matmul kernels**: candidates for fusion or quantization
- **Memory copies (H2D/D2H)**: unnecessary data movement
- **Small frequent kernels**: candidates for kernel fusion
- **Gaps between kernels**: pipeline bubbles from CPU overhead

Full troubleshooting: [training_stability.qmd](../training_stability.qmd), [debugging.qmd](../debugging.qmd)

## File Map

```
src/axolotl/
  cli/train.py               # Entry point for `axolotl train`
  cli/preprocess.py          # Entry point for `axolotl preprocess`
  core/builders/causal.py    # HFCausalTrainerBuilder — wires config → SFT trainer
  core/trainers/base.py      # AxolotlTrainer — base trainer class
  core/trainers/mixins/      # Packing, optimizer, scheduler, checkpoints
  prompt_strategies/         # Format handlers: chat_template, alpaca, completion, input_output
  utils/schemas/config.py    # AxolotlInputConfig — main config schema
  utils/schemas/datasets.py  # SFTDataset, DatasetConfig
  utils/schemas/peft.py      # LoraConfig — LoRA parameters
  integrations/liger/        # Liger kernel plugin

examples/llama-3/            # LoRA, QLoRA, full FT example configs
docs/getting-started.qmd     # Quickstart with config templates
docs/optimizations.qmd       # Flash attention, gradient checkpointing, sample packing
docs/multi-gpu.qmd           # FSDP and DeepSpeed setup
```
@@ -3,71 +3,28 @@ title: Attention
description: Supported attention modules in Axolotl
---

Axolotl routes attention via a single config field:
## SDP Attention

This is the default built-in attention in PyTorch.

```yaml
attn_implementation: <backend>
sdp_attention: true
```

`attn_implementation` is passed through to `transformers` verbatim (via
`model.config._attn_implementation`). Accepted values are the HF-native
backends, axolotl-registered backends, or a hub-kernel path.
For more details: [PyTorch docs](https://docs.pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)

## Backends
## Flash Attention

| `attn_implementation` | Description |
|---|---|
| `eager` | Plain PyTorch attention. No packing support. |
| `sdpa` | PyTorch `scaled_dot_product_attention`. No packing support. |
| `flash_attention_2` | Dao-AILab Flash Attention 2. |
| `flash_attention_3` | Dao-AILab Flash Attention 3 (Hopper+). |
| `flex_attention` | Torch Flex Attention (requires torch ≥ 2.6). |
| `xformers` | xFormers memory-efficient attention. |
| `sage` | SageAttention (QK int8 / PV fp16). |
| `s2` | Shifted-Sparse Attention (LLaMA only, FA2 under the hood). |
| `fp8` | torchao FP8 low-precision attention (requires SM90+, torch ≥ 2.11). Loaded as SDPA and patched post-load. |
| `kernels-community/flash-attn3` | HF hub FA3 kernel. |
| `kernels-community/sage-attention` | HF hub SageAttention kernel. |
| Other `<org>/<name>` path | Any hub-kernel path supported by `transformers`. |

Short-form aliases (`flash`, `fa2`, `flex`, `sdp`, etc.) are **not accepted** —
set the canonical name above.

### Capability flags

Axolotl derives three boolean capability flags from `attn_implementation` and
exposes them on the validated config:

- `cfg.attn_supports_packing` — backend supports varlen sample packing via
  `position_ids`. Gates multipack patches and `sample_packing_drop_attention_mask`.
- `cfg.attn_uses_flash_lib` — backend needs the `flash_attn` (Dao-AILab)
  monkeypatches (FA4 auto, LLaMA flash hijack, ring-FA).
- `cfg.attn_needs_dtype_cast` — backend requires fp16/bf16 embeddings
  (everything except `eager` and `sdpa`).

These are **computed** — they cannot be overridden from YAML.

## Per-backend notes

### SDPA

Default PyTorch attention. See
[PyTorch docs](https://docs.pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html).
Axolotl supports Flash Attention 2, 3, and 4. The best available version is used automatically
based on your installed packages and GPU.

```yaml
attn_implementation: sdpa
flash_attention: true
```

### Flash Attention
For more details: [Flash Attention](https://github.com/Dao-AILab/flash-attention/)

Axolotl supports FA2, FA3, and FA4. The best available version is used
automatically based on your installed packages and GPU.

```yaml
attn_implementation: flash_attention_2 # or flash_attention_3
```

#### Flash Attention 2
### Flash Attention 2

Requirements: Ampere, Ada, or Hopper GPUs (Turing or lower not supported)

@@ -82,20 +39,20 @@ Alternatively, try reinstall or downgrade a version.

:::

#### Flash Attention 3
### Flash Attention 3

Requirements: Hopper only and CUDA 12.8 (recommended)

```bash
git clone https://github.com/Dao-AILab/flash-attention.git
cd flash-attention/hopper

python setup.py install
```

#### Flash Attention 4
### Flash Attention 4

Requirements: Hopper or Blackwell GPUs. Auto-applied when `attn_uses_flash_lib`
is true and FA4 is importable.
Requirements: Hopper or Blackwell GPUs

```bash
pip install flash-attn-4
@@ -106,6 +63,7 @@ Or from source:

```bash
git clone https://github.com/Dao-AILab/flash-attention.git
cd flash-attention/flash_attn/cute

pip install -e .

# FA2's flash_attn package includes a cute/ stub that shadows FA4.
@@ -128,113 +86,93 @@ and falls back to FA2/3.
:::

For more details: [flash-attention/flash_attn/cute](https://github.com/Dao-AILab/flash-attention/tree/main/flash_attn/cute)

### AMD

Requirements: ROCm 6.0 and above. See
[Flash Attention AMD docs](https://github.com/Dao-AILab/flash-attention/tree/main?tab=readme-ov-file#amd-rocm-support).
Requirements: ROCm 6.0 and above.

### Flex Attention
See [Flash Attention AMD docs](https://github.com/Dao-AILab/flash-attention/tree/main?tab=readme-ov-file#amd-rocm-support).

## Flex Attention

A flexible PyTorch API for attention used in combination with `torch.compile`.

```yaml
attn_implementation: flex_attention
torch_compile: true # recommended
flex_attention: true

# recommended
torch_compile: true
```

Requires torch ≥ 2.6. See [PyTorch docs](https://pytorch.org/blog/flexattention/).
::: {.callout-note}

### SageAttention
We recommend using the latest stable version of PyTorch for best performance.

Requirements: Ampere, Ada, or Hopper GPUs.
:::

For more details: [PyTorch docs](https://pytorch.org/blog/flexattention/)

## SageAttention

Attention kernels with QK Int8 and PV FP16 accumulator.

```yaml
attn_implementation: sage
sage_attention: true
```

Requirements: Ampere, Ada, or Hopper GPUs

```bash
pip install sageattention==2.2.0 --no-build-isolation
```

::: {.callout-warning}

Only LoRA/QLoRA recommended. Full finetuning has been observed to drop loss to 0. See
[GitHub Issue](https://github.com/thu-ml/SageAttention/issues/198).
Only LoRA/QLoRA recommended at the moment. We found the loss drops to 0 with full finetuning. See [GitHub Issue](https://github.com/thu-ml/SageAttention/issues/198).

:::

For more details: [Sage Attention](https://github.com/thu-ml/SageAttention).
For more details: [Sage Attention](https://github.com/thu-ml/SageAttention)

### xFormers
::: {.callout-note}

We do not support SageAttention 3 at the moment. If you are interested in adding this or improving the SageAttention implementation, please open an Issue.

:::


## xFormers

```yaml
attn_implementation: xformers
xformers_attention: true
```

::: {.callout-tip}

Recommended for Turing GPUs or below (e.g. Colab T4).
We recommend using it with Turing GPUs or below (such as on Colab).

:::

### Shifted Sparse Attention
For more details: [xFormers](https://github.com/facebookresearch/xformers)

## Shifted Sparse Attention

::: {.callout-warning}

Planned for deprecation. Prefer one of the backends above.
We plan to deprecate this! If you use this feature, we recommend switching to the methods above.

:::

Requirements: LLaMA model architecture. Loaded as FA2 under the hood and
patched to implement shifted-sparse attention. Does not support sample packing.
Requirements: LLaMA model architecture

```yaml
attn_implementation: s2
flash_attention: true
s2_attention: true
```

### FP8
::: {.callout-tip}

torchao low-precision attention. Loaded as SDPA and patched post-load.

Requirements: SM90+ (Hopper/Blackwell), PyTorch ≥ 2.11, torchao ≥ 0.17,
flash-attn with FA3. KV caching must be disabled.

```yaml
attn_implementation: fp8
```

### Hub kernels

```yaml
attn_implementation: kernels-community/flash-attn3
```

Passed through to `transformers`; axolotl does not install the kernel itself.
For recognized hub paths the capability flags are set automatically; for
arbitrary paths axolotl uses conservative defaults (`attn_supports_packing=False`,
`attn_uses_flash_lib=False`).

## Migrating from legacy boolean flags

The following legacy config fields are **deprecated** and will be removed in a
future release. Each emits a `DeprecationWarning` when set and is stripped from
the validated config.

| Legacy | Canonical |
|---|---|
| `flash_attention: true` | `attn_implementation: flash_attention_2` |
| `sdp_attention: true` | `attn_implementation: sdpa` |
| `xformers_attention: true` | `attn_implementation: xformers` |
| `flex_attention: true` | `attn_implementation: flex_attention` |
| `sage_attention: true` | `attn_implementation: sage` |
| `s2_attention: true` | `attn_implementation: s2` |
| `eager_attention: true` | `attn_implementation: eager` |

Combining `attn_implementation` with a legacy flag (e.g. `attn_implementation:
flash_attention_2` **and** `flash_attention: true`) raises — pick one.

::: {.callout-note}

Existing example configs under `examples/` still use the legacy flags. They
continue to work with a deprecation warning; they will be migrated in a
follow-up pass.
No sample packing support!

:::
@@ -1,206 +0,0 @@
|
||||
---
|
||||
title: "Which Fine-Tuning Method Should I Use?"
|
||||
description: "A decision guide for choosing the right fine-tuning method, adapter, and hardware configuration in Axolotl."
|
||||
format:
|
||||
html:
|
||||
toc: true
|
||||
toc-depth: 3
|
||||
number-sections: true
|
||||
execute:
|
||||
enabled: false
|
||||
---
|
||||
|
||||
## Overview {#sec-overview}
|
||||
|
||||
Axolotl supports four broad categories of fine-tuning, each suited to different data types, objectives, and resource constraints.
|
||||
|
||||
| Method | What It Does | Data You Need |
|
||||
|--------|-------------|---------------|
|
||||
| **Supervised Fine-Tuning (SFT)** | Teaches the model to produce specific outputs given inputs | Input-output pairs (instructions, conversations, completions) |
|
||||
| **Preference Learning (DPO/KTO/ORPO)** | Steers the model toward preferred outputs and away from dispreferred ones | Chosen/rejected response pairs (DPO, ORPO) or binary labels (KTO) |
|
||||
| **Reinforcement Learning (GRPO)** | Optimizes the model against a reward signal through online generation | A reward function (code or model-based) and a prompt dataset |
|
||||
| **Reward Modeling** | Trains a model to score responses, for use as a reward signal in RL | Preference pairs ranked by quality |
|
||||
|
||||
Each method is configured through a YAML file with `rl: <method>` (or omitted for SFT). All methods support LoRA, QLoRA, and full fine-tuning unless otherwise noted.
|
||||
|
||||
## Decision Tree {#sec-decision-tree}

Use the following flowchart to choose your method. Start at the top and follow the path that matches your situation.

```
Do you have a reward function (code-based or model-based)?
├── YES
│   └── Use GRPO (rl: grpo)
│       The model generates its own completions and learns from reward scores.
│       Best for: math, code, reasoning, tasks with verifiable answers.
│       See: rlhf.qmd#grpo
│
└── NO
    │
    Do you have preference pairs (chosen vs. rejected responses)?
    ├── YES
    │   │
    │   Are they paired (same prompt, one chosen, one rejected)?
    │   ├── YES → Use DPO (rl: dpo)
    │   │         Direct optimization without a separate reward model.
    │   │         See: rlhf.qmd#dpo
    │   │
    │   └── NO (only binary good/bad labels)
    │       └── Use KTO (rl: kto)
    │           Works with unpaired preference data.
    │           See: rlhf.qmd#kto
    │
    └── NO
        │
        Do you have input-output examples?
        ├── YES → Use SFT
        │         The simplest and most common method.
        │         See: getting-started.qmd
        │
        └── NO
            └── You need to create training data first.
                Consider generating preference pairs with an LLM judge,
                or writing a reward function for GRPO.
```
::: {.callout-tip}
**When in doubt, start with SFT.** It is the most straightforward method and works well for most tasks. You can always move to preference learning or RL later to further refine behavior.
:::

### Method Comparison at a Glance

| Criterion | SFT | DPO | KTO | GRPO |
|-----------|-----|-----|-----|------|
| Data complexity | Low (input-output pairs) | Medium (preference pairs) | Medium (binary labels) | Low (prompts + reward code) |
| Compute cost | Low | Medium | Medium | High (requires vLLM server) |
| Learning signal | Supervised | Contrastive | Contrastive | Online reward |
| Online generation | No | No | No | Yes |
| Reward model needed | No | No | No | No (uses reward functions) |
| Best for | Task adaptation, instruction following | Safety, style alignment | Unpaired preference data | Reasoning, math, code |

::: {.callout-note}
**ORPO** is an alternative to DPO that combines SFT and preference optimization in a single training stage, removing the need for a separate SFT step. Configure with `rl: orpo`. See [rlhf.qmd](rlhf.qmd) for details.
:::
## Adapter Selection {#sec-adapter-selection}

Once you have chosen a method, decide how to apply the parameter updates. The three main options trade off VRAM usage against model quality.

### QLoRA

- **How it works**: The base model is loaded in 4-bit (NF4) quantization. Small low-rank adapter matrices are trained in higher precision on top.
- **VRAM savings**: Roughly 4x reduction in model memory compared to full fine-tuning.
- **Quality**: Slight degradation due to quantization noise, but often negligible for task-specific fine-tuning.
- **When to use**: When your GPU cannot fit the model in full precision, or when you want fast experimentation.

```yaml
adapter: qlora
load_in_4bit: true
lora_r: 32
lora_alpha: 64
lora_target_linear: true
```

### LoRA

- **How it works**: The base model is loaded at full precision (or 8-bit). Low-rank adapter matrices are trained alongside.
- **VRAM savings**: Roughly 2-3x reduction compared to full fine-tuning (model weights are frozen; only adapters plus optimizer states for adapters are stored).
- **Quality**: Very close to full fine-tuning for most tasks, especially with higher rank values.
- **When to use**: When you have enough VRAM for the base model but not for full optimizer states.

```yaml
adapter: lora
lora_r: 32
lora_alpha: 64
lora_target_linear: true
```

::: {.callout-tip}
For GRPO training, LoRA is strongly recommended. The vLLM server needs to sync weights from the trainer, and LoRA sync (`trl.vllm_lora_sync: true`) is far more efficient than syncing full merged weights. See [vLLM Serving](vllm_serving.qmd) for details.
:::

### Full Fine-Tuning

- **How it works**: All model parameters are updated during training. No adapters.
- **VRAM savings**: None. Requires memory for model weights, gradients, and optimizer states (roughly 4x model size in bf16 with AdamW).
- **Quality**: Highest potential quality, especially for large distribution shifts.
- **When to use**: When you have ample GPU memory or multi-GPU setups, and need maximum performance. Also required for pre-training.

```yaml
# No adapter or load_in_* lines needed
micro_batch_size: 1
gradient_accumulation_steps: 16
```

### Quick Comparison

| | QLoRA | LoRA | Full |
|---|---|---|---|
| Trainable params | ~0.1-1% | ~0.1-1% | 100% |
| Model memory | ~25% of full | ~50-100% of full | 100% |
| Optimizer memory | Tiny (adapters only) | Tiny (adapters only) | 2x model size (AdamW) |
| Training speed | Slower (dequantization overhead) | Baseline | Faster per-step (no adapter overhead) |
| Inference | Merge or serve with adapter | Merge or serve with adapter | Direct |
| Multi-GPU required? | Rarely | For 13B+ models | For 7B+ models |
## Hardware Mapping {#sec-hardware-mapping}

The tables below provide approximate GPU memory requirements. Actual usage depends on context length, batch size, and optimizer choice.

### SFT / Preference Learning

| Model Size | QLoRA (4-bit) | LoRA (bf16) | Full (bf16 + AdamW) |
|------------|--------------|-------------|---------------------|
| 1-3B | 6-8 GB | 8-12 GB | 24-32 GB |
| 7-8B | 10-14 GB | 16-24 GB | 60-80 GB |
| 13-14B | 16-20 GB | 28-40 GB | 120+ GB |
| 30-34B | 24-32 GB | 64-80 GB | 2-4x 80 GB |
| 70-72B | 40-48 GB | 2x 80 GB | 4-8x 80 GB |

::: {.callout-important}
These estimates assume a short context length (512-2048 tokens) and a `micro_batch_size` of 1-2. Longer sequences and larger batches increase memory significantly due to activations. Use [gradient checkpointing](gradient_checkpointing.qmd) to reduce activation memory at the cost of ~30% slower training.
:::

### GRPO (RL Training)

GRPO requires additional GPU(s) for the vLLM generation server. Plan for at least two GPUs: one for training, one for vLLM.

| Model Size | Training GPU (LoRA, bf16) | vLLM GPU | Total GPUs |
|------------|--------------------------|----------|------------|
| 0.5-3B | 1x 24 GB | 1x 24 GB | 2x 24 GB |
| 7-8B | 1x 80 GB | 1x 80 GB | 2x 80 GB |
| 13-14B | 1-2x 80 GB | 1-2x 80 GB | 2-4x 80 GB |
| 30-72B | 2-4x 80 GB (FSDP/DeepSpeed) | 2-4x 80 GB (tensor parallel) | 4-8x 80 GB |

::: {.callout-tip}
For single-GPU GRPO, use `vllm_mode: colocate` with `vllm_enable_sleep_mode: true`. The vLLM engine shares the GPU and offloads VRAM when not generating. This works for smaller models (up to ~3B on a 24 GB GPU) but is slower than the two-GPU server mode.
:::
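The tip above maps to two config keys (a sketch; key placement follows the tip's wording, see [vLLM Serving](vllm_serving.qmd) for the full layout):

```yaml
rl: grpo
vllm_mode: colocate            # vLLM engine shares the training GPU
vllm_enable_sleep_mode: true   # offload vLLM VRAM when not generating
```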
### Multi-GPU Threshold

You need multi-GPU training when:

- **Full fine-tuning** of models 7B+ (use FSDP or DeepSpeed ZeRO)
- **LoRA** of models 30B+ (or 13B+ with long contexts)
- **GRPO** almost always (separate vLLM server), unless using colocate mode

See [Multi-GPU Training](multi-gpu.qmd) for FSDP and DeepSpeed configuration; a minimal sketch follows below.
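For DeepSpeed, the config opts in with a single key. A minimal sketch for ZeRO-2 (the JSON path is illustrative; use a preset from the multi-GPU guide):

```yaml
deepspeed: deepspeed_configs/zero2.json
```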
## Quick Links {#sec-quick-links}

| Method | Config Key | Documentation | Example Config |
|--------|-----------|---------------|----------------|
| SFT | *(default, no `rl:` key)* | [Getting Started](getting-started.qmd) | `examples/llama-3/lora-1b.yml` |
| DPO | `rl: dpo` | [RLHF - DPO](rlhf.qmd#dpo) | See rlhf.qmd |
| KTO | `rl: kto` | [RLHF - KTO](rlhf.qmd#kto) | See rlhf.qmd |
| ORPO | `rl: orpo` | [RLHF - ORPO](rlhf.qmd#orpo) | See rlhf.qmd |
| GRPO | `rl: grpo` | [RLHF - GRPO](rlhf.qmd#grpo), [vLLM Serving](vllm_serving.qmd) | See rlhf.qmd |
| Reward Modeling | `rl: reward_trainer` | [Reward Modelling](reward_modelling.qmd) | See reward_modelling.qmd |

### Related Guides

- [Configuration Reference](config-reference.qmd) -- Full list of all config options
- [Dataset Formats](dataset-formats) -- How to structure your training data
- [Optimizations](optimizations.qmd) -- Flash attention, gradient checkpointing, mixed precision
- [Multi-GPU Training](multi-gpu.qmd) -- FSDP and DeepSpeed setup
- [vLLM Serving](vllm_serving.qmd) -- Setting up vLLM for GRPO training
@@ -108,14 +108,6 @@ datasets:
    type: chat_template
```

::: {.callout-tip}
`chat_template_jinja` also accepts a file path to a `.jinja2` file instead of an inline string:

```yaml
chat_template_jinja: ./path/to/my_template.jinja2
```
:::

::: {.callout-important}
Please make sure that your `tokenizer.eos_token` is the same as the EOS (End-of-Sequence) token in the template. Otherwise, set `eos_token` under `special_tokens:`.
:::
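For example, a sketch of overriding the EOS token for a ChatML-style template (the token value is illustrative; use whatever EOS your template emits):

```yaml
special_tokens:
  eos_token: "<|im_end|>"
```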
@@ -302,113 +294,6 @@
It is not necessary to set both `message_field_training` and `message_field_training_detail` at once.
:::

#### Content parts with per-part training control

Instead of using character offsets with `train_detail`, you can split a message's content into a list of parts, each with its own training flag. This is useful when you want to mask specific sections of a response (e.g., mask reasoning but train on the answer).

```{.json filename="data.jsonl"}
{
  "messages": [
    {"role": "user", "content": [{"type": "text", "text": "What is 2+2?"}]},
    {
      "role": "assistant",
      "content": [
        {"type": "text", "text": "Let me think step by step...", "train": false},
        {"type": "text", "text": " The answer is 4.", "train": true}
      ]
    }
  ]
}
```

The configuration is the same as standard `chat_template` — no extra fields needed:

```yaml
datasets:
  - path: ...
    type: chat_template
    roles_to_train: ["assistant"]
```

Each content part supports:

- `type`: `"text"` (required)
- `text`: the text value (also accepts `content` or `value` as the key)
- `train`: `true`/`false` (optional) — whether to train on this part
- `weight`: `0`/`1` (optional) — alternative to `train`

If a part has no `train` or `weight` flag, it inherits the turn-level training decision (from `roles_to_train`, `message_field_training`, or `train_on_inputs`).

::: {.callout-warning title="Whitespace at part boundaries"}
BPE tokenizers (used by Llama, Qwen, Mistral, GPT, etc.) prepend spaces to word tokens. For example, `" answer"` is a single token — the space is part of it. This means **where you place whitespace between content parts matters**:

**Split BEFORE spaces** (space goes with the next part):

```json
[
  {"type": "text", "text": "Let me think...", "train": false},
  {"type": "text", "text": " The answer is 4.", "train": true}
]
```

**DON'T put trailing spaces** on a part (the space merges with the next word into one token that straddles the boundary, and straddling tokens are masked):

```json
[
  {"type": "text", "text": "Let me think... ", "train": false},
  {"type": "text", "text": "The answer is 4.", "train": true}
]
```

In the bad example, `" The"` becomes a single token that spans both parts. Because it straddles the boundary, it is conservatively **masked** (not trained) — even though the second part has `train: true`.

**Newlines** typically merge with preceding punctuation (e.g., `":\n"` is one token). Keep newlines with the preceding part:

```json
[
  {"type": "text", "text": "Thinking:\n", "train": false},
  {"type": "text", "text": "The answer is 4.", "train": true}
]
```

Axolotl will log a warning if it detects trailing whitespace at a boundary between parts with different training flags.
:::

::: {.callout-note}
When all content parts in a message are strings, they are concatenated before being passed to the chat template. This means content parts work with **any** Jinja template — the template sees a plain string, and the per-part training flags are applied during tokenization.
:::

##### Per-part training on reasoning_content

For templates that support a separate `reasoning_content` field (e.g., `qwen3`), the same content-parts format works on `reasoning_content`. This is useful for masking incorrect reasoning steps while training on self-corrections:

```{.json filename="data.jsonl"}
{
  "messages": [
    {"role": "user", "content": [{"type": "text", "text": "What is 2+2?"}]},
    {
      "role": "assistant",
      "reasoning_content": [
        {"type": "text", "text": "Hmm maybe 2+2=5.", "train": false},
        {"type": "text", "text": " Wait no, 2+2=4.", "train": true}
      ],
      "content": [
        {"type": "text", "text": "The answer is 4.", "train": true}
      ]
    }
  ]
}
```

The `reasoning_content` and `content` fields are handled independently — each has its own token boundaries and per-part masking. No additional configuration is needed beyond what the template already requires.

::: {.callout-tip}
When `reasoning_content` is provided as a separate field, `split_thinking` is not needed — the reasoning is already separated from the content in the data.
:::

The same whitespace rules apply to `reasoning_content` parts as to `content` parts — split before spaces, keep newlines with the preceding part.

#### Reasoning split

(For Qwen3 template only) Enable reasoning split, where the reasoning is split from the content and passed as a separate field into the template.
@@ -22,47 +22,90 @@ For `pretraining_dataset:` specifically, please refer to the [Pre-training secti

## Pre-training

Pre-training trains on raw text corpora with no input masking. The dataset format is simple:
When aiming to train on large corpora of text, pre-training is your go-to choice. Due to the size of these datasets, downloading the entire dataset before beginning training would be prohibitively time-consuming. Axolotl supports [streaming](https://huggingface.co/docs/datasets/en/stream) to load only a batch at a time into memory.

A sample format for a pre-training dataset is as follows:

```json
{"text": "first row"}
{"text": "second row"}
...
```

Axolotl supports two approaches:
It is typically recommended to save your dataset as `.jsonl` due to its flexibility and simplicity.

### Streaming (large datasets)
Axolotl supports loading from a Hugging Face hub repo or from local files.

For large corpora that don't fit in memory, use `pretraining_dataset` with [streaming](../streaming.qmd). Data is tokenized on demand during training.
### Pre-training from Hugging Face hub datasets

As an example, to train using a Hugging Face dataset `hf_org/name`, you can pass the following config:

```yaml
pretraining_dataset: hf_org/name
```

### Pre-training from local dataset files

Given a few corpus files: `A.jsonl`, `B.jsonl`, and `C.jsonl`, your config will look like the below:

```yaml
pretraining_dataset:
  - path: HuggingFaceFW/fineweb-edu
    type: pretrain
    text_column: text
    split: train
  - path: json
    data_files:
      - A.jsonl
      - B.jsonl
      - C.jsonl
```

::: {.callout-important}
Streaming requires `max_steps` in your config — Axolotl cannot infer the dataset size. One step = `sequence_len * micro_batch_size * gradient_accumulation_steps * num_gpus` tokens.
:::
While we recommend `.jsonl`, you can also use the other formats (`csv`, `parquet`, `arrow`, `SQL`, `Webdataset`) that are supported by [`datasets.load_dataset`](https://huggingface.co/docs/datasets/loading#local-and-remote-files).

See [Streaming Datasets](../streaming.qmd) for full configuration details.
### Pre-training without streaming

### Non-streaming (smaller datasets)
In the case that the dataset is small and can be loaded entirely into memory, another approach to running pre-training is to use the `completion` format. This means the entire dataset is pre-tokenized up front instead of on demand as in streaming.

For datasets that fit in memory, use `type: completion` under `datasets:`. The entire dataset is pre-tokenized before training, which can be done on a CPU-only machine.
One benefit of this is that tokenization can be performed separately on a CPU-only machine, and the tokenized dataset then transferred to a GPU machine for training to save costs.

From Hugging Face:

```yaml
datasets:
  - path: my_corpus
  - path: hf_org/name
    type: completion
```

::: {.callout-note}
With `completion`, texts exceeding `sequence_len` are split into multiple samples automatically.
:::
From local files:

```yaml
datasets:
  - path: A.jsonl
    type: completion

  - path: B.jsonl
    type: completion
```

::: {.callout-important}
For `completion` only, Axolotl splits texts that exceed the context length into multiple smaller prompts. If you are interested in having this for `pretraining_dataset` too, please let us know or help make a PR!
:::

### Pre-training dataset configuration tips

#### Setting max_steps

When using streaming for large datasets, Axolotl does not know in advance how large the dataset is, so it cannot determine when to stop.

Therefore, it is necessary to set `max_steps: int` in your config for pre-training to run, so that Axolotl knows when to stop training.

One step is equal to `sequence_len * micro_batch_size * gradient_accumulation_steps * total_num_gpus` tokens.
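As a sketch of that arithmetic (all values illustrative):

```yaml
sequence_len: 2048
micro_batch_size: 2
gradient_accumulation_steps: 4
# tokens per step on 2 GPUs = 2048 * 2 * 4 * 2 = 32,768
# a ~1B-token budget therefore needs about 1e9 / 32768 ~= 30,518 steps
max_steps: 30518
```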
#### `group_by_length`

It is recommended to leave this off if downloading from the Hugging Face hub, as it would download the entire dataset, which can be very large.
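A sketch of the corresponding key (shown explicitly in its off state, assuming that is what you want):

```yaml
group_by_length: false
```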
### Reference

Please see docs [here](pretraining.qmd).

## Supervised fine-tuning (SFT)

Supervised fine-tuning is the process of training models to respond to an instruction or chat input.
@@ -4,9 +4,29 @@ description: Data format for a pre-training completion task.
order: 1
---

::: {.callout-note}
Pre-training documentation has been consolidated:
For pretraining, there is no prompt template or roles. The only required field is `text`:

```{.json filename="data.jsonl"}
{"text": "first row"}
{"text": "second row"}
...
```

::: {.callout-note}

### Streaming is recommended for large datasets

Axolotl usually loads the entire dataset into memory. This can be challenging for large datasets. Use the following config to enable streaming:

```{.yaml filename="config.yaml"}
pretraining_dataset:
  - name:
    path:
    split:
    text_column: # column in dataset with the data, usually `text`
    type: pretrain
    trust_remote_code:
    skip: # number of rows of data to skip over from the beginning
```

- **Streaming pretraining** (large datasets): See [Streaming Datasets](../streaming.qmd#pretraining-with-streaming)
- **Non-streaming pretraining** (`type: completion`): See [Dataset Formats](index.qmd#pre-training)
:::
@@ -6,10 +6,6 @@ description: How to debug Axolotl

This document provides some tips and tricks for debugging Axolotl. It also provides an example configuration for debugging with VSCode. A good debugging setup is essential to understanding how Axolotl code works behind the scenes.

::: {.callout-tip}
For training-specific debugging (loss spikes, NaN gradients, OOM errors, RL training stability), see [Training Stability & Debugging](training_stability.qmd).
:::

## Table of Contents

- [General Tips](#general-tips)

@@ -76,9 +72,8 @@ datasets:
Make sure you have an [editable install](https://setuptools.pypa.io/en/latest/userguide/development_mode.html) of Axolotl, which ensures that changes you make to the code are reflected at runtime. Run the following commands from the root of this project:

```bash
export UV_TORCH_BACKEND=cu128  # or cu130
uv sync --extra flash-attn --extra deepspeed --group dev --group test
source .venv/bin/activate
pip3 install packaging
pip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'
```

#### Remote Hosts

@@ -90,7 +85,7 @@ If you are developing on a remote host, you can easily use VSCode to debug remotely.

The easiest way to get started is to modify the [.vscode/launch.json](../.vscode/launch.json) file in this project. This is just an example configuration, so you may need to modify or copy it to suit your needs.

For example, to mimic the command `cd devtools && CUDA_VISIBLE_DEVICES=0 axolotl train dev_chat_template.yml`, you would use the below configuration[^1]. Note that we add additional flags that override the axolotl config and incorporate the tips above (see the comments). We also set the working directory to `devtools` and set the `env` variable `HF_HOME` to a temporary folder that is later partially deleted. This is because we want to delete the HF dataset cache before each run in order to ensure that the data preprocessing code is run from scratch.
For example, to mimic the command `cd devtools && CUDA_VISIBLE_DEVICES=0 accelerate launch -m axolotl.cli.train dev_chat_template.yml`, you would use the below configuration[^1]. Note that we add additional flags that override the axolotl config and incorporate the tips above (see the comments). We also set the working directory to `devtools` and set the `env` variable `HF_HOME` to a temporary folder that is later partially deleted. This is because we want to delete the HF dataset cache before each run in order to ensure that the data preprocessing code is run from scratch.

```json
// .vscode/launch.json
@@ -209,17 +204,17 @@ cd axolotl
Next, run the desired docker image and mount the current directory. Below is a docker command you can run to do this:[^2]

```bash
docker run --privileged --gpus '"all"' --shm-size 10g --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=bind,src="${PWD}",target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface axolotlai/axolotl-uv:main-latest
docker run --privileged --gpus '"all"' --shm-size 10g --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=bind,src="${PWD}",target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface axolotlai/axolotl:main-py3.10-cu118-2.0.1
```

>[!Tip]
> To understand which containers are available, see the [Docker section of the README](../README.md#docker) and the [DockerHub repo](https://hub.docker.com/r/axolotlai/axolotl/tags). For details of how the Docker containers are built, see axolotl's [Docker CI builds](../.github/workflows/main.yml).

You will now be in the container. Next, install Axolotl with dev dependencies:
You will now be in the container. Next, perform an editable install of Axolotl:

```bash
uv sync --extra flash-attn --extra deepspeed --group dev --group test
source .venv/bin/activate
pip3 install packaging
pip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'
```

### Attach To Container

@@ -247,6 +242,6 @@ style="border-radius: 10px; display: block; margin: auto;" width="560" height="3
</div>
<br>

[^1]: The VSCode config uses `accelerate.commands.launch` as the Python module entry point, which is what `axolotl train` invokes under the hood.
[^1]: The config actually mimics the command `CUDA_VISIBLE_DEVICES=0 python -m accelerate.commands.launch -m axolotl.cli.train devtools/chat_template.yml`, but this is the same thing.

[^2]: Many of the below flags are recommended best practices by Nvidia when using nvidia-container-toolkit. You can read more about these flags [here](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html).
@@ -6,30 +6,23 @@ format:
  toc-depth: 4
---

This section describes the different Docker images that are released by AxolotlAI at [Docker Hub](https://hub.docker.com/u/axolotlai).

::: {.callout-important}
For Blackwell GPUs, please use the tags with PyTorch 2.9.1 and CUDA 12.8.
:::

::: {.callout-tip}
Each image below is available in a **uv variant** that uses [uv](https://docs.astral.sh/uv/) with a relocatable venv (`/workspace/axolotl-venv`) instead of Miniconda + pip. Append `-uv` to the image name (e.g. `axolotlai/axolotl-base-uv`). Tags follow the same format. We recommend the uv images for new deployments.
For Blackwell GPUs, please use the tags with PyTorch 2.7.1 and CUDA 12.8.
:::

## Base

The base image is the most minimal image that can install Axolotl. It is based on the `nvidia/cuda` image. It includes python, torch, git, git-lfs, awscli, pydantic, and more.

#### Image

| Variant | Image | Docker Hub |
|---------|-------|------------|
| pip | `axolotlai/axolotl-base` | [Link](https://hub.docker.com/r/axolotlai/axolotl-base) |
| uv | `axolotlai/axolotl-base-uv` | [Link](https://hub.docker.com/r/axolotlai/axolotl-base-uv) |
```
axolotlai/axolotl-base
```

Link: [Docker Hub](https://hub.docker.com/r/axolotlai/axolotl-base)

#### Tags format

@@ -39,10 +32,8 @@ main-base-py{python_version}-cu{cuda_version}-{pytorch_version}

Tags examples:

- `main-base-py3.11-cu128-2.8.0`
- `main-base-py3.11-cu128-2.9.1`
- `main-base-py3.12-cu128-2.10.0`
- `main-base-py3.12-cu130-2.9.1`
- `main-base-py3.12-cu130-2.10.0`

## Main

@@ -50,10 +41,11 @@ The main image is the image that is used to run Axolotl. It is based on the `axo

#### Image

| Variant | Image | Docker Hub |
|---------|-------|------------|
| pip | `axolotlai/axolotl` | [Link](https://hub.docker.com/r/axolotlai/axolotl) |
| uv | `axolotlai/axolotl-uv` | [Link](https://hub.docker.com/r/axolotlai/axolotl-uv) |
```
axolotlai/axolotl
```

Link: [Docker Hub](https://hub.docker.com/r/axolotlai/axolotl)

#### Tags format {#sec-main-tags}

@@ -61,7 +53,7 @@ The main image is the image that is used to run Axolotl. It is based on the `axo
# on push to main
main-py{python_version}-cu{cuda_version}-{pytorch_version}

# latest main (currently torch 2.9.1, python 3.11, cuda 12.8)
# latest main (currently torch 2.6.0, python 3.11, cuda 12.4)
main-latest

# nightly build
@@ -79,12 +71,11 @@ There may be some extra tags appended to the image, like `-vllm` which installs

Tags examples:

- `main-py3.11-cu128-2.8.0`
- `main-py3.11-cu128-2.9.1`
- `main-py3.12-cu128-2.10.0`
- `main-py3.12-cu130-2.9.1`
- `main-py3.12-cu130-2.10.0`
- `main-latest`
- `main-20260315-py3.11-cu128-2.9.1`
- `main-20250303-py3.11-cu124-2.6.0`
- `main-20250303-py3.11-cu126-2.6.0`
- `0.12.0`

## Cloud

@@ -99,10 +90,11 @@ Jupyter lab is run by default. Set `JUPYTER_DISABLE=1` in the environment variab

#### Image

| Variant | Image | Docker Hub |
|---------|-------|------------|
| pip | `axolotlai/axolotl-cloud` | [Link](https://hub.docker.com/r/axolotlai/axolotl-cloud) |
| uv | `axolotlai/axolotl-cloud-uv` | [Link](https://hub.docker.com/r/axolotlai/axolotl-cloud-uv) |
```
axolotlai/axolotl-cloud
```

Link: [Docker Hub](https://hub.docker.com/r/axolotlai/axolotl-cloud)

#### Tags format
556
docs/ebft.qmd
556
docs/ebft.qmd
@@ -1,556 +0,0 @@
---
title: "EBFT Training"
description: "Energy-Based Fine-Tuning uses feature-matching rewards from internal representations to train language models without external reward functions."
order: 9
back-to-top-navigation: true
toc: true
toc-expand: 2
toc-depth: 4
---

## Overview

Energy-Based Fine-Tuning (EBFT) is a training method that optimizes language models by matching the **internal feature representations** of generated text to those of ground-truth completions. Instead of relying on external reward models or hand-crafted reward functions, EBFT extracts hidden states from intermediate layers of a frozen copy of the model and uses cosine similarity between generated and reference features as the reward signal.

Paper: ["Matching Features, Not Tokens: Energy-Based Fine-Tuning of Language Models"](https://arxiv.org/abs/2603.12248) (Jelassi et al., 2026)

### How EBFT Differs from Other RL Methods

| Method | Reward Signal | Requires | Best For |
|--------|--------------|----------|----------|
| **GRPO** | External reward function(s) | Custom reward code or reward model | Tasks with verifiable answers (math, code) |
| **DPO** | Preference pairs (chosen vs rejected) | Paired preference data | Alignment with human preferences |
| **EBFT** | Feature similarity to ground truth | Ground-truth completions | Any task with reference outputs |

EBFT's key advantage is that it needs only ground-truth completions -- no reward engineering, no preference annotation, and no reward model training. The model's own internal representations serve as the reward signal. This makes it particularly effective for:

- Code generation (match features of known-good solutions)
- Instruction following with reference outputs
- Continual pretraining on unstructured text (strided mode)
- Multi-turn dialogue with reference conversations

### Reward Formulation

The EBFT reward for each generated completion is:

```
reward = alignment_coef * cosine_similarity(gen_features, gt_features)
       - diversity_coef * mean_pairwise_similarity(gen_features)
```

- **Alignment**: How closely the generated output's internal representations match the ground truth. Higher is better.
- **Diversity**: Penalizes generated samples that are too similar to each other (prevents mode collapse). Lower is better.
- **CFM loss** (Cross-Feature Matching): Tracks `||mean(gen_features) - gt_features||^2` as a diagnostic. This is the quantity that EBFT ultimately minimizes.

## Modes

EBFT supports three operational modes, each suited to different use cases.

### Structured Mode (Sync)

Uses vLLM on a separate GPU for generation, with sequential generate-score-train steps. This is the simplest mode and recommended for getting started.

```
GPU 0: vLLM Server (generates completions, receives weight syncs)
GPU 1: Trainer (feature extraction, reward computation, GRPO training)
```

**When to use**: Standard instruction-following or QA datasets where you have prompt/completion pairs. Requires 2 GPUs.

### Structured Mode (Async)

Same architecture as sync, but overlaps generation of the next batch with training on the current batch. Faster throughput at the cost of slightly stale weights during generation.

**When to use**: Same data as sync mode, but when you want faster training and can tolerate weight staleness (controlled by `vllm_sync_interval`).

### Strided Mode

Runs entirely on a single GPU with no vLLM dependency. Places anchor points throughout a document and generates short rollouts at each anchor using block-parallel attention patterns.

```
Single GPU: Base model + LoRA adapter
  - Strided block-parallel generation (flex_attention)
  - Feature extraction via disable_adapter()
  - No vLLM needed
```

**When to use**: Unstructured text data (raw code, prose, documents) where there is no natural prompt/completion split. Also works with structured data that includes prompt boundaries. Requires only 1 GPU.

## Quick Start

### Structured Mode

This minimal example fine-tunes Qwen2-0.5B on code data using EBFT with vLLM generation.

**Step 1**: Create a config file `ebft_quickstart.yaml`:

```yaml
base_model: Qwen/Qwen2-0.5B-Instruct

rl: ebft

ebft:
  feature_layers: [0.25, 0.5, 0.75]
  embed_method: last_token
  alignment_coef: 1.0
  diversity_coef: 1.0

trl:
  num_generations: 4
  max_completion_length: 256
  temperature: 0.7
  use_vllm: true
  vllm_server_host: 0.0.0.0
  vllm_server_port: 8000
  vllm_lora_sync: true
  vllm_sync_interval: 3
  use_data_producer: true
  async_prefetch: false
  scale_rewards: true
  loss_type: grpo

vllm:
  gpu_memory_utilization: 0.5
  max_model_len: 1024

datasets:
  - path: nvidia/OpenCodeInstruct
    type: ebft_opencode.transform
    split: train[:500]

# Standard training settings (see getting-started.qmd for details)
adapter: lora
lora_r: 16
lora_alpha: 32
lora_target_linear: true
sequence_len: 1024
micro_batch_size: 2
gradient_accumulation_steps: 4
max_steps: 20
learning_rate: 5.0e-6
bf16: auto
attn_implementation: flash_attention_2
gradient_checkpointing: true
output_dir: ./outputs/ebft-quickstart
```

**Step 2**: Start vLLM on GPU 0:

```bash
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve ebft_quickstart.yaml
```

**Step 3**: Wait approximately 30 seconds for vLLM to initialize, then start training on GPU 1:

```bash
CUDA_VISIBLE_DEVICES=1 axolotl train ebft_quickstart.yaml
```

::: {.callout-important}
The `micro_batch_size` must be divisible by `num_generations`. For example, with `num_generations: 4`, valid values are 4, 8, 12, etc.
:::

### Dataset Format

Structured mode datasets must produce two fields after the transform:

- `prompt`: Either a string or a list of chat messages (`[{"role": "user", "content": "..."}]`)
- `ground_truth`: A string containing the reference completion

Example raw dataset row:

```json
{
  "input": "Write a function to compute fibonacci numbers.",
  "output": "def fibonacci(n):\n    if n <= 1:\n        return n\n    return fibonacci(n-1) + fibonacci(n-2)"
}
```

The `ebft_opencode.transform` converts this to the required `{prompt, ground_truth}` format automatically.

## Feature Extraction

EBFT extracts hidden states from intermediate transformer layers and pools them into per-sequence embeddings. These embeddings are compared between generated and ground-truth completions to compute rewards.

### Feature Layers

The `feature_layers` parameter specifies which layers to extract, as fractions of total model depth:

```yaml
ebft:
  feature_layers: [0.25, 0.5, 0.75]  # Quarter, middle, three-quarter depth
```

For a 32-layer model, this extracts layers 8, 16, and 24. The hidden states from all selected layers are concatenated along the feature dimension, producing embeddings of size `num_layers * hidden_dim`.

::: {.callout-tip}
Using multiple layers captures both low-level syntactic features (early layers) and high-level semantic features (later layers). The default `[0.25, 0.5, 0.75]` works well across model sizes.
:::

### Embed Methods

The `embed_method` controls how per-token hidden states are pooled into a single vector per sequence:

| Method | Description | Output Shape | Notes |
|--------|-------------|-------------|-------|
| `last_token` | Hidden state at the last non-padding token | `(B, D)` | Default. Good for autoregressive models where the last token summarizes the sequence. |
| `mean_pooling` | Mean of all non-padding token states | `(B, D)` | Considers the entire sequence equally. |
| `completion_mean` | Mean over completion tokens only (excludes prompt) | `(B, D)` | Focuses reward signal on generated content. Requires prompt length information. |
| `concat` | Concatenation of states at 25%, 50%, 75% positions | `(B, 3*D)` | Captures positional structure. Higher dimensional. |

```yaml
ebft:
  embed_method: completion_mean  # Focus on completion features
```

### SVD Whitening

Whitening decorrelates the feature dimensions so that no single direction dominates the feature-matching loss. This is computed via SVD on the generated embeddings, with the same transform applied to the ground-truth embeddings.

```yaml
ebft:
  use_whitening: true
```

When whitening is enabled, the reward computation applies a whitening matrix `W = U @ diag(1/S) @ U^T` derived from the SVD of generated embeddings. This ensures all feature dimensions contribute equally to the alignment reward.
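Restating that in math form (notation ours; $U$ and $S$ are the SVD factors of the generated-embedding matrix, and the same $W$ is applied to both embedding sets):

$$
W = U \,\mathrm{diag}(1/S)\, U^{\top}, \qquad \tilde{z}_{\text{gen}} = W z_{\text{gen}}, \qquad \tilde{z}_{\text{gt}} = W z_{\text{gt}}
$$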
::: {.callout-note}
Singular values scale with `sqrt(batch_size)`, so reward magnitudes are batch-size dependent. This is acceptable because the number of samples per prompt (`n_samples_per_prompt` or `num_generations`) is fixed during training.
:::

### Alignment and Diversity Coefficients

The two reward components are weighted by coefficients:

```yaml
ebft:
  alignment_coef: 1.0  # Weight for cosine similarity with ground truth
  diversity_coef: 1.0  # Weight for pairwise similarity penalty
```

Both values are scaled by 2 internally (per paper equation 7). The final reward per sample is:

```
reward_j = 2 * alignment_coef * cos(gen_j, gt)
         - 2 * diversity_coef * (1/(n-1)) * sum_{j' != j} dot(gen_j, gen_j')
```

Setting `diversity_coef: 0.0` disables the diversity penalty entirely, which may be appropriate when `num_generations` is small (e.g., 2).

## Strided Mode

Strided mode is designed for training on unstructured text data where there is no natural prompt/completion boundary. Instead of generating full completions with vLLM, it places **anchor points** at regular intervals throughout each document and generates short rollouts at each anchor using block-parallel attention.

### How Block-Parallel Generation Works

Given a document of length `S` tokens:

1. **Anchor placement**: Starting at position `anchor_offset`, place anchors every `stride` tokens. Each anchor defines a block.
2. **Context window**: Each block sees `context_length` tokens of preceding context from the original document.
3. **Generation**: At each anchor, generate `generate_max_len` tokens autoregressively, conditioned only on the context window.
4. **Parallelism**: All blocks are processed in a single forward pass using a specialized attention mask that prevents information leakage between blocks.

```
Document: [tok0, tok1, ..., tok_S]
              |          |          |
          anchor_0   anchor_1   anchor_2
              |          |          |
          [ctx][gen] [ctx][gen] [ctx][gen]
```

The attention mask ensures:

- Prompt tokens use standard causal attention
- Each generated block attends to its own context window and its own preceding generated tokens
- Blocks do not attend to each other's generated tokens

When `flex_attention` is available (PyTorch >= 2.5), the mask is compiled into efficient fused kernels. Otherwise, a dense 4D attention mask is used as a fallback.

### Strided Mode Configuration

```yaml
base_model: meta-llama/Llama-3.2-1B
rl: ebft

ebft:
  mode: strided
  stride: 8                  # Tokens between anchor points
  context_length: 8          # Context window per block
  generate_max_len: 8        # Tokens to generate per block
  n_samples_per_prompt: 4    # Independent rollouts per document
  temperature: 0.6
  feature_layers: [0.25, 0.5, 0.75]
  embed_method: last_token
  use_whitening: true
  alignment_coef: 1.0
  diversity_coef: 1.0
  rl_coef: 1.0               # RL policy gradient loss weight
  ce_coef: 0.03              # Cross-entropy loss on GT tokens
  advantage_estimator: rloo  # rloo, group_norm, or reinforce
  min_completion_prefix: 8   # Skip anchors in prompt region

datasets:
  - path: nvidia/OpenCodeInstruct
    type: ebft_strided_structured.transform
    split: train[:1%]

sequence_len: 2048
micro_batch_size: 1
gradient_accumulation_steps: 2

adapter: lora
lora_r: 16
lora_alpha: 32
lora_target_linear: true

bf16: auto
attn_implementation: flex_attention
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: true  # Required with flex_attention
```

Run with a single command (no vLLM needed):

```bash
CUDA_VISIBLE_DEVICES=0 axolotl train config.yaml
```

### Advantage Estimators

Strided mode supports three advantage estimation methods:

| Estimator | Formula | Requirements |
|-----------|---------|-------------|
| `rloo` | Leave-one-out baseline: `reward_j - mean(rewards_{-j})` | `n_samples_per_prompt >= 2` |
| `group_norm` | Group normalization: `(reward_j - mean) / std` | `n_samples_per_prompt >= 2` |
| `reinforce` | Raw reward as advantage (no baseline) | Works with `n_samples_per_prompt = 1` |

::: {.callout-warning}
When `n_samples_per_prompt: 1`, the trainer automatically falls back to `reinforce` and disables the diversity penalty (which requires multiple samples).
:::
### Strided Mode Constraints

- **`flex_attention: true`** is strongly recommended. Without it, dense 4D masks consume significantly more memory.
- **`torch_compile: true`** must NOT be set. `flex_attention` compiles its own kernels internally; adding `torch_compile` causes conflicts and OOM.
- **Gradient checkpointing** must use `use_reentrant: true`. Non-reentrant checkpointing causes `CheckpointError` with `flex_attention` block masks.
- **`activation_offloading`** is incompatible with `flex_attention` (a consolidated config sketch follows this list).
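Putting those constraints together, a safe strided-mode baseline looks like this (a sketch built only from the keys named above):

```yaml
ebft:
  mode: strided
attn_implementation: flex_attention
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: true  # required with flex_attention block masks
# do NOT set torch_compile: true or activation_offloading alongside flex_attention
```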
### Cross-Entropy Loss

Strided mode supports an optional cross-entropy loss term on ground-truth tokens. This acts as a regularizer to prevent the model from drifting too far from the original distribution:

```yaml
ebft:
  ce_coef: 0.03  # Small CE coefficient
  rl_coef: 1.0   # RL loss coefficient
```

The total loss is `rl_coef * rl_loss + ce_coef * ce_loss`. For structured mode, `ce_coef` is typically `0.0` since vLLM generation provides sufficient learning signal.

## Dataset Formats

EBFT provides several built-in dataset transforms in `src/axolotl/prompt_strategies/ebft/`.

### Built-In Transforms

| Transform | Input Format | Output Fields | Use Case |
|-----------|-------------|---------------|----------|
| `ebft_opencode.transform` | `{input, output}` | `{prompt, ground_truth}` | OpenCodeInstruct, structured QA |
| `ebft_strided_structured.transform` | `{input, output}` | `{input_ids, labels, prompt_length}` | Strided mode with structured data |
| `ebft_strided_chat.transform` | `{messages: [...]}` | `{input_ids, labels, prompt_length}` | Strided mode with chat data |
| `ebft_chat_multiturn.transform` | `{messages: [...]}` | `{prompt, ground_truth, remaining_turns}` | Multi-turn: first-turn target |
| `ebft_chat_multiturn.transform_last_turn` | `{messages: [...]}` | `{prompt, ground_truth}` | Multi-turn: last-turn target |
| `ebft_chat_multiturn.transform_all_turns` | `{messages: [...]}` | `{prompt[], ground_truth[]}` | Multi-turn: one example per turn |
| `ebft_reasoning.transform` | `{messages: [...]}` (with `<think>`) | `{prompt, ground_truth}` | Reasoning/thinking datasets |

### Structured Mode Datasets

For structured (sync/async) mode, the transform must produce `prompt` and `ground_truth` fields:

```yaml
datasets:
  - path: nvidia/OpenCodeInstruct
    type: ebft_opencode.transform
    split: train[:500]
```

### Multi-Turn Datasets

Multi-turn transforms extract conversation data for sequential rollout. The `transform` variant targets the first assistant turn, while `transform_last_turn` targets the final turn:

```yaml
datasets:
  - path: your/multiturn-dataset
    type: ebft_chat_multiturn.transform
```

When `remaining_turns` is present in the dataset output, the trainer performs sequential rollouts: it generates the first assistant turn with vLLM, then continues generating subsequent turns by building up the conversation history.

### Strided Mode Datasets

Strided transforms tokenize the full document and produce `input_ids`, `labels`, and `prompt_length`:

```yaml
datasets:
  - path: nvidia/OpenCodeInstruct
    type: ebft_strided_structured.transform
    split: train[:1%]
```

### Custom Transforms

To use your own dataset format, write a transform function:

```python
def transform(cfg, **kwargs):
    def transform_fn(example, tokenizer=None):
        return {
            "prompt": [{"role": "user", "content": example["question"]}],
            "ground_truth": example["answer"],
        }

    return transform_fn, {"remove_columns": "__all__"}
```

The `"__all__"` sentinel removes all original dataset columns after the mapping step. Reference this transform in your config:

```yaml
datasets:
  - path: your/dataset
    type: your_module.transform
```

## Configuration Reference

### Common Parameters (All Modes)

These parameters are set under the `ebft:` key in the YAML config.

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `mode` | `"structured"` or `"strided"` | `"structured"` | EBFT operating mode |
| `feature_layers` | `list[float]` | `[0.25, 0.5, 0.75]` | Fractional layer depths for feature extraction |
| `embed_method` | `string` | `"last_token"` | Pooling method: `last_token`, `mean_pooling`, `completion_mean`, or `concat` |
| `use_whitening` | `bool` | `false` | Apply SVD whitening to feature embeddings before reward computation |
| `alignment_coef` | `float` | `1.0` | Weight for alignment reward (cosine similarity with ground truth) |
| `diversity_coef` | `float` | `1.0` | Weight for diversity penalty (pairwise dot product between samples) |
| `ce_coef` | `float` | `0.0` | Cross-entropy loss coefficient on ground-truth tokens |
| `adaptive_max_tokens` | `bool` | `true` | Dynamically set vLLM `max_tokens` based on ground-truth length (structured mode) |
| `gt_length_multiplier` | `float` | `1.5` | Multiplier for ground-truth token count when computing adaptive max tokens (min 0.1) |
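For instance, a sketch combining the adaptive-length knobs from the table above (values shown are the listed defaults):

```yaml
ebft:
  adaptive_max_tokens: true   # derive vLLM max_tokens from the ground-truth length
  gt_length_multiplier: 1.5   # allow up to 1.5x the ground-truth token count
```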
### Strided Mode Parameters

These additional parameters apply only when `mode: strided`.

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `stride` | `int` | `8` | Number of tokens between anchor points (must be >= 1) |
| `context_length` | `int` | `8` | Context window size for each generated block (must be >= 1) |
| `generate_max_len` | `int` | `8` | Number of tokens to generate per block (must be >= 1) |
| `n_samples_per_prompt` | `int` | `4` | Number of independent rollouts per document (must be >= 1) |
| `temperature` | `float` | `0.6` | Sampling temperature for strided generation |
| `top_p` | `float` | `1.0` | Top-p nucleus sampling threshold |
| `rl_coef` | `float` | `1.0` | RL policy gradient loss coefficient |
| `advantage_estimator` | `string` | `"rloo"` | Advantage estimation method: `rloo`, `group_norm`, or `reinforce` |
| `min_completion_prefix` | `int` | `0` | Minimum tokens into the completion span before placing anchors |

### Structured Mode TRL Parameters

These are set under the `trl:` key and control the GRPO training loop.

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `num_generations` | `int` | -- | Number of completions generated per prompt |
| `max_completion_length` | `int` | -- | Maximum tokens per generated completion |
| `temperature` | `float` | `0.7` | Sampling temperature for vLLM generation |
| `use_vllm` | `bool` | -- | Enable vLLM generation backend |
| `vllm_lora_sync` | `bool` | `false` | Sync LoRA adapters via filesystem (recommended) |
| `vllm_sync_interval` | `int` | `1` | Steps between weight syncs to vLLM |
| `use_data_producer` | `bool` | -- | Required for sync mode with LoRA sync |
| `async_prefetch` | `bool` | `false` | Enable async generation (overlaps with training) |
| `streaming_partial_batch` | `bool` | `false` | Score groups incrementally (async mode) |
| `skip_zero_advantage_batches` | `bool` | `false` | Skip micro-batches where all advantages are zero |
| `scale_rewards` | `bool` | -- | Normalize rewards within each prompt group |
| `loss_type` | `string` | `"grpo"` | Loss type for policy optimization |
| `epsilon` | `float` | `0.2` | Clipping parameter for importance sampling |

### Stop Tokens

vLLM needs explicit stop token IDs for generation. Common configurations:

```yaml
trl:
  generation_kwargs:
    stop_token_ids: [151645, 151643]  # Qwen: <|im_end|>, <|endoftext|>
```

### Multi-Turn Chat Settings

For multi-turn conversations with Qwen3.5, disable thinking mode to prevent `<think>` tags in completions:

```yaml
trl:
  chat_template_kwargs:
    enable_thinking: false
```

## Monitoring

### Key Metrics

EBFT logs several custom metrics to wandb and the training console. Here is what to watch for:

| Metric | Healthy Range | Interpretation |
|--------|--------------|----------------|
| `ebft/alignment` | 0.3 -- 0.9, trending upward | Cosine similarity between generated and ground-truth features. Higher means the model is learning to produce representations that match the reference. |
| `ebft/diversity` | 0.01 -- 0.1 | Mean pairwise similarity between different generations for the same prompt. Values above 1.0 indicate mode collapse. |
| `ebft/cfm_loss` | Below 10, trending downward | Cross-Feature Matching loss. This is the core quantity being minimized. Consistently above 100 indicates instability. |
| `ebft/reward` | Trending upward (may start negative) | Combined reward signal. If stuck at -1.0, the diversity penalty is dominating alignment. |
| `grad_norm` | 0.1 -- 3.0 | Gradient magnitude. Values of 0.0 indicate a zero-advantage skip (normal). Values above 10 suggest instability. |
| `entropy` | 0.05 -- 0.5 | Policy entropy. Values below 0.01 suggest mode collapse. |
| `IS ratio min` | Above 0.1 | Importance sampling ratio minimum. Near-zero values mean the policy is too far off-policy; increase `vllm_sync_interval`. |

### Console Log Example

During training, you will see periodic EBFT reward logs:

```
ebft reward | align +0.412 ^ | divers +0.023 v | cfm 4.231 v | reward +0.389 ^
```

The arrows indicate the desired direction: alignment and reward should trend upward, while diversity and CFM loss should trend downward.

### Troubleshooting

| Symptom | Likely Cause | Fix |
|---------|-------------|-----|
| `alignment` stays below 0.1 | Feature layers not capturing useful information | Try different `feature_layers` or `embed_method` |
| `diversity` exceeds 1.0 | Mode collapse -- generations are too similar | Increase `diversity_coef` or `temperature` |
| `reward` stuck at -1.0 | Diversity penalty dominates alignment | Reduce `diversity_coef` or increase `alignment_coef` |
| `grad_norm` consistently 0.0 | All micro-batches have zero advantage | Increase `num_generations` or check data quality |
| `CheckpointError` in strided mode | Incompatible gradient checkpointing settings | Set `use_reentrant: true` in `gradient_checkpointing_kwargs` |
| OOM during training | Logits tensor too large | Reduce `sequence_len` or `micro_batch_size`; strided mode uses a chunked lm_head to mitigate this |
| vLLM 500 errors | `truncate_prompt_tokens` not supported | Ensure you are using `axolotl vllm-serve` (not `trl vllm-serve`) |

### Feature Network Memory

In PEFT (LoRA) mode, the feature network shares base weights with the actor model by using the `disable_adapter()` context manager. This saves an entire model copy in VRAM (approximately 1--16 GB depending on model size). For non-PEFT training, a separate frozen deepcopy is created.

::: {.callout-note}
The `disable_adapter()` approach relies on an invariant: `merge_adapter()` is never called on the base weights. All weight sync paths (LoRA sync, HTTP, NCCL) compute merged weights as new tensors or save the adapter to the filesystem, leaving base weights unmodified.
:::

## Examples

Complete example configurations are available in `examples/ebft/`:

| Config | Model | Mode | Description |
|--------|-------|------|-------------|
| `llama-1b-ebft-strided-structured.yaml` | Llama 3.2 1B | Strided | Single-GPU strided training on code data |
| `qwen3-4b-ebft-structured.yaml` | Qwen3 4B | Structured (sync) | Two-GPU structured training |
| `qwen3-4b-ebft-structured-async.yaml` | Qwen3 4B | Structured (async) | Two-GPU async training with prefetch |
| `qwen3-8b-ebft-structured.yaml` | Qwen3 8B | Structured (sync) | Two-GPU structured training for larger model |
| `qwen35-4b-ebft-structured.yaml` | Qwen3.5 4B | Structured (sync) | Two-GPU with Qwen3.5 |
| `qwen35-4b-ebft-structured-async.yaml` | Qwen3.5 4B | Structured (async) | Two-GPU async with Qwen3.5 |
| `qwen35-9b-ebft-structured.yaml` | Qwen3.5 9B | Structured (sync) | Two-GPU structured for 9B model |
@@ -170,26 +170,17 @@ More details can be found in [Merging LoRA weights](inference.qmd#sec-merging).

## Next Steps {#sec-next-steps}

Now that you have the basics, explore these guides based on what you want to do:
Now that you have the basics, you might want to:

**Choose your path:**
- Try different model architectures
- Experiment with hyperparameters
- Use more advanced training methods
- Scale up to larger models

- [Choosing a Fine-Tuning Method](choosing_method.qmd) — SFT vs LoRA vs QLoRA vs GRPO vs DPO, with hardware recommendations
Check our other guides for details on these topics:

**Core guides:**

- [Dataset Loading](dataset_loading.qmd) — Loading datasets from various sources
- [Dataset Formats](dataset-formats) — Working with different data formats
- [Optimizations](optimizations.qmd) — Flash attention, gradient checkpointing, sample packing
- [Training Stability & Debugging](training_stability.qmd) — Monitoring metrics, fixing NaN, OOM debugging

**Advanced training methods:**

- [RLHF / Preference Learning](rlhf.qmd) — DPO, KTO, GRPO, EBFT
- [GRPO Training](grpo.qmd) — RL with custom rewards and vLLM generation
- [vLLM Serving](vllm_serving.qmd) — Setting up vLLM for GRPO

**Scaling up:**

- [Multi-GPU Training](multi-gpu.qmd) — DeepSpeed, FSDP, DDP
- [Multi-Node Training](multi-node.qmd) — Distributed training across machines
- [Configuration Guide](config-reference.qmd) - Full configuration options
- [Dataset Loading](dataset_loading.qmd) - Loading datasets from various sources
- [Dataset Formats](dataset-formats) - Working with different data formats
- [Multi-GPU Training](multi-gpu.qmd)
- [Multi-Node Training](multi-node.qmd)

@@ -1,5 +1,5 @@
---
title: Gradient Checkpointing, Activation Offloading, and Layer Offloading
title: Gradient Checkpointing and Activation Offloading
---

Gradient checkpointing and activation offloading are techniques used to optimize the performance of deep learning
@@ -27,33 +27,3 @@ The `activation_offloading: legacy` naively offloads activations to CPU and with

For resource constrained environments with limited CPU memory, `activation_offloading: disk` offloads
activations to disk instead of CPU RAM so that much larger context lengths can be trained with minimal memory.

### Enabling Layer Offloading

```yaml
layer_offloading: true
```

Layer offloading reduces GPU memory usage by moving frozen (non-trainable) decoder layer parameters to CPU
and streaming them back to GPU one layer at a time during the forward and backward passes. This is
particularly useful for LoRA/QLoRA training where most of the model's parameters are frozen — only the
trainable adapter weights stay on GPU permanently.

During training, forward and backward hooks on each decoder layer handle the transfer automatically:

- **Forward pass:** Before a layer executes, its frozen params are loaded to GPU. The next layer is
  prefetched asynchronously on a separate CUDA stream for overlap.
- **Backward pass:** Same pattern in reverse — the current layer's frozen params are loaded and the
  previous layer is prefetched.

After each layer finishes, its frozen params are offloaded back to CPU pinned memory.

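As a mental model, the forward-pass hook mechanics look roughly like this — an illustrative sketch, not axolotl's actual implementation (backward hooks, stream-safety events, and pinned-buffer reuse are omitted):

```python
import torch
from torch import nn

def attach_offload_hooks(layers: nn.ModuleList) -> None:
    prefetch = torch.cuda.Stream()

    def move_frozen(layer: nn.Module, device: str) -> None:
        for p in layer.parameters():
            if not p.requires_grad:  # only frozen params are streamed
                p.data = p.data.to(device, non_blocking=True)

    for i, layer in enumerate(layers):
        def pre_hook(mod, _args, i=i):
            torch.cuda.current_stream().wait_stream(prefetch)
            move_frozen(mod, "cuda")               # current layer must be resident
            if i + 1 < len(layers):
                with torch.cuda.stream(prefetch):  # overlap next layer's H2D copy
                    move_frozen(layers[i + 1], "cuda")

        def post_hook(mod, _args, _out):
            move_frozen(mod, "cpu")                # release once the layer is done

        layer.register_forward_pre_hook(pre_hook)
        layer.register_forward_hook(post_hook)
```
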
This approach trades some CPU-GPU transfer overhead for significant GPU memory savings — the freed memory
is roughly equal to the size of all frozen parameters across all decoder layers, minus one layer's worth
that is kept on GPU at any given time.

**Requirements:**

- CUDA GPU (CPU-only training is not supported for this feature)
- Works with any HuggingFace model architecture that uses decoder layers (Llama, Mistral, Qwen, etc.)
- Best combined with LoRA/QLoRA where most parameters are frozen

611
docs/grpo.qmd
611
docs/grpo.qmd
@@ -1,611 +0,0 @@
---
title: "GRPO Training"
description: "Group Relative Policy Optimization — a reinforcement learning method for training language models with verifiable reward functions."
order: 8
---

## Overview

Group Relative Policy Optimization (GRPO) is a reinforcement learning method that improves language models by generating multiple completions per prompt, scoring them with reward functions, and using the relative ranking within each group to compute advantage estimates. Unlike DPO, which requires pre-collected preference pairs, GRPO generates its own training data online and can work with any programmatic reward signal (math correctness, format compliance, code execution results, etc.).

Use GRPO when you have a task with a verifiable reward signal and want the model to discover solution strategies on its own. Use DPO when you already have human preference data. Use SFT when you have gold-standard completions to imitate directly.

Axolotl's GRPO implementation builds on TRL and adds async generation, streaming scoring, importance sampling correction, replay buffers, and multi-GPU scaling via FSDP and DeepSpeed.

## Architecture

GRPO training uses a two-process architecture: a vLLM server for fast generation and a trainer process for scoring and gradient updates.

```
Terminal 1 (GPU 0)                    Terminal 2 (GPU 1)
┌──────────────────────┐              ┌──────────────────────────────────┐
│  vLLM Server         │              │  Trainer                         │
│                      │    HTTP      │                                  │
│  Serves base model   │◄────────────►│  Background thread:              │
│  + LoRA adapter      │  /generate   │    Send prompts to vLLM          │
│                      │  /set_lora   │    Pad & collate completions     │
│  Punica kernels for  │              │                                  │
│  LoRA inference      │              │  Main thread:                    │
│                      │              │    Score completions (rewards)   │
└──────────────────────┘              │    Compute policy log-probs      │
                                      │    Calculate advantages          │
                                      │    PPO-clip gradient update      │
                                      │    Sync LoRA weights to vLLM     │
                                      └──────────────────────────────────┘
```

**Data flow for each training step:**

1. The background thread sends prompts to vLLM, which generates `num_generations` completions per prompt.
2. The main thread scores completions using your reward functions.
3. Advantages are computed within each prompt group (group-relative normalization).
4. Policy log-probabilities are computed by running a forward pass on the training model.
5. The PPO-clip loss is computed and gradients are applied.
6. Periodically, LoRA adapter weights are synced back to vLLM so future generations reflect the updated policy.

With async prefetch enabled, step 1 for the *next* batch runs concurrently with steps 2-6 for the *current* batch.

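To make the group-relative normalization in step 3 concrete, here is a minimal sketch with assumed shapes (not the TRL implementation):

```python
import torch

def group_relative_advantages(rewards: torch.Tensor, num_generations: int) -> torch.Tensor:
    """rewards: flat tensor of shape (num_prompts * num_generations,)."""
    groups = rewards.view(-1, num_generations)
    adv = groups - groups.mean(dim=1, keepdim=True)       # center within each group
    adv = adv / (groups.std(dim=1, keepdim=True) + 1e-4)  # `scale_rewards: true`
    return adv.flatten()

# Two prompts, four generations each; all-equal rewards give zero advantage:
print(group_relative_advantages(torch.tensor([1., 0., 1., 0., 1., 1., 1., 1.]), 4))
```
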
## Quick Start

A GRPO training run requires three components: a YAML config, a reward module (Python file), and a running vLLM server.

### 1. Write a reward module

Create a file called `rewards.py` in your working directory:

```python
# rewards.py
import re


def accuracy_reward(completions, answer, **kwargs) -> list[float]:
    """Check if the completion contains the correct numerical answer."""
    rewards = []
    for completion, correct in zip(completions, answer):
        text = completion[0]["content"]
        # Extract the last number from the completion
        numbers = re.findall(r"-?\d+(?:\.\d+)?", text)
        predicted = numbers[-1] if numbers else ""
        rewards.append(1.0 if predicted == str(correct) else 0.0)
    return rewards


def format_reward(completions, **kwargs) -> list[float]:
    """Reward completions that use a structured thinking format."""
    rewards = []
    for completion in completions:
        text = completion[0]["content"]
        has_think = "<think>" in text and "</think>" in text
        has_answer = "<answer>" in text and "</answer>" in text
        rewards.append(1.0 if has_think and has_answer else 0.0)
    return rewards


def prompt_transform(cfg, *args, **kwargs):
    """Convert GSM8K dataset rows into chat prompts."""
    def transform_fn(example, tokenizer=None):
        label = example["answer"].split("####")[-1].strip().replace(",", "")
        return {
            "prompt": [
                {"role": "system", "content": "Solve the math problem. Show your reasoning in <think> tags and your final numerical answer in <answer> tags."},
                {"role": "user", "content": example["question"]},
            ],
            "answer": label,
        }
    return transform_fn, {"remove_columns": ["question"]}
```

### 2. Write the config

Create `config.yaml`:

```yaml
base_model: Qwen/Qwen2.5-1.5B-Instruct

rl: grpo
chat_template: tokenizer_default

vllm:
  host: 0.0.0.0
  port: 8000
  gpu_memory_utilization: 0.85
  dtype: auto
  max_model_len: 2048

adapter: lora
lora_r: 32
lora_alpha: 64
lora_target_linear: true

trl:
  use_vllm: true
  use_data_producer: true
  vllm_server_host: 0.0.0.0
  vllm_server_port: 8000
  vllm_server_timeout: 300
  vllm_lora_sync: true
  num_generations: 8
  max_completion_length: 512
  temperature: 0.7
  reward_funcs:
    - rewards.accuracy_reward
    - rewards.format_reward
  reward_weights:
    - 1.0
    - 0.5

datasets:
  - path: openai/gsm8k
    name: main
    type: rewards.prompt_transform
    split: train

skip_prepare_dataset: true
val_set_size: 0.0
sequence_len: 512
micro_batch_size: 2
gradient_accumulation_steps: 4
max_steps: 200
learning_rate: 5.0e-6
optimizer: adamw_torch_fused
lr_scheduler: cosine
warmup_steps: 10

bf16: true
attn_implementation: flash_attention_2
gradient_checkpointing: true

special_tokens:
  pad_token: "<|endoftext|>"

output_dir: ./grpo-output
logging_steps: 1
```

### 3. Start vLLM and train

```bash
# Terminal 1: Start vLLM server on GPU 0
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve config.yaml

# Wait 30-90 seconds for model loading and CUDA graph capture

# Terminal 2: Train on GPU 1
CUDA_VISIBLE_DEVICES=1 axolotl train config.yaml
```

:::{.callout-tip}
Use `tmux` or separate terminal sessions to manage the two processes. The vLLM server must remain running for the entire training duration.
:::

## Custom Reward Functions

### Function signature

TRL calls reward functions with this signature:

```python
def my_reward(completions, **kwargs) -> list[float]:
    ...
```

- `completions` is a list of single-element lists, where each element is a dict `{"role": "assistant", "content": "..."}`. So `completions[i][0]["content"]` gives you the text of the i-th completion.
- `**kwargs` contains all dataset columns that were *not* removed by the dataset transform. This is how you pass ground truth answers, metadata, or any other information to your reward function.
- Return a `list[float]` with the same length as `completions`. You may return `None` for individual elements to exclude them from aggregation.

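As a quick sanity check, here is how the earlier `accuracy_reward` from `rewards.py` gets invoked — toy data, with the surviving `answer` column arriving via keyword argument:

```python
completions = [
    [{"role": "assistant", "content": "So the total is 42."}],
    [{"role": "assistant", "content": "I get 41 apples."}],
]
print(accuracy_reward(completions, answer=["42", "42"]))  # [1.0, 0.0]
```
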
### Example: accuracy reward with answer extraction

```python
import re


def accuracy_reward(completions, answer, **kwargs) -> list[float]:
    rewards = []
    for completion, correct_answer in zip(completions, answer):
        text = completion[0]["content"]
        # Extract answer from <answer>...</answer> tags
        match = re.search(r"<answer>(.*?)</answer>", text, re.DOTALL)
        predicted = match.group(1).strip() if match else ""
        rewards.append(1.0 if predicted == str(correct_answer) else 0.0)
    return rewards
```

### Example: length penalty

```python
def length_penalty(completions, **kwargs) -> list[float]:
    """Penalize very short or very long completions."""
    rewards = []
    for completion in completions:
        length = len(completion[0]["content"])
        if length < 50:
            rewards.append(-0.5)
        elif length > 2000:
            rewards.append(-0.2)
        else:
            rewards.append(0.0)
    return rewards
```

### Multiple rewards and weighting

You can combine multiple reward functions with different weights:

```yaml
trl:
  reward_funcs:
    - rewards.accuracy_reward
    - rewards.format_reward
    - rewards.length_penalty
  reward_weights:
    - 1.0  # accuracy is most important
    - 0.5  # format compliance
    - 0.1  # mild length preference
```

Rewards are combined by the `multi_objective_aggregation` strategy:

- `sum_then_normalize` (default): weights and sums all rewards first, then normalizes across the group.
- `normalize_then_sum` (GDPO): normalizes each reward independently, then sums. This prevents one reward from dominating and is recommended when using multiple reward functions with different scales.

```yaml
trl:
  multi_objective_aggregation: normalize_then_sum
```

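To make the difference concrete, a sketch of the two strategies for a single prompt group (assumed shapes; not TRL's exact code):

```python
import torch

def aggregate_group(rewards: torch.Tensor, weights: list[float],
                    mode: str = "sum_then_normalize", eps: float = 1e-4) -> torch.Tensor:
    """rewards: (num_funcs, num_generations) for one prompt group."""
    w = torch.tensor(weights, dtype=rewards.dtype).unsqueeze(1)

    def norm(x: torch.Tensor) -> torch.Tensor:  # zero-mean / unit-std in the group
        return (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=True) + eps)

    if mode == "sum_then_normalize":            # default GRPO behavior
        return norm((w * rewards).sum(dim=0))
    return (w * norm(rewards)).sum(dim=0)       # normalize_then_sum (GDPO)

# A large-scale reward can no longer drown out a small one under GDPO:
r = torch.tensor([[100., 0., 100., 0.],        # accuracy on a 0-100 scale
                  [1., 1., 0., 0.]])           # format on a 0-1 scale
print(aggregate_group(r, [1.0, 0.5], "sum_then_normalize"))
print(aggregate_group(r, [1.0, 0.5], "normalize_then_sum"))
```
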
### Dataset transforms

The dataset transform converts raw HuggingFace dataset rows into chat-format prompts:

```python
def prompt_transform(cfg, *args, **kwargs):
    def map_fn(example, tokenizer=None):
        return {
            "prompt": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": example["question"]},
            ],
            # Keep 'answer' column for the reward function
            "answer": example["answer"],
        }
    # Remove columns consumed by the transform; keep columns needed by rewards
    return map_fn, {"remove_columns": ["question"]}
```

The transform returns a tuple of `(map_function, kwargs_dict)`. The `remove_columns` in the kwargs dict removes columns that are no longer needed. Columns that your reward functions reference via `**kwargs` (like `answer`) must *not* be removed.

:::{.callout-warning}
The reward module must be importable from the directory where you run `axolotl train`. If your reward file is `rewards.py`, the import path is `rewards.accuracy_reward`. If it is inside a package `my_rewards/scoring.py`, use `my_rewards.scoring.accuracy_reward`.
:::

### Reward models (neural network rewards)

Instead of a Python function, you can pass a HuggingFace model path as a reward function. TRL will load it as a reward model and use its scalar output as the reward:

```yaml
trl:
  reward_funcs:
    - OpenAssistant/reward-model-deberta-v3-large-v2
    - rewards.format_reward
  reward_weights:
    - 1.0
    - 0.3
```

### Using math_verify

The `math_verify` library provides robust mathematical answer verification but uses `signal.alarm()` internally, which only works in the main thread. If you use `math_verify` in a reward function, set `reward_num_workers` to use subprocess workers:

```yaml
trl:
  reward_num_workers: 4
```

Each worker runs in its own subprocess with its own main thread, so `signal.alarm()` works correctly.

## vLLM Setup

GRPO requires a running vLLM server for generation. For a complete guide on server modes, LoRA sync, weight synchronization, and restart procedures, see [vLLM Serving](vllm_serving.qmd).

The minimal setup:

```yaml
vllm:
  host: 0.0.0.0
  port: 8000
  gpu_memory_utilization: 0.85

trl:
  use_vllm: true
  vllm_lora_sync: true    # Recommended with LoRA — faster sync, no NCCL contention
  vllm_sync_interval: 5   # Sync weights every 5 steps
```

```bash
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve config.yaml  # GPU 0: vLLM
CUDA_VISIBLE_DEVICES=1 axolotl train config.yaml       # GPU 1: training
```

:::{.callout-warning}
vLLM must be restarted between experiments — stale weight syncs corrupt server state. See [Restart Requirements](vllm_serving.qmd#sec-restart).
:::

## Async Training Features

Async GRPO overlaps generation and training to reduce wall-clock time. While the model trains on the current batch, the next batch is already being generated by vLLM.

### Enabling async prefetch

```yaml
trl:
  use_data_producer: true
  async_prefetch: true
  prefetch_depth: 1
  vllm_sync_interval: 2
```

- `use_data_producer: true` enables the data producer protocol (required for all async features).
- `async_prefetch: true` runs generation in a background thread.
- `prefetch_depth` controls how many batches to prefetch ahead (1 is usually sufficient).
- `vllm_sync_interval` controls how often LoRA weights are synced to vLLM (every N optimizer steps). Lower values mean fresher generations but more sync overhead.

:::{.callout-tip}
Because the background thread generates with slightly stale model weights, async mode benefits from importance sampling correction (see next section). Enable `vllm_importance_sampling_correction: true` when using `async_prefetch: true`.
:::

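Conceptually, the prefetch pipeline is a bounded producer/consumer queue. A toy model of the idea — illustrative only, not the actual data producer protocol:

```python
import queue
import threading
import time

PREFETCH_DEPTH = 1
batches: queue.Queue = queue.Queue(maxsize=PREFETCH_DEPTH)

def generate(step: int) -> str:      # stand-in for the vLLM round trip
    time.sleep(0.5)
    return f"batch-{step}"

def producer(num_steps: int) -> None:
    for step in range(num_steps):
        batches.put(generate(step))  # blocks once the queue is full

threading.Thread(target=producer, args=(4,), daemon=True).start()
for _ in range(4):
    batch = batches.get()            # usually already waiting
    time.sleep(0.5)                  # stand-in for scoring + the gradient step
    print("trained on", batch)
```
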
### Streaming partial batch

Instead of scoring the entire batch at once, streaming mode scores one prompt group at a time. This reduces peak memory during scoring and enables finer-grained zero-advantage skipping.

```yaml
trl:
  streaming_partial_batch: true
  streaming_min_groups: 1
```

`streaming_min_groups` controls the minimum number of prompt groups scored per chunk. Setting it to 1 gives maximum granularity.

### Zero-advantage batch skipping

When all advantages in a micro-batch are zero (every completion in the group got the same reward), there is no learning signal. This feature skips the forward/backward pass entirely for such micro-batches.

```yaml
trl:
  skip_zero_advantage_batches: true  # default
```

This is enabled by default and logged as `skipped_zero_adv_batches` in training metrics. It is a safety net, not a major optimization -- it only saves significant time when the model cannot solve any prompts in the batch.

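The check itself is trivial — conceptually (illustrative, not the trainer's exact code):

```python
import torch

def should_skip(advantages: torch.Tensor) -> bool:
    """No gradient signal when every group-relative advantage is exactly zero."""
    return bool(torch.all(advantages == 0))

print(should_skip(torch.zeros(8)))                  # True  -> skip fwd/bwd
print(should_skip(torch.tensor([0.5, -0.5, 0.0])))  # False -> train normally
```
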
### Replay buffer

The replay buffer caches rollout groups that had learning signal (non-zero reward variance) and replaces zero-signal groups in later batches. This improves data utilization when many prompts yield no reward variance.

```yaml
trl:
  replay_buffer_size: 100
  replay_recompute_logps: true
```

:::{.callout-warning}
When `replay_recompute_logps: false`, replayed data uses stale log-probabilities which creates an IS mismatch. Keep the default `true` unless you have a specific reason to disable it.
:::

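A minimal sketch of the caching behavior — the eviction order and the log-prob recompute step are assumed simplifications:

```python
import torch
from collections import deque

class ReplayBuffer:
    def __init__(self, maxlen: int = 100):
        self.buf: deque = deque(maxlen=maxlen)

    def offer(self, group: dict) -> dict:
        if group["rewards"].std() > 0:   # has learning signal: cache it
            self.buf.append(group)
            return group
        if self.buf:                     # zero-signal: replay a cached group
            return self.buf.popleft()    # (log-probs recomputed before reuse)
        return group

buf = ReplayBuffer()
buf.offer({"rewards": torch.tensor([1.0, 0.0, 1.0, 0.0])})
print(buf.offer({"rewards": torch.tensor([0.0, 0.0, 0.0, 0.0])})["rewards"])
```
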
### Deferred re-rolling

Prompts where the model gets zero reward for all generations are buffered and re-injected into later batches, when the model may have improved enough to produce useful completions.

```yaml
trl:
  reroll_start_fraction: 0.5  # Start re-rolling after 50% of training
  reroll_max_groups: 1        # Max groups to replace per batch
```

Set `reroll_start_fraction: 1.0` to disable. This is most useful for tasks where the model starts weak but steadily improves.

### Parallel reward workers

Reward functions that use `signal.alarm()` (like `math_verify`) only work in the main thread. Parallel reward workers run each function in its own subprocess:

```yaml
trl:
  reward_num_workers: 4
```

Work is sharded across workers by prompt group. For simple reward functions, a single worker is usually sufficient -- the overhead of IPC can exceed the computation time.

## Importance Sampling and Off-Policy Correction

When using async prefetch, completions are generated from a slightly older policy. IS correction adjusts the gradient to account for this mismatch.

```yaml
trl:
  vllm_importance_sampling_correction: true
  importance_sampling_level: token  # 'token' recommended (especially with Liger kernel)
  off_policy_mask_threshold: 0.5    # KL threshold — masks sequences that are too off-policy
```

Use `token` level IS. Sequence-level has numerical issues with Liger's chunked computation. The `off_policy_mask_threshold` (OPSM) is a safety net that drops sequences where KL divergence exceeds the threshold — 0.5 is a reasonable starting point.

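In spirit, the token-level correction looks like this — a rough sketch with assumed shapes; the real implementation also handles masking vs. truncation modes, capping, and Liger chunking:

```python
import torch

def token_is_weights(train_logps: torch.Tensor, sampler_logps: torch.Tensor,
                     cap: float = 2.0, opsm_threshold: float = 0.5) -> torch.Tensor:
    """Per-token log-probs under the current policy vs. the stale vLLM policy,
    both of shape (batch, seq_len)."""
    ratio = torch.exp(train_logps - sampler_logps)  # pi_theta / pi_behavior
    ratio = ratio.clamp(max=cap)                    # truncate-style capping
    # OPSM: drop whole sequences that drifted too far off-policy
    seq_kl = (sampler_logps - train_logps).mean(dim=-1)
    keep = (seq_kl.abs() < opsm_threshold).unsqueeze(-1)
    return ratio * keep
```
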
For detailed coverage of IS modes (`token_mask`, `token_truncate`, etc.), capping, and bias-corrected KL, see [vLLM Serving — IS Correction](vllm_serving.qmd#sec-weight-sync).

## Scaling

### FP8 training

FP8 quantization halves model VRAM usage with minimal impact on training quality. It does not significantly speed up computation for small models but allows larger models to fit in memory.

```yaml
fp8: true
torch_compile: true
```

:::{.callout-warning}
FP8 requires patching for zero-padding edge cases. The `act_quant_kernel` can produce NaN when input is all zeros (padding positions). If you see NaN in grad norms, check whether your padding token embedding is non-zero.
:::

### FSDP (Fully Sharded Data Parallel)

FSDP distributes model parameters across multiple GPUs for training while vLLM runs on a separate GPU:

```yaml
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_transformer_layer_cls_to_wrap: Qwen2DecoderLayer
gradient_checkpointing_kwargs:
  use_reentrant: false
```

Launch with:

```bash
# GPU 0: vLLM
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve config.yaml

# GPUs 0,1: Training (FSDP will use both visible GPUs)
CUDA_VISIBLE_DEVICES=0,1 axolotl train config.yaml
```

:::{.callout-warning}
`async_prefetch: true` can deadlock with FSDP because background threads perform unsynchronized FSDP collectives across ranks. With multi-GPU FSDP, only rank 0 generates in the background thread and results are broadcast to all ranks. If you still see hangs, set `async_prefetch: false`.
:::

### DeepSpeed ZeRO-3

```yaml
deepspeed: deepspeed_configs/zero3_bf16.json
gradient_checkpointing_kwargs:
  use_reentrant: true  # Required -- non-reentrant causes CheckpointError with ZeRO-3
```

:::{.callout-note}
DeepSpeed ZeRO-3 requires `use_reentrant: true` for gradient checkpointing. This is the opposite of the FSDP recommendation. Non-reentrant checkpointing causes tensor metadata mismatches during recomputation with ZeRO-3's parameter partitioning.
:::

### Multi-GPU considerations

| Concern | Recommendation |
|---------|---------------|
| vLLM GPU allocation | Dedicate one or more GPUs to vLLM; do not share with trainer GPUs |
| Weight sync contention | Use `vllm_lora_sync: true` to avoid NCCL contention between training and vLLM |
| FSDP + async | Use `async_prefetch: false` or rely on rank-0-only background generation |
| DeepSpeed + gradient checkpoint | Must use `use_reentrant: true` |
| OOM during scoring | Reduce `micro_batch_size` or `num_generations`. The logits tensor scales with `batch_size * vocab_size` |

## Monitoring and Debugging

For detailed metric ranges, failure diagnosis, and OOM debugging, see [Training Stability & Debugging](training_stability.qmd).

Quick health checks during GRPO training:

- `rewards/*/mean` should be > 0.15 within 20 steps — if it stays at 0, test your reward function standalone
- `reward_std` should be > 0 on most steps — all-zero means no learning signal
- `entropy` in 0.05-0.5 — below 0.01 suggests mode collapse
- `grad_norm` in 0.001-1.0 — > 10 is unstable, 0.0 is expected when zero-advantage skip fires

:::{.callout-tip}
Pipe training output to a log file: `axolotl train config.yaml 2>&1 | tee /tmp/training.log`
:::

## Configuration Reference

All GRPO-specific options live under the `trl:` key in your config. Standard training options (`learning_rate`, `micro_batch_size`, etc.) are set at the top level as usual.

### Core GRPO

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `use_vllm` | bool | `false` | Enable vLLM for generation |
| `vllm_mode` | `"server"` or `"colocate"` | `null` | vLLM deployment mode |
| `vllm_server_host` | str | `"0.0.0.0"` | vLLM server hostname |
| `vllm_server_port` | int | `8000` | vLLM server port |
| `vllm_server_timeout` | int | `null` | Timeout (seconds) for vLLM responses |
| `num_generations` | int | `null` | Completions generated per prompt |
| `generation_batch_size` | int | `null` | Number of unique prompts per generation step |
| `max_completion_length` | int | `null` | Maximum tokens per completion |
| `beta` | float | `null` | KL penalty coefficient |
| `num_iterations` | int | `null` | Iterations per batch (mu in the GRPO paper) |
| `epsilon` | float | `null` | PPO clipping lower bound |
| `epsilon_high` | float | `null` | PPO clipping upper bound |
| `loss_type` | str | `null` | Loss formulation: `grpo`, `bnpo`, or `dr_grpo` |
| `scale_rewards` | bool | `true` | Normalize rewards by standard deviation |
| `mask_truncated_completions` | bool | `false` | Exclude truncated completions from loss |

### Reward functions

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `reward_funcs` | list[str] | `null` | Import paths to reward functions or HF model IDs |
| `reward_weights` | list[float] | `null` | Relative weights for each reward function |
| `multi_objective_aggregation` | str | `null` | `"sum_then_normalize"` (GRPO) or `"normalize_then_sum"` (GDPO) |
| `rollout_func` | str | `null` | Import path to custom rollout function for OpenEnv-style tasks |

### Generation parameters

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `temperature` | float | `null` | Sampling temperature |
| `top_p` | float | `null` | Nucleus sampling probability |
| `top_k` | int | `null` | Top-k sampling |
| `min_p` | float | `null` | Minimum probability threshold |
| `repetition_penalty` | float | `null` | Penalty for repeated tokens |
| `generation_kwargs` | dict | `null` | Additional vLLM SamplingParams (e.g., `stop_token_ids`) |
| `chat_template_kwargs` | dict | `null` | Chat template kwargs (e.g., `{enable_thinking: false}`) |
| `vllm_guided_decoding_regex` | str | `null` | Regex constraint for guided decoding |

### Async pipeline

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `use_data_producer` | bool | `false` | Enable data producer protocol (required for async features) |
| `async_prefetch` | bool | `false` | Generate next batch in background thread |
| `prefetch_depth` | int | `null` | Number of batches to prefetch ahead |
| `vllm_sync_interval` | int | `null` | Sync LoRA weights to vLLM every N steps |
| `vllm_lora_sync` | bool | `false` | Use filesystem LoRA sync instead of NCCL merge |
| `streaming_partial_batch` | bool | `null` | Score prompt groups incrementally |
| `streaming_min_groups` | int | `null` | Minimum groups per streaming chunk |
| `skip_zero_advantage_batches` | bool | `true` | Skip micro-batches with zero learning signal |
| `reward_num_workers` | int | `1` | Subprocess workers for reward computation |
| `vllm_enable_sleep_mode` | bool | `null` | Offload vLLM weights when idle (colocate mode) |

### Importance sampling

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `vllm_importance_sampling_correction` | bool | `null` | Enable IS correction for async distribution shift |
| `importance_sampling_level` | `"token"` or `"sequence"` | `null` | Granularity of IS ratios. Use `token` with Liger |
| `vllm_importance_sampling_mode` | str | `null` | `token_mask`, `token_truncate`, `sequence_mask`, or `sequence_truncate` |
| `vllm_importance_sampling_cap` | float | `null` | Cap C for IS ratio clipping/masking |
| `off_policy_mask_threshold` | float | `null` | KL threshold for off-policy sequence masking (OPSM) |
| `use_bias_correction_kl` | bool | `null` | Apply IS correction to KL divergence term |

### Replay and re-roll

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `replay_buffer_size` | int | `0` | Max cached high-signal groups. 0 = disabled |
| `replay_recompute_logps` | bool | `true` | Recompute log-probs for replayed data with current model |
| `reroll_start_fraction` | float | `1.0` | Start re-rolling failed prompts after this fraction of training. 1.0 = disabled |
| `reroll_max_groups` | int | `1` | Max prompt groups to replace with re-rolls per batch |

### Reference model

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `sync_ref_model` | bool | `false` | Periodically sync reference model with training model |
| `ref_model_mixup_alpha` | float | `0.9` | EMA coefficient for reference model sync |
| `ref_model_sync_steps` | int | `64` | Sync reference model every N steps |

### Logging

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `log_completions` | bool | `false` | Log sample completions to W&B |
| `num_completions_to_print` | int | `null` | Number of completions to print per step |
| `use_liger_loss` | bool | `null` | Use Liger fused kernel for GRPO loss (reduces VRAM) |

@@ -15,30 +15,64 @@ This guide covers all the ways you can install and set up Axolotl for your envir

- NVIDIA GPU (Ampere architecture or newer for `bf16` and Flash Attention) or AMD GPU
- Python ≥3.11
- PyTorch ≥2.9.0
- PyTorch ≥2.6.0

## Installation {#sec-installation}
## Installation Methods {#sec-installation-methods}

::: {.callout-important}
Please make sure to have PyTorch installed before installing Axolotl in your local environment.

Follow the instructions at: [https://pytorch.org/get-started/locally/](https://pytorch.org/get-started/locally/)
:::

::: {.callout-important}
For Blackwell GPUs, please use PyTorch 2.9.1 and CUDA 12.8.
:::

### Quick Install {#sec-uv}
### PyPI Installation (Recommended) {#sec-pypi}

Axolotl uses [uv](https://docs.astral.sh/uv/) as its package manager. uv is a fast, reliable Python package installer and resolver built in Rust.
```{.bash}
pip3 install -U packaging setuptools wheel ninja
pip3 install --no-build-isolation axolotl[flash-attn,deepspeed]
```

Install uv if not already installed:
We use `--no-build-isolation` so that an already-installed PyTorch is
detected and not clobbered, and so that the correct versions of
dependencies specific to that PyTorch version or other installed
co-dependencies are selected.

### uv Installation {#sec-uv}

uv is a fast, reliable Python package installer and resolver built in Rust. It offers significant performance improvements over pip and provides better dependency resolution, making it an excellent choice for complex environments.

Install uv if not already installed:
```{.bash}
curl -LsSf https://astral.sh/uv/install.sh | sh
source $HOME/.local/bin/env
```

Choose your CUDA version (e.g. `cu128`, `cu130`), create a venv, and install:
Choose your CUDA version to use with PyTorch; e.g. `cu124`, `cu126`, `cu128`,
then create the venv and activate:
```{.bash}
export UV_TORCH_BACKEND=cu128 # or cu130
export UV_TORCH_BACKEND=cu126
uv venv --no-project --relocatable
source .venv/bin/activate
uv pip install --no-build-isolation axolotl[flash-attn,deepspeed]
```

Install PyTorch
- PyTorch 2.6.0 recommended
```{.bash}
uv pip install packaging setuptools wheel
uv pip install torch==2.6.0
uv pip install awscli pydantic
```

Install axolotl from PyPI
```{.bash}
uv pip install --no-build-isolation axolotl[deepspeed,flash-attn]

# optionally install with vLLM if you're using torch==2.6.0 and want to train w/ GRPO
uv pip install --no-build-isolation axolotl[deepspeed,flash-attn,vllm]
```

### Edge/Development Build {#sec-edge-build}

@@ -48,17 +82,14 @@ For the latest features between releases:
```{.bash}
git clone https://github.com/axolotl-ai-cloud/axolotl.git
cd axolotl
export UV_TORCH_BACKEND=cu128 # or cu130
uv sync --extra flash-attn --extra deepspeed
source .venv/bin/activate
pip3 install -U packaging setuptools wheel ninja
pip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'
```

`uv sync` creates a `.venv`, installs exact pinned versions from `uv.lock`, and sets up an editable install automatically.

### Docker {#sec-docker}

```{.bash}
docker run --gpus '"all"' --rm -it --ipc=host axolotlai/axolotl-uv:main-latest
docker run --gpus '"all"' --rm -it axolotlai/axolotl:main-latest
```

For development with Docker:
@@ -75,12 +106,12 @@ docker run --privileged --gpus '"all"' --shm-size 10g --rm -it \
  --ulimit memlock=-1 --ulimit stack=67108864 \
  --mount type=bind,src="${PWD}",target=/workspace/axolotl \
  -v ${HOME}/.cache/huggingface:/root/.cache/huggingface \
  axolotlai/axolotl-uv:main-latest
  axolotlai/axolotl:main-latest
```
:::

::: {.callout-important}
For Blackwell GPUs, please use `axolotlai/axolotl-uv:main-py3.11-cu128-2.9.1` or the cloud variant `axolotlai/axolotl-cloud-uv:main-py3.11-cu128-2.9.1`.
For Blackwell GPUs, please use `axolotlai/axolotl:main-py3.11-cu128-2.9.1` or the cloud variant `axolotlai/axolotl-cloud:main-py3.11-cu128-2.9.1`.
:::

Please refer to the [Docker documentation](docker.qmd) for more information on the different Docker images that are available.
@@ -91,7 +122,7 @@ Please refer to the [Docker documentation](docker.qmd) for more information on t

For providers supporting Docker:

- Use `axolotlai/axolotl-cloud-uv:main-latest`
- Use `axolotlai/axolotl-cloud:main-latest`
- Available on:
  - [RunPod](https://runpod.io/gsc?template=v2ickqhz9s&ref=6i7fkpdz)
  - [Vast.ai](https://cloud.vast.ai?ref_id=62897&template_id=bdd4a49fa8bce926defc99471864cace&utm_source=axolotl&utm_medium=partner&utm_campaign=template_launch_july2025&utm_content=docs_link)
@@ -110,7 +141,7 @@ For providers supporting Docker:
### macOS {#sec-macos}

```{.bash}
uv pip install --no-build-isolation -e '.'
pip3 install --no-build-isolation -e '.'
```

See @sec-troubleshooting for Mac-specific issues.
@@ -121,44 +152,21 @@ See @sec-troubleshooting for Mac-specific issues.
We recommend using WSL2 (Windows Subsystem for Linux) or Docker.
:::

## Migrating from pip to uv {#sec-migrating}
## Environment Managers {#sec-env-managers}

If you have an existing pip-based Axolotl installation, you can migrate to uv:
### Conda/Pip venv {#sec-conda}

```{.bash}
# Install uv
curl -LsSf https://astral.sh/uv/install.sh | sh
source $HOME/.local/bin/env

# Create a fresh venv (recommended for a clean start)
export UV_TORCH_BACKEND=cu128 # or cu130
uv venv --no-project --relocatable
source .venv/bin/activate

# Reinstall axolotl
uv pip install --no-build-isolation axolotl[flash-attn,deepspeed]
```

## Using pip (Alternative) {#sec-pip}

If you are unable to install uv, you can still use pip directly.

::: {.callout-important}
Please make sure to have PyTorch installed before installing Axolotl with pip.

Follow the instructions at: [https://pytorch.org/get-started/locally/](https://pytorch.org/get-started/locally/)
:::

```{.bash}
pip3 install -U packaging setuptools wheel ninja
pip3 install --no-build-isolation axolotl[flash-attn,deepspeed]
```

For editable/development installs:
```{.bash}
pip3 install -U packaging setuptools wheel ninja
pip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'
```
1. Install Python ≥3.11
2. Install PyTorch: https://pytorch.org/get-started/locally/
3. Install Axolotl:
```{.bash}
pip3 install -U packaging setuptools wheel ninja
pip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'
```
4. (Optional) Login to Hugging Face:
```{.bash}
hf auth login
```

## Troubleshooting {#sec-troubleshooting}

@@ -8,7 +8,6 @@ format:

## Supported Models

- [Gemma-4](#sec-gemma-4) *(NEW)*
- [Mllama](#sec-mllama)
- [Llama4](#sec-llama4)
- [Pixtral](#sec-pixtral)
@@ -21,7 +20,6 @@ format:
- [Gemma-3n](#sec-gemma-3n)
- [Qwen2-VL](#sec-qwen2-vl)
- [Qwen2.5-VL](#sec-qwen25-vl)
- [Qwen3.5](#sec-qwen3-5)
- [GLM-4.6V](#sec-glm-4-6v)
- [SmolVLM2](#sec-smolvlm2)
- [LFM2-VL](#sec-lfm2-vl)
@@ -139,40 +137,6 @@ base_model: mistralai/Voxtral-Mini-3B-2507
processor_type: VoxtralProcessor
```

### Gemma-4 {#sec-gemma-4}

All Gemma 4 variants (E2B, E4B, 26B-A4B, 31B) load as multimodal models even for text-only training.

```yaml
base_model: google/gemma-4-E2B-it # or E4B-it, 26B-A4B, 31B

chat_template: gemma4
freeze_mm_modules: true # freeze vision/audio encoders for text-only or vision LoRA

# For the 26B-A4B MoE model, enable ScatterMoE and expert LoRA:
plugins:
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
  - axolotl.integrations.kernels.KernelsPlugin
use_kernels: true
use_scattermoe: true
experts_implementation: scattermoe

lora_target_modules: 'model.language_model.layers.[\d]+.(_checkpoint_wrapped_module.)?(mlp|self_attn).(up|down|gate|q|k|v|o)_proj'

# MoE expert LoRA (3D tensors, not nn.Linear) — only for 26B-A4B:
lora_target_parameters:
  - experts.gate_up_proj
  - experts.down_proj
```

::: {.callout-warning}
Gemma 4 VLM training starts with high loss (~8-15). This is expected — see the [training stability guide](training_stability.qmd) for details.
:::

::: {.callout-tip}
For DDP training, axolotl auto-detects Gemma4 and sets `use_reentrant=False` and `ddp_find_unused_parameters=True`. However, when `activation_offloading: true`, `ddp_find_unused_parameters` is skipped (checkpoint wrappers conflict with it); use `freeze_mm_modules: true` instead to handle unused vision/audio params. For FSDP2, use `fsdp_transformer_layer_cls_to_wrap: Gemma4TextDecoderLayer`.
:::

### Gemma-3 {#sec-gemma-3}

::: {.callout-tip}
@@ -227,14 +191,6 @@ base_model: Qwen/Qwen3-VL-4B-Instruct
chat_template: qwen2_vl # same as qwen2-vl
```

### Qwen3.5 {#sec-qwen3-5}

```yaml
base_model: Qwen/Qwen3.5-9B

chat_template: qwen3_5
```

### GLM-4.6V {#sec-glm-4-6v}

Both GLM-4.6V (106B MoE) and GLM-4.6V-Flash (9B) are supported.

@@ -22,12 +22,12 @@ Improves GPU utilization by combining multiple short sequences into a single pac

Using an optimized attention implementation is critical for training speed.

- **[Flash Attention 2](https://github.com/Dao-AILab/flash-attention)**: `attn_implementation: flash_attention_2`. **(Recommended)** The industry standard for fast attention on modern GPUs. Requires Ampere or higher. For AMD, check [AMD Support](https://github.com/Dao-AILab/flash-attention?tab=readme-ov-file#amd-rocm-support).
- **[Flex Attention](https://pytorch.org/blog/flexattention/)**: `attn_implementation: flex_attention`.
- **[SDP Attention](https://docs.pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)**: `attn_implementation: sdpa`. PyTorch's native implementation.
- **[Xformers](https://github.com/facebookresearch/xformers)**: `attn_implementation: xformers`. Works with FP16.
- **[Flash Attention 2](https://github.com/Dao-AILab/flash-attention)**: `flash_attention: true`. **(Recommended)** The industry standard for fast attention on modern GPUs. Requires Ampere or higher. For AMD, check [AMD Support](https://github.com/Dao-AILab/flash-attention?tab=readme-ov-file#amd-rocm-support).
- **[Flex Attention](https://pytorch.org/blog/flexattention/)**: `flex_attention: true`.
- **[SDP Attention](https://docs.pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)**: `sdp_attention: true`. PyTorch's native implementation.
- **[Xformers](https://github.com/facebookresearch/xformers)**: `xformers_attention: true`. Works with FP16.

See [Attention](attention.qmd) for the full list of backends and the canonical values.
*Note: You should only enable one attention backend.*

### LoRA Optimizations

@@ -54,13 +54,6 @@ These techniques save VRAM by changing how activations are handled.
- Activation Offloading: moves activations to CPU RAM or disk, trading I/O overhead for VRAM.
- Learn more: [Gradient Checkpointing and Offloading Docs](gradient_checkpointing.qmd)

### Layer Offloading

Offloads frozen (non-trainable) decoder layer parameters to CPU and streams them back to GPU one layer at a time during forward/backward passes using CUDA stream prefetching. Especially effective for LoRA/QLoRA where most parameters are frozen.

- **Config:** `layer_offloading: true`
- **Learn more:** [Layer Offloading Docs](gradient_checkpointing.qmd#enabling-layer-offloading)

### Cut Cross Entropy (CCE)

Reduces VRAM usage by using an optimized cross-entropy loss calculation.

313
docs/rlhf.qmd
313
docs/rlhf.qmd
@@ -16,12 +16,8 @@ feedback. Various methods include, but not limited to:
- [Identity Preference Optimization (IPO)](#ipo)
- [Kahneman-Tversky Optimization (KTO)](#kto)
- [Odds Ratio Preference Optimization (ORPO)](#orpo)
- [Group Relative Policy Optimization (GRPO)](#grpo) — see also the [GRPO deep dive](grpo.qmd) for async features, custom rewards, and scaling
- [Group Relative Policy Optimization (GRPO)](#grpo)
- [Group Reward-Decoupled Policy Optimization (GDPO)](#gdpo)
- [Energy-Based Fine-Tuning (EBFT)](#ebft) — see also the [EBFT guide](ebft.qmd) for detailed mode comparisons and configuration
- [NeMo Gym Integration](#nemo-gym-integration)

For help choosing between these methods, see [Choosing a Fine-Tuning Method](choosing_method.qmd).

## RLHF using Axolotl
@@ -320,10 +316,8 @@ The input format is a simple JSON input with customizable fields based on the ab
As IPO is just DPO with a different loss function, all supported dataset formats for [DPO](#dpo) are also supported for IPO.

```yaml
rl: dpo
dpo_loss_type: ["ipo"]
rl: ipo
```
*Note:* Passing `rl: ipo` directly is still supported, but will soon be deprecated.

### ORPO

@@ -519,7 +513,7 @@ The input format is a simple JSON input with customizable fields based on the ab
### GRPO

::: {.callout-tip}
Check out our [GRPO cookbook](https://github.com/axolotl-ai-cloud/grpo_code). For a comprehensive guide covering async training, custom rewards, importance sampling, and scaling, see the [GRPO deep dive](grpo.qmd).
Check out our [GRPO cookbook](https://github.com/axolotl-ai-cloud/grpo_code).
:::

In the latest GRPO implementation, `vLLM` is used to significantly speed up trajectory generation during training. In this example, we're using 4 GPUs - 2 for training, and 2 for vLLM:
@@ -927,7 +921,7 @@ gradient_checkpointing_kwargs:
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve config.yaml

# Terminal 2: Train on GPUs 0,1
CUDA_VISIBLE_DEVICES=0,1 axolotl train config.yaml
CUDA_VISIBLE_DEVICES=0,1 accelerate launch --num_processes 2 -m axolotl.cli.train config.yaml
```

::: {.callout-important}
@@ -1043,305 +1037,6 @@ simpo_gamma: 0.5 # default in CPOTrainer

This method uses the same dataset format as [DPO](#dpo).

### EBFT {#ebft}

::: {.callout-tip}
For a detailed guide on EBFT modes, feature extraction, and configuration, see the [EBFT guide](ebft.qmd).
:::

EBFT (Energy-Based Fine-Tuning) fine-tunes language models by optimizing a **feature-matching loss** rather than relying on external reward functions. A frozen copy of the model extracts embeddings from both generated and ground-truth completions, and the generator is updated via REINFORCE to match the ground-truth feature moments (a sketch of the alignment computation follows the mode list below).

Paper: ["Matching Features, Not Tokens: Energy-Based Fine-Tuning of Language Models"](https://arxiv.org/abs/2603.12248) (Jelassi et al., 2026)

**Key advantages:**

- No reward model or verifier required — works on any (prompt, completion) data
- Applicable to non-verifiable tasks (code, translation, creative writing)
- Operates on model rollouts (not teacher forcing), reducing distribution shift

EBFT supports two modes:

- **Structured mode**: For QA/instruction data with prompt + completion pairs. Uses vLLM for generation (like GRPO).
- **Strided mode**: For unstructured text without prompt/completion splits. Uses strided block-parallel generation with flex_attention — no vLLM needed.

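A minimal sketch of the feature-alignment reward — assumed shapes, pooling, and function names; not axolotl's actual implementation:

```python
import torch
import torch.nn.functional as F

def alignment_reward(feature_model, gen_ids, ref_ids, layer_fracs=(0.25, 0.5, 0.75)):
    """Embed generated and ground-truth sequences with the frozen feature
    network and average cosine similarity across fractional layer depths."""
    with torch.no_grad():
        gen_h = feature_model(gen_ids, output_hidden_states=True).hidden_states
        ref_h = feature_model(ref_ids, output_hidden_states=True).hidden_states
    n_layers = len(gen_h) - 1
    reward = torch.zeros(gen_ids.shape[0], device=gen_ids.device)
    for frac in layer_fracs:
        layer = int(frac * n_layers)
        g = gen_h[layer][:, -1, :]  # 'last_token' pooling
        r = ref_h[layer][:, -1, :]
        reward += F.cosine_similarity(g, r, dim=-1)
    return reward / len(layer_fracs)  # per-sample alignment in [-1, 1]
```
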
#### Structured Mode

```yaml
base_model: Qwen/Qwen3-4B

rl: ebft

ebft:
  feature_layers: [0.25, 0.5, 0.75]  # Extract features at 25%, 50%, 75% depth
  embed_method: last_token
  use_whitening: false
  alignment_coef: 1.0  # Cosine similarity reward weight
  diversity_coef: 1.0  # Pairwise dot product penalty
  ce_coef: 0.0         # Cross-entropy on GT tokens (0 = off)

trl:
  num_generations: 4
  max_completion_length: 256
  temperature: 0.7
  use_vllm: true
  vllm_server_host: 0.0.0.0
  vllm_server_port: 8000
  vllm_lora_sync: true  # LoRA adapter sync (recommended)
  vllm_sync_interval: 3
  use_data_producer: true
  async_prefetch: true  # Set false for sync mode
  scale_rewards: true
  loss_type: grpo
  epsilon: 0.2

vllm:
  gpu_memory_utilization: 0.5
  max_model_len: 2048

datasets:
  - path: nvidia/OpenCodeInstruct
    type: ebft_opencode.transform
    split: train[:500]

adapter: lora
lora_r: 16
lora_alpha: 32
lora_target_linear: true
```

```bash
# Terminal 1: Start vLLM
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve config.yaml

# Terminal 2: Train
CUDA_VISIBLE_DEVICES=1 axolotl train config.yaml
```

#### Strided Mode

For unstructured text (raw code, prose). No vLLM needed — runs on a single GPU.

```yaml
base_model: meta-llama/Llama-3.2-1B

rl: ebft

ebft:
  mode: strided
  stride: 8
  context_length: 8
  generate_max_len: 8
  n_samples_per_prompt: 4
  temperature: 0.6
  feature_layers: [0.25, 0.5, 0.75]
  embed_method: last_token
  use_whitening: true
  alignment_coef: 1.0
  diversity_coef: 1.0
  rl_coef: 1.0
  ce_coef: 0.03
  advantage_estimator: rloo

datasets:
  - path: nvidia/OpenCodeInstruct
    type: ebft_strided_structured.transform
    split: train[:1%]

attn_implementation: flex_attention  # Strided mode uses flex_attention
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: true  # Required for flex_attention
```

```bash
CUDA_VISIBLE_DEVICES=0 axolotl train config.yaml
```

::: {.callout-tip}
See `examples/ebft/` for complete example configs covering Llama 1B/3B/8B and Qwen3 4B/8B models in both modes.
:::

#### EBFT Configuration Reference

| Parameter | Default | Description |
|-----------|---------|-------------|
| `ebft.feature_layers` | `[0.25, 0.5, 0.75]` | Layer depths for feature extraction (fractional) |
| `ebft.embed_method` | `last_token` | Feature pooling: `last_token`, `mean_pooling`, `concat` |
| `ebft.use_whitening` | `false` | SVD whitening of feature dimensions |
| `ebft.alignment_coef` | `1.0` | Cosine similarity reward weight |
| `ebft.diversity_coef` | `1.0` | Pairwise dot product penalty weight |
| `ebft.ce_coef` | `0.0` | Cross-entropy loss on ground-truth tokens |
| `ebft.mode` | `structured` | `structured` (vLLM) or `strided` (no vLLM) |
| `ebft.stride` | — | Tokens between anchor points (strided mode) |
| `ebft.context_length` | — | Context window per block (strided mode) |
| `ebft.generate_max_len` | — | Tokens to generate per block (strided mode) |
| `ebft.n_samples_per_prompt` | — | Rollouts per document (strided mode) |
| `ebft.advantage_estimator` | `grpo` | `grpo` or `rloo` (strided mode) |

### NeMo Gym Integration

[NeMo Gym](https://github.com/NVIDIA-NeMo/Gym) provides 50+ verified RL environments (math, coding, tool-use, reasoning) with deterministic reward signals. The axolotl integration supports both **single-turn** (call `/verify` after generation) and **multi-turn** (agent-based tool execution via `/run`).

#### Single-Turn (Simplest)

For environments that only need answer verification (math, coding challenges). No agent server needed — the reward function calls `/verify` directly on the resource server.

```yaml
base_model: Qwen/Qwen2.5-0.5B-Instruct

rl: grpo
chat_template: tokenizer_default

trl:
  use_vllm: false  # Colocate mode (single GPU)
  num_generations: 4
  max_completion_length: 128
  temperature: 0.9
  reward_funcs:
    - axolotl.integrations.nemo_gym.rewards.reward_nemo_gym_verify

plugins:
  - axolotl.integrations.nemo_gym.NemoGymPlugin

nemo_gym_enabled: true
nemo_gym_dir: ~/Gym
nemo_gym_auto_start: false
nemo_gym_head_port: 11000
nemo_gym_datasets:
  - path: resources_servers/reasoning_gym/data/train_basic_arithmetic.jsonl
    server_name: reasoning_gym

datasets:
  - path: ~/Gym/resources_servers/reasoning_gym/data/train_basic_arithmetic.jsonl
    type: chat_template
    field_messages: responses_create_params.input
    message_field_content: content
    message_field_role: role
```

```bash
# Terminal 1: Start NeMo Gym resource server
cd ~/Gym && .venv/bin/ng_run \
  "+config_paths=[resources_servers/reasoning_gym/configs/resources_only.yaml]" \
  "+skip_venv_if_present=true"

# Terminal 2: Train
CUDA_VISIBLE_DEVICES=0 axolotl train config.yaml
```

::: {.callout-note}
`nemo_gym_datasets.path` is relative to `nemo_gym_dir`. Don't use absolute paths or they will be double-joined.
:::

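Under the hood, a `/verify`-backed reward amounts to an HTTP round trip per completion. The following is a hypothetical sketch — the payload fields and response shape are assumptions, not NeMo Gym's documented API; use the shipped `reward_nemo_gym_verify` in practice:

```python
import requests

def verify_reward(completions, task_info, base_url="http://localhost:11000", **kwargs):
    # Hypothetical payload: field names here are illustrative assumptions.
    rewards = []
    for completion, info in zip(completions, task_info):
        resp = requests.post(
            f"{base_url}/verify",
            json={"task": info, "response": completion[0]["content"]},
            timeout=30,
        )
        rewards.append(float(resp.json().get("reward", 0.0)))
    return rewards
```
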
#### Multi-Turn with Async GRPO (Recommended)

For environments with tool-use (weather, search, databases). An agent server orchestrates multi-turn interactions: generate → parse tool calls → execute tools → feed results back → repeat until done.

```yaml
base_model: Qwen/Qwen3-0.6B

rl: grpo
chat_template: tokenizer_default

adapter: lora
lora_r: 16
lora_alpha: 32
lora_target_modules: [q_proj, k_proj, v_proj, o_proj, gate_proj, up_proj, down_proj]

trl:
  use_vllm: true
  vllm_mode: server
  vllm_server_host: localhost
  vllm_server_port: 8000
  vllm_lora_sync: true
  vllm_sync_interval: 5
  use_data_producer: true
  async_prefetch: true  # 3x speedup
  num_generations: 4
  max_completion_length: 512
  temperature: 0.8
  reward_funcs:
    - axolotl.integrations.nemo_gym.rewards.reward_env

plugins:
  - axolotl.integrations.nemo_gym.NemoGymPlugin

nemo_gym_enabled: true
nemo_gym_auto_start: false
nemo_gym_head_port: 11000
nemo_gym_multi_turn: true
nemo_gym_verify_timeout: 120
nemo_gym_datasets:
  - path: resources_servers/example_single_tool_call/data/weather_tool_calling.jsonl
    server_name: example_single_tool_call

datasets:
  - path: ~/Gym/resources_servers/example_single_tool_call/data/weather_tool_calling.jsonl
    type: chat_template
    field_messages: responses_create_params.input
    message_field_content: content
    message_field_role: role

vllm:
  gpu_memory_utilization: 0.85
  max_model_len: 2048
```

Multi-turn requires three services running:

```bash
# Terminal 1: vLLM with LoRA + tool calling
VLLM_ALLOW_RUNTIME_LORA_UPDATING=1 CUDA_VISIBLE_DEVICES=0 \
  python -m vllm.entrypoints.openai.api_server \
  --model Qwen/Qwen3-0.6B --max-model-len 2048 \
  --gpu-memory-utilization 0.85 \
  --enable-lora --max-lora-rank 64 \
  --enable-auto-tool-choice --tool-call-parser hermes

# Terminal 2: NeMo Gym servers (resource + model proxy + agent)
cd ~/Gym && .venv/bin/ng_run \
  "+config_paths=[configs/axolotl_tool_calling.yaml]" \
  "+skip_venv_if_present=true"

# Terminal 3: Training
CUDA_VISIBLE_DEVICES=1 axolotl train config.yaml
```

::: {.callout-important}
Multi-turn requires a NeMo Gym agent config YAML that defines three components: a resource server (tools + `/verify`), a model server proxy (forwards to your vLLM), and an agent server (orchestrates `/run`). See the [NeMo Gym README](https://github.com/NVIDIA-NeMo/Gym) for agent config format.
:::

#### NeMo Gym Prerequisites

```bash
# Clone and set up NeMo Gym
git clone https://github.com/NVIDIA-NeMo/Gym.git ~/Gym
cd ~/Gym
uv venv --python 3.12 && source .venv/bin/activate && uv sync

# Fix pycosat build (GCC 13+)
CFLAGS="" uv pip install pycosat --python .venv/bin/python --no-build-isolation
```

#### NeMo Gym Configuration Reference

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `nemo_gym_enabled` | bool | — | Enable the NeMo Gym integration |
| `nemo_gym_dir` | str | `~/Gym` | Path to NeMo Gym repo |
| `nemo_gym_auto_start` | bool | `true` | Auto-start resource servers |
| `nemo_gym_head_port` | int | `11000` | Head server port |
| `nemo_gym_multi_turn` | bool | `false` | Enable multi-turn via agent `/run` |
| `nemo_gym_verify_timeout` | int | `30` | Per-request timeout (seconds) |
| `nemo_gym_datasets` | list | required | Dataset configs with `path` and `server_name` |

#### Reward Functions

| Function | Mode | Description |
|----------|------|-------------|
| `axolotl.integrations.nemo_gym.rewards.reward_nemo_gym_verify` | Single-turn | Calls `/verify`, returns binary reward |
| `axolotl.integrations.nemo_gym.rewards.reward_env` | Multi-turn | Passthrough reward from agent `/run` |

### Using local dataset files

```yaml

@@ -55,7 +55,7 @@ To use sequence parallelism, you need:
|
||||
|
||||
## Limitations
|
||||
|
||||
- Flash attention must be enabled for this to work (`attn_implementation: flash_attention_2` in config YAML)
|
||||
- Flash attention must be enabled for this to work (`flash_attention: true` in config YAML)
|
||||
- May have a small performance overhead due to communication between GPUs
|
||||
|
||||
## Example
|
||||
|
||||
@@ -1,399 +0,0 @@
|
||||
---
|
||||
title: "Training Stability & Debugging"
|
||||
order: 15
|
||||
description: "Guide to monitoring, debugging, and stabilizing training runs in axolotl"
|
||||
---
|
||||
|
||||
This guide covers practical techniques for monitoring training health, diagnosing instability, and resolving common failures in both supervised fine-tuning (SFT) and reinforcement learning (GRPO/EBFT) workflows.
|
||||
|
||||
## Monitoring Training
|
||||
|
||||
### Key Metrics for SFT
|
||||
|
||||
Every SFT run should be monitored through at least these four metrics:
|
||||
|
||||
| Metric | What It Tells You | Healthy Range |
|
||||
|--------|-------------------|---------------|
|
||||
| `train/loss` | How well the model fits training data | Decreasing; typically 0.5--2.0 for chat fine-tuning |
|
||||
| `eval/loss` | Generalization performance | Tracks train loss with small gap; divergence signals overfitting |
|
||||
| `grad_norm` | Gradient magnitude | 0.1--10.0; spikes above 100 indicate instability |
|
||||
| `learning_rate` | Current LR from scheduler | Should follow expected schedule (warmup then decay) |
|
||||
|
||||
::: {.callout-tip}
|
||||
## Set Up Logging Early
|
||||
Enable W&B or TensorBoard from the start. Debugging a failed run without metrics is guesswork.
|
||||
|
||||
```yaml
|
||||
wandb_project: my-project
|
||||
wandb_run_id: # optional, for resuming
|
||||
logging_steps: 1
|
||||
```
|
||||
:::
|
||||
|
||||
### Key Metrics for RL (GRPO)
|
||||
|
||||
GRPO training logs a richer set of metrics. These are the critical ones:
|
||||
|
||||
| Metric | Healthy Range | Red Flag |
|
||||
|--------|---------------|----------|
|
||||
| `rewards/<name>/mean` | > 0.15 within 20 steps | Stays at 0 -- reward function is broken or task is too hard |
|
||||
| `reward_std` | > 0 on most steps | Always 0 -- no learning signal (all completions get the same reward) |
|
||||
| `frac_reward_zero_std` | < 0.8 | 1.0 on every step -- zero-advantage skip fires constantly, no gradient updates |
|
||||
| `grad_norm` | 0.001--1.0 | 0.0 is acceptable occasionally (zero-adv skip); > 10.0 is unstable |
|
||||
| `entropy` | 0.05--0.5 | < 0.01 suggests mode collapse; > 1.0 suggests the model is not converging |
|
||||
| `kl` | 0.0--0.5 | > 2.0 suggests policy has diverged too far from reference |
|
||||
| `sampling/sampling_logp_difference/mean` | < 0.1 | > 1.0 means policy has diverged far from vLLM server weights |
|
||||
| `sampling/importance_sampling_ratio/min` | > 0.1 | Near 0 indicates stale off-policy data; increase `vllm_sync_interval` |
|
||||
| `clip_ratio/region_mean` | < 0.1 | > 0.3 means PPO clipping is too aggressive |
|
||||
| `completions/mean_length` | Task-dependent | Monotonically increasing to max length suggests reward hacking |
|
||||
| `completions/clipped_ratio` | < 0.3 | > 0.8 means most completions hit `max_completion_length` -- increase it |
|
||||
|
||||
::: {.callout-note}
|
||||
## EBFT-Specific Metrics
|
||||
For EBFT training, also monitor `ebft/alignment` (should trend upward, healthy 0.3--0.9), `ebft/diversity` (healthy 0.01--0.1; > 1.0 indicates mode collapse), and `ebft/cfm_loss` (should trend downward, < 10).
|
||||
:::
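
To see why `reward_std` and `frac_reward_zero_std` matter, recall how GRPO forms advantages: completions are scored in groups per prompt, and each advantage is the reward minus its group mean, scaled by the group standard deviation. A minimal sketch (illustrative, not axolotl's exact code):

```python
import torch

def group_advantages(rewards: torch.Tensor, eps: float = 1e-4) -> torch.Tensor:
    """rewards: [num_prompts, num_generations] -> group-relative advantages."""
    mean = rewards.mean(dim=1, keepdim=True)
    std = rewards.std(dim=1, keepdim=True)
    adv = (rewards - mean) / (std + eps)
    # Groups where every completion got the same reward have std == 0:
    # their advantages are all zero, so they contribute no gradient.
    return torch.where(std > 0, adv, torch.zeros_like(adv))

r = torch.tensor([[1.0, 0.0, 0.0, 1.0],   # informative group
                  [0.0, 0.0, 0.0, 0.0]])  # zero-std group -> no signal
print(group_advantages(r))
```

A group whose completions all earn the same reward has zero standard deviation and all-zero advantages, so it contributes no gradient; that is exactly what `frac_reward_zero_std` counts.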

## SFT Stability

### Loss Plateau

**Symptom**: Loss stops decreasing early in training, well above expected values.

**Causes and fixes**:

- **Learning rate too low**: Increase by 2--5x. Typical ranges: full fine-tune 1e-5 to 5e-5, LoRA 1e-4 to 3e-4.
- **Insufficient warmup**: Set `warmup_steps` to 5--10% of total steps. Too-aggressive learning at the start can push the model into a flat region.
- **Data quality**: Check that labels are correctly masked. Use `axolotl preprocess` and inspect tokenized samples to confirm only the target tokens are trainable.
- **Weight decay too high**: Default 0.01 is usually fine. Values above 0.1 can suppress learning in LoRA.

### Loss Spikes

**Symptom**: Loss suddenly jumps by 2--10x then (possibly) recovers.

**Causes and fixes**:

- **Bad data samples**: A single malformed or extremely long example can cause a spike. Enable `sample_packing: false` temporarily and check if spikes correlate with specific batches.
- **Learning rate too high**: Reduce by 2--5x, or increase warmup.
- **Gradient accumulation mismatch**: Effective batch size = `micro_batch_size * gradient_accumulation_steps * num_gpus`. Very large effective batch sizes amplify gradient noise.
- **Mixed precision issues**: With `bf16: true`, some operations can lose precision. If spikes are severe, try `fp32` for diagnosis.

### Overfitting

**Symptom**: Train loss keeps decreasing but eval loss starts increasing.

**Fixes**:

- Increase `val_set_size` (e.g., 0.05) and monitor `eval/loss`.
- Reduce `num_epochs` or `max_steps`.
- Increase `weight_decay` (try 0.01--0.1).
- Use a smaller LoRA rank (`lora_r`). Typical values: 8--32.
- Increase dropout: `lora_dropout: 0.05`.

## RL/GRPO Stability

### Reward Never Increases

If `rewards/*/mean` stays at 0 for more than 20 steps:

1. **Test reward function standalone**: Run it outside training with known inputs to verify it returns nonzero values (a fuller harness is sketched after this list).
   ```bash
   cd experiments && python -c "import my_rewards; print(my_rewards.accuracy_reward(...))"
   ```
2. **Check dataset columns**: The reward function receives `**kwargs` containing dataset columns. Verify the columns it needs (e.g., `answer`) are not removed by the dataset transform.
3. **Check completion content**: Enable `log_completions: true` in the `trl:` config and inspect logged completions in W&B. If completions are empty or incoherent, the model may be too weak for the task.
4. **Verify vLLM is serving the right model**: Hit the vLLM health endpoint and confirm the model name matches your config.
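
If the one-liner is too cramped for realistic inputs, a small standalone harness works as well. A sketch, assuming a TRL-style reward function that takes a `completions` list plus dataset columns as keyword arguments; `my_rewards.accuracy_reward` is the same hypothetical module as in the snippet above:

```python
# Hypothetical module/function names -- substitute your own reward code.
from my_rewards import accuracy_reward

# TRL passes conversational completions as lists of message dicts;
# plain-text datasets pass raw strings instead.
completions = [[{"role": "assistant", "content": "The answer is 42."}]]
rewards = accuracy_reward(completions=completions, answer=["42"])
print(rewards)
assert any(r != 0 for r in rewards), "reward function never returns nonzero"
```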

### Entropy Collapse (Mode Collapse)

**Symptom**: `entropy` drops below 0.01; all completions become nearly identical.

**Fixes**:

- Increase `temperature` in generation kwargs (try 0.8--1.0).
- Reduce learning rate.
- Add a KL penalty term (`beta` parameter in GRPO config).
- Check that `num_generations` is sufficient (16+ gives better advantage estimates).

### IS Ratio Divergence

**Symptom**: `sampling/importance_sampling_ratio/min` drops near 0, or `sampling/sampling_logp_difference/mean` exceeds 1.0.

This means the policy has diverged significantly from the weights used by vLLM for generation. The importance sampling correction becomes unreliable.

**Fixes**:

- Decrease `vllm_sync_interval` (sync weights more often).
- Enable `off_policy_mask_threshold` (e.g., 0.5) to mask stale off-policy samples.
- Use `importance_sampling_level: token` for finer-grained correction.

### Gradient Norm Instability

**Symptom**: `grad_norm` oscillates wildly or exceeds 10.0 regularly.

**Fixes**:

- Enable gradient clipping: `max_grad_norm: 1.0` (default in most configs).
- Reduce learning rate.
- Increase `gradient_accumulation_steps` to smooth out noisy batches.
- Check for NaN issues (see next section).

## NaN and Inf Handling

### Common Causes

| Cause | Where It Manifests | Detection |
|-------|-------------------|-----------|
| FP8 zero-scale division | Forward pass logits | `grad_norm: nan`, loss becomes NaN immediately |
| Gradient explosion | Backward pass | `grad_norm` spikes to inf, then loss goes NaN |
| Bad data (empty sequences) | Logprob computation | NaN in specific batches only |
| Numerical overflow in log-softmax | Loss computation | Large negative logprobs cause exp() overflow |

### FP8-Specific NaN Issues

FP8 quantization (`fp8: true`) can produce NaN when the activation quantization kernel divides by `max(abs(x)) / 448`. If the input tensor is all zeros (e.g., padding positions), the scale becomes 0, causing division by zero.

**Fixes applied in axolotl**:

- The `act_quant_kernel` has a zero-guard: `s = tl.where(s == 0, 1.0, s)`.
- A safety net `nan_to_num(logits, nan=0.0)` is applied in `_get_per_token_logps_and_entropies`.
- Embedding padding is zero-padded for FP8 compatibility.

::: {.callout-important}
## After Modifying Triton Kernels
If you patch any Triton JIT kernel (e.g., the FP8 quantization kernels in transformers), you must clear the Triton cache for changes to take effect:

```bash
rm -rf ~/.triton/cache
```
:::

### General NaN Debugging Steps

1. **Enable anomaly detection** (slow, but pinpoints the source):
   ```python
   torch.autograd.set_detect_anomaly(True)
   ```
2. **Check grad_norm**: If it goes to NaN, the backward pass is the problem. If loss is NaN but grad_norm was fine on the previous step, the forward pass is the problem.
3. **Reduce to single GPU, single batch**: Eliminate distributed training variables.
4. **Inspect data**: Print the batch that triggers NaN. Look for empty sequences, extreme token IDs, or unexpected padding patterns (a sketch of such a check follows this list).
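
For step 4, a lightweight batch check catches the most common offenders before the forward pass. A sketch assuming a HF-style batch dict; `check_batch` is an illustrative helper, not an axolotl API:

```python
import torch

def check_batch(batch: dict, vocab_size: int) -> None:
    """Fail fast on the batch pathologies that commonly produce NaN."""
    ids = batch["input_ids"]
    assert ids.numel() > 0, "empty batch"
    assert ids.min() >= 0 and ids.max() < vocab_size, "token id out of range"
    # Fully padded rows produce empty logprob reductions downstream
    lengths = batch["attention_mask"].sum(dim=1)
    assert (lengths > 0).all(), "fully padded (empty) sequence in batch"
    for key, t in batch.items():
        if isinstance(t, torch.Tensor) and torch.is_floating_point(t):
            assert torch.isfinite(t).all(), f"NaN/Inf already present in {key}"
```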

## OOM Debugging

Out-of-memory errors are the most common training failure. Use this systematic approach, from least to most disruptive:

### Step 1: Reduce Batch Size

The single highest-impact change. VRAM scales roughly linearly with batch size.

```yaml
micro_batch_size: 1  # Start here
gradient_accumulation_steps: 16  # Increase to maintain effective batch size
```

For GRPO specifically, the logits tensor for policy logprob computation can be very large: `batch_size * num_generations * seq_len * vocab_size` elements in bf16. For example, with `num_generations: 16` and `micro_batch_size: 8`, the logits tensor alone is:

```
8 * 16 * 2048 * 151936 * 2 bytes = ~75 GB (way too large)
```

Reduce `micro_batch_size` to 2--4 for GRPO.
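
A quick back-of-envelope helper makes it easy to check this before launching a run (illustrative, not part of axolotl):

```python
def grpo_logits_gib(micro_batch_size: int, num_generations: int,
                    seq_len: int, vocab_size: int, bytes_per_el: int = 2) -> float:
    """Size in GiB of the [B * G, T, V] logits tensor in bf16/fp16."""
    n = micro_batch_size * num_generations * seq_len * vocab_size
    return n * bytes_per_el / 1024**3

# Qwen2.5 vocab (151936), 2048-token sequences:
print(f"{grpo_logits_gib(8, 16, 2048, 151936):.1f} GiB")  # ~74.2 GiB
print(f"{grpo_logits_gib(2, 16, 2048, 151936):.1f} GiB")  # ~18.5 GiB
```

The scaling is linear in every factor, so halving `micro_batch_size` or the sequence length halves this allocation.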

### Step 2: Enable Gradient Checkpointing

Trades compute for memory by recomputing activations during the backward pass instead of storing them.

```yaml
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false  # Recommended default
```

::: {.callout-warning}
## Reentrant Checkpointing Exceptions
Some configurations require `use_reentrant: true`:

- DeepSpeed ZeRO-3 (non-reentrant causes `CheckpointError`)
- EBFT strided mode with flex_attention
:::

### Step 3: Use Quantization

Load the base model in reduced precision:

```yaml
# 4-bit QLoRA
adapter: qlora
load_in_4bit: true

# 8-bit
load_in_8bit: true

# FP8 (saves ~50% model VRAM, same compute speed as bf16)
fp8: true
```

### Step 4: Reduce Sequence Length

```yaml
sequence_len: 1024  # Down from 2048 or 4096
```

For GRPO, also reduce `max_completion_length`. Memory scales quadratically with sequence length when using standard attention.

### Step 5: Use Flash Attention

Reduces attention memory from O(n^2) to O(n):

```yaml
attn_implementation: flash_attention_2
```

### Step 6: Offload with DeepSpeed

For extreme cases, offload optimizer states or parameters to CPU:

```yaml
deepspeed: deepspeed_configs/zero3_bf16.json
```

### Diagnosing the Specific Culprit

Use the `profiler_steps` config option to capture GPU memory snapshots:

```yaml
profiler_steps: [1, 2]
```

This generates PyTorch profiler traces you can inspect to see exactly which tensor allocation caused the OOM.

## Common Errors

| Error Message | Likely Cause | Fix |
|---------------|-------------|-----|
| `exitcode: -9` | System RAM exhaustion | Reduce dataset size, `dataset_num_proc`, or number of data workers |
| `exitcode: -7` (DeepSpeed) | DeepSpeed version issue | `pip install -U deepspeed` |
| `CUDA out of memory` | GPU VRAM exhaustion | Follow OOM debugging steps above |
| `RuntimeError: NCCL communicator was aborted` | GPU communication failure | See [NCCL docs](nccl.qmd); check `NCCL_DEBUG=INFO` output |
| `ValueError: Asking to pad but the tokenizer does not have a padding token` | Missing pad token | Add `special_tokens: { pad_token: "<\|endoftext\|>" }` to config |
| `'DummyOptim' object has no attribute 'step'` | DeepSpeed on single GPU | Remove `deepspeed:` section from config |
| `unable to load strategy X` then `None is not callable` | Reward module not importable | Run `cd experiments && python -c "import my_rewards"` to check |
| `generation_batch_size not divisible by num_generations` | micro_batch_size too small | Set `micro_batch_size >= num_generations` and make it divisible |
| `'weight' must be 2-D` | FSDP1 flattened parameters | Use `fsdp_version: 2` or skip `unwrap_model` when FSDP is enabled |
| `CheckpointError` (tensor count mismatch) | Non-reentrant checkpointing + ZeRO-3 or flex_attention | Set `use_reentrant: true` in `gradient_checkpointing_kwargs` |
| `BFloat16` TypeError during weight sync | NumPy does not support bf16 | Fixed in axolotl's `weight_serde.py` (auto bf16 to fp16 conversion) |
| `Content end boundary is before start boundary` | Chat template parsing issue | Check `eos_token` matches template; file a GitHub issue if persistent |
| `CAS service error` during data processing | HuggingFace XET issue | Set `export HF_HUB_DISABLE_XET=1` |
| Training hangs (multi-GPU) | FSDP + async prefetch deadlock | Set `async_prefetch: false` with FSDP |

## Profiling

### PyTorch Profiler

Axolotl supports PyTorch profiler integration via the config:

```yaml
profiler_steps: [1, 2, 3]
```

This captures profiler traces for the specified steps. View them in TensorBoard:

```bash
tensorboard --logdir output_dir/runs
```

Or open the `.json` trace file in `chrome://tracing`.

### CUDA Memory Snapshots

For detailed memory analysis, use PyTorch's memory snapshot API. Add this to your training script or use it interactively:

```python
import torch

# Enable memory history tracking
torch.cuda.memory._record_memory_history()

# ... run your training step ...

# Save snapshot
torch.cuda.memory._dump_snapshot("memory_snapshot.pickle")
```

Visualize by dragging the snapshot into PyTorch's memory visualizer at <https://pytorch.org/memory_viz>, or with the CLI tool:

```bash
python -m torch.cuda._memory_viz trace_plot memory_snapshot.pickle
```

### Quick GPU Memory Check

During training, monitor GPU utilization in a separate terminal:

```bash
watch -n 1 nvidia-smi
```

For programmatic access within axolotl, the logged metrics `memory/max_alloc` and `memory/max_reserved` come from `torch.cuda.max_memory_allocated()` and `torch.cuda.max_memory_reserved()`. Note these report PyTorch's view of memory, which may differ from `nvidia-smi` (see [FAQ](faq.qmd)).
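
For the same numbers interactively, say from a notebook or a debugger breakpoint, a minimal equivalent:

```python
import torch

gib = 1024**3
print(f"max_alloc:    {torch.cuda.max_memory_allocated() / gib:.2f} GiB")
print(f"max_reserved: {torch.cuda.max_memory_reserved() / gib:.2f} GiB")
torch.cuda.reset_peak_memory_stats()  # start a fresh measurement window
```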

## W&B and Logging

### Enabling Logging

```yaml
wandb_project: my-project
wandb_entity: my-team  # optional
wandb_run_id: run-123  # optional, for resuming
wandb_name: experiment-name  # optional
logging_steps: 1  # log every step (recommended for RL)
```

### Debug Logging

For detailed axolotl-internal debug output:

```bash
AXOLOTL_LOG_LEVEL=DEBUG axolotl train config.yaml 2>&1 | tee /tmp/training.log
```

::: {.callout-tip}
## Always Log to a File
Pipe training output to a log file so you can inspect it after the run:

```bash
axolotl train config.yaml 2>&1 | tee /tmp/my_run.log
```
:::

### What Axolotl Logs

**SFT metrics** (logged every `logging_steps`):

- `train/loss`, `eval/loss` -- training and validation loss
- `train/grad_norm` -- gradient L2 norm (before clipping)
- `train/learning_rate` -- current learning rate
- `memory/max_alloc`, `memory/max_reserved` -- peak GPU memory

**GRPO/RL metrics** (logged every step):

- `rewards/<name>/mean`, `rewards/<name>/std` -- per-reward-function statistics
- `reward`, `reward_std` -- aggregated reward across all reward functions
- `frac_reward_zero_std` -- fraction of prompt groups where all completions got the same reward
- `completions/mean_length`, `completions/min_length`, `completions/max_length` -- completion token lengths
- `completions/clipped_ratio` -- fraction of completions that hit the max length
- `completions/mean_terminated_length`, `completions/min_terminated_length`, `completions/max_terminated_length` -- lengths of naturally terminated completions
- `kl` -- KL divergence between policy and reference
- `entropy` -- policy entropy (measure of output diversity)
- `clip_ratio/region_mean`, `clip_ratio/low_mean`, `clip_ratio/high_mean` -- PPO clipping statistics
- `sampling/sampling_logp_difference/mean`, `sampling/sampling_logp_difference/max` -- log-probability difference between policy and sampling distribution
- `sampling/importance_sampling_ratio/min`, `sampling/importance_sampling_ratio/mean`, `sampling/importance_sampling_ratio/max` -- IS ratio statistics for off-policy correction
- `num_tokens` -- total tokens processed

### Reading W&B Charts

For a healthy GRPO run, expect to see:

1. **`reward/mean`**: Gradual upward trend. May start near 0 and reach 0.3--0.8 depending on task difficulty. Not monotonic -- fluctuations are normal.
2. **`entropy`**: Gradual decrease from initial values (often 0.3--0.6) as the model becomes more confident. Should not collapse to near-zero.
3. **`grad_norm`**: Mostly in the 0.001--1.0 range. Occasional 0.0 values are fine (zero-advantage skip). Persistent values above 10.0 need investigation.
4. **`kl`**: Starts near 0 and grows slowly. If it shoots up rapidly, the policy is diverging from the reference.
5. **`completions/mean_length`**: Should reflect the task's natural answer length. If it steadily increases to `max_completion_length`, the model may be reward-hacking by generating longer outputs.
53
docs/unsloth.qmd
Normal file
@@ -0,0 +1,53 @@
---
title: "Unsloth"
description: "Hyper-optimized QLoRA finetuning for single GPUs"
---

### Overview

Unsloth provides hand-written optimized kernels for LLM finetuning that slightly improve speed and VRAM over standard industry baselines.

::: {.callout-important}
Due to breaking changes in transformers `v4.48.0`, users will need to downgrade to `<=v4.47.1` to use this patch.

This will later be deprecated in favor of [LoRA Optimizations](lora_optims.qmd).
:::

### Installation

The following will install the correct unsloth and extras from source.

```bash
python scripts/unsloth_install.py | sh
```

### Usage

Axolotl exposes a few configuration options to try out unsloth and get most of the performance gains.

Our unsloth integration is currently limited to the following model architectures:
- llama

These options are specific to LoRA finetuning and cannot be used for multi-GPU finetuning:
```yaml
unsloth_lora_mlp: true
unsloth_lora_qkv: true
unsloth_lora_o: true
```

These options are composable and can be used with multi-GPU finetuning:
```yaml
unsloth_cross_entropy_loss: true
unsloth_rms_norm: true
unsloth_rope: true
```

### Limitations

- Single GPU only; i.e., no multi-GPU support
- No deepspeed or FSDP support (requires multi-gpu)
- LoRA + QLoRA support only. No full fine tunes or fp8 support.
- Limited model architecture support. Llama, Phi, Gemma, Mistral only
- No MoE support.
@@ -1,318 +0,0 @@
---
title: "vLLM Serving for GRPO Training"
description: "How to configure and run vLLM as a generation backend for GRPO reinforcement learning in Axolotl."
format:
  html:
    toc: true
    toc-depth: 3
    number-sections: true
execute:
  enabled: false
---

## Overview {#sec-overview}

GRPO (Group Relative Policy Optimization) trains a language model by generating completions, scoring them with reward functions, and updating the policy to favor higher-reward outputs. The generation step is the bottleneck: producing thousands of tokens per training step with the policy model is slow using standard HuggingFace generation.

Axolotl uses [vLLM](https://github.com/vllm-project/vllm) as a high-throughput generation backend. vLLM runs as a separate process (either on a dedicated GPU or colocated on the training GPU) and serves completions via an HTTP API. The trainer sends prompts to vLLM, receives completions, scores them, and performs gradient updates.

```
┌──────────────────────┐       HTTP        ┌──────────────────────┐
│   Trainer (GPU 1)    │ ───────────────── │  vLLM Server (GPU 0) │
│                      │  prompts/compls   │                      │
│  - Policy model      │ ◄──────────────── │  - Same base model   │
│  - Reward scoring    │                   │  - Fast generation   │
│  - Gradient updates  │    weight sync    │  - LoRA adapter      │
│  - LoRA adapter      │ ────────────────► │    (periodically     │
│                      │  (every N steps)  │     updated)         │
└──────────────────────┘                   └──────────────────────┘
```

::: {.callout-important}
vLLM must serve the **same base model** specified in your training config. If the models do not match, weight synchronization will silently produce incorrect results.
:::

## Server Mode {#sec-server-mode}

Server mode runs vLLM as an external process on dedicated GPU(s). This is the recommended configuration for most setups.

### Starting the Server

Use the `axolotl vllm-serve` command with your training config:

```bash
# Terminal 1: Start vLLM on GPU 0
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve grpo_config.yaml
```

```bash
# Terminal 2: Start training on GPU 1
CUDA_VISIBLE_DEVICES=1 axolotl train grpo_config.yaml
```

The server reads vLLM settings from the `vllm:` section of your config and starts an HTTP server (default: `http://0.0.0.0:8000`).

::: {.callout-tip}
Use `tmux` or `screen` to manage the vLLM server process. Typical startup time is 30-90 seconds depending on model size and whether CUDA graphs are captured.
:::

### Minimal Server Config

```yaml
base_model: Qwen/Qwen2.5-1.5B-Instruct

vllm:
  host: 0.0.0.0
  port: 8000
  gpu_memory_utilization: 0.85
  dtype: auto
  max_model_len: 4096

rl: grpo
trl:
  use_vllm: true
  vllm_server_host: 0.0.0.0
  vllm_server_port: 8000
  vllm_server_timeout: 300
```

### Multi-GPU vLLM

For larger models, use tensor parallelism across multiple GPUs:

```yaml
vllm:
  tensor_parallel_size: 2
  gpu_memory_utilization: 0.85
```

```bash
# vLLM on GPUs 2,3; training on GPUs 0,1
CUDA_VISIBLE_DEVICES=2,3 axolotl vllm-serve grpo_config.yaml
CUDA_VISIBLE_DEVICES=0,1 axolotl train grpo_config.yaml --num-processes 2
```

::: {.callout-note}
Due to how TRL maps vLLM device indices, the vLLM instance should use the **last** N GPUs (highest device indices), while training uses the first N.
:::

## Colocate Mode {#sec-colocate-mode}

Colocate mode runs vLLM on the same GPU as the trainer. This is useful when you only have a single GPU.

```yaml
trl:
  use_vllm: true
  vllm_mode: colocate
  vllm_enable_sleep_mode: true
```

With `vllm_enable_sleep_mode: true`, vLLM offloads its VRAM allocation when not actively generating, freeing memory for training. When the trainer needs new completions, vLLM wakes up and reclaims VRAM.

::: {.callout-warning}
Colocate mode is significantly slower than server mode because generation and training cannot overlap. The GPU alternates between the two workloads. This mode is practical only for smaller models (up to ~3B on a 24 GB GPU).
:::

**When to use colocate mode:**

- You have exactly one GPU
- The model fits in memory with both vLLM and training active (with sleep mode), or is small enough to time-share
- You accept the performance tradeoff for simpler setup (no separate vLLM process to manage)

**When to use server mode:**

- You have two or more GPUs
- You want maximum throughput (generation overlaps with training via async prefetch)
- You are running larger models (7B+)

## LoRA Sync {#sec-lora-sync}

LoRA sync is the recommended weight synchronization method when training with LoRA adapters. Instead of merging adapter weights into the base model and broadcasting the full merged weights over NCCL, it saves only the LoRA adapter files to the filesystem and tells vLLM to load them natively.

### How It Works

1. The trainer calls `model.save_pretrained()` to write the LoRA adapter weights to a temporary directory
2. The trainer sends an HTTP POST to `/set_lora_adapter/` on the vLLM server
3. vLLM loads the adapter using its native LoRA support (Punica kernels)
4. Generation uses the updated adapter on the next request
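
In code, the flow is roughly as follows. A hedged sketch: the endpoint comes from this doc, but the payload keys, adapter path, and `model` object are illustrative assumptions:

```python
# Illustrative sketch of the LoRA sync flow, not axolotl's exact code.
import json
from urllib.request import Request, urlopen

ADAPTER_DIR = "/tmp/grpo_lora_adapter"  # hypothetical scratch path
VLLM_URL = "http://localhost:8000"

# 1. Write the current adapter weights to disk (`model` is the PEFT-wrapped policy)
model.save_pretrained(ADAPTER_DIR)

# 2. Tell vLLM to (re)load the adapter from that directory
body = json.dumps({"lora_name": "policy", "lora_path": ADAPTER_DIR}).encode()
req = Request(f"{VLLM_URL}/set_lora_adapter/", data=body,
              headers={"Content-Type": "application/json"})
urlopen(req)  # steps 3-4: vLLM swaps the adapter; next request uses it
```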

### Benefits

- **Smaller sync payload**: Transfers ~40 MB of LoRA weights instead of ~1.4 GB+ of merged model weights (for a typical 0.5-3B model)
- **No NCCL communicator**: Eliminates the need for a cross-GPU NCCL communication channel, removing GPU contention between vLLM generation and weight sync
- **Faster sync**: ~200 ms per sync vs. 350 ms to 5+ seconds for NCCL merge sync
- **Simpler multi-GPU**: No need to set up NCCL groups between trainer and vLLM processes

### Configuration

```yaml
adapter: lora
lora_r: 32
lora_alpha: 64
lora_target_linear: true

trl:
  vllm_lora_sync: true   # Enables LoRA sync mode
  vllm_sync_interval: 5  # Sync every 5 training steps
```

Setting `vllm_lora_sync: true` automatically selects the LoRA-aware vLLM serve script (`axolotl.scripts.vllm_serve_lora`). You do not need to set `vllm.serve_module` manually.

::: {.callout-important}
LoRA sync requires that you are training with a LoRA adapter (`adapter: lora` or `adapter: qlora`). It is not applicable to full fine-tuning.
:::

## Weight Synchronization {#sec-weight-sync}

During GRPO training, the policy model on the trainer is continuously updated via gradient steps. The vLLM server, however, still holds the old weights. Periodically, the trainer must push updated weights to vLLM so that future generations reflect the improved policy.

### Sync Interval

The `vllm_sync_interval` parameter controls how often weights are synced:

```yaml
trl:
  vllm_sync_interval: 5  # Sync every 5 optimizer steps
```

**Tradeoffs:**

- **Lower interval** (e.g., 1-3): Fresher generations, better on-policy data, but more sync overhead per step
- **Higher interval** (e.g., 5-10): Less overhead, but generations become increasingly off-policy between syncs
- **Recommended**: 3-5 for most setups. Axolotl includes importance sampling correction (`vllm_importance_sampling_correction: true`) to handle mild distribution mismatch from stale vLLM weights.

### Sync Methods

| Method | Config | Payload | Mechanism | Typical Time |
|--------|--------|---------|-----------|-------------|
| **LoRA sync** | `vllm_lora_sync: true` | LoRA adapter only (~40 MB) | Filesystem + HTTP | ~200 ms |
| **NCCL merge sync** | Default (no lora_sync) | Full merged weights (~1.4 GB+) | HTTP trigger + NCCL broadcast | 350 ms - 5 s |

::: {.callout-tip}
If you are training with LoRA (which is recommended for GRPO), always enable `vllm_lora_sync: true`. The performance difference is substantial, especially as training progresses and NCCL contention increases.
:::

### Importance Sampling Correction

When vLLM weights are stale (between syncs), the generated data is slightly off-policy. Axolotl can correct for this:

```yaml
trl:
  vllm_importance_sampling_correction: true
  importance_sampling_level: token  # 'token' or 'sequence'
  off_policy_mask_threshold: 0.5  # KL threshold for masking stale sequences
```

- **Token-level IS** is recommended when using Liger kernel (sequence-level has numerical issues with chunked computation)
- **Off-policy sequence masking (OPSM)** drops sequences that have diverged too far from the current policy, providing a safety net against stale data
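
Conceptually, the token-level correction weights each token by the ratio of its probability under the current policy to its probability under the (possibly stale) vLLM weights. A minimal sketch of the idea, not axolotl's implementation:

```python
import torch

def is_ratio(policy_logps: torch.Tensor, sampling_logps: torch.Tensor,
             mask_threshold: float | None = 0.5):
    """Per-token ratio pi_theta / pi_vllm plus an optional sequence keep-mask."""
    log_ratio = policy_logps - sampling_logps  # [batch, seq]
    ratio = log_ratio.exp()
    keep = torch.ones_like(ratio, dtype=torch.bool)
    if mask_threshold is not None:
        # Drop whole sequences whose mean log-prob gap is too large (OPSM-style)
        keep = log_ratio.abs().mean(dim=-1, keepdim=True) <= mask_threshold
    return ratio, keep
```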

## Restart Requirements {#sec-restart}

::: {.callout-warning}
**vLLM must be restarted between training runs.** Weight syncs from a previous run leave the server in a corrupted state. If you start a new training run against a stale vLLM server, the model may fail to learn.
:::

### When to Restart

- Before every new training experiment
- After a training run crashes or is interrupted
- If you change the base model in your config

### How to Restart

Killing vLLM reliably requires terminating both the main process and its background EngineCore subprocess:

```bash
# Kill all vLLM-related processes
pkill -9 -f "vllm|EngineCore"

# Verify GPU memory is freed
nvidia-smi

# Restart the server
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve grpo_config.yaml
```

::: {.callout-tip}
A single `kill` often does not fully stop vLLM. Always use `kill -9` and verify with `nvidia-smi` that GPU memory has been released before restarting.
:::

### Health Check

The vLLM server exposes a health endpoint. Wait for it to return 200 before starting training:

```bash
# For the LoRA serve script (trailing slash required)
curl http://localhost:8000/health/

# For the default TRL serve script
curl http://localhost:8000/health
```
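
To script the wait in a launcher instead of polling by hand, a minimal stdlib sketch:

```python
import time
from urllib.error import URLError
from urllib.request import urlopen

def wait_for_vllm(url: str = "http://localhost:8000/health/", timeout_s: int = 300):
    """Block until the health endpoint answers 200, or raise after timeout_s."""
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        try:
            if urlopen(url, timeout=5).status == 200:
                return
        except (URLError, OSError):
            pass  # server still starting up
        time.sleep(2)
    raise TimeoutError(f"vLLM not healthy after {timeout_s}s: {url}")
```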

## Configuration Reference {#sec-config-reference}

### vLLM Server Options (`vllm:` section)

These control the vLLM server process started by `axolotl vllm-serve`.

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `host` | str | `0.0.0.0` | Host address for the vLLM server |
| `port` | int | `8000` | Port for the vLLM server |
| `device` | str | `auto` | Device to use for vLLM |
| `tensor_parallel_size` | int | `None` | Number of GPUs for tensor parallelism |
| `data_parallel_size` | int | `None` | Number of data parallel replicas |
| `gpu_memory_utilization` | float | `0.9` | Fraction of GPU memory for vLLM (0.0-1.0) |
| `dtype` | str | `auto` | Data type (`auto`, `float16`, `bfloat16`) |
| `max_model_len` | int | `None` | Maximum model context length. Set explicitly if the default is too large for your GPU |
| `enable_prefix_caching` | bool | `None` | Enable prefix caching for repeated prompt prefixes |
| `enable_reasoning` | bool | `None` | Enable reasoning mode for models with thinking tokens |
| `reasoning_parser` | str | `None` | Parser for reasoning output |
| `enforce_eager` | bool | `None` | Disable CUDA graph capture (required for some architectures like Qwen3.5 hybrid attention) |
| `serve_module` | str | `None` | Python module for vLLM serve script. Auto-set when `vllm_lora_sync: true` |
| `worker_extension_cls` | str | `None` | vLLM worker extension class for weight sync |

### Trainer vLLM Options (`trl:` section)

These control how the trainer interacts with vLLM.

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `use_vllm` | bool | `false` | Enable vLLM for generation |
| `vllm_mode` | str | `None` | `server` (external process) or `colocate` (same GPU) |
| `vllm_server_host` | str | `0.0.0.0` | Host of the vLLM server to connect to |
| `vllm_server_port` | int | `8000` | Port of the vLLM server to connect to |
| `vllm_server_timeout` | int | `None` | Timeout in seconds for vLLM requests |
| `vllm_lora_sync` | bool | `false` | Sync LoRA adapters via filesystem instead of NCCL merge |
| `vllm_sync_interval` | int | `None` | Sync weights every N optimizer steps |
| `vllm_enable_sleep_mode` | bool | `None` | Offload vLLM VRAM when idle (colocate mode) |
| `vllm_guided_decoding_regex` | str | `None` | Regex constraint for guided decoding |

For async pipeline and off-policy correction options, see the [GRPO Configuration Reference](grpo.qmd#configuration-reference).

## Complete Example {#sec-complete-example}

For a full working GRPO config including vLLM, LoRA sync, async generation, rewards, and dataset setup, see the [GRPO Quick Start](grpo.qmd#quick-start). That config includes all the vLLM settings covered in this guide.

```bash
# Terminal 1: Start vLLM
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve grpo_config.yaml

# Wait for health check to pass
curl http://localhost:8000/health/

# Terminal 2: Start training
CUDA_VISIBLE_DEVICES=1 axolotl train grpo_config.yaml
```

## Troubleshooting {#sec-troubleshooting}

| Problem | Likely Cause | Solution |
|---------|-------------|----------|
| Training hangs waiting for vLLM | Server not started or wrong port | Check `curl http://localhost:8000/health/` and verify `vllm_server_host`/`vllm_server_port` match |
| OOM on vLLM GPU | `gpu_memory_utilization` too high or `max_model_len` too large | Reduce `gpu_memory_utilization` to 0.7 or set `max_model_len` explicitly |
| OOM on training GPU | Batch too large for policy logprobs | Reduce `micro_batch_size` or `num_generations` |
| Accuracy stays at zero | Stale vLLM from previous run | Restart vLLM: `pkill -9 -f "vllm\|EngineCore"`, verify with `nvidia-smi`, restart |
| `ResponseValidationError` from vLLM | Missing logprobs in response | Ensure you are using the correct serve module (auto-selected with `vllm_lora_sync: true`) |
| Weight sync takes 5+ seconds | NCCL contention with vLLM generation | Switch to `vllm_lora_sync: true` to eliminate NCCL |
| `async_prefetch` deadlocks with FSDP | Background threads run unsynchronized FSDP collectives | Set `async_prefetch: false` when using FSDP or DeepSpeed multi-GPU |
@@ -15,7 +15,8 @@ Thanks to the team at LiquidAI for giving us early access to prepare for these r
Here is an example of how to install from pip:
```bash
# Ensure you have a compatible version of Pytorch installed
uv pip install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'
pip3 install packaging setuptools wheel ninja
pip3 install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'
```

2. Run one of the finetuning examples below.
@@ -34,7 +35,7 @@ Thanks to the team at LiquidAI for giving us early access to prepare for these r

**LFM2-MoE**
```bash
uv pip install git+https://github.com/huggingface/transformers.git@0c9a72e4576fe4c84077f066e585129c97bfd4e6
pip install git+https://github.com/huggingface/transformers.git@0c9a72e4576fe4c84077f066e585129c97bfd4e6

# LoRA SFT (1x48GB @ 16.2GiB)
axolotl train examples/LiquidAI/lfm2-8b-a1b-lora.yaml
@@ -44,7 +45,7 @@ Thanks to the team at LiquidAI for giving us early access to prepare for these r

- **Installation Error**: If you encounter `ImportError: ... undefined symbol ...` or `ModuleNotFoundError: No module named 'causal_conv1d_cuda'`, the `causal-conv1d` package may have been installed incorrectly. Try uninstalling it:
```bash
uv pip uninstall causal-conv1d
pip uninstall -y causal-conv1d
```

- **Dataset Loading**: Read more on how to load your own dataset in our [documentation](https://docs.axolotl.ai/docs/dataset_loading.html).

@@ -39,7 +39,7 @@ tf32: true
gradient_checkpointing: false
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 2

@@ -48,7 +48,7 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 2

@@ -50,7 +50,8 @@ tf32: true

gradient_checkpointing: true
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true
eager_attention:

warmup_ratio: 0.1
evals_per_epoch: 1

@@ -39,7 +39,7 @@ activation_offloading: legacy

resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_steps: 100
saves_per_epoch: 1

@@ -39,7 +39,7 @@ activation_offloading: legacy

resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_steps: 100
saves_per_epoch: 1

@@ -15,7 +15,8 @@ This guide shows how to fine-tune it with Axolotl with multi-turn conversations
git clone https://github.com/axolotl-ai-cloud/axolotl.git
cd axolotl

uv pip install --no-build-isolation -e '.[flash-attn]'
pip3 install packaging==26.0 setuptools==75.8.0 wheel ninja
pip3 install --no-build-isolation -e '.[flash-attn]'

# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
python scripts/cutcrossentropy_install.py | sh
@@ -30,7 +31,7 @@ python scripts/cutcrossentropy_install.py | sh
# For those using our Docker image, use the below path.
export CUDA_HOME=/usr/local/cuda

uv pip install git+https://github.com/nickjbrowning/XIELU@59d6031 --no-build-isolation --no-deps
pip3 install git+https://github.com/nickjbrowning/XIELU@59d6031 --no-build-isolation --no-deps
```

For any installation errors, see [XIELU Installation Issues](#xielu-installation-issues)
@@ -66,7 +67,7 @@ If those didn't help, please try the below solutions:
1. Pass env for CMAKE and try install again:

```bash
Python_EXECUTABLE=$(which python) uv pip install git+https://github.com/nickjbrowning/XIELU@59d6031 --no-build-isolation --no-deps
Python_EXECUTABLE=$(which python) pip3 install git+https://github.com/nickjbrowning/XIELU@59d6031 --no-build-isolation --no-deps
```

2. Git clone the repo and manually hardcode python path:
@@ -91,7 +92,7 @@ If those didn't help, please try the below solutions:
```

```bash
uv pip install . --no-build-isolation --no-deps
pip3 install . --no-build-isolation --no-deps
```

## Optimization Guides

@@ -55,7 +55,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 1

@@ -17,7 +17,8 @@ Thanks to the team at Arcee.ai for using Axolotl in supervised fine-tuning the A
git clone https://github.com/axolotl-ai-cloud/axolotl.git
cd axolotl

uv pip install --no-build-isolation -e '.[flash-attn]'
pip3 install packaging==26.0 setuptools==75.8.0 wheel ninja
pip3 install --no-build-isolation -e '.[flash-attn]'

# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
python scripts/cutcrossentropy_install.py | sh

@@ -55,7 +55,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 1

@@ -59,7 +59,8 @@ gradient_checkpointing: false
resume_from_checkpoint:
logging_steps: 1

attn_implementation: flash_attention_2
flash_attention: true
sdp_attention:
flash_optimum:

gptq_groupsize:

@@ -39,7 +39,8 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: xformers
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -45,7 +45,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -46,7 +46,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -45,7 +45,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -46,7 +46,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -45,7 +45,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -46,7 +46,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -52,7 +52,7 @@ gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch:

@@ -55,7 +55,7 @@ gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch:

@@ -39,7 +39,7 @@ gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch:

@@ -45,7 +45,7 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 1

@@ -43,7 +43,8 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: xformers
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -73,7 +73,8 @@ early_stopping_patience: 3
resume_from_checkpoint:
auto_resume_from_checkpoints: true
logging_steps: 1
attn_implementation: xformers
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -40,7 +40,8 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: xformers
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -47,7 +47,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -36,7 +36,8 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: xformers
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -37,7 +37,8 @@ bf16: auto
tf32: true
resume_from_checkpoint:
logging_steps: 5
attn_implementation: xformers
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -39,6 +39,7 @@ bf16: auto
tf32: true
resume_from_checkpoint:
logging_steps: 5
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -39,7 +39,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -47,7 +47,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -40,7 +40,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -47,6 +47,7 @@ tf32: false
gradient_checkpointing: false
resume_from_checkpoint:
logging_steps: 1
flash_attention:

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -47,6 +47,7 @@ tf32: false
gradient_checkpointing: false
resume_from_checkpoint:
logging_steps: 1
flash_attention:

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -43,7 +43,7 @@ gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -46,7 +46,7 @@ gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -40,6 +40,7 @@ bf16: auto
tf32: true
resume_from_checkpoint:
logging_steps: 5
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -38,6 +38,7 @@ tf32: true
gradient_checkpointing:
resume_from_checkpoint:
logging_steps: 1
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -44,7 +44,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true
flash_attn_cross_entropy: false
flash_attn_rms_norm: true
flash_attn_fuse_mlp: true

@@ -47,7 +47,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true
flash_attn_cross_entropy: false
flash_attn_rms_norm: true


@@ -46,7 +46,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -47,6 +47,7 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
flash_attention: false

warmup_ratio: 0.1
evals_per_epoch: 0

@@ -45,7 +45,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -36,7 +36,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch:

@@ -47,7 +47,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -71,7 +71,8 @@ early_stopping_patience: 3
resume_from_checkpoint:
auto_resume_from_checkpoints: true
logging_steps: 1
attn_implementation: xformers
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1

@@ -10,7 +10,7 @@ load_in_4bit: true
sequence_len: 1024
bf16: auto
tf32: false
attn_implementation: flash_attention_2
flash_attention: true
special_tokens:
  bos_token: "<|startoftext|>"
  eos_token: "<|endoftext|>"

@@ -48,7 +48,7 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
attn_implementation: flash_attention_2
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch:

@@ -40,7 +40,7 @@
"%%capture\n",
"# This step can take ~5-10 minutes to install dependencies\n",
"!pip install --no-build-isolation axolotl[flash-attn]>=0.9.1\n",
"!pip install \"cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@fec1a88\""
"!pip install \"cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@fa9a7fe\""
]
},
{
Some files were not shown because too many files have changed in this diff.