Compare commits: devstral-s...chore/docs

66 commits:

159f0531f9, 0494359c6c, 26c39e1ca7, 45adf1bfb9, eb3a57eb17, 34da391391,
0bb9077553, a85efffbef, 06a648263b, 9d5bfc127e, da8f6c32b9, 88c0e8d048,
d8e8cd8558, ccc94da8ad, ba62aa65ee, 21388cf615, 80d5b066ec, a3c82e8cbb,
b2274d430b, eac4a61f55, ace9287c96, f5fbc82f2b, 706c677cad, 468580d18e,
3634d8ff9d, bcc108efc1, 581dd324cc, 00cda8cc70, 52a0452acb, 83632f71d8,
92afa4fa27, dd660c2ed0, 09c685fd2c, 7909bfb076, cb03c765a1, 4440b4a1ce,
e8e45b3441, c67910fa6f, 787880215b, 4b1a29c694, d7fa60662e, 1d91d905c9,
2bf61d8e25, 68788e419e, 94219f6ee8, ecc719f5c7, d5d0dc5938, 5e86c35322,
6778856804, ec4ebfd997, bde8b5b6bd, 2962a398b7, 65c5481120, 5fca214108,
20fda75917, 6b6370f4e3, add2025253, a703560a10, 4a80d309e8, e33f225434,
3e6948be97, 4a8af60d34, a0941a9271, 5eb01f3df1, d27c35ac44, a535b68043
.github/workflows/base.yml (82 changed lines)

```diff
@@ -16,8 +16,9 @@ on:
 jobs:
   build-base:
     if: github.repository_owner == 'axolotl-ai-cloud'
+    timeout-minutes: 480
     # this job needs to be run on self-hosted GPU runners...
-    runs-on: axolotl-gpu-runner
+    runs-on: ubuntu-latest-m
     strategy:
       fail-fast: false
       matrix:
@@ -28,42 +29,50 @@ jobs:
           python_version: "3.11"
           pytorch: 2.5.1
           torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+          dockerfile: "Dockerfile-base"
         - cuda: "124"
           cuda_version: 12.4.1
           cudnn_version: ""
           python_version: "3.11"
           pytorch: 2.6.0
           torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+          dockerfile: "Dockerfile-base"
         - cuda: "126"
           cuda_version: 12.6.3
           cudnn_version: ""
           python_version: "3.11"
           pytorch: 2.6.0
           torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+          dockerfile: "Dockerfile-base"
         - cuda: "126"
           cuda_version: 12.6.3
           cudnn_version: ""
           python_version: "3.11"
-          pytorch: 2.7.0
+          pytorch: 2.7.1
           torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+          dockerfile: "Dockerfile-base"
         - cuda: "128"
           cuda_version: 12.6.3
           cudnn_version: ""
           python_version: "3.11"
-          pytorch: 2.7.0
+          pytorch: 2.7.1
           torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+          dockerfile: "Dockerfile-base"
         - cuda: "128"
           cuda_version: 12.8.1
           cudnn_version: ""
           python_version: "3.11"
           pytorch: nightly
           torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+          dockerfile: "Dockerfile-base-nightly"
-        - cuda: "128"
-          cuda_version: 12.8.1
-          cudnn_version: ""
-          python_version: "3.11"
-          pytorch: next
-          torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+        # # "next" is for release candidates of pytorch
+        # - cuda: "128"
+        #   cuda_version: 12.8.1
+        #   cudnn_version: ""
+        #   python_version: "3.11"
+        #   pytorch: next
+        #   torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+        #   dockerfile: "Dockerfile-base-next"
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -85,7 +94,60 @@ jobs:
         uses: docker/build-push-action@v4
         with:
           context: .
-          file: ${{ matrix.pytorch == 'nightly' && './docker/Dockerfile-base-nightly' || matrix.pytorch == 'next' && './docker/Dockerfile-base-next' || './docker/Dockerfile-base' }}
+          file: ./docker/${{ matrix.dockerfile }}
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: ${{ steps.metadata.outputs.tags }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+          labels: ${{ steps.metadata.outputs.labels }}
+          build-args: |
+            CUDA_VERSION=${{ matrix.cuda_version }}
+            CUDNN_VERSION=${{ matrix.cudnn_version }}
+            CUDA=${{ matrix.cuda }}
+            PYTHON_VERSION=${{ matrix.python_version }}
+            PYTORCH_VERSION=${{ matrix.pytorch }}
+            TORCH_CUDA_ARCH_LIST=${{ matrix.torch_cuda_arch_list }}
+
+  build-base-uv:
+    if: github.repository_owner == 'axolotl-ai-cloud'
+    timeout-minutes: 480
+    runs-on: ubuntu-latest-m
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - cuda: "126"
+            cuda_version: 12.6.3
+            cudnn_version: ""
+            python_version: "3.11"
+            pytorch: 2.6.0
+            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+            dockerfile: "Dockerfile-uv-base"
+          - cuda: "128"
+            cuda_version: 12.8.1
+            cudnn_version: ""
+            python_version: "3.11"
+            pytorch: 2.7.1
+            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+            dockerfile: "Dockerfile-uv-base"
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Docker metadata
+        id: metadata
+        uses: docker/metadata-action@v5
+        with:
+          images: |
+            axolotlai/axolotl-base-uv
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Build
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          file: ./docker/${{ matrix.dockerfile }}
           push: ${{ github.event_name != 'pull_request' }}
           tags: ${{ steps.metadata.outputs.tags }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
           labels: ${{ steps.metadata.outputs.labels }}
```
.github/workflows/docs.yml (2 changed lines)

```diff
@@ -23,7 +23,7 @@ jobs:
       - name: Install dependencies
         run: |
           python3 -m pip install jupyter quartodoc
-          python3 -m pip install -e . --no-deps
+          python3 -m pip install -e .
       - name: Build autodoc
         run: quartodoc build
       - name: Publish to GitHub Pages (and render)
```
.github/workflows/lint.yml (1 changed line)

```diff
@@ -9,6 +9,7 @@ on:
       - '.github/workflows/*.yml'
      - "*.[q]md"
      - "examples/**/*.y[a]?ml"
+      - ".pre-commit-config.yaml"
  workflow_dispatch:

jobs:
```
.github/workflows/main.yml (8 changed lines)

```diff
@@ -29,12 +29,12 @@ jobs:
          - cuda: 126
            cuda_version: 12.6.3
            python_version: "3.11"
-            pytorch: 2.7.0
+            pytorch: 2.7.1
            axolotl_extras:
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.11"
-            pytorch: 2.7.0
+            pytorch: 2.7.1
            axolotl_extras:
    runs-on: axolotl-gpu-runner
    steps:
@@ -97,12 +97,12 @@ jobs:
          - cuda: 126
            cuda_version: 12.6.3
            python_version: "3.11"
-            pytorch: 2.7.0
+            pytorch: 2.7.1
            axolotl_extras:
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.11"
-            pytorch: 2.7.0
+            pytorch: 2.7.1
            axolotl_extras:
    runs-on: axolotl-gpu-runner
    steps:
```
.github/workflows/multi-gpu-e2e.yml (4 changed lines)

```diff
@@ -43,7 +43,7 @@ jobs:
          - cuda: 126
            cuda_version: 12.6.3
            python_version: "3.11"
-            pytorch: 2.7.0
+            pytorch: 2.7.1
            axolotl_extras:
            num_gpus: 2
            nightly_build: "true"
@@ -59,7 +59,7 @@ jobs:
      - name: Install Modal
        run: |
          python -m pip install --upgrade pip
-          pip install modal==0.71.8 jinja2
+          pip install modal==1.0.2 jinja2
      - name: Update env vars
        run: |
          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
```
.github/workflows/precommit-autoupdate.yml (9 changed lines)

````diff
@@ -25,7 +25,6 @@ jobs:
          pre-commit autoupdate
          if [[ -n $(git status --porcelain) ]]; then
            echo "changes=true" >> $GITHUB_OUTPUT
-            git diff .pre-commit-config.yaml > pre-commit-update.diff
          fi

      - name: Create Pull Request
@@ -39,11 +38,3 @@ jobs:
          commit-message: "chore: update pre-commit hooks"
          body: |
            Automated PR to update pre-commit hooks to their latest versions.
-
-            <details>
-            <summary>Changes:</summary>
-
-            ```diff
-            ${{ steps.update.outputs.diff }}
-            ```
-
-            </details>
````
.github/workflows/preview-docs.yml (6 changed lines)

```diff
@@ -8,7 +8,9 @@ on:
    paths:
      - '**/*.md'   # any Markdown file
      - '**/*.qmd'  # any Quarto file
-      - '_quarto.yaml'
+      - '_quarto.yml'
+      - docs/scripts/generate_config_docs.py
+      - src/axolotl/utils/schemas/**.py

permissions:
  checks: write
@@ -38,7 +40,7 @@ jobs:
      - name: Install dependencies
        run: |
          python3 -m pip install jupyter quartodoc
-          python3 -m pip install -e . --no-deps
+          python3 -m pip install -e .

      - name: Build autodoc
        run: quartodoc build
```
.github/workflows/tests.yml (138 changed lines)

```diff
@@ -44,98 +44,6 @@ jobs:
    env:
      SKIP: no-commit-to-branch

-  # preload-cache:
-  #   name: Preload HF cache
-  #   runs-on: ubuntu-latest
-  #   strategy:
-  #     fail-fast: false
-  #     matrix:
-  #       python_version: ["3.11"]
-  #       pytorch_version: ["2.6.0"]
-  #   timeout-minutes: 20
-  #
-  #   env:
-  #     AXOLOTL_IS_CI_CACHE_PRELOAD: "1"
-  #
-  #   steps:
-  #     - name: Check out repository code
-  #       uses: actions/checkout@v4
-  #
-  #     - name: Restore HF cache
-  #       id: hf-cache-restore
-  #       uses: actions/cache/restore@v4
-  #       with:
-  #         path: |
-  #           /home/runner/.cache/huggingface/hub/datasets--*
-  #           /home/runner/.cache/huggingface/hub/models--*
-  #         key: ${{ runner.os }}-hf-hub-cache-v2
-  #
-  #     - name: Restore Cache from S3
-  #       id: hf-cache-restore-s3
-  #       run: |
-  #         mkdir -p /home/runner/.cache/huggingface/hub
-  #         curl -L https://d1dttdx32dkk5p.cloudfront.net/hf-cache.tar.zst | tar -xf - -C /home/runner/.cache/huggingface/hub/ --use-compress-program unzstd
-  #
-  #     - name: Setup Python
-  #       uses: actions/setup-python@v5
-  #       with:
-  #         python-version: ${{ matrix.python_version }}
-  #         cache: 'pip' # caching pip dependencies
-  #
-  #     - name: upgrade pip
-  #       run: |
-  #         pip3 install --upgrade pip
-  #         pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
-  #
-  #     - name: Install PyTorch
-  #       run: |
-  #         pip3 install torch==${{ matrix.pytorch_version }}
-  #
-  #     - name: Install dependencies
-  #       run: |
-  #         pip3 show torch
-  #         pip3 install --no-build-isolation -U -e .
-  #         python scripts/unsloth_install.py | sh
-  #         python scripts/cutcrossentropy_install.py | sh
-  #         pip3 install -r requirements-dev.txt -r requirements-tests.txt
-  #
-  #     - name: Make sure PyTorch version wasn't clobbered
-  #       run: |
-  #         python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"
-  #
-  #     - name: Ensure axolotl CLI was installed
-  #       run: |
-  #         axolotl --help
-  #
-  #     - name: Pre-Download dataset fixture
-  #       run: |
-  #         huggingface-cli download --repo-type=dataset axolotl-ai-internal/axolotl-oss-dataset-fixtures
-  #
-  #     - name: Run tests
-  #       run: |
-  #         pytest -v tests/conftest.py
-  #
-  #     - name: Upload coverage to Codecov
-  #       uses: codecov/codecov-action@v5
-  #       with:
-  #         token: ${{ secrets.CODECOV_TOKEN }}
-  #         files: ./coverage.xml
-  #         flags: unittests,pytorch-${{ matrix.pytorch_version }}
-  #         fail_ci_if_error: false
-  #
-  #     - name: cleanup pip cache
-  #       run: |
-  #         find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
-  #
-  #     - name: Save HF cache
-  #       id: hf-cache
-  #       uses: actions/cache/save@v4
-  #       with:
-  #         path: |
-  #           /home/runner/.cache/huggingface/hub/datasets--*
-  #           /home/runner/.cache/huggingface/hub/models--*
-  #         key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
-
  pytest:
    name: PyTest
    runs-on: ubuntu-latest
@@ -144,22 +52,13 @@ jobs:
      fail-fast: false
      matrix:
        python_version: ["3.11"]
-        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
+        pytorch_version: ["2.5.1", "2.6.0", "2.7.1"]
    timeout-minutes: 20

    steps:
      - name: Check out repository code
        uses: actions/checkout@v4

-      # - name: Restore HF cache
-      #   id: hf-cache-restore
-      #   uses: actions/cache/restore@v4
-      #   with:
-      #     path: |
-      #       /home/runner/.cache/huggingface/hub/datasets--*
-      #       /home/runner/.cache/huggingface/hub/models--*
-      #     key: ${{ runner.os }}-hf-hub-cache-v2
-
      - name: Restore Cache from S3
        id: hf-cache-restore-s3
        run: |
@@ -222,27 +121,17 @@ jobs:
  pytest-sdist:
    name: PyTest from Source Dist
    runs-on: ubuntu-latest
-    # needs: [preload-cache]
    strategy:
      fail-fast: false
      matrix:
        python_version: ["3.11"]
-        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
+        pytorch_version: ["2.5.1", "2.6.0", "2.7.1"]
    timeout-minutes: 20

    steps:
      - name: Check out repository code
        uses: actions/checkout@v4

-      # - name: Restore HF cache
-      #   id: hf-cache-restore
-      #   uses: actions/cache/restore@v4
-      #   with:
-      #     path: |
-      #       /home/runner/.cache/huggingface/hub/datasets--*
-      #       /home/runner/.cache/huggingface/hub/models--*
-      #     key: ${{ runner.os }}-hf-hub-cache-v2
-
      - name: Restore Cache from S3
        id: hf-cache-restore-s3
        run: |
@@ -299,7 +188,7 @@ jobs:
    if: ${{ ! contains(github.event.commits[0].message, '[skip e2e]') && github.repository_owner == 'axolotl-ai-cloud' }}
    # this job needs to be run on self-hosted GPU runners...
    runs-on: [self-hosted, modal]
-    timeout-minutes: 90
+    timeout-minutes: 120
    needs: [pre-commit, pytest, pytest-sdist]

    strategy:
@@ -312,6 +201,13 @@ jobs:
            pytorch: 2.6.0
            num_gpus: 1
            axolotl_extras: vllm
+          - cuda: 126
+            cuda_version: 12.6.3
+            python_version: "3.11"
+            pytorch: 2.6.0
+            num_gpus: 1
+            axolotl_extras:
+            dockerfile: "Dockerfile-uv.jinja"
    steps:
      - name: Checkout
        uses: actions/checkout@v4
@@ -322,7 +218,7 @@ jobs:
      - name: Install Modal
        run: |
          python -m pip install --upgrade pip
-          pip install modal==0.71.8 jinja2
+          pip install modal==1.0.2 jinja2
      - name: Update env vars
        run: |
          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
@@ -333,6 +229,7 @@ jobs:
          echo "MODAL_IMAGE_BUILDER_VERSION=2024.10" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
+          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile.jinja'}}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        run: |
          modal run cicd.e2e_tests
@@ -341,7 +238,7 @@ jobs:
    if: github.repository_owner == 'axolotl-ai-cloud'
    # this job needs to be run on self-hosted GPU runners...
    runs-on: [self-hosted, modal]
-    timeout-minutes: 90
+    timeout-minutes: 120
    # Only run the remainder of the matrix if the first e2e check passed;
    # this is to save on wasted compute costs for known failures that get caught in the first run
    needs: [pre-commit, pytest, docker-e2e-tests-1st]
@@ -365,13 +262,13 @@ jobs:
          - cuda: 126
            cuda_version: 12.6.3
            python_version: "3.11"
-            pytorch: 2.7.0
+            pytorch: 2.7.1
            num_gpus: 1
            axolotl_extras:
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.11"
-            pytorch: 2.7.0
+            pytorch: 2.7.1
            num_gpus: 1
            axolotl_extras:
    steps:
@@ -384,7 +281,7 @@ jobs:
      - name: Install Modal
        run: |
          python -m pip install --upgrade pip
-          pip install modal==0.71.8 jinja2
+          pip install modal==1.0.2 jinja2
      - name: Update env vars
        run: |
          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
@@ -395,6 +292,7 @@ jobs:
          echo "MODAL_IMAGE_BUILDER_VERSION=2024.10" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
+          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile.jinja'}}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        run: |
          modal run cicd.e2e_tests
@@ -424,7 +322,7 @@ jobs:
      - name: Install Modal
        run: |
          python -m pip install --upgrade pip
-          pip install modal==0.71.8 jinja2
+          pip install modal==1.0.2 jinja2
      - name: Update env vars
        run: |
          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
```
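The new `E2E_DOCKERFILE` variable is how the added uv matrix entry reaches the Modal CI harness: the `cicd` module picks its Jinja Dockerfile template from that variable, falling back to the classic pip-based `Dockerfile.jinja` (the same selection logic appears in the `cicd` diff further down). A standalone sketch of that mechanism, assuming the templates live in `cicd/`:

```python
import os

import jinja2

# Pick the Dockerfile template named by E2E_DOCKERFILE, falling back to
# the classic pip-based template; this mirrors the per-matrix-entry
# selection the workflow drives via the env var.
template_env = jinja2.Environment(
    loader=jinja2.FileSystemLoader(searchpath="cicd"),
    autoescape=jinja2.select_autoescape(),
)
dockerfile = os.environ.get("E2E_DOCKERFILE", "Dockerfile.jinja")
df_template = template_env.get_template(dockerfile)
print(f"rendering {dockerfile}")
```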
.pre-commit-config.yaml

```diff
@@ -19,15 +19,15 @@ repos:
    hooks:
      - id: isort
  - repo: https://github.com/PyCQA/flake8
-    rev: 7.1.2
+    rev: 7.2.0
    hooks:
      - id: flake8
  - repo: https://github.com/pylint-dev/pylint
-    rev: v3.3.6
+    rev: v3.3.7
    hooks:
      - id: pylint
  - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.15.0
+    rev: v1.16.0
    hooks:
      - id: mypy
        additional_dependencies:
```
```diff
@@ -328,7 +328,7 @@ The following optimizers are supported:
 - Use `gradient_checkpointing: true` to reduce memory usage
 - Adjust `micro_batch_size` and `gradient_accumulation_steps` based on your GPU memory

-For more detailed information, please refer to the [documentation](https://axolotl-ai-cloud.github.io/axolotl/docs/config.html).
+For more detailed information, please refer to the [documentation](https://axolotl-ai-cloud.github.io/axolotl/docs/config-reference.html).

 ### Errors:
```
```diff
@@ -242,16 +242,12 @@
 # early_stopping_patience: 3

 # # Specify a scheduler and kwargs to use with the optimizer
-# lr_scheduler: # 'one_cycle' | 'log_sweep' | empty for cosine
+# lr_scheduler: # 'one_cycle' | empty for cosine
 # lr_scheduler_kwargs:

 # # For one_cycle optim
 # lr_div_factor: # Learning rate div factor

-# # For log_sweep optim
-# log_sweep_min_lr:
-# log_sweep_max_lr:
-
 # # Specify optimizer
 # # Valid values are driven by the Transformers OptimizerNames class, see:
 # # https://github.com/huggingface/transformers/blob/95b374952dc27d8511541d6f5a4e22c9ec11fb24/src/transformers/training_args.py#L134
```
README.md (78 changed lines)

```diff
@@ -22,28 +22,32 @@
   <img src="https://github.com/axolotl-ai-cloud/axolotl/actions/workflows/multi-gpu-e2e.yml/badge.svg" alt="multigpu-semi-weekly tests">
 </p>

-Axolotl is a tool designed to streamline post-training for various AI models.
-Post-training refers to any modifications or additional training performed on
-pre-trained models - including full model fine-tuning, parameter-efficient tuning (like
-LoRA and QLoRA), supervised fine-tuning (SFT), instruction tuning, and alignment
-techniques. With support for multiple model architectures and training configurations,
-Axolotl makes it easy to get started with these techniques.
-
-Axolotl is designed to work with YAML config files that contain everything you need to
-preprocess a dataset, train or fine-tune a model, run model inference or evaluation,
-and much more.
+## 🎉 Latest Updates
+
+- 2025/06: Magistral with mistral-common tokenizer support has been added to Axolotl. See [examples](https://github.com/axolotl-ai-cloud/axolotl/tree/main/examples/magistral) to start training your own Magistral models with Axolotl!
+- 2025/05: Quantization Aware Training (QAT) support has been added to Axolotl. Explore the [docs](https://docs.axolotl.ai/docs/qat.html) to learn more!
+- 2025/04: Llama 4 support has been added in Axolotl. See [examples](https://github.com/axolotl-ai-cloud/axolotl/tree/main/examples/llama-4) to start training your own Llama 4 models with Axolotl's linearized version!
+- 2025/03: Axolotl has implemented Sequence Parallelism (SP) support. Read the [blog](https://huggingface.co/blog/axolotl-ai-co/long-context-with-sequence-parallelism-in-axolotl) and [docs](https://docs.axolotl.ai/docs/sequence_parallelism.html) to learn how to scale your context length when fine-tuning.
+- 2025/03: (Beta) Fine-tuning Multimodal models is now supported in Axolotl. Check out the [docs](https://docs.axolotl.ai/docs/multimodal.html) to fine-tune your own!
+- 2025/02: Axolotl has added LoRA optimizations to reduce memory usage and improve training speed for LoRA and QLoRA in single GPU and multi-GPU training (DDP and DeepSpeed). Jump into the [docs](https://docs.axolotl.ai/docs/lora_optims.html) to give it a try.
+- 2025/02: Axolotl has added GRPO support. Dive into our [blog](https://huggingface.co/blog/axolotl-ai-co/training-llms-w-interpreter-feedback-wasm) and [GRPO example](https://github.com/axolotl-ai-cloud/grpo_code) and have some fun!
+- 2025/01: Axolotl has added Reward Modelling / Process Reward Modelling fine-tuning support. See [docs](https://docs.axolotl.ai/docs/reward_modelling.html).
+
+## ✨ Overview
+
+Axolotl is a tool designed to streamline post-training for various AI models.

 Features:

-- Train various Huggingface models such as llama, pythia, falcon, mpt
-- Supports fullfinetune, lora, qlora, relora, and gptq
-- Customize configurations using a simple yaml file or CLI overwrite
-- Load different dataset formats, use custom formats, or bring your own tokenized datasets
-- Integrated with [xformers](https://github.com/facebookresearch/xformers), flash attention, [liger kernel](https://github.com/linkedin/Liger-Kernel), rope scaling, and multipacking
-- Works with single GPU or multiple GPUs via FSDP or Deepspeed
-- Easily run with Docker locally or on the cloud
-- Log results and optionally checkpoints to wandb, mlflow or Comet
-- And more!
+- **Multiple Model Support**: Train various models like LLaMA, Mistral, Mixtral, Pythia, and more. We are compatible with HuggingFace transformers causal language models.
+- **Training Methods**: Full fine-tuning, LoRA, QLoRA, GPTQ, QAT, Preference Tuning (DPO, IPO, KTO, ORPO), RL (GRPO), Multimodal, and Reward Modelling (RM) / Process Reward Modelling (PRM).
+- **Easy Configuration**: Re-use a single YAML file between dataset preprocess, training, evaluation, quantization, and inference.
+- **Performance Optimizations**: [Multipacking](https://docs.axolotl.ai/docs/multipack.html), [Flash Attention](https://github.com/Dao-AILab/flash-attention), [Xformers](https://github.com/facebookresearch/xformers), [Flex Attention](https://pytorch.org/blog/flexattention/), [Liger Kernel](https://github.com/linkedin/Liger-Kernel), [Cut Cross Entropy](https://github.com/apple/ml-cross-entropy/tree/main), Sequence Parallelism (SP), LoRA optimizations, Multi-GPU training (FSDP1, FSDP2, DeepSpeed), Multi-node training (Torchrun, Ray), and many more!
+- **Flexible Dataset Handling**: Load from local, HuggingFace, and cloud (S3, Azure, GCP, OCI) datasets.
+- **Cloud Ready**: We ship [Docker images](https://hub.docker.com/u/axolotlai) and also [PyPI packages](https://pypi.org/project/axolotl/) for use on cloud platforms and local hardware.

 ## 🚀 Quick Start

@@ -51,7 +55,7 @@ Features:

 - NVIDIA GPU (Ampere or newer for `bf16` and Flash Attention) or AMD GPU
 - Python 3.11
-- PyTorch ≥2.4.1
+- PyTorch ≥2.5.1

 ### Installation

@@ -81,19 +85,12 @@ axolotl train examples/llama-3/lora-1b.yml

 That's it! Check out our [Getting Started Guide](https://docs.axolotl.ai/docs/getting-started.html) for a more detailed walkthrough.

-## ✨ Key Features
-
-- **Multiple Model Support**: Train various models like LLaMA, Mistral, Mixtral, Pythia, and more
-- **Training Methods**: Full fine-tuning, LoRA, QLoRA, and more
-- **Easy Configuration**: Simple YAML files to control your training setup
-- **Performance Optimizations**: Flash Attention, xformers, multi-GPU training
-- **Flexible Dataset Handling**: Use various formats and custom datasets
-- **Cloud Ready**: Run on cloud platforms or local hardware
-
 ## 📚 Documentation

 - [Installation Options](https://docs.axolotl.ai/docs/installation.html) - Detailed setup instructions for different environments
-- [Configuration Guide](https://docs.axolotl.ai/docs/config.html) - Full configuration options and examples
+- [Configuration Guide](https://docs.axolotl.ai/docs/config-reference.html) - Full configuration options and examples
+- [Dataset Loading](https://docs.axolotl.ai/docs/dataset_loading.html) - Loading datasets from various sources
 - [Dataset Guide](https://docs.axolotl.ai/docs/dataset-formats/) - Supported formats and how to use them
 - [Multi-GPU Training](https://docs.axolotl.ai/docs/multi-gpu.html)
 - [Multi-Node Training](https://docs.axolotl.ai/docs/multi-node.html)
@@ -112,31 +109,6 @@ That's it! Check out our [Getting Started Guide](https://docs.axolotl.ai/docs/ge

 Contributions are welcome! Please see our [Contributing Guide](https://github.com/axolotl-ai-cloud/axolotl/blob/main/.github/CONTRIBUTING.md) for details.

-## Supported Models
-
-|             | fp16/fp32 | lora | qlora | gptq | gptq w/flash attn | flash attn | xformers attn |
-|-------------|:----------|:-----|-------|------|-------------------|------------|---------------|
-| llama       | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Mistral     | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Mixtral-MoE | ✅ | ✅ | ✅ | ❓ | ❓ | ❓ | ❓ |
-| Mixtral8X22 | ✅ | ✅ | ✅ | ❓ | ❓ | ❓ | ❓ |
-| Pythia      | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❓ |
-| cerebras    | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❓ |
-| btlm        | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❓ |
-| mpt         | ✅ | ❌ | ❓ | ❌ | ❌ | ❌ | ❓ |
-| falcon      | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❓ |
-| gpt-j       | ✅ | ✅ | ✅ | ❌ | ❌ | ❓ | ❓ |
-| XGen        | ✅ | ❓ | ✅ | ❓ | ❓ | ❓ | ✅ |
-| phi         | ✅ | ✅ | ✅ | ❓ | ❓ | ❓ | ❓ |
-| RWKV        | ✅ | ❓ | ❓ | ❓ | ❓ | ❓ | ❓ |
-| Qwen        | ✅ | ✅ | ✅ | ❓ | ❓ | ❓ | ❓ |
-| Gemma       | ✅ | ✅ | ✅ | ❓ | ❓ | ✅ | ❓ |
-| Jamba       | ✅ | ✅ | ✅ | ❓ | ❓ | ✅ | ❓ |
-
-✅: supported
-❌: not supported
-❓: untested
-
 ## ❤️ Sponsors

 Thank you to our sponsors who help make Axolotl possible:
```
_quarto.yml (31 changed lines)

```diff
@@ -1,5 +1,6 @@
 project:
   type: website
+  pre-render: docs/scripts/generate_config_docs.py

 quartodoc:
   dir: docs/api
@@ -17,7 +18,9 @@ quartodoc:
      - convert
      - prompt_tokenizers
      - logging_config
-      - core.trainer_builder
+      - core.builders.base
+      - core.builders.causal
+      - core.builders.rl
      - core.training_args
      - core.chat.messages
      - core.chat.format.chatml
@@ -43,6 +46,7 @@ quartodoc:
      - cli.vllm_serve
      - cli.cloud.base
      - cli.cloud.modal_
+      - cli.quantize
  - title: Trainers
    desc: Training implementations
    contents:
@@ -54,6 +58,15 @@ quartodoc:
      - core.trainers.grpo.trainer
      - core.trainers.grpo.sampler
      - core.trainers.utils
+  - title: Model Loading
+    desc: Functionality for loading and patching models, tokenizers, etc.
+    contents:
+      - loaders.model
+      - loaders.tokenizer
+      - loaders.processor
+      - loaders.adapter
+      - loaders.patch_manager
+      - loaders.constants
  - title: Mixins
    desc: Mixin classes for augmenting trainers
    contents:
@@ -117,17 +130,16 @@ quartodoc:
      - monkeypatch.trainer_fsdp_optim
      - monkeypatch.transformers_fa_utils
      - monkeypatch.unsloth_
-      - monkeypatch.attention.mllama
      - monkeypatch.data.batch_dataset_fetcher
      - monkeypatch.mixtral
+      - monkeypatch.gradient_checkpointing.offload_cpu
+      - monkeypatch.gradient_checkpointing.offload_disk
  - title: Utils
    desc: Utility functions
    contents:
-      - utils.models
      - utils.tokenization
      - utils.chat_templates
      - utils.lora
-      - utils.lora_embeddings
      - utils.model_shard_quant
      - utils.bench
      - utils.freeze
@@ -138,8 +150,7 @@ quartodoc:
      - utils.optimizers.adopt
      - utils.data.pretraining
      - utils.data.sft
-      - utils.gradient_checkpointing.offload_cpu
-      - utils.gradient_checkpointing.offload_disk
+      - utils.quantization
  - title: Schemas
    desc: Pydantic data models for Axolotl config
    contents:
@@ -189,12 +200,14 @@ quartodoc:
      - utils.callbacks.lisa
      - utils.callbacks.mlflow_
      - utils.callbacks.comet_
+      - utils.callbacks.qat

website:
  title: "Axolotl"
  description: "We make fine-tuning accessible, scalable, and fun"
  favicon: favicon.jpg

+  google-analytics: "G-9KYCVJBNMQ"

  navbar:
    logo: image/axolotl_logo_digital_white.svg
    title: false
@@ -223,7 +236,7 @@ website:
        - docs/installation.qmd
        - docs/inference.qmd
        - docs/cli.qmd
-        - docs/config.qmd
+        - docs/config-reference.qmd
      - text: "API Reference"
        href: docs/api

@@ -247,6 +260,8 @@ website:
        - docs/lr_groups.qmd
        - docs/lora_optims.qmd
        - docs/dataset_loading.qmd
+        - docs/qat.qmd
+        - docs/quantize.qmd

      - section: "Core Concepts"
        contents:
```
cicd/Dockerfile-uv.jinja (new file, 52 lines)

```diff
@@ -0,0 +1,52 @@
+FROM axolotlai/axolotl-base-uv:{{ BASE_TAG }}
+
+ENV TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
+ENV AXOLOTL_EXTRAS="{{ AXOLOTL_EXTRAS }}"
+ENV AXOLOTL_ARGS="{{ AXOLOTL_ARGS }}"
+ENV CUDA="{{ CUDA }}"
+ENV PYTORCH_VERSION="{{ PYTORCH_VERSION }}"
+ENV GITHUB_REF="{{ GITHUB_REF }}"
+ENV GITHUB_SHA="{{ GITHUB_SHA }}"
+ENV NIGHTLY_BUILD="{{ NIGHTLY_BUILD }}"
+ENV HF_HOME="{{ HF_HOME }}"
+
+RUN apt-get update && \
+    apt-get install -y --allow-change-held-packages vim curl nano libnccl2 libnccl-dev
+
+WORKDIR /workspace
+
+RUN git clone --depth=1 https://github.com/axolotl-ai-cloud/axolotl.git
+
+WORKDIR /workspace/axolotl
+
+RUN git fetch origin +$GITHUB_REF && \
+    git checkout FETCH_HEAD
+
+# If AXOLOTL_EXTRAS is set, append it in brackets
+RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
+        sed -i 's#^transformers.*#transformers @ git+https://github.com/huggingface/transformers.git@main#' requirements.txt; \
+        sed -i 's#^peft.*#peft @ git+https://github.com/huggingface/peft.git@main#' requirements.txt; \
+        sed -i 's#^accelerate.*#accelerate @ git+https://github.com/huggingface/accelerate.git@main#' requirements.txt; \
+        sed -i 's#^trl.*#trl @ git+https://github.com/huggingface/trl.git@main#' requirements.txt; \
+        sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt; \
+    fi
+
+RUN uv pip install packaging==23.2 setuptools==75.8.0
+RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
+        uv pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
+    else \
+        uv pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray] $AXOLOTL_ARGS; \
+    fi
+
+RUN python scripts/unsloth_install.py --uv | sh
+RUN python scripts/cutcrossentropy_install.py --uv | sh
+
+# So we can test the Docker image
+RUN uv pip install -r requirements-dev.txt -r requirements-tests.txt
+
+# fix so that git fetch/pull from remote works
+RUN git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
+    git config --get remote.origin.fetch
+
+# helper for huggingface-login cli
+RUN git config --global credential.helper store
```
```diff
@@ -6,7 +6,7 @@ from .single_gpu import GPU_CONFIG, VOLUME_CONFIG, app, cicd_image, run_cmd
 @app.function(
     image=cicd_image,
     gpu=GPU_CONFIG,
-    timeout=90 * 60,  # 90 min
+    timeout=120 * 60,  # 90 min
     cpu=8.0,
     memory=131072,
     volumes=VOLUME_CONFIG,
```
```diff
@@ -24,9 +24,9 @@ df_template = template_env.get_template("Dockerfile.jinja")
 df_args = {
     "AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", ""),
     "AXOLOTL_ARGS": os.environ.get("AXOLOTL_ARGS", ""),
-    "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.4.1"),
-    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.11-cu121-2.4.1"),
-    "CUDA": os.environ.get("CUDA", "121"),
+    "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.5.1"),
+    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.11-cu124-2.5.1"),
+    "CUDA": os.environ.get("CUDA", "124"),
     "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
     "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
     "CODECOV_TOKEN": os.environ.get("CODECOV_TOKEN", ""),
@@ -55,7 +55,7 @@ VOLUME_CONFIG = {
 }

 N_GPUS = int(os.environ.get("N_GPUS", 2))
-GPU_CONFIG = modal.gpu.H100(count=N_GPUS)
+GPU_CONFIG = f"H100:{N_GPUS}"


 def run_cmd(cmd: str, run_folder: str):
@@ -69,7 +69,7 @@ def run_cmd(cmd: str, run_folder: str):
 @app.function(
     image=cicd_image,
     gpu=GPU_CONFIG,
-    timeout=90 * 60,
+    timeout=120 * 60,
     cpu=16.0,
     memory=131072 * N_GPUS,
     volumes=VOLUME_CONFIG,
```
```diff
@@ -8,8 +8,9 @@ import tempfile

 import jinja2
 import modal
+import modal.experimental
 from jinja2 import select_autoescape
-from modal import App, Image
+from modal import App

 cicd_path = pathlib.Path(__file__).parent.resolve()

@@ -17,14 +18,15 @@ template_loader = jinja2.FileSystemLoader(searchpath=cicd_path)
 template_env = jinja2.Environment(
     loader=template_loader, autoescape=select_autoescape()
 )
-df_template = template_env.get_template("Dockerfile.jinja")
+dockerfile = os.environ.get("E2E_DOCKERFILE", "Dockerfile.jinja")
+df_template = template_env.get_template(dockerfile)

 df_args = {
     "AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", ""),
     "AXOLOTL_ARGS": os.environ.get("AXOLOTL_ARGS", ""),
-    "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.4.1"),
-    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.11-cu121-2.4.1"),
-    "CUDA": os.environ.get("CUDA", "121"),
+    "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.5.1"),
+    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.11-cu124-2.5.1"),
+    "CUDA": os.environ.get("CUDA", "124"),
     "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
     "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
     "NIGHTLY_BUILD": os.environ.get("NIGHTLY_BUILD", ""),
@@ -38,11 +40,11 @@ temp_dir = tempfile.mkdtemp()
 with open(pathlib.Path(temp_dir) / "Dockerfile", "w", encoding="utf-8") as f:
     f.write(dockerfile_contents)

-cicd_image = Image.from_dockerfile(
+cicd_image = modal.experimental.raw_dockerfile_image(
     pathlib.Path(temp_dir) / "Dockerfile",
-    context_mount=None,
+    # context_mount=None,
     force_build=True,
-    gpu="A10G",
+    # gpu="A10G",
 ).env(df_args)

 app = App("Axolotl CI/CD", secrets=[])
@@ -55,7 +57,7 @@ VOLUME_CONFIG = {
 }

 N_GPUS = int(os.environ.get("N_GPUS", 1))
-GPU_CONFIG = modal.gpu.L40S(count=N_GPUS)
+GPU_CONFIG = f"L40S:{N_GPUS}"


 def run_cmd(cmd: str, run_folder: str):
```
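The Modal-related edits above track the Modal 1.0 migration: the removed `modal.gpu.H100(...)` and `modal.gpu.L40S(...)` helper classes give way to plain `"TYPE:COUNT"` strings, and `Image.from_dockerfile` (whose `context_mount` and `gpu` arguments are dropped here) is replaced by `modal.experimental.raw_dockerfile_image`. A minimal sketch of the string-based GPU spec, assuming Modal >= 1.0; the app and function names are illustrative, not from the repo:

```python
import modal

app = modal.App("gpu-spec-demo")  # hypothetical app name, for illustration only

# Modal >= 1.0 accepts the GPU request as a plain "TYPE:COUNT" string,
# in place of the removed modal.gpu.* classes.
N_GPUS = 2
GPU_CONFIG = f"L40S:{N_GPUS}"


@app.function(gpu=GPU_CONFIG, timeout=120 * 60)
def list_gpus() -> str:
    # Runs remotely on the requested GPUs; report what the driver sees.
    import subprocess

    return subprocess.check_output(["nvidia-smi", "-L"], text=True)


@app.local_entrypoint()
def main():
    print(list_gpus.remote())
```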
deepspeed_configs/zero2_torch_compile.json (new file, 31 lines)

```diff
@@ -0,0 +1,31 @@
+{
+  "compile": {
+    "disable": false,
+    "backend": "inductor"
+  },
+  "zero_optimization": {
+    "stage": 2,
+    "offload_optimizer": {
+      "device": "cpu"
+    },
+    "contiguous_gradients": true,
+    "overlap_comm": true
+  },
+  "bf16": {
+    "enabled": "auto"
+  },
+  "fp16": {
+    "enabled": "auto",
+    "auto_cast": false,
+    "loss_scale": 0,
+    "initial_scale_power": 32,
+    "loss_scale_window": 1000,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
+  "train_batch_size": "auto",
+  "train_micro_batch_size_per_gpu": "auto",
+  "wall_clock_breakdown": false
+}
```
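A quick sanity-check sketch for the new config: it just parses the JSON above and confirms the two settings the file exists to combine (ZeRO stage 2 with optimizer CPU offload, plus DeepSpeed's `torch.compile` integration via the inductor backend). The relative path assumes the repository root as the working directory:

```python
import json

# Parse the new DeepSpeed config and confirm the settings it combines:
# ZeRO stage 2 (optimizer state offloaded to CPU) and torch.compile via
# the inductor backend.
with open("deepspeed_configs/zero2_torch_compile.json", encoding="utf-8") as f:
    ds_cfg = json.load(f)

assert ds_cfg["compile"] == {"disable": False, "backend": "inductor"}
assert ds_cfg["zero_optimization"]["stage"] == 2
assert ds_cfg["zero_optimization"]["offload_optimizer"]["device"] == "cpu"
print("zero2_torch_compile.json: ZeRO-2 + inductor compile enabled")
```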
```diff
@@ -38,6 +38,6 @@ RUN git lfs install --skip-repo && \
     # The base image ships with `pydantic==1.8.2` which is not working
     pip3 install -U --no-cache-dir pydantic==1.10.10

-RUN if [ "$PYTORCH_VERSION" = "2.7.0" ] ; then \
+RUN if [ "$PYTORCH_VERSION" = "2.7.1" ] ; then \
         pip3 install flash-attn==2.7.4.post1; \
     fi
```
```diff
@@ -29,7 +29,7 @@ ENV PATH="/root/miniconda3/envs/py${PYTHON_VERSION}/bin:${PATH}"
 WORKDIR /workspace

 RUN python3 -m pip install --upgrade pip && pip3 install packaging && \
-    python3 -m pip install --no-cache-dir -U torch==2.7.0 --extra-index-url https://download.pytorch.org/whl/test/cu$CUDA && \
+    python3 -m pip install --no-cache-dir -U torch==2.7.1 --extra-index-url https://download.pytorch.org/whl/test/cu$CUDA && \
     python3 -m pip install --no-cache-dir "causal_conv1d @ git+https://github.com/Dao-AILab/causal-conv1d.git@main" && \
     python3 -m pip install --no-cache-dir "mamba_ssm @ git+https://github.com/state-spaces/mamba.git@main"
```
docker/Dockerfile-uv-base (new file, 40 lines)

```diff
@@ -0,0 +1,40 @@
+ARG CUDA_VERSION="12.6.3"
+ARG CUDNN_VERSION=""
+ARG UBUNTU_VERSION="22.04"
+ARG MAX_JOBS=4
+
+FROM nvidia/cuda:$CUDA_VERSION-cudnn$CUDNN_VERSION-devel-ubuntu$UBUNTU_VERSION AS base-builder
+
+ARG PYTHON_VERSION="3.11"
+ARG PYTORCH_VERSION="2.6.0"
+ARG CUDA="126"
+ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
+
+ENV PYTHON_VERSION=$PYTHON_VERSION
+ENV TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST
+ENV UV_TORCH_BACKEND="cu${CUDA}"
+
+RUN apt-get update \
+    && apt-get install -y wget git build-essential ninja-build git-lfs libaio-dev pkg-config curl && rm -rf /var/lib/apt/lists/* \
+    && git lfs install --skip-repo \
+    && curl -LsSf https://astral.sh/uv/install.sh | sh
+
+ENV PATH="/root/.local/bin:${PATH}"
+
+RUN uv python install ${PYTHON_VERSION}
+
+WORKDIR /workspace
+
+RUN uv venv --no-project --relocatable axolotl-venv
+
+ENV PATH="/workspace/axolotl-venv/bin:${PATH}"
+
+RUN uv pip install packaging setuptools wheel psutil \
+    && uv pip install torch==${PYTORCH_VERSION} \
+    && uv pip install --no-build-isolation "causal_conv1d @ git+https://github.com/Dao-AILab/causal-conv1d.git@main" \
+    && uv pip install "mamba_ssm @ git+https://github.com/state-spaces/mamba.git@main" \
+    && uv pip install awscli pydantic
+
+RUN if [ "$PYTORCH_VERSION" = "2.7.1" ] ; then \
+        uv pip install --no-build-isolation flash-attn==2.7.4.post1; \
+    fi
```
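For local experimentation, the image could be built roughly as below (a sketch: the tag is hypothetical, and the `--build-arg` values simply mirror the cu128/2.7.1 matrix entry the `build-base-uv` job passes in):

```python
import subprocess

# Hypothetical local build of the uv base image, passing the same build
# args the build-base-uv CI matrix supplies for its cu128 entry.
subprocess.run(
    [
        "docker", "build",
        "-f", "docker/Dockerfile-uv-base",
        "--build-arg", "CUDA_VERSION=12.8.1",
        "--build-arg", "CUDA=128",
        "--build-arg", "PYTHON_VERSION=3.11",
        "--build-arg", "PYTORCH_VERSION=2.7.1",
        "-t", "axolotl-base-uv:local",  # hypothetical tag
        ".",
    ],
    check=True,
)
```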
docs/.gitignore (1 changed line)

```diff
@@ -2,3 +2,4 @@
 _site/
 /api/*.qmd
 /api/*.html
+config-reference.qmd
```
docs/cli.qmd (10 changed lines)

````diff
@@ -209,6 +209,16 @@ axolotl delinearize-llama4 --model path/to/model_dir --output path/to/output_dir

 This would be necessary to use with other frameworks. If you have an adapter, merge it with the non-quantized linearized model before delinearizing.

+### quantize
+
+Quantizes a model using the quantization configuration specified in your YAML file.
+
+```bash
+axolotl quantize config.yml
+```
+
+See [Quantization](./quantize.qmd) for more details.
+
 ## Legacy CLI Usage
````
746
docs/config.qmd
746
docs/config.qmd
@@ -1,746 +0,0 @@
|
|||||||
---
|
|
||||||
title: Config Reference
|
|
||||||
description: A complete list of all configuration options.
|
|
||||||
---
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# This is the huggingface model that contains *.pt, *.safetensors, or *.bin files
|
|
||||||
# This can also be a relative path to a model on disk
|
|
||||||
base_model: ./llama-7b-hf
|
|
||||||
# You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)
|
|
||||||
base_model_ignore_patterns:
|
|
||||||
# If the base_model repo on hf hub doesn't include configuration .json files,
|
|
||||||
# You can set that here, or leave this empty to default to base_model
|
|
||||||
base_model_config: ./llama-7b-hf
|
|
||||||
# You can specify to choose a specific model revision from huggingface hub
|
|
||||||
revision_of_model:
|
|
||||||
# Optional tokenizer configuration path in case you want to use a different tokenizer
|
|
||||||
# than the one defined in the base model
|
|
||||||
tokenizer_config:
|
|
||||||
# If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too
|
|
||||||
model_type: AutoModelForCausalLM
|
|
||||||
# Corresponding tokenizer for the model AutoTokenizer is a good choice
|
|
||||||
tokenizer_type: AutoTokenizer
|
|
||||||
# Trust remote code for untrusted source
|
|
||||||
trust_remote_code:
|
|
||||||
# use_fast option for tokenizer loading from_pretrained, default to True
|
|
||||||
tokenizer_use_fast:
|
|
||||||
# Whether to use the legacy tokenizer setting, defaults to True
|
|
||||||
tokenizer_legacy:
|
|
||||||
# Resize the model embeddings when new tokens are added to multiples of 32
|
|
||||||
# This is reported to improve training speed on some models
|
|
||||||
resize_token_embeddings_to_32x:
|
|
||||||
# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.
|
|
||||||
shrink_embeddings:
|
|
||||||
# Optional[bool] Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs
|
|
||||||
embeddings_skip_upcast:
|
|
||||||
# Whether to load the model with randomly initialized weights. Useful for
|
|
||||||
# pre-training a model from scratch or debugging purposes.
|
|
||||||
random_init_weights:
|
|
||||||
|
|
||||||
# (Internal use only)
|
|
||||||
# Used to identify what the model is based on
|
|
||||||
is_falcon_derived_model:
|
|
||||||
is_llama_derived_model:
|
|
||||||
is_qwen_derived_model:
|
|
||||||
# Please note that if you set this to true, `padding_side` will be set to "left" by default
|
|
||||||
is_mistral_derived_model:
|
|
||||||
|
|
||||||
# optional overrides to the base model configuration
|
|
||||||
overrides_of_model_config:
|
|
||||||
# RoPE Scaling https://github.com/huggingface/transformers/pull/24653
|
|
||||||
rope_scaling:
|
|
||||||
type: # linear | dynamic
|
|
||||||
factor: # float
|
|
||||||
|
|
||||||
# optional overrides the base model loading from_pretrained
|
|
||||||
overrides_of_model_kwargs:
|
|
||||||
# use_cache: False
|
|
||||||
|
|
||||||
# optional overrides to the bnb 4bit quantization configuration
|
|
||||||
# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig
|
|
||||||
bnb_config_kwargs:
|
|
||||||
# These are default values
|
|
||||||
llm_int8_has_fp16_weight: false
|
|
||||||
bnb_4bit_quant_type: nf4
|
|
||||||
bnb_4bit_use_double_quant: true
|
|
||||||
|
|
||||||
|
|
||||||
# Whether you are training a 4-bit GPTQ quantized model
|
|
||||||
gptq: true
|
|
||||||
|
|
||||||
# This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer
|
|
||||||
load_in_8bit: true
|
|
||||||
# Use bitsandbytes 4 bit
|
|
||||||
load_in_4bit:
|
|
||||||
|
|
||||||
# Use CUDA bf16
|
|
||||||
bf16: true # bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection. require >=ampere
|
|
||||||
# Use CUDA fp16
|
|
||||||
fp16: true
|
|
||||||
# Use CUDA tf32
|
|
||||||
tf32: true # require >=ampere
|
|
||||||
# Note: if bf16 is set to 'auto', and fp16 is set to true, we will prefer the explicit fp16 setting
|
|
||||||
|
|
||||||
# No AMP (automatic mixed precision)
|
|
||||||
bfloat16: true # require >=ampere
|
|
||||||
float16: true
|
|
||||||
|
|
||||||
# Limit the memory for all available GPUs to this amount (if an integer, expressed in gigabytes); default: unset
|
|
||||||
gpu_memory_limit: 20GiB
|
|
||||||
# Do the LoRA/PEFT loading on CPU -- this is required if the base model is so large it takes up most or all of the available GPU VRAM, e.g. during a model and LoRA merge
|
|
||||||
lora_on_cpu: true
|
|
||||||
|
|
||||||
# List[str]. Add plugins to extend the pipeline.
|
|
||||||
# See `src/axolotl/integrations` for the available plugins or doc below for more details.
|
|
||||||
# https://docs.axolotl.ai/docs/custom_integrations.html
|
|
||||||
plugins:
|
|
||||||
# - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
|
|
||||||
|
|
||||||
# A list of one or more datasets to finetune the model with
|
|
||||||
datasets:
|
|
||||||
# HuggingFace dataset repo | s3://,gs:// path | "json" for local dataset, make sure to fill data_files
|
|
||||||
- path: vicgalle/alpaca-gpt4
|
|
||||||
# The type of prompt to use for training. [alpaca, gpteacher, oasst, reflection]
|
|
||||||
type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>
|
|
||||||
ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file
|
|
||||||
data_files: # Optional[str] path to source data files
|
|
||||||
|
|
||||||
shards: # Optional[int] split dataset into N pieces (use with shards_idx)
|
|
||||||
shards_idx: # Optional[int] = 0 the index of sharded dataset to use
|
|
||||||
|
|
||||||
preprocess_shards: # Optional[int] process dataset in N sequential chunks for memory efficiency (exclusive with `shards`)
|
|
||||||
|
|
||||||
name: # Optional[str] name of dataset configuration to load
|
|
||||||
split: train # Optional[str] name of dataset split to load from
|
|
||||||
revision: # Optional[str] The specific revision of the dataset to use when loading from the Hugging Face Hub. This can be a commit hash, tag, or branch name. If not specified, the latest version will be used. This parameter is ignored for local datasets.
|
|
||||||
trust_remote_code: # Optional[bool] Trust remote code for untrusted source
|
|
||||||
|
|
||||||
# Custom user instruction prompt
|
|
||||||
- path: repo
|
|
||||||
type:
|
|
||||||
# The below are defaults. only set what's needed if you use a different column name.
|
|
||||||
system_prompt: ""
|
|
||||||
system_format: "{system}"
|
|
||||||
field_system: system
|
|
||||||
field_instruction: instruction
|
|
||||||
field_input: input
|
|
||||||
field_output: output
|
|
||||||
|
|
||||||
# Customizable to be single line or multi-line
|
|
||||||
# Use {instruction}/{input} as key to be replaced
|
|
||||||
# 'format' can include {input}
|
|
||||||
format: |-
|
|
||||||
User: {instruction} {input}
|
|
||||||
Assistant:
|
|
||||||
# 'no_input_format' cannot include {input}
|
|
||||||
no_input_format: "{instruction} "
|
|
||||||
|
|
||||||
# For `completion` datasets only, uses the provided field instead of `text` column
|
|
||||||
field:
|
|
||||||
|
|
||||||
# Using chat template
|
|
||||||
- path: ...
|
|
||||||
# Set type to `chat_template` to use this strategy
|
|
||||||
type: chat_template
|
|
||||||
# Specify the name of the chat template to use
|
|
||||||
# The name of the chat template to use for training, following values are supported:
|
|
||||||
# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default.
|
|
||||||
# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py
|
|
||||||
# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to if the tokenizer does not have a chat template else default to tokenizer. E.g. tokenizer_default_fallback_chatml.
|
|
||||||
# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.
|
|
||||||
chat_template: tokenizer_default
|
|
||||||
|
|
||||||
# Custom jinja chat template. Used only if `chat_template: jinja` or empty.
|
|
||||||
chat_template_jinja:
|
|
||||||
|
|
||||||
# Key containing the messages (default: "messages")
|
|
||||||
field_messages: messages
|
|
||||||
|
|
||||||
# Key containing the system message (default: "system")
|
|
||||||
# If the system message is not present in the dataset sample, it will be loaded from the field_system property.
|
|
||||||
field_system: system
|
|
||||||
|
|
||||||
# Mapping of properties from the input dataset to the chat template.
|
|
||||||
# (default: message_property_mappings={'role':'role', 'content':'content'})
|
|
||||||
# If a property exists in the template but not in this mapping, the system will attempt
|
|
||||||
# to load it directly from the message using the property name as the key.
|
|
||||||
# Example: In the mapping below, 'from' is loaded from input dataset and used as 'role',
|
|
||||||
# while 'value' is loaded and used as 'content' in the chat template.
|
|
||||||
message_property_mappings:
|
|
||||||
role: from
|
|
||||||
content: value
|
|
||||||
# ...
|
|
||||||
|
|
||||||
# Optional[Dict[str, List]]. Roles mapping in the messages.
|
|
||||||
# The format is {target_role: [source_roles]}. All source roles will be mapped to the target role.
|
|
||||||
# The default is:
|
|
||||||
roles:
|
|
||||||
user: ["human", "user"]
|
|
||||||
assistant: ["gpt", "assistant"]
|
|
||||||
system: ["system"]
|
|
||||||
tool: ["tool"]
|
|
||||||
|
|
||||||
# Optional[bool]. Whether to drop the system turn from the dataset. Only works with chat_template.
|
|
||||||
# This does not drop the default system message from chat_template if it exists. If you wish to,
|
|
||||||
# we recommend using a custom jinja template with the default system message removed or
|
|
||||||
# adding a system turn with empty content.
|
|
||||||
drop_system_message:
|
|
||||||
|
|
||||||
# Optional[bool]. (for Qwen3 template only) Whether to split the assistant content based on a reasoning trace inside delimited tags
|
|
||||||
# See example at `docs/dataset-formats/conversation.qmd`
|
|
||||||
split_thinking:
|
|
||||||
|
|
||||||
# IMPORTANT: The following fields determine which parts of the conversation to train on.
|
|
||||||
# Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train
|
|
||||||
# See examples at `docs/dataset-formats/conversation.qmd`
|
|
||||||
# Note: If the below 5 fields are empty, defaults to training only on the last message.
|
|
||||||
|
|
||||||
# Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.
|
|
||||||
roles_to_train: ["assistant"] # default
|
|
||||||
# Optional[str]. Which EOS tokens to train on in the conversation. Possible values are:
|
|
||||||
# - all: train on all EOS tokens
|
|
||||||
# - turn (default): train on the EOS token at the end of each trainable turn
|
|
||||||
# - last: train on the last EOS token in the conversation
|
|
||||||
# TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.
|
|
||||||
train_on_eos: turn
|
|
||||||
# Optional[str]. Which EOT (End-of-Turn) tokens to train on in the conversation. Possible values are:
|
|
||||||
# - all: train on all EOT tokens
|
|
||||||
# - turn: train on the EOT token at the end of each trainable turn
|
|
||||||
# - last: train on the last EOT token in the conversation
|
|
||||||
# If not specified, defaults to the value of train_on_eos for backward compatibility.
|
|
||||||
train_on_eot:
|
|
||||||
# The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.
|
|
||||||
message_field_training: training
|
|
||||||
# The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn.
|
|
||||||
# The value of the key is a List[Dict] containing `begin_offset` (start character index in content), `end_offset` (end character index in content), and `train` (boolean whether to train).
|
|
||||||
message_field_training_detail: train_detail
|
|
||||||
|
|
||||||
|
|
||||||
# If false, the datasets will not be shuffled and will keep their original order in `datasets`.
|
|
||||||
# The same applies to the `test_datasets` option and the `pretraining_dataset` option. Default is true.
|
|
||||||
shuffle_merged_datasets: true
|
|
||||||
|
|
||||||
# Deduplicates datasets and test_datasets with identical entries.
|
|
||||||
dataset_exact_deduplication: true
|
|
||||||
|
|
||||||
# A list of one or more datasets to eval the model with.
|
|
||||||
# You can use either test_datasets, or val_set_size, but not both.
|
|
||||||
test_datasets:
|
|
||||||
- path: /workspace/data/eval.jsonl
|
|
||||||
ds_type: json
|
|
||||||
# You need to specify a split. For "json" datasets the default split is called "train".
|
|
||||||
split: train
|
|
||||||
type: completion
|
|
||||||
data_files:
|
|
||||||
- /workspace/data/eval.jsonl
|
|
||||||
|
|
||||||
# use RL training: 'dpo', 'ipo', 'kto', 'simpo', 'orpo', 'grpo'
|
|
||||||
rl:
|
|
||||||
rl_beta: # Optional[float]. The beta parameter for the RL training.
|
|
||||||
|
|
||||||
# dpo
|
|
||||||
dpo_use_weighting: # Optional[bool]. Whether to perform weighting.
|
|
||||||
rpo_alpha: # Optional[float]. Weighting of NLL term in loss from RPO paper.
|
|
||||||
|
|
||||||
# orpo
|
|
||||||
orpo_alpha: 0.1 # Parameter controlling the relative ratio loss weight in the ORPO loss. Passed to `beta` in `ORPOConfig` due to trl mapping.
|
|
||||||
|
|
||||||
# kto
|
|
||||||
kto_desirable_weight: # Optional[float]. Factor for desirable loss term in KTO loss.
|
|
||||||
kto_undesirable_weight: # Optional[float]. Factor for undesirable loss term in KTO loss.
|
|
||||||
|
|
||||||
# simpo
|
|
||||||
cpo_alpha: 1.0 # Weight of the BC regularizer
|
|
||||||
simpo_gamma: 0.5 # Target reward margin for the SimPO loss
|
|
||||||
|
|
||||||
# grpo
|
|
||||||
trl:
|
|
||||||
use_vllm: # Optional[bool]. Whether to use VLLM for RL training.
|
|
||||||
vllm_server_host: # Optional[str]. Host of the vLLM server to connect to.
|
|
||||||
vllm_server_port: # Optional[int]. Port of the vLLM server to connect to.
|
|
||||||
vllm_server_timeout: # Optional[int]. Total timeout (in seconds) to wait for the vLLM server to respond.
|
|
||||||
vllm_guided_decoding_regex: # Optional[str]. Regex for vLLM guided decoding.
|
|
||||||
|
|
||||||
beta: # Optional[float]. Beta parameter for the RL training. Same as `rl_beta`. Use
|
|
||||||
max_completion_length: # Optional[int]. Maximum length of the completion for RL training.
|
|
||||||
|
|
||||||
reward_funcs: # Optional[list[str]]. List of reward functions to load. Paths must be importable from current dir.
|
|
||||||
reward_weights: # Optional[list[float]]. List of reward weights for the reward functions.
|
|
||||||
|
|
||||||
num_generations: # Optional[int]. Number of generations to sample.
|
|
||||||
log_completions: # Optional[bool]. Whether to log completions.
|
|
||||||
|
|
||||||
sync_ref_model: # Optional[bool]. Whether to sync the reference model.
|
|
||||||
ref_model_mixup_alpha: # Optional[float]. Mixup alpha for the reference model.
|
|
||||||
ref_model_sync_steps: # Optional[int]. Sync steps for the reference model.
|
|
||||||
|
|
||||||
|
|
||||||
# reward modelling: `True` or `False`
|
|
||||||
reward_model:
|
|
||||||
|
|
||||||
# process reward modelling: `True` or `False`
|
|
||||||
process_reward_model:
|
|
||||||
|
|
||||||
# The name of the chat template to use for training, following values are supported:
|
|
||||||
# - tokenizer_default: Uses the chat template that is available in the tokenizer_config.json. If the chat template is not available in the tokenizer, it will raise an error. This is the default value.
|
|
||||||
# - alpaca/inst/chatml/gemma/cohere/llama3/phi_3/deepseek_v2/jamba: These chat templates are available in the axolotl codebase at src/axolotl/utils/chat_templates.py
|
|
||||||
# - tokenizer_default_fallback_*: where * is the name of the chat template to fallback to. E.g. tokenizer_default_fallback_chatml. This is useful when the chat template is not available in the tokenizer.
|
|
||||||
# - jinja: Uses a custom jinja template for the chat template. The custom jinja template should be provided in the chat_template_jinja field.
|
|
||||||
# The selected chat template will be saved to the tokenizer_config.json for easier inferencing
|
|
||||||
# Note: It is recommended to set train_on_inputs to true when using a chat template that is different from the model's default chat template.
|
|
||||||
chat_template: tokenizer_default
|
|
||||||
# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.
|
|
||||||
chat_template_jinja: null
|
|
||||||
# Optional[List[str]]. Custom EOT (End-of-Turn) tokens to mask/unmask during training.
|
|
||||||
# These tokens mark the boundaries between conversation turns.
|
|
||||||
# For example: ["/INST", "</s>", "[/SYSTEM_PROMPT]"]
|
|
||||||
# If not specified, defaults to just the model's eos_token.
|
|
||||||
# This is useful for templates that use multiple delimiter tokens.
|
|
||||||
eot_tokens:
|
|
||||||
# - "</s>"
|
|
||||||
# - "[/INST]"
|
|
||||||
# - "[/SYSTEM_PROMPT]"
|
|
||||||
# Changes the default system message
|
|
||||||
default_system_message: You are a helpful assistant. Please give a long and detailed answer. # Currently only supports chatml.
|
|
||||||
# Axolotl attempts to save the dataset as an arrow after packing the data together so
|
|
||||||
# subsequent training attempts load faster, relative path
|
|
||||||
dataset_prepared_path: data/last_run_prepared
|
|
||||||
# Push prepared dataset to hub
|
|
||||||
push_dataset_to_hub: # Optional[str] repo_org/repo_name
|
|
||||||
# The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`
|
|
||||||
# if not set.
|
|
||||||
dataset_processes: # defaults to os.cpu_count() if not set
|
|
||||||
# Keep dataset in memory while preprocessing
|
|
||||||
# Only needed if cached dataset is taking too much storage
|
|
||||||
dataset_keep_in_memory:
|
|
||||||
# push checkpoints to hub
|
|
||||||
hub_model_id: # private repo path to push finetuned model
|
|
||||||
# how to push checkpoints to hub
|
|
||||||
# https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy
|
|
||||||
hub_strategy:
|
|
||||||
# Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets
|
|
||||||
# Required to be true when used in combination with `push_dataset_to_hub`
|
|
||||||
hf_use_auth_token: # boolean
|
|
||||||
# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval.
|
|
||||||
val_set_size: 0.04
|
|
||||||
# Num shards for whole dataset
|
|
||||||
dataset_shard_num:
|
|
||||||
# Index of shard to use for whole dataset
|
|
||||||
dataset_shard_idx:
|
|
||||||
|
|
||||||
# The maximum length of an input to train with, this should typically be less than 2048
|
|
||||||
# as most models have a token/context limit of 2048
|
|
||||||
sequence_len: 2048
|
|
||||||
# Pad inputs so each step uses constant sized buffers
|
|
||||||
# This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently
|
|
||||||
pad_to_sequence_len:
|
|
||||||
# Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true'
|
|
||||||
sample_packing:
|
|
||||||
# Set to 'false' if getting errors during eval with sample_packing on.
|
|
||||||
eval_sample_packing:
|
|
||||||
# You can set these packing optimizations AFTER starting a training at least once.
|
|
||||||
# The trainer will provide recommended values for these settings.
|
|
||||||
sample_packing_eff_est:
|
|
||||||
total_num_tokens:
|
|
||||||
# Increasing the following values helps with packing, but usually only slightly (<1%).
|
|
||||||
# The number of samples packed at a time.
|
|
||||||
sample_packing_group_size: 100000
|
|
||||||
# The number of samples which can be packed into one sequence. Increase if using a large sequence_len with many short samples.
|
|
||||||
sample_packing_bin_size: 200
|
|
||||||
sample_pack_sequentially: # Optional[bool]. Whether to pack samples sequentially.
|
|
||||||
|
|
||||||
# whether to concatenate samples during pretraining
|
|
||||||
pretraining_sample_concatenation:
|
|
||||||
|
|
||||||
curriculum_sampling: # Optional[bool]. Whether to use sequential sampling for curriculum learning
|
|
||||||
|
|
||||||
# Use batch flattening for speedups when not using sample_packing
|
|
||||||
batch_flattening:
|
|
||||||
|
|
||||||
# Passed through to transformers when loading the model when launched without accelerate
|
|
||||||
# Use `sequential` when training w/ model parallelism to limit memory
|
|
||||||
device_map:
|
|
||||||
# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model.
|
|
||||||
max_memory:
|
|
||||||
|
|
||||||
# If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model
|
|
||||||
adapter: lora
|
|
||||||
# If you already have a lora model trained that you want to load, put that here.
|
|
||||||
# This means after training, if you want to test the model, you should set this to the value of `output_dir`.
|
|
||||||
# Note that if you merge an adapter to the base model, a new subdirectory `merged` will be created under the `output_dir`.
|
|
||||||
lora_model_dir:
|
|
||||||
|
|
||||||
# LoRA hyperparameters
|
|
||||||
# For more details about the following options, see:
|
|
||||||
# https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2
|
|
||||||
lora_r: 8
|
|
||||||
lora_alpha: 16
|
|
||||||
lora_dropout: 0.05
|
|
||||||
lora_target_modules:
|
|
||||||
- q_proj
|
|
||||||
- v_proj
|
|
||||||
# - k_proj
|
|
||||||
# - o_proj
|
|
||||||
# - gate_proj
|
|
||||||
# - down_proj
|
|
||||||
# - up_proj
|
|
||||||
lora_target_linear: # If true, will target all linear modules
|
|
||||||
|
|
||||||
# List[int] | int. # The layer indices to transform, otherwise, apply to all layers
|
|
||||||
# https://huggingface.co/docs/peft/v0.15.0/en/package_reference/lora#peft.LoraConfig.layers_to_transform
|
|
||||||
peft_layers_to_transform:
|
|
||||||
|
|
||||||
# Optional[bool]. Whether to use DoRA.
|
|
||||||
# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#weight-decomposed-low-rank-adaptation-dora
|
|
||||||
peft_use_dora:
|
|
||||||
|
|
||||||
# Optional[bool]. Whether to use RSLoRA.
|
|
||||||
# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#rank-stabilized-lora
|
|
||||||
peft_use_rslora:
|
|
||||||
|
|
||||||
# Optional[list[tuple[int, int]]]. List of layer indices to replicate.
|
|
||||||
# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#memory-efficient-layer-replication-with-lora
|
|
||||||
peft_layer_replication:
|
|
||||||
|
|
||||||
# bool | Literal["gaussian", "eva", "olora", "pissa", "pissa_niter_[number of iters]", "corda", "loftq"]
|
|
||||||
# How to initialize LoRA weights. Default to True which is MS original implementation.
|
|
||||||
# https://huggingface.co/docs/peft/v0.15.0/en/developer_guides/lora#initialization
|
|
||||||
peft_init_lora_weights:
|
|
||||||
|
|
||||||
# If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.
|
|
||||||
# For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.
|
|
||||||
# `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.
|
|
||||||
# https://github.com/huggingface/peft/issues/334#issuecomment-1561727994
|
|
||||||
lora_modules_to_save:
|
|
||||||
# - embed_tokens
|
|
||||||
# - lm_head
|
|
||||||
|
|
||||||
lora_fan_in_fan_out: false
|
|
||||||
|
|
||||||
# Apply custom LoRA autograd functions and activation function Triton kernels for
|
|
||||||
# speed and memory savings
|
|
||||||
# See: https://docs.axolotl.ai/docs/lora_optims.html
|
|
||||||
lora_mlp_kernel: true
|
|
||||||
lora_qkv_kernel: true
|
|
||||||
lora_o_kernel: true
|
|
||||||
|
|
||||||
# LoRA+ hyperparameters
|
|
||||||
# For more details about the following options, see:
|
|
||||||
# https://arxiv.org/abs/2402.12354 and `src/axolotl/core/train_builder.py`
|
|
||||||
loraplus_lr_ratio: # loraplus learning rate ratio lr_B / lr_A. Recommended value is 2^4.
|
|
||||||
loraplus_lr_embedding: # loraplus learning rate for lora embedding layers. Default value is 1e-6.
|
|
||||||
|
|
||||||
peft:
|
|
||||||
# Configuration options for loftq initialization for LoRA
|
|
||||||
# https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization
|
|
||||||
loftq_config:
|
|
||||||
loftq_bits: # typically 4 bits
|
|
||||||
|
|
||||||
# ReLoRA configuration
|
|
||||||
# Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed
|
|
||||||
relora_steps: # Number of steps per ReLoRA restart
|
|
||||||
relora_warmup_steps: # Number of per-restart warmup steps
|
|
||||||
relora_anneal_steps: # Number of anneal steps for each relora cycle
|
|
||||||
relora_prune_ratio: # threshold for optimizer magnitude when pruning
|
|
||||||
relora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings
|
|
||||||
|
|
||||||
# wandb configuration if you're using it
|
|
||||||
# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.
|
|
||||||
wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb
|
|
||||||
wandb_project: # Your wandb project name
|
|
||||||
wandb_entity: # A wandb Team name if using a Team
|
|
||||||
wandb_watch:
|
|
||||||
wandb_name: # Set the name of your wandb run
|
|
||||||
wandb_run_id: # Set the ID of your wandb run
|
|
||||||
wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training
|
|
||||||
|
|
||||||
# mlflow configuration if you're using it
|
|
||||||
mlflow_tracking_uri: # URI to mlflow
|
|
||||||
mlflow_experiment_name: # Your experiment name
|
|
||||||
mlflow_run_name: # Your run name
|
|
||||||
hf_mlflow_log_artifacts: # set to true to copy each saved checkpoint on each save to mlflow artifact registry
|
|
||||||
|
|
||||||
# Comet configuration if you're using it
|
|
||||||
# Make sure your `COMET_API_KEY` environment variable is set (recommended) or you login to Comet with `comet login`.
|
|
||||||
# Check out our documentation for more details https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment-Creation/#comet_ml.start
|
|
||||||
use_comet: # Enable or disable Comet integration.
|
|
||||||
comet_api_key: # API key for Comet. Recommended to set via `comet login`.
|
|
||||||
comet_workspace: # Workspace name in Comet. Defaults to the user's default workspace.
|
|
||||||
comet_project_name: # Project name in Comet. Defaults to Uncategorized.
|
|
||||||
comet_experiment_key: # Identifier for the experiment. Used to append data to an existing experiment or control the key of new experiments. Default to a random key.
|
|
||||||
comet_mode: # Create a new experiment ("create") or log to an existing one ("get"). Default ("get_or_create") auto-selects based on configuration.
|
|
||||||
comet_online: # Set to True to log data to Comet server, or False for offline storage. Default is True.
|
|
||||||
comet_experiment_config: # Dictionary for additional configuration settings, see the doc for more details.
|
|
||||||
|
|
||||||
# Tensorboard
|
|
||||||
use_tensorboard: # Optional[bool]
|
|
||||||
|
|
||||||
# Where to save the full-finetuned model to
|
|
||||||
output_dir: ./completed-model
|
|
||||||
|
|
||||||
# Whether to use torch.compile and which backend to use
|
|
||||||
# setting to `auto` will enable torch compile when torch>=2.5.1
|
|
||||||
torch_compile: # Optional[Union[Literal["auto"], bool]]
|
|
||||||
torch_compile_backend: # Optional[str]
|
|
||||||
|
|
||||||
# Training hyperparameters
|
|
||||||
|
|
||||||
# If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.
|
|
||||||
gradient_accumulation_steps: 1
|
|
||||||
# The number of samples to include in each batch. This is the number of samples sent to each GPU.
|
|
||||||
# Batch size per gpu = micro_batch_size * gradient_accumulation_steps
|
|
||||||
micro_batch_size: 2
|
|
||||||
eval_batch_size:
|
|
||||||
num_epochs: 4
|
|
||||||
warmup_steps: 100 # cannot use with warmup_ratio
|
|
||||||
warmup_ratio: 0.05 # cannot use with warmup_steps
|
|
||||||
learning_rate: 0.00003
|
|
||||||
lr_quadratic_warmup:
|
|
||||||
logging_steps:
|
|
||||||
eval_steps: # Leave empty to eval at each epoch, integer for every N steps. float for fraction of total steps
|
|
||||||
evals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps
|
|
||||||
eval_strategy: # Set to `"no"` to skip evaluation, `"epoch"` at end of each epoch, leave empty to infer from `eval_steps`.
|
|
||||||
save_strategy: # Set to `"no"` to skip checkpoint saves, `"epoch"` at end of each epoch, `"best"` when better result is achieved, leave empty to infer from `save_steps`.
|
|
||||||
save_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps
|
|
||||||
saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps
|
|
||||||
save_total_limit: # Checkpoints saved at a time
|
|
||||||
save_only_model: # Save only the model weights, skipping the optimizer. Using this means you can't resume from checkpoints.
|
|
||||||
# Maximum number of iterations to train for. It precedes num_epochs which means that
|
|
||||||
# if both are set, num_epochs will not be guaranteed.
|
|
||||||
# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps
|
|
||||||
max_steps:
|
|
||||||
|
|
||||||
# bool of whether to include tokens trained per second in the training metrics. This iterates over the entire dataset once, so it takes some time.
|
|
||||||
include_tokens_per_second: # Optional[bool]
|
|
||||||
|
|
||||||
# whether to find batch size that fits in memory. Passed to underlying transformers Trainer
|
|
||||||
auto_find_batch_size: # Optional[bool]
|
|
||||||
|
|
||||||
eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0
|
|
||||||
eval_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128
|
|
||||||
do_causal_lm_eval: # Whether to run causal language model evaluation for metrics in `eval_causal_lm_metrics`.
|
|
||||||
eval_causal_lm_metrics: # HF evaluate metrics used during evaluation. Default is ["sacrebleu", "comet", "ter", "chrf", "perplexity"]
|
|
||||||
|
|
||||||
profiler_steps: # enable the pytorch profiler to capture the first N steps of training to the output_dir.
|
|
||||||
# see https://pytorch.org/blog/understanding-gpu-memory-1/ for more information
|
|
||||||
# snapshots can be visualized @ https://pytorch.org/memory_viz
|
|
||||||
|
|
||||||
loss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)
|
|
||||||
loss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)
|
|
||||||
|
|
||||||
# Save model as safetensors (require safetensors package)
|
|
||||||
save_safetensors:
|
|
||||||
|
|
||||||
# Whether to mask out or include the human's prompt from the training labels
|
|
||||||
train_on_inputs: false
|
|
||||||
# Group similarly sized data to minimize padding.
|
|
||||||
# May be slower to start, as it must download and sort the entire dataset.
|
|
||||||
# Note that training loss may have an oscillating pattern with this enabled.
|
|
||||||
group_by_length: false
|
|
||||||
|
|
||||||
# Whether to use gradient checkpointing. Available options are: true, false, "offload", "offload_disk".
|
|
||||||
# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing
|
|
||||||
gradient_checkpointing: false
|
|
||||||
# additional kwargs to pass to the trainer for gradient checkpointing
|
|
||||||
# gradient_checkpointing_kwargs:
|
|
||||||
# use_reentrant: true
|
|
||||||
|
|
||||||
# Stop training after this many evaluation losses have increased in a row
|
|
||||||
# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback
|
|
||||||
early_stopping_patience: 3
|
|
||||||
|
|
||||||
# Specify a scheduler and kwargs to use with the optimizer
|
|
||||||
lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | 'linear' | 'cosine_with_restarts' | 'polynomial' | 'constant' | 'constant_with_warmup' | 'inverse_sqrt' | 'reduce_lr_on_plateau' | 'cosine_with_min_lr' | 'warmup_stable_decay' | empty for cosine
|
|
||||||
lr_scheduler_kwargs:
|
|
||||||
cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr
|
|
||||||
cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)
|
|
||||||
|
|
||||||
# For one_cycle optim
|
|
||||||
lr_div_factor: # Learning rate div factor
|
|
||||||
|
|
||||||
# Specify optimizer
|
|
||||||
# Valid values are driven by the Transformers OptimizerNames class, see:
|
|
||||||
# https://github.com/huggingface/transformers/blob/cbf924b76c03828101a34069a96d209314114fd5/src/transformers/training_args.py#L144-L189
|
|
||||||
#
|
|
||||||
# Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of
|
|
||||||
# torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used
|
|
||||||
# in the examples/ for your model and fine-tuning use case.
|
|
||||||
#
|
|
||||||
# Valid values for 'optimizer' include:
|
|
||||||
# - adamw_torch
|
|
||||||
# - adamw_torch_fused
|
|
||||||
# - adamw_torch_xla
|
|
||||||
# - adamw_torch_npu_fused
|
|
||||||
# - adamw_apex_fused
|
|
||||||
# - adopt_adamw (an EXPERIMENTAL optimizer, only for torch version >= 2.5.1)
|
|
||||||
# - adafactor
|
|
||||||
# - adamw_anyprecision
|
|
||||||
# - adamw_torch_4bit
|
|
||||||
# - ademamix
|
|
||||||
# - sgd
|
|
||||||
# - adagrad
|
|
||||||
# - adamw_bnb_8bit
|
|
||||||
# - adamw_8bit # alias for adamw_bnb_8bit
|
|
||||||
# - ademamix_8bit
|
|
||||||
# - lion_8bit
|
|
||||||
# - lion_32bit
|
|
||||||
# - paged_adamw_32bit
|
|
||||||
# - paged_adamw_8bit
|
|
||||||
# - paged_ademamix_32bit
|
|
||||||
# - paged_ademamix_8bit
|
|
||||||
# - paged_lion_32bit
|
|
||||||
# - paged_lion_8bit
|
|
||||||
# - rmsprop
|
|
||||||
# - rmsprop_bnb
|
|
||||||
# - rmsprop_bnb_8bit
|
|
||||||
# - rmsprop_bnb_32bit
|
|
||||||
# - galore_adamw
|
|
||||||
# - galore_adamw_8bit
|
|
||||||
# - galore_adafactor
|
|
||||||
# - galore_adamw_layerwise
|
|
||||||
# - galore_adamw_8bit_layerwise
|
|
||||||
# - galore_adafactor_layerwise
|
|
||||||
# - lomo
|
|
||||||
# - adalomo
|
|
||||||
# - grokadamw
|
|
||||||
# - schedule_free_adamw
|
|
||||||
# - schedule_free_sgd
|
|
||||||
# - apollo_adamw
|
|
||||||
# - apollo_adamw_layerwise
|
|
||||||
#
|
|
||||||
# Additional custom optimizers include:
|
|
||||||
# - optimi_adamw
|
|
||||||
# - ao_adamw_8bit
|
|
||||||
# - ao_adamw_fp8
|
|
||||||
# - came_pytorch
|
|
||||||
optimizer:
|
|
||||||
# Dictionary of arguments to pass to the optimizer
|
|
||||||
optim_args:
|
|
||||||
# For Galore Optimizers the following optim_args are available
|
|
||||||
# rank: # type: int
|
|
||||||
# update_proj_gap # type: int
|
|
||||||
# scale # type: float
|
|
||||||
# proj_type: # type: str, default = std
|
|
||||||
|
|
||||||
# The target modules to optimize, i.e. the module names that you would like to train, right now this is used only for GaLore algorithm
|
|
||||||
optim_target_modules:
|
|
||||||
# - self_attn # for llama
|
|
||||||
# - mlp
|
|
||||||
|
|
||||||
# Specify weight decay
|
|
||||||
weight_decay:
|
|
||||||
# adamw hyperparams
|
|
||||||
adam_beta1:
|
|
||||||
adam_beta2:
|
|
||||||
adam_beta3: # only used for CAME Optimizer
|
|
||||||
adam_epsilon:
|
|
||||||
adam_epsilon2: # only used for CAME Optimizer
|
|
||||||
# Gradient clipping max norm
|
|
||||||
max_grad_norm:
|
|
||||||
|
|
||||||
# Augmentation techniques
|
|
||||||
# NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings
|
|
||||||
# currently only supported on Llama and Mistral
|
|
||||||
neftune_noise_alpha:
|
|
||||||
|
|
||||||
# Optional[bool]. Whether to use BetterTransformers
|
|
||||||
flash_optimum:
|
|
||||||
|
|
||||||
# Note: Only one of the following attention patches can be used at a time.
|
|
||||||
# For example, if you set `xformers_attention` to `true`, do not set `flash_attention` to `true`.
|
|
||||||
|
|
||||||
# Optional[bool]. Whether to use xformers attention patch https://github.com/facebookresearch/xformers:
|
|
||||||
xformers_attention:
|
|
||||||
# Optional[bool]. Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:
|
|
||||||
flash_attention:
|
|
||||||
flash_attn_cross_entropy: # Optional[bool]. Whether to use flash-attention cross entropy implementation - advanced use only
|
|
||||||
flash_attn_rms_norm: # Optional[bool]. Whether to use flash-attention rms norm implementation - advanced use only
|
|
||||||
flash_attn_fuse_qkv: # Optional[bool]. Whether to fuse QKV into a single operation
|
|
||||||
flash_attn_fuse_mlp: # Optional[bool]. Whether to fuse part of the MLP into a single operation
|
|
||||||
# Optional[bool]. Whether to use scaled-dot-product attention
|
|
||||||
# https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
|
|
||||||
sdp_attention:
|
|
||||||
# Optional[bool]. Shifted-sparse attention (only llama) - https://arxiv.org/pdf/2309.12307.pdf
|
|
||||||
s2_attention:
|
|
||||||
|
|
||||||
# Optional[bool]. Whether to use low_cpu_mem_usage
|
|
||||||
low_cpu_mem_usage:
|
|
||||||
# Optional[str]. Resume from a specific checkpoint dir
|
|
||||||
resume_from_checkpoint:
|
|
||||||
# Optional[bool]. If resume_from_checkpoint isn't set and you simply want it to start where it left off.
|
|
||||||
# Be careful with this being turned on between different models.
|
|
||||||
auto_resume_from_checkpoints: false
|
|
||||||
|
|
||||||
## Multimodal section
|
|
||||||
# int | tuple[int, int] | None . Size to resize images to, width x height.
|
|
||||||
# Will read from model/processor config if not set.
|
|
||||||
image_size:
|
|
||||||
# str. Algorithm to use for image resizing. "bilinear", "bicubic", "lanczos". Default is "bilinear".
|
|
||||||
image_resize_algorithm: 'bilinear'
|
|
||||||
## End of multimodal section
|
|
||||||
|
|
||||||
# Don't mess with this, it's here for accelerate and torchrun
|
|
||||||
local_rank:
|
|
||||||
|
|
||||||
# Add or change special tokens.
|
|
||||||
# If you add tokens here, you don't need to add them to the `tokens` list.
|
|
||||||
special_tokens:
|
|
||||||
# bos_token: "<s>"
|
|
||||||
# eos_token: "</s>"
|
|
||||||
# unk_token: "<unk>"
|
|
||||||
# pad_token: "[PAD]"
|
|
||||||
|
|
||||||
# Optional[list[str]]. Add extra tokens to the tokenizer.
|
|
||||||
tokens:
|
|
||||||
# - "<|startoftext|>"
|
|
||||||
# - "<|endoftext|>"
|
|
||||||
|
|
||||||
# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer.
|
|
||||||
# Only works for tokens that are not part of the base vocab (aka are added_tokens).
|
|
||||||
# Can be checked if they exist in tokenizer.json added_tokens.
|
|
||||||
added_tokens_overrides: # Dict[int, str]
|
|
||||||
# 128041: "<|im_start|>"
|
|
||||||
# 128042: "<|im_end|>"
|
|
||||||
|
|
||||||
# FSDP
|
|
||||||
fsdp:
|
|
||||||
fsdp_config:
|
|
||||||
|
|
||||||
# Deepspeed config path. e.g., deepspeed_configs/zero3.json
|
|
||||||
deepspeed:
|
|
||||||
|
|
||||||
# Advanced DDP Arguments
|
|
||||||
ddp_timeout:
|
|
||||||
ddp_bucket_cap_mb:
|
|
||||||
ddp_broadcast_buffers:
|
|
||||||
|
|
||||||
# Sequence parallelism
|
|
||||||
# Set to a divisor of the number of GPUs available to split sequences into chunks of equal size.
|
|
||||||
# Use in long context training to prevent OOM when sequences cannot fit into a single GPU's VRAM.
|
|
||||||
# E.g., if 4 GPUs are available, set this value to 2 to split each sequence into two equal-sized
|
|
||||||
# subsequences, or set to 4 to split into four equal-sized subsequences.
|
|
||||||
# See https://docs.axolotl.ai/docs/sequence_parallelism.html for more details.
|
|
||||||
sequence_parallel_degree:
|
|
||||||
# Optional; strides across the key dimension. Larger values use more memory but should make training faster.
|
|
||||||
# Must evenly divide the number of KV heads in your model.
|
|
||||||
heads_k_stride: 1
|
|
||||||
# One of "varlen_llama3", "batch_ring", "batch_zigzag", "batch_stripe". Defaults to "varlen_llama3"
|
|
||||||
# in the sample packing case, and "batch_ring" in the non-sample packing case.
|
|
||||||
ring_attn_func:
|
|
||||||
|
|
||||||
# Path to torch distx for optim 'adamw_anyprecision'
|
|
||||||
torchdistx_path:
|
|
||||||
|
|
||||||
# Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize
|
|
||||||
pretraining_dataset:
|
|
||||||
|
|
||||||
# Debug mode
|
|
||||||
debug:
|
|
||||||
|
|
||||||
# Seed
|
|
||||||
seed:
|
|
||||||
|
|
||||||
# Allow overwrite yml config using from cli
|
|
||||||
strict:
|
|
||||||
```
|
|
||||||
@@ -12,7 +12,7 @@ Chat Template strategy uses a jinja2 template that converts a list of messages i
 {"conversations": [{"role": "...", "content": "..."}]}
 ```
 
-See [configs](../config.qmd) for full configs and supported templates.
+See [configs](../config-reference.qmd) for full configs and supported templates.
 
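For orientation, a dataset whose turns live under a `conversations` key (as in the sample above) is typically wired up along these lines — a sketch only, with the path and mappings to be adapted to your data:

```yaml
datasets:
  - path: ./data/chats.jsonl        # hypothetical local file
    type: chat_template
    field_messages: conversations   # the sample stores turns under "conversations"
    message_property_mappings:
      role: role
      content: content
```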
 ### Migrating from sharegpt
 
@@ -52,7 +52,9 @@ We recommend checking the below examples for other usecases.
 
 ### Examples
 
-1. (Legacy) Using the default chat template in the tokenizer_config.json on OpenAI messages format, training on only last message.
+#### Training on last message
+
+(Legacy) Using the default chat template in the tokenizer_config.json on OpenAI messages format, training on only last message.
 
 ```yaml
 datasets:
@@ -66,7 +68,9 @@ datasets:
 If you receive an error like "`chat_template` choice is `tokenizer_default` but tokenizer's `chat_template` is null.", it means the tokenizer does not have a default `chat_template`. Follow the examples below instead to set a custom `chat_template`.
 :::
 
-2. Using the `gemma` chat template to override the tokenizer_config.json's chat template on OpenAI messages format, training on all assistant messages.
+#### Overriding default chat template
+
+Using the `gemma` chat template to override the tokenizer_config.json's chat template on OpenAI messages format, training on all assistant messages.
 
 ```yaml
 chat_template: gemma # this overwrites the tokenizer's chat_template
@@ -76,7 +80,13 @@ datasets:
   roles_to_train: ["assistant"] # default value
 ```
 
-3. Using the tokenizer_config.json's chat template or `chatml` as fallback if the former's chat template does not exist, on OpenAI messages format, training on all assistant messages.
+::: {.callout-note}
+If you want to use built-in chat_template, use `chat_template: tokenizer_default` (this is set by default).
+:::
+
+#### Using default chat template with fallback
+
+Using the tokenizer_config.json's chat template or `chatml` as fallback if the former's chat template does not exist, on OpenAI messages format, training on all assistant messages.
 
 ```yaml
 chat_template: tokenizer_default_fallback_chatml # this overwrites the tokenizer's chat_template
@@ -85,7 +95,9 @@ datasets:
     type: chat_template
 ```
 
-4. Using a custom jinja template on OpenAI messages format, training on all assistant messages.
+#### Custom Jinja template
+
+Using a custom jinja template on OpenAI messages format, training on all assistant messages.
 
 ```yaml
 # chat_template: jinja # `jinja` will be implied if the `chat_template_jinja` is set and this field is empty
@@ -100,7 +112,9 @@ datasets:
 Please make sure that your `tokenizer.eos_token` is same as EOS (End-of-Sequence) token in template. Otherwise, set `eos_token` under `special_tokens: `.
 :::
 
-5. If you are using a template that has a different EOT (End-of-Turn) token from EOS token or multiple EOT tokens (like Mistral V7 Tekken), set the `eot_tokens: ` config. The handling of EOT tokens follows `train_on_eos: ` which defaults to turn.
+#### Using template with different token for EOT and EOS
+
+- If you are using a template that has a different EOT (End-of-Turn) token from EOS token or multiple EOT tokens (like Mistral V7 Tekken), set the `eot_tokens: ` config. The handling of EOT tokens follows `train_on_eos: ` which defaults to turn.
 
 ```yaml
 eot_tokens:
@@ -116,16 +130,16 @@ datasets:
 ```
 
 ::: {.callout-tip}
-See [config documentation](../config.qmd) for detailed explanations of "turn", "last", and "all" options for training on tokens.
+See [config documentation](../config-reference.qmd) for detailed explanations of "turn", "last", and "all" options for training on tokens.
 :::
 
 ::: {.callout-note}
 Using `eot_tokens` requires each token that exists in `chat_template` to be a single token in the tokenizer. Otherwise, the tokenizer will split the token and cause unexpected behavior.
 
-You can add those tokens as new tokens under `tokens: ` or (recommended) override unused added_tokens via `added_tokens_overrides: `. See [config](../config.qmd) for more details.
+You can add those tokens as new tokens under `tokens: ` or (recommended) override unused added_tokens via `added_tokens_overrides: `. See [config](../config-reference.qmd) for more details.
 :::
 
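For reference, the two options mentioned in the note look roughly like this in the config — a sketch, where the token string and token id are purely illustrative:

```yaml
# option 1: register brand-new tokens
tokens:
  - "[/SYSTEM_PROMPT]"

# option 2 (recommended): repurpose an unused added_token by id
added_tokens_overrides:
  128041: "[/SYSTEM_PROMPT]"
```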
-6. Continuing from the previous example, if you want to train on all EOT token trainable turns but only last EOS token, set `train_on_eos: last`.
+- Continuing from the previous example, if you want to train on all EOT token trainable turns but only last EOS token, set `train_on_eos: last`.
 
 ```yaml
 eot_tokens:
@@ -145,7 +159,73 @@ If EOS token only appears at the end of a prompt, `train_on_eos: last` is equiva
 :::
 
 
-7. (Advanced) Using fine-grained control over tokens and turns to train in a conversation
+#### Using tool use
+
+Instead of passing `tools` via the system prompt, an alternative method would be to have the `tools` in a separate column and loaded via `chat_template` to let the template dynamically build it.
+
+```json
+{
+  "tools": [
+    {
+      "type": "...",
+      "function": {
+        "name": "...",
+        "description": "...",
+        "parameters": {
+          "type": "...",
+          "properties": {
+            // ...
+          },
+          "required": ["..."],
+        },
+      },
+    },
+  ],
+  "messages": [
+    // ...
+    {
+      "role": "assistant", // call the function via assistant
+      "tool_calls": [
+        {
+          "type": "function",
+          "function": {
+            "name": "...",
+            "arguments": {
+              "...": "...",
+            }
+          }
+        }
+      ]
+    },
+    {
+      "role": "tool",
+      "name": "...",
+      "content": "..."
+    },
+  ],
+}
+```
+
+::: {.callout-note}
+Tools need to follow [JSON schema](https://json-schema.org/learn/getting-started-step-by-step).
+:::
+
+```yaml
+chat_template: llama4
+datasets:
+  - path: ...
+    type: chat_template
+    # field_tools: tools # default is `tools`
+```
+
+::: {.callout-tip}
+Look into the `chat_template` you are using to see if it supports `tools` and what the expected role is for the tool answer. In the example above, the tool answer is expected to be in the `tool` or `ipython` role for `llama4` template.
+:::
+
+
+#### Using fine-grained control over token masking
+
+(Advanced) Using fine-grained control over tokens and turns to train in a conversation
+
 For a data sample that looks like:
 
@@ -196,7 +276,9 @@ datasets:
 It is not necessary to set both `message_field_training` and `message_field_training_detail` at once.
 :::
 
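For the per-turn and per-span masking controls discussed above, the relevant dataset config looks roughly like this — a sketch where the path is illustrative and the field names are the documented defaults:

```yaml
datasets:
  - path: ./data/annotated.jsonl                  # hypothetical local file
    type: chat_template
    message_field_training: training              # per-turn boolean flag in each message
    message_field_training_detail: train_detail   # per-span offsets with a train flag
```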
-8. (For Qwen3 template only) Enable reasoning split, where the reasoning is split from the content and passed as a separate field into the template.
+#### Reasoning split
+
+(For Qwen3 template only) Enable reasoning split, where the reasoning is split from the content and passed as a separate field into the template.
 
 ```yaml
 datasets:
@@ -36,10 +36,6 @@ It is typically recommended to save your dataset as `.jsonl` due to its flexibil
 
 Axolotl supports loading from a Hugging Face hub repo or from local files.
 
-::: {.callout-important}
-For pre-training only, Axolotl would split texts if it exceeds the context length into multiple smaller prompts.
-:::
-
 ### Pre-training from Hugging Face hub datasets
 
 As an example, to train using a Hugging Face dataset `hf_org/name`, you can pass the following config:
@@ -77,18 +73,21 @@ datasets:
     type: completion
 ```
 
-From local files (either example works):
+From local files:
 
 ```yaml
 datasets:
   - path: A.jsonl
     type: completion
 
-  - path: json
-    data_files: ["A.jsonl", "B.jsonl", "C.jsonl"]
+  - path: B.jsonl
     type: completion
 ```
 
+::: {.callout-important}
+For `completion` only, Axolotl would split texts if it exceeds the context length into multiple smaller prompts. If you are interested in having this for `pretraining_dataset` too, please let us know or help make a PR!
+:::
+
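As an aside, the streaming `pretraining_dataset` route mentioned in the callout skips pre-tokenization entirely; a minimal sketch, where the dataset name is a placeholder:

```yaml
pretraining_dataset: hf_org/name   # streamed completion-style pre-training corpus
```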
 ### Pre-training dataset configuration tips
 
 #### Setting max_steps
@@ -186,4 +186,4 @@ datasets:
     no_input_format: "[INST] {instruction} [/INST]"
 ```
 
-See full config options under [here](../config.qmd).
+See full config options under [here](../config-reference.qmd).
@@ -36,7 +36,7 @@ This matches the API of [`datasets.load_dataset`](https://github.com/huggingface
 
 For HuggingFace's guide to load different dataset types, see [here](https://huggingface.co/docs/datasets/loading).
 
-For full details on the config, see [config.qmd](config.qmd).
+For full details on the config, see [config-reference.qmd](config-reference.qmd).
 
 ::: {.callout-note}
 
@@ -54,7 +54,7 @@ datasets:
 
 #### Files
 
-Usually, to load a JSON file, you would do something like this:
+To load a JSON file, you would do something like this:
 
 ```python
 from datasets import load_dataset
@@ -66,20 +66,12 @@ Which translates to the following config:
 
 ```yaml
 datasets:
-  - path: json
-    data_files: /path/to/your/file.jsonl
-```
-
-However, to make things easier, we have added a few shortcuts for loading local dataset files.
-
-You can just point the `path` to the file or directory along with the `ds_type` to load the dataset. The below example shows for a JSON file:
-
-```yaml
-datasets:
-  - path: /path/to/your/file.jsonl
+  - path: data.json
     ds_type: json
 ```
 
+In the example above, it can be seen that we can just point the `path` to the file or directory along with the `ds_type` to load the dataset.
+
 This works for CSV, JSON, Parquet, and Arrow files.
 
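As a concrete illustration of the same shortcut for another file type — a sketch only, with a hypothetical path:

```yaml
datasets:
  - path: /path/to/your/data.csv   # hypothetical local file
    ds_type: csv                   # the same pattern works with parquet and arrow
```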
 ::: {.callout-tip}
@@ -9,7 +9,7 @@ format:
 This section describes the different Docker images that are released by AxolotlAI at [Docker Hub](https://hub.docker.com/u/axolotlai).
 
 ::: {.callout-important}
-For Blackwell GPUs, please use the tags with Pytorch 2.7.0 and CUDA 12.8.
+For Blackwell GPUs, please use the tags with Pytorch 2.7.1 and CUDA 12.8.
 :::
 
 ## Base
@@ -32,11 +32,10 @@ main-base-py{python_version}-cu{cuda_version}-{pytorch_version}
 
 Tags examples:
 
-- `main-base-py3.11-cu128-2.7.0`
-- `main-base-py3.11-cu126-2.7.0`
+- `main-base-py3.11-cu128-2.7.1`
+- `main-base-py3.11-cu126-2.7.1`
 - `main-base-py3.11-cu124-2.6.0`
 - `main-base-py3.11-cu124-2.5.1`
-- `main-base-py3.11-cu124-2.4.1`
 
 ## Main
 
@@ -77,12 +76,10 @@ Tags examples:
 - `main-py3.11-cu126-2.7.0`
 - `main-py3.11-cu124-2.6.0`
 - `main-py3.11-cu124-2.5.1`
-- `main-py3.11-cu124-2.4.1`
 - `main-latest`
 - `main-20250303-py3.11-cu124-2.6.0`
 - `main-20250303-py3.11-cu124-2.5.1`
-- `main-20250303-py3.11-cu124-2.4.1`
-- `0.7.1`
+- `0.9.2`
 
 ## Cloud
 
|||||||
18
docs/faq.qmd
18
docs/faq.qmd
@@ -9,11 +9,11 @@ description: Frequently asked questions
|
|||||||
|
|
||||||
> A: Usually an issue with the GPUs communicating with each other. See the [NCCL doc](nccl.qmd)
|
> A: Usually an issue with the GPUs communicating with each other. See the [NCCL doc](nccl.qmd)
|
||||||
|
|
||||||
**Q: Exitcode -9**
|
**Q: exitcode: -9**
|
||||||
|
|
||||||
> A: This usually happens when you run out of system RAM.
|
> A: This usually happens when you run out of system RAM.
|
||||||
|
|
||||||
**Q: Exitcode -7 while using deepspeed**
|
**Q: exitcode: -7 while using deepspeed**
|
||||||
|
|
||||||
> A: Try upgrading deepspeed with: `pip install -U deepspeed`
|
||||||
|
|
||||||
@@ -110,3 +110,17 @@ description: Frequently asked questions
|
|||||||
> A: If `eot_tokens: ` is not provided, the default behavior is the same as before. EOS tokens used to delimit turns are masked/unmasked depending on whether the turn is trainable.
|
> A: If `eot_tokens: ` is not provided, the default behavior is the same as before. EOS tokens used to delimit turns are masked/unmasked depending on whether the turn is trainable.
|
||||||
|
|
||||||
> Internally, `eot_tokens: tokenizer.eos_token` and `train_on_eot: train_on_eos` (which defaults to `turn`). This transition helps clarify the naming and behavior of EOT/EOS tokens.
|
> Internally, `eot_tokens: tokenizer.eos_token` and `train_on_eot: train_on_eos` (which defaults to `turn`). This transition helps clarify the naming and behavior of EOT/EOS tokens.
|
||||||
|
|
||||||
|
**Q: `Data processing error: CAS service error`**
|
||||||
|
|
||||||
|
> A: Try disabling XET with `export HF_HUB_DISABLE_XET=1`
|
||||||
|
|
||||||
|
**Q: `torch._inductor.exc.LoweringException: NoValidChoicesError: No choices to select, please consider adding ATEN into max_autotune_gemm_backends config (defined in torch/_inductor/config.py) to allow at least one choice. `**
|
||||||
|
|
||||||
|
> A: Depending on the version of torch, you may need to include this in your YAML:
|
||||||
|
|
||||||
|
> ```yaml
|
||||||
|
> flex_attn_compile_kwargs:
|
||||||
|
> dynamic: false
|
||||||
|
> mode: max-autotune-no-cudagraphs
|
||||||
|
> ```
|
||||||
|
|||||||
@@ -55,7 +55,7 @@ output_dir: ./outputs/lora-out
|
|||||||
- To perform QLoRA finetuning, replace with `load_in_4bit: true` and `adapter: qlora`.
|
- To perform QLoRA finetuning, replace with `load_in_4bit: true` and `adapter: qlora`.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
See our [Config options](config.qmd) for more details.
|
See our [config options](config-reference.qmd) for more details.
|
||||||
|
|
||||||
### Training {#sec-training}
|
### Training {#sec-training}
|
||||||
|
|
||||||
@@ -179,8 +179,8 @@ Now that you have the basics, you might want to:
|
|||||||
|
|
||||||
Check our other guides for details on these topics:
|
Check our other guides for details on these topics:
|
||||||
|
|
||||||
- [Configuration Guide](config.qmd) - Full configuration options
|
- [Configuration Guide](config-reference.qmd) - Full configuration options
|
||||||
- [Dataset Loading](dataset-loading.qmd) - Loading datasets from various sources
|
- [Dataset Loading](dataset_loading.qmd) - Loading datasets from various sources
|
||||||
- [Dataset Formats](dataset-formats) - Working with different data formats
|
- [Dataset Formats](dataset-formats) - Working with different data formats
|
||||||
- [Multi-GPU Training](multi-gpu.qmd)
|
- [Multi-GPU Training](multi-gpu.qmd)
|
||||||
- [Multi-Node Training](multi-node.qmd)
|
- [Multi-Node Training](multi-node.qmd)
|
||||||
|
|||||||
@@ -14,8 +14,8 @@ This guide covers all the ways you can install and set up Axolotl for your envir
|
|||||||
## Requirements {#sec-requirements}
|
## Requirements {#sec-requirements}
|
||||||
|
|
||||||
- NVIDIA GPU (Ampere architecture or newer for `bf16` and Flash Attention) or AMD GPU
|
- NVIDIA GPU (Ampere architecture or newer for `bf16` and Flash Attention) or AMD GPU
|
||||||
- Python ≥3.10
|
- Python ≥3.11
|
||||||
- PyTorch ≥2.4.1
|
- PyTorch ≥2.5.1
|
||||||
|
|
||||||
## Installation Methods {#sec-installation-methods}
|
## Installation Methods {#sec-installation-methods}
|
||||||
|
|
||||||
@@ -41,6 +41,40 @@ installed) in order not to clobber it, and so that we set the correct version of
|
|||||||
dependencies that are specific to the PyTorch version or other installed
|
dependencies that are specific to the PyTorch version or other installed
|
||||||
co-dependencies.
|
co-dependencies.
|
||||||
|
|
||||||
|
### uv Installation {#sec-uv}
|
||||||
|
|
||||||
|
uv is a fast, reliable Python package installer and resolver built in Rust. It offers significant performance improvements over pip and provides better dependency resolution, making it an excellent choice for complex environments.
|
||||||
|
|
||||||
|
Install uv if not already installed
|
||||||
|
```{.bash}
|
||||||
|
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||||
|
source $HOME/.local/bin/env
|
||||||
|
```
|
||||||
|
|
||||||
|
Choose your CUDA version to use with PyTorch (e.g. `cu124`, `cu126`, `cu128`), then create and activate the venv:
|
||||||
|
```{.bash}
|
||||||
|
export UV_TORCH_BACKEND=cu126
|
||||||
|
uv venv --no-project --relocatable
|
||||||
|
source .venv/bin/activate
|
||||||
|
```
|
||||||
|
|
||||||
|
Install PyTorch
|
||||||
|
- PyTorch 2.6.0 recommended
|
||||||
|
```{.bash}
|
||||||
|
uv pip install packaging setuptools wheel
|
||||||
|
uv pip install torch==2.6.0
|
||||||
|
uv pip install awscli pydantic
|
||||||
|
```
|
||||||
|
|
||||||
|
Install Axolotl from PyPI
|
||||||
|
```{.bash}
|
||||||
|
uv pip install --no-build-isolation axolotl[deepspeed,flash-attn]
|
||||||
|
|
||||||
|
# optionally install with vLLM if you're using torch==2.6.0 and want to train w/ GRPO
|
||||||
|
uv pip install --no-build-isolation axolotl[deepspeed,flash-attn,vllm]
|
||||||
|
```
|
||||||
|
|
||||||
### Edge/Development Build {#sec-edge-build}
|
### Edge/Development Build {#sec-edge-build}
|
||||||
|
|
||||||
For the latest features between releases:
|
For the latest features between releases:
|
||||||
@@ -119,7 +153,7 @@ We recommend using WSL2 (Windows Subsystem for Linux) or Docker.
|
|||||||
|
|
||||||
### Conda/Pip venv {#sec-conda}
|
### Conda/Pip venv {#sec-conda}
|
||||||
|
|
||||||
1. Install Python ≥3.10
|
1. Install Python ≥3.11
|
||||||
2. Install PyTorch: https://pytorch.org/get-started/locally/
|
2. Install PyTorch: https://pytorch.org/get-started/locally/
|
||||||
3. Install Axolotl:
|
3. Install Axolotl:
|
||||||
```{.bash}
|
```{.bash}
|
||||||
|
|||||||
@@ -84,6 +84,10 @@ lora_qkv_kernel: true
|
|||||||
lora_o_kernel: true
|
lora_o_kernel: true
|
||||||
```
|
```
|
||||||
|
|
||||||
|
::: {.callout-note}
|
||||||
|
Currently, LoRA kernels are not supported for RLHF training, only SFT.
|
||||||
|
:::
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
- One or more NVIDIA or AMD GPUs (in order to use the Triton kernels)
|
- One or more NVIDIA or AMD GPUs (in order to use the Triton kernels)
|
||||||
|
|||||||
@@ -43,7 +43,7 @@ datasets:
|
|||||||
# leave the vision model and vision tower frozen
|
# leave the vision model and vision tower frozen
|
||||||
# load_in_8bit: true
|
# load_in_8bit: true
|
||||||
adapter: lora
|
adapter: lora
|
||||||
lora_target_modules: 'language_model.model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
|
lora_target_modules: 'model.language_model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
|
||||||
|
|
||||||
# (optional) if you want to resize images to a set size
|
# (optional) if you want to resize images to a set size
|
||||||
image_size: 512
|
image_size: 512
|
||||||
|
|||||||
32
docs/qat.qmd
Normal file
32
docs/qat.qmd
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
---
|
||||||
|
title: "Quantization Aware Training (QAT)"
|
||||||
|
back-to-top-navigation: true
|
||||||
|
toc: true
|
||||||
|
toc-expand: 2
|
||||||
|
toc-depth: 4
|
||||||
|
---
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
[Quantization Aware Training](https://pytorch.org/blog/introduction-to-quantization-on-pytorch/#quantization-aware-training) (QAT) is a technique for improving the accuracy of quantized models
by applying "fake" quantization to the model's weights (and optionally, activations) during training. This fake
quantization lets the model adjust to the noise introduced by quantization, so that when the model is eventually
quantized, the accuracy loss is minimized. We use the quantization techniques implemented in [torchao](https://github.com/pytorch/ao) to provide
support for QAT and post-training quantization (PTQ) in axolotl.
|
||||||
|
|
||||||
|
We recommend reviewing the excellent QAT tutorial in the [torchtune library](https://pytorch.org/torchtune/main/tutorials/qat_finetune.html#quantizing-the-qat-model),
|
||||||
|
and the QAT documentation in the [torchao library](https://github.com/pytorch/ao/tree/main/torchao/quantization/qat), for more details.
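To build intuition for what "fake" quantization means, the sketch below shows the core quantize-dequantize pattern in plain PyTorch (a conceptual example only; it is not the torchao implementation used by axolotl): weights are rounded to a low-precision grid and immediately mapped back to floats, so the forward pass sees quantization noise while the optimizer keeps updating full-precision weights.

```python
import torch

def fake_quantize(w: torch.Tensor, num_bits: int = 8) -> torch.Tensor:
    """Quantize-dequantize a tensor so it carries quantization noise but stays float."""
    qmin, qmax = -(2 ** (num_bits - 1)), 2 ** (num_bits - 1) - 1
    scale = w.detach().abs().max() / qmax
    w_q = torch.clamp(torch.round(w / scale), qmin, qmax)
    # Straight-through estimator: gradients flow through as if this op were the identity.
    return w + (w_q * scale - w).detach()

weights = torch.randn(4, 4, requires_grad=True)
noisy = fake_quantize(weights)  # used in the forward pass during QAT
noisy.sum().backward()          # gradients still reach the full-precision weights
```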
|
||||||
|
|
||||||
|
## Configuring QAT in Axolotl
|
||||||
|
|
||||||
|
To enable QAT in axolotl, add the following to your configuration file:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
qat:
|
||||||
|
activation_dtype: # Optional[str] = "int8". Fake quantization layout to use for activation quantization. Valid options are "int4" and "int8"
|
||||||
|
weight_dtype: # Optional[str] = "int8". Fake quantization layout to use for weight quantization. Valid options are "int4" and "int8"
|
||||||
|
group_size: # Optional[int] = 32. The number of elements in each group for per-group fake quantization
|
||||||
|
fake_quant_after_n_steps: # Optional[int] = None. The number of steps to apply fake quantization after
|
||||||
|
```
|
||||||
|
|
||||||
|
Once you have finished training, you must quantize your model using the same quantization configuration that you used during training. You can use the [`quantize`](./quantize.qmd) command to do this.
|
||||||
53
docs/quantize.qmd
Normal file
53
docs/quantize.qmd
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
---
|
||||||
|
title: "Quantization with torchao"
|
||||||
|
back-to-top-navigation: true
|
||||||
|
toc: true
|
||||||
|
toc-expand: 2
|
||||||
|
toc-depth: 4
|
||||||
|
---
|
||||||
|
|
||||||
|
Quantization is a technique to lower the memory footprint of your model, potentially at the cost of accuracy or model performance. We support quantizing your model using the [torchao](https://github.com/pytorch/ao) library. Both post-training quantization (PTQ) and quantization-aware training (QAT) are supported.
|
||||||
|
|
||||||
|
|
||||||
|
::: {.callout-note}
|
||||||
|
|
||||||
|
We do not currently support other quantization techniques such as GGUF, GPTQ, or EXL2.
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Configuring Quantization in Axolotl
|
||||||
|
|
||||||
|
Quantization is configured using the `quantization` key in your configuration file.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
base_model: # The path to the model to quantize.
|
||||||
|
quantization:
|
||||||
|
weight_dtype: # Optional[str] = "int8". Fake quantization layout to use for weight quantization. Valid options are uintX for X in [1, 2, 3, 4, 5, 6, 7], or int4, or int8
|
||||||
|
activation_dtype: # Optional[str] = "int8". Fake quantization layout to use for activation quantization. Valid options are "int4" and "int8"
|
||||||
|
group_size: # Optional[int] = 32. The number of elements in each group for per-group fake quantization
|
||||||
|
quantize_embedding: # Optional[bool] = False. Whether to quantize the embedding layer.
|
||||||
|
|
||||||
|
output_dir: # The path to the output directory.
|
||||||
|
```
|
||||||
|
|
||||||
|
Once quantization is complete, your quantized model will be saved in the `{output_dir}/quantized` directory.
|
||||||
|
|
||||||
|
You may also use the `quantize` command to quantize a model which has been trained with [QAT](./qat.qmd). To do this, use the same QAT configuration file that you used to train the model:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# qat.yml
|
||||||
|
qat:
|
||||||
|
activation_dtype: int8
|
||||||
|
weight_dtype: int8
|
||||||
|
group_size: 256
|
||||||
|
quantize_embedding: true
|
||||||
|
|
||||||
|
output_dir: # The path to the output directory used during training where the final checkpoint has been saved.
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
axolotl quantize qat.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
This ensures that the model is quantized with the same configuration that was used to train it.
|
||||||
@@ -16,7 +16,8 @@ feedback. Various methods include, but not limited to:
|
|||||||
- [Identity Preference Optimization (IPO)](#ipo)
|
- [Identity Preference Optimization (IPO)](#ipo)
|
||||||
- [Kahneman-Tversky Optimization (KTO)](#kto)
|
- [Kahneman-Tversky Optimization (KTO)](#kto)
|
||||||
- [Odds Ratio Preference Optimization (ORPO)](#orpo)
|
- [Odds Ratio Preference Optimization (ORPO)](#orpo)
|
||||||
- Proximal Policy Optimization (PPO) (not yet supported in axolotl)
|
- [Group Relative Policy Optimization (GRPO)](#grpo)
|
||||||
|
- Proximal Policy Optimization (PPO) (not yet supported in axolotl; if you're interested in contributing, please reach out!)
|
||||||
|
|
||||||
|
|
||||||
## RLHF using Axolotl
|
## RLHF using Axolotl
|
||||||
@@ -499,7 +500,7 @@ The input format is a simple JSON input with customizable fields based on the ab
|
|||||||
### GRPO
|
### GRPO
|
||||||
|
|
||||||
::: {.callout-tip}
|
::: {.callout-tip}
|
||||||
Check out our [GRPO cookbook](https://github.com/axolotl-ai-cloud/axolotl-cookbook/tree/main/grpo#training-an-r1-style-large-language-model-using-grpo).
|
Check out our [GRPO cookbook](https://github.com/axolotl-ai-cloud/grpo_code).
|
||||||
:::
|
:::
|
||||||
|
|
||||||
In the latest GRPO implementation, `vLLM` is used to significantly speed up trajectory generation during training. In this example, we're using 4 GPUs: 2 for training and 2 for vLLM:
|
||||||
@@ -582,7 +583,20 @@ datasets:
|
|||||||
|
|
||||||
To see other examples of custom reward functions, please see [TRL GRPO Docs](https://github.com/huggingface/trl/blob/main/docs/source/grpo_trainer.md#using-a-custom-reward-function).
|
To see other examples of custom reward functions, please see [TRL GRPO Docs](https://github.com/huggingface/trl/blob/main/docs/source/grpo_trainer.md#using-a-custom-reward-function).
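As a minimal sketch (assuming the TRL-style reward interface, in which a reward function receives the batch of generated completions and returns one score per completion), a toy length-based reward could look like the following; the function name is just an example:

```python
def reward_shorter_completions(completions, **kwargs):
    """Toy reward: give higher scores to shorter completions (one float per completion)."""
    return [-float(len(completion)) for completion in completions]
```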
|
||||||
|
|
||||||
To see description of the configs, please see [TRLConfig](https://github.com/axolotl-ai-cloud/axolotl/blob/main/src/axolotl/utils/config/models/input/v0_4_1/trl.py).
|
To see all configs, please see [TRLConfig](https://github.com/axolotl-ai-cloud/axolotl/blob/v0.9.2/src/axolotl/utils/schemas/trl.py).
|
||||||
|
|
||||||
|
#### GRPO with DAPO/Dr. GRPO loss
|
||||||
|
|
||||||
|
The DAPO paper, and subsequently the Dr. GRPO paper, proposed alternative loss functions for GRPO to remedy the length bias that penalizes longer responses.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
trl:
|
||||||
|
loss_type: dr_grpo
|
||||||
|
# Normalizes loss based on max completion length (default: 256)
|
||||||
|
max_completion_length:
|
||||||
|
```
|
||||||
|
|
||||||
|
For more information, see [GRPO docs](https://huggingface.co/docs/trl/v0.17.0/en/grpo_trainer#loss-types).
|
||||||
|
|
||||||
### SimPO
|
### SimPO
|
||||||
|
|
||||||
|
|||||||
752
docs/scripts/generate_config_docs.py
Normal file
752
docs/scripts/generate_config_docs.py
Normal file
@@ -0,0 +1,752 @@
|
|||||||
|
# type: ignore
|
||||||
|
|
||||||
|
"""
|
||||||
|
Quarto documentation generation from Pydantic models. Uses Pydantic model source code
|
||||||
|
to automatically group fields, including inherited fields from parent classes.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import ast
|
||||||
|
import inspect
|
||||||
|
import textwrap
|
||||||
|
import types
|
||||||
|
import typing
|
||||||
|
from typing import Any, FrozenSet, Type, Union
|
||||||
|
|
||||||
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
from axolotl.utils.schemas.config import AxolotlInputConfig
|
||||||
|
|
||||||
|
|
||||||
|
class QuartoGenerator:
|
||||||
|
"""Generate Quarto documentation from Pydantic models."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self._class_fields_cache = {}
|
||||||
|
self._inheritance_map_cache = {}
|
||||||
|
self._nested_models_cache = {}
|
||||||
|
|
||||||
|
def _get_direct_fields(self, cls: Type[BaseModel]) -> FrozenSet[str]:
|
||||||
|
"""Get fields defined directly in a single class (not inherited)."""
|
||||||
|
if cls in self._class_fields_cache:
|
||||||
|
return self._class_fields_cache[cls]
|
||||||
|
|
||||||
|
fields = set()
|
||||||
|
|
||||||
|
# Get annotated fields
|
||||||
|
if hasattr(cls, "__annotations__"):
|
||||||
|
fields.update(cls.__annotations__.keys())
|
||||||
|
|
||||||
|
# Filter out private/special methods
|
||||||
|
fields = {f for f in fields if not f.startswith("_")}
|
||||||
|
|
||||||
|
result = frozenset(fields)
|
||||||
|
self._class_fields_cache[cls] = result
|
||||||
|
return result
|
||||||
|
|
||||||
|
def _is_pydantic_model(self, type_obj) -> bool:
|
||||||
|
"""Check if a type is a Pydantic BaseModel."""
|
||||||
|
return inspect.isclass(type_obj) and issubclass(type_obj, BaseModel)
|
||||||
|
|
||||||
|
# pylint: disable=too-many-return-statements
|
||||||
|
def _extract_nested_type(self, field_type) -> Any:
|
||||||
|
"""Extract the actual type from complex type annotations."""
|
||||||
|
# Handle Annotated types (Python 3.9+)
|
||||||
|
if hasattr(typing, "get_origin") and hasattr(typing, "get_args"):
|
||||||
|
origin = typing.get_origin(field_type)
|
||||||
|
args = typing.get_args(field_type)
|
||||||
|
|
||||||
|
if origin is not None:
|
||||||
|
# Handle Annotated[SomeType, ...] - extract the first argument
|
||||||
|
if hasattr(typing, "Annotated") and origin is typing.Annotated:
|
||||||
|
if args:
|
||||||
|
return self._extract_nested_type(
|
||||||
|
args[0]
|
||||||
|
) # Recursively process the actual type
|
||||||
|
|
||||||
|
# Handle list[SomeType], List[SomeType], etc.
|
||||||
|
elif origin in (list, typing.List):
|
||||||
|
if args:
|
||||||
|
return self._extract_nested_type(
|
||||||
|
args[0]
|
||||||
|
) # Extract element type
|
||||||
|
|
||||||
|
# Handle Union types (including | syntax)
|
||||||
|
elif origin is typing.Union:
|
||||||
|
# Get non-None types from the Union
|
||||||
|
non_none_types = [arg for arg in args if arg is not type(None)]
|
||||||
|
if len(non_none_types) >= 1:
|
||||||
|
# Prioritize Pydantic models over primitive types
|
||||||
|
pydantic_models = [
|
||||||
|
arg
|
||||||
|
for arg in non_none_types
|
||||||
|
if self._is_pydantic_model(arg)
|
||||||
|
]
|
||||||
|
if pydantic_models:
|
||||||
|
# Return the first Pydantic model found
|
||||||
|
return self._extract_nested_type(pydantic_models[0])
|
||||||
|
|
||||||
|
# No Pydantic models, return the first non-None type
|
||||||
|
return self._extract_nested_type(non_none_types[0])
|
||||||
|
|
||||||
|
# Handle new Python 3.10+ union syntax (PeftConfig | None)
|
||||||
|
if hasattr(field_type, "__class__") and field_type.__class__ is types.UnionType:
|
||||||
|
# Get non-None types from the Union
|
||||||
|
non_none_types = [
|
||||||
|
arg for arg in field_type.__args__ if arg is not type(None)
|
||||||
|
]
|
||||||
|
if len(non_none_types) >= 1:
|
||||||
|
# Prioritize Pydantic models over primitive types
|
||||||
|
pydantic_models = [
|
||||||
|
arg for arg in non_none_types if self._is_pydantic_model(arg)
|
||||||
|
]
|
||||||
|
if pydantic_models:
|
||||||
|
return self._extract_nested_type(pydantic_models[0])
|
||||||
|
return self._extract_nested_type(non_none_types[0])
|
||||||
|
|
||||||
|
# Handle old typing.Union syntax (fallback)
|
||||||
|
if hasattr(field_type, "__origin__"):
|
||||||
|
if field_type.__origin__ is Union:
|
||||||
|
# Get non-None types from the Union
|
||||||
|
non_none_types = [
|
||||||
|
arg for arg in field_type.__args__ if arg is not type(None)
|
||||||
|
]
|
||||||
|
if len(non_none_types) >= 1:
|
||||||
|
# Prioritize Pydantic models over primitive types
|
||||||
|
pydantic_models = [
|
||||||
|
arg for arg in non_none_types if self._is_pydantic_model(arg)
|
||||||
|
]
|
||||||
|
if pydantic_models:
|
||||||
|
return self._extract_nested_type(pydantic_models[0])
|
||||||
|
return self._extract_nested_type(non_none_types[0])
|
||||||
|
# Handle other generic types like dict[str, Any], etc.
|
||||||
|
elif hasattr(field_type, "__args__"):
|
||||||
|
return field_type
|
||||||
|
|
||||||
|
return field_type
|
||||||
|
|
||||||
|
# pylint: disable=too-many-return-statements
|
||||||
|
def _extract_all_pydantic_models_from_type(
|
||||||
|
self, field_type
|
||||||
|
) -> list[type[BaseModel]]:
|
||||||
|
"""Extract all Pydantic models from a type annotation, including from Unions."""
|
||||||
|
models = []
|
||||||
|
|
||||||
|
if field_type is None:
|
||||||
|
return models
|
||||||
|
|
||||||
|
# Handle Annotated types
|
||||||
|
if hasattr(typing, "get_origin") and hasattr(typing, "get_args"):
|
||||||
|
origin = typing.get_origin(field_type)
|
||||||
|
args = typing.get_args(field_type)
|
||||||
|
|
||||||
|
if origin is not None:
|
||||||
|
# Handle Annotated[SomeType, ...] - extract from the first argument
|
||||||
|
if hasattr(typing, "Annotated") and origin is typing.Annotated:
|
||||||
|
if args:
|
||||||
|
models.extend(
|
||||||
|
self._extract_all_pydantic_models_from_type(args[0])
|
||||||
|
)
|
||||||
|
return models
|
||||||
|
|
||||||
|
# Handle list[SomeType], List[SomeType], etc.
|
||||||
|
if origin in (list, typing.List):
|
||||||
|
if args:
|
||||||
|
models.extend(
|
||||||
|
self._extract_all_pydantic_models_from_type(args[0])
|
||||||
|
)
|
||||||
|
return models
|
||||||
|
|
||||||
|
# Handle Union types
|
||||||
|
if origin is typing.Union:
|
||||||
|
for arg in args:
|
||||||
|
if arg is not type(None): # Skip None type
|
||||||
|
models.extend(
|
||||||
|
self._extract_all_pydantic_models_from_type(arg)
|
||||||
|
)
|
||||||
|
return models
|
||||||
|
|
||||||
|
# Handle new Python 3.10+ union syntax
|
||||||
|
if hasattr(field_type, "__class__") and field_type.__class__ is types.UnionType:
|
||||||
|
for arg in field_type.__args__:
|
||||||
|
if arg is not type(None): # Skip None type
|
||||||
|
models.extend(self._extract_all_pydantic_models_from_type(arg))
|
||||||
|
return models
|
||||||
|
|
||||||
|
# Handle old typing.Union syntax (fallback)
|
||||||
|
if hasattr(field_type, "__origin__") and field_type.__origin__ is Union:
|
||||||
|
for arg in field_type.__args__:
|
||||||
|
if arg is not type(None): # Skip None type
|
||||||
|
models.extend(self._extract_all_pydantic_models_from_type(arg))
|
||||||
|
return models
|
||||||
|
|
||||||
|
# Check if this type itself is a Pydantic model
|
||||||
|
if self._is_pydantic_model(field_type):
|
||||||
|
models.append(field_type)
|
||||||
|
|
||||||
|
return models
|
||||||
|
|
||||||
|
def _get_nested_models(
|
||||||
|
self, model_class: type[BaseModel], visited=None
|
||||||
|
) -> dict[str, type[BaseModel]]:
|
||||||
|
"""Get all nested Pydantic models from a model class."""
|
||||||
|
if visited is None:
|
||||||
|
visited = set()
|
||||||
|
|
||||||
|
# Avoid infinite recursion
|
||||||
|
if model_class in visited:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
if model_class in self._nested_models_cache:
|
||||||
|
return self._nested_models_cache[model_class]
|
||||||
|
|
||||||
|
visited.add(model_class)
|
||||||
|
nested_models = {}
|
||||||
|
|
||||||
|
# Check all fields in the model
|
||||||
|
for field_info in model_class.model_fields.values():
|
||||||
|
field_type = self._extract_nested_type(field_info.annotation)
|
||||||
|
|
||||||
|
if self._is_pydantic_model(field_type):
|
||||||
|
nested_models[field_type.__name__] = field_type
|
||||||
|
# Recursively get nested models from this nested model
|
||||||
|
deeper_nested = self._get_nested_models(field_type, visited.copy())
|
||||||
|
nested_models.update(deeper_nested)
|
||||||
|
|
||||||
|
self._nested_models_cache[model_class] = nested_models
|
||||||
|
return nested_models
|
||||||
|
|
||||||
|
def _build_inheritance_map(self, child_class: Type[BaseModel]):
|
||||||
|
"""Build inheritance map for a class and all its parents."""
|
||||||
|
if child_class in self._inheritance_map_cache:
|
||||||
|
return self._inheritance_map_cache[child_class]
|
||||||
|
|
||||||
|
inheritance_map = {}
|
||||||
|
|
||||||
|
# Get MRO and filter out BaseModel and object
|
||||||
|
mro_classes = [
|
||||||
|
cls
|
||||||
|
for cls in child_class.__mro__
|
||||||
|
if cls not in (BaseModel, object) and hasattr(cls, "__annotations__")
|
||||||
|
]
|
||||||
|
|
||||||
|
# Process each class in the MRO
|
||||||
|
for cls in mro_classes:
|
||||||
|
inheritance_map[cls] = self._get_direct_fields(cls)
|
||||||
|
|
||||||
|
self._inheritance_map_cache[child_class] = inheritance_map
|
||||||
|
return inheritance_map
|
||||||
|
|
||||||
|
def _wrap_comment(self, text: str, width: int = 88) -> list[str]:
|
||||||
|
"""Wrap a comment to specified width, accounting for '# ' prefix."""
|
||||||
|
if not text.strip():
|
||||||
|
return ["#"]
|
||||||
|
|
||||||
|
# Account for "# " prefix (2 characters)
|
||||||
|
content_width = width - 2
|
||||||
|
wrapped_lines = textwrap.wrap(text, width=content_width)
|
||||||
|
return [f"# {line}" for line in wrapped_lines]
|
||||||
|
|
||||||
|
def _extract_type_from_source(
|
||||||
|
self, model_class: type[BaseModel], field_name: str
|
||||||
|
) -> str:
|
||||||
|
"""Extract the actual type annotation text from source code, checking inheritance chain."""
|
||||||
|
# Use inheritance map to check classes efficiently
|
||||||
|
inheritance_map = self._build_inheritance_map(model_class)
|
||||||
|
|
||||||
|
# Check classes in MRO order
|
||||||
|
for cls in model_class.__mro__:
|
||||||
|
if cls in inheritance_map and field_name in inheritance_map[cls]:
|
||||||
|
type_annotation = self._get_type_from_class_source(cls, field_name)
|
||||||
|
if type_annotation != "unknown":
|
||||||
|
return type_annotation
|
||||||
|
|
||||||
|
return "unknown"
|
||||||
|
|
||||||
|
def _get_type_from_class_source(self, class_obj: type, field_name: str) -> str:
|
||||||
|
"""Extract type annotation from a specific class's source code."""
|
||||||
|
try:
|
||||||
|
source = inspect.getsource(class_obj)
|
||||||
|
tree = ast.parse(source)
|
||||||
|
except (OSError, TypeError):
|
||||||
|
return "unknown"
|
||||||
|
|
||||||
|
# Find the class definition
|
||||||
|
for node in tree.body:
|
||||||
|
if isinstance(node, ast.ClassDef) and node.name == class_obj.__name__:
|
||||||
|
# Find the field assignment
|
||||||
|
for body_node in node.body:
|
||||||
|
if isinstance(body_node, ast.AnnAssign) and isinstance(
|
||||||
|
body_node.target, ast.Name
|
||||||
|
):
|
||||||
|
if body_node.target.id == field_name and body_node.annotation:
|
||||||
|
return ast.unparse(body_node.annotation)
|
||||||
|
break
|
||||||
|
|
||||||
|
return "unknown"
|
||||||
|
|
||||||
|
def _extract_field_groups_from_all_classes(
|
||||||
|
self, model_class: type[BaseModel]
|
||||||
|
) -> list[dict]:
|
||||||
|
"""Extract field groups from all classes in the inheritance hierarchy."""
|
||||||
|
all_groups = []
|
||||||
|
inheritance_map = self._build_inheritance_map(model_class)
|
||||||
|
|
||||||
|
# Get all Pydantic base classes in MRO order (most specific first)
|
||||||
|
# This puts AxolotlInputConfig fields first, then parent class fields
|
||||||
|
pydantic_classes = [
|
||||||
|
cls
|
||||||
|
for cls in model_class.__mro__
|
||||||
|
if cls in inheritance_map and inheritance_map[cls]
|
||||||
|
]
|
||||||
|
|
||||||
|
# Extract groups from each class
|
||||||
|
for cls in pydantic_classes:
|
||||||
|
class_groups = self._extract_field_groups_from_source(cls)
|
||||||
|
for group in class_groups:
|
||||||
|
all_groups.append(group)
|
||||||
|
|
||||||
|
# If no groups found, create a default grouping by class
|
||||||
|
if not all_groups:
|
||||||
|
for cls in pydantic_classes:
|
||||||
|
fields_in_class = inheritance_map[cls]
|
||||||
|
if fields_in_class:
|
||||||
|
all_groups.append(
|
||||||
|
{
|
||||||
|
"fields": list(fields_in_class),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
return all_groups
|
||||||
|
|
||||||
|
# pylint: disable=too-many-return-statements
|
||||||
|
def _extract_field_groups_from_source(
|
||||||
|
self, model_class: type[BaseModel]
|
||||||
|
) -> list[dict]:
|
||||||
|
"""Extract field groups from source code based on blank lines and comments."""
|
||||||
|
try:
|
||||||
|
source = inspect.getsource(model_class)
|
||||||
|
tree = ast.parse(source)
|
||||||
|
except (OSError, TypeError):
|
||||||
|
# Fallback if we can't get source code
|
||||||
|
fields_in_class = self._get_direct_fields(model_class)
|
||||||
|
if fields_in_class:
|
||||||
|
return [
|
||||||
|
{
|
||||||
|
"fields": list(fields_in_class),
|
||||||
|
}
|
||||||
|
]
|
||||||
|
return []
|
||||||
|
|
||||||
|
groups = []
|
||||||
|
current_group_fields = []
|
||||||
|
current_group_comment = None
|
||||||
|
|
||||||
|
# Find the class definition
|
||||||
|
class_node = None
|
||||||
|
for node in ast.walk(tree):
|
||||||
|
if isinstance(node, ast.ClassDef) and node.name == model_class.__name__:
|
||||||
|
class_node = node
|
||||||
|
break
|
||||||
|
|
||||||
|
if not class_node:
|
||||||
|
fields_in_class = self._get_direct_fields(model_class)
|
||||||
|
if fields_in_class:
|
||||||
|
return [
|
||||||
|
{
|
||||||
|
"fields": list(fields_in_class),
|
||||||
|
}
|
||||||
|
]
|
||||||
|
return []
|
||||||
|
|
||||||
|
# Parse the source lines to detect groupings
|
||||||
|
source_lines = source.split("\n")
|
||||||
|
|
||||||
|
# Get fields that are actually defined in this specific class
|
||||||
|
fields_in_class = self._get_direct_fields(model_class)
|
||||||
|
|
||||||
|
# Find assignments that correspond to model fields for THIS class only
|
||||||
|
field_assignments = []
|
||||||
|
for node in class_node.body:
|
||||||
|
if isinstance(node, ast.AnnAssign) and isinstance(node.target, ast.Name):
|
||||||
|
field_name = node.target.id
|
||||||
|
if field_name in fields_in_class:
|
||||||
|
field_assignments.append(
|
||||||
|
{
|
||||||
|
"name": field_name,
|
||||||
|
"lineno": node.lineno,
|
||||||
|
"end_lineno": getattr(node, "end_lineno", node.lineno),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
if not field_assignments:
|
||||||
|
if fields_in_class:
|
||||||
|
return [
|
||||||
|
{
|
||||||
|
"fields": list(fields_in_class),
|
||||||
|
}
|
||||||
|
]
|
||||||
|
return []
|
||||||
|
|
||||||
|
# Sort by line number
|
||||||
|
field_assignments.sort(key=lambda x: x["lineno"])
|
||||||
|
|
||||||
|
# Group fields based on blank lines and comments
|
||||||
|
for i, field_info in enumerate(field_assignments):
|
||||||
|
field_name = field_info["name"]
|
||||||
|
current_line = field_info["lineno"]
|
||||||
|
|
||||||
|
# Check if this starts a new group (blank line before or significant gap)
|
||||||
|
is_new_group = False
|
||||||
|
|
||||||
|
if i == 0:
|
||||||
|
is_new_group = True
|
||||||
|
else:
|
||||||
|
prev_end_line = field_assignments[i - 1]["end_lineno"]
|
||||||
|
|
||||||
|
# Check for blank lines or comments between fields
|
||||||
|
lines_between = source_lines[prev_end_line : current_line - 1]
|
||||||
|
has_blank_line = any(line.strip() == "" for line in lines_between)
|
||||||
|
has_comment = any(
|
||||||
|
line.strip().startswith("#") for line in lines_between
|
||||||
|
)
|
||||||
|
|
||||||
|
# Start new group if there's a blank line or comment, or significant gap
|
||||||
|
if has_blank_line or has_comment or (current_line - prev_end_line > 3):
|
||||||
|
is_new_group = True
|
||||||
|
|
||||||
|
if is_new_group and current_group_fields:
|
||||||
|
# Save the previous group
|
||||||
|
groups.append(
|
||||||
|
{
|
||||||
|
"fields": current_group_fields.copy(),
|
||||||
|
"description": current_group_comment,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
current_group_fields = []
|
||||||
|
current_group_comment = None
|
||||||
|
|
||||||
|
current_group_fields.append(field_name)
|
||||||
|
|
||||||
|
# Add the final group
|
||||||
|
if current_group_fields:
|
||||||
|
groups.append(
|
||||||
|
{
|
||||||
|
"fields": current_group_fields,
|
||||||
|
"description": current_group_comment,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
return groups
|
||||||
|
|
||||||
|
def _generate_field_documentation(
|
||||||
|
self,
|
||||||
|
model_class: type[BaseModel],
|
||||||
|
field_name: str,
|
||||||
|
field_info: dict,
|
||||||
|
field_type_str: str,
|
||||||
|
is_required: bool,
|
||||||
|
indent_level: int = 0,
|
||||||
|
visited_models: set = None,
|
||||||
|
) -> list[str]:
|
||||||
|
"""Generate documentation for a single field, expanding nested models inline."""
|
||||||
|
if visited_models is None:
|
||||||
|
visited_models = set()
|
||||||
|
|
||||||
|
lines = []
|
||||||
|
indent = " " * indent_level
|
||||||
|
|
||||||
|
# Get the actual field type for nested model detection
|
||||||
|
if field_name in model_class.model_fields:
|
||||||
|
pydantic_field_info = model_class.model_fields[field_name]
|
||||||
|
actual_field_type = pydantic_field_info.annotation
|
||||||
|
else:
|
||||||
|
actual_field_type = None
|
||||||
|
|
||||||
|
# Add description comment if available
|
||||||
|
description = field_info.get("description", "")
|
||||||
|
if description:
|
||||||
|
wrapped_lines = self._wrap_comment(description, width=88 - len(indent))
|
||||||
|
for line in wrapped_lines:
|
||||||
|
lines.append(f"{indent}{line}")
|
||||||
|
|
||||||
|
# Extract nested Pydantic models from the type annotation
|
||||||
|
nested_models = self._extract_all_pydantic_models_from_type(actual_field_type)
|
||||||
|
|
||||||
|
# Filter out already visited models to prevent infinite recursion
|
||||||
|
expandable_models = [
|
||||||
|
model for model in nested_models if model not in visited_models
|
||||||
|
]
|
||||||
|
|
||||||
|
if expandable_models:
|
||||||
|
# This field contains Pydantic models that can be expanded
|
||||||
|
|
||||||
|
# Show the field with its full type annotation
|
||||||
|
field_line = f"{indent}{field_name}: {field_type_str}"
|
||||||
|
if field_info.get("default") is not None:
|
||||||
|
field_line += f" = {field_info['default']}"
|
||||||
|
if is_required:
|
||||||
|
field_line += " (required)"
|
||||||
|
lines.append(field_line)
|
||||||
|
|
||||||
|
# Add to visited to prevent infinite recursion
|
||||||
|
new_visited = visited_models.copy()
|
||||||
|
new_visited.update(expandable_models)
|
||||||
|
|
||||||
|
# Expand each nested Pydantic model
|
||||||
|
for i, nested_model in enumerate(expandable_models):
|
||||||
|
if i > 0:
|
||||||
|
lines.append("\n")
|
||||||
|
lines.append(f"{indent} # For {nested_model.__name__}:")
|
||||||
|
|
||||||
|
# Get nested model schema
|
||||||
|
try:
|
||||||
|
nested_schema = nested_model.model_json_schema()
|
||||||
|
nested_properties = nested_schema.get("properties", {})
|
||||||
|
nested_required = nested_schema.get("required", [])
|
||||||
|
except Exception: # pylint: disable=broad-exception-caught
|
||||||
|
# Fallback: use model fields directly
|
||||||
|
nested_properties = {}
|
||||||
|
nested_required = []
|
||||||
|
for (
|
||||||
|
nested_field_name,
|
||||||
|
nested_field_info,
|
||||||
|
) in nested_model.model_fields.items():
|
||||||
|
nested_description = ""
|
||||||
|
if (
|
||||||
|
hasattr(nested_field_info, "json_schema_extra")
|
||||||
|
and nested_field_info.json_schema_extra
|
||||||
|
):
|
||||||
|
nested_description = (
|
||||||
|
nested_field_info.json_schema_extra.get(
|
||||||
|
"description", ""
|
||||||
|
)
|
||||||
|
)
|
||||||
|
elif (
|
||||||
|
hasattr(nested_field_info, "description")
|
||||||
|
and nested_field_info.description
|
||||||
|
):
|
||||||
|
nested_description = nested_field_info.description
|
||||||
|
|
||||||
|
nested_default_val = None
|
||||||
|
if (
|
||||||
|
hasattr(nested_field_info, "default")
|
||||||
|
and nested_field_info.default is not None
|
||||||
|
):
|
||||||
|
if str(nested_field_info.default) != "PydanticUndefined":
|
||||||
|
nested_default_val = nested_field_info.default
|
||||||
|
|
||||||
|
nested_properties[nested_field_name] = {
|
||||||
|
"type": "unknown",
|
||||||
|
"description": nested_description,
|
||||||
|
"default": nested_default_val,
|
||||||
|
}
|
||||||
|
|
||||||
|
if nested_field_info.is_required():
|
||||||
|
nested_required.append(nested_field_name)
|
||||||
|
|
||||||
|
# Get field groups for the nested model
|
||||||
|
nested_field_groups = self._extract_field_groups_from_all_classes(
|
||||||
|
nested_model
|
||||||
|
)
|
||||||
|
|
||||||
|
# Generate nested fields with increased indentation
|
||||||
|
for i, group in enumerate(nested_field_groups):
|
||||||
|
if not group["fields"]:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Add blank line between groups (except before first group)
|
||||||
|
if i > 0:
|
||||||
|
lines.append("")
|
||||||
|
|
||||||
|
# Process nested fields
|
||||||
|
for nested_field_name in group["fields"]:
|
||||||
|
if nested_field_name not in nested_properties:
|
||||||
|
continue
|
||||||
|
|
||||||
|
nested_field_info = nested_properties[nested_field_name]
|
||||||
|
nested_field_type = self._extract_type_from_source(
|
||||||
|
nested_model, nested_field_name
|
||||||
|
)
|
||||||
|
nested_is_required = nested_field_name in nested_required
|
||||||
|
|
||||||
|
# Recursively generate documentation for nested field
|
||||||
|
nested_lines = self._generate_field_documentation(
|
||||||
|
nested_model,
|
||||||
|
nested_field_name,
|
||||||
|
nested_field_info,
|
||||||
|
nested_field_type,
|
||||||
|
nested_is_required,
|
||||||
|
indent_level + 1,
|
||||||
|
new_visited,
|
||||||
|
)
|
||||||
|
lines.extend(nested_lines)
|
||||||
|
else:
|
||||||
|
# Regular field (no expandable nested models)
|
||||||
|
field_line = f"{indent}{field_name}: {field_type_str}"
|
||||||
|
if field_info.get("default") is not None:
|
||||||
|
field_line += f" = {field_info['default']}"
|
||||||
|
if is_required:
|
||||||
|
field_line += " (required)"
|
||||||
|
lines.append(field_line)
|
||||||
|
|
||||||
|
return lines
|
||||||
|
|
||||||
|
def generate_qmd(
|
||||||
|
self,
|
||||||
|
model_class: type[BaseModel],
|
||||||
|
title: str | None = None,
|
||||||
|
expand_nested: bool = True,
|
||||||
|
) -> str:
|
||||||
|
"""Auto-generate config reference documentation including inherited fields."""
|
||||||
|
|
||||||
|
if title is None:
|
||||||
|
title = f"{model_class.__name__} Reference"
|
||||||
|
|
||||||
|
# Try to get JSON schema, with fallback for serialization issues
|
||||||
|
try:
|
||||||
|
schema = model_class.model_json_schema()
|
||||||
|
properties = schema.get("properties", {})
|
||||||
|
required = schema.get("required", [])
|
||||||
|
except Exception as e: # pylint: disable=broad-exception-caught
|
||||||
|
print(
|
||||||
|
f"Warning: Could not generate JSON schema ({e}). Using model fields instead."
|
||||||
|
)
|
||||||
|
# Fallback: use model fields directly
|
||||||
|
properties = {}
|
||||||
|
required = []
|
||||||
|
for field_name, field_info in model_class.model_fields.items():
|
||||||
|
# Extract description from json_schema_extra or field info
|
||||||
|
description = ""
|
||||||
|
if (
|
||||||
|
hasattr(field_info, "json_schema_extra")
|
||||||
|
and field_info.json_schema_extra
|
||||||
|
):
|
||||||
|
description = field_info.json_schema_extra.get("description", "")
|
||||||
|
elif hasattr(field_info, "description") and field_info.description:
|
||||||
|
description = field_info.description
|
||||||
|
|
||||||
|
# Get default value
|
||||||
|
default_val = None
|
||||||
|
if hasattr(field_info, "default") and field_info.default is not None:
|
||||||
|
# Handle special Pydantic default markers
|
||||||
|
if str(field_info.default) != "PydanticUndefined":
|
||||||
|
default_val = field_info.default
|
||||||
|
|
||||||
|
properties[field_name] = {
|
||||||
|
"type": "unknown",
|
||||||
|
"description": description,
|
||||||
|
"default": default_val,
|
||||||
|
}
|
||||||
|
|
||||||
|
if field_info.is_required():
|
||||||
|
required.append(field_name)
|
||||||
|
|
||||||
|
# Extract field groups from all classes in inheritance hierarchy
|
||||||
|
field_groups = self._extract_field_groups_from_all_classes(model_class)
|
||||||
|
|
||||||
|
# Start building QMD content
|
||||||
|
qmd_lines = [
|
||||||
|
"---",
|
||||||
|
f"title: {title}",
|
||||||
|
"description: A complete list of all configuration options.",
|
||||||
|
"---",
|
||||||
|
"",
|
||||||
|
]
|
||||||
|
|
||||||
|
# Generate one big code block with all fields (inline nested expansion)
|
||||||
|
qmd_lines.append("```yaml")
|
||||||
|
|
||||||
|
for i, group in enumerate(field_groups):
|
||||||
|
if not group["fields"]:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Add blank line between groups (except before first group)
|
||||||
|
if i > 0:
|
||||||
|
qmd_lines.append("")
|
||||||
|
|
||||||
|
# Process fields in the order they appear in source
|
||||||
|
for field_name in group["fields"]:
|
||||||
|
if field_name not in properties:
|
||||||
|
continue
|
||||||
|
|
||||||
|
field_info = properties[field_name]
|
||||||
|
field_type = self._extract_type_from_source(model_class, field_name)
|
||||||
|
is_required = field_name in required
|
||||||
|
|
||||||
|
if expand_nested:
|
||||||
|
# Check if this field has nested models
|
||||||
|
if field_name in model_class.model_fields:
|
||||||
|
pydantic_field_info = model_class.model_fields[field_name]
|
||||||
|
nested_models = self._extract_all_pydantic_models_from_type(
|
||||||
|
pydantic_field_info.annotation
|
||||||
|
)
|
||||||
|
has_nested = bool(nested_models)
|
||||||
|
else:
|
||||||
|
has_nested = False
|
||||||
|
|
||||||
|
# Add blank line before nested config
|
||||||
|
if has_nested:
|
||||||
|
qmd_lines.append("")
|
||||||
|
|
||||||
|
# Use the new inline generation method
|
||||||
|
field_lines = self._generate_field_documentation(
|
||||||
|
model_class,
|
||||||
|
field_name,
|
||||||
|
field_info,
|
||||||
|
field_type,
|
||||||
|
is_required,
|
||||||
|
indent_level=0,
|
||||||
|
visited_models=set(),
|
||||||
|
)
|
||||||
|
qmd_lines.extend(field_lines)
|
||||||
|
|
||||||
|
# Add blank line after nested config
|
||||||
|
if has_nested:
|
||||||
|
qmd_lines.append("")
|
||||||
|
else:
|
||||||
|
# Original simple approach
|
||||||
|
description = field_info.get("description", "")
|
||||||
|
default = field_info.get("default")
|
||||||
|
|
||||||
|
# Add wrapped comment for description
|
||||||
|
if description:
|
||||||
|
wrapped_lines = self._wrap_comment(description)
|
||||||
|
qmd_lines.extend(wrapped_lines)
|
||||||
|
|
||||||
|
line = f"{field_name}: {field_type}"
|
||||||
|
if default is not None:
|
||||||
|
line += f" = {default}"
|
||||||
|
if is_required:
|
||||||
|
line += " (required)"
|
||||||
|
qmd_lines.append(line)
|
||||||
|
|
||||||
|
qmd_lines.append("```")
|
||||||
|
|
||||||
|
# Join all lines and clean up any double newlines
|
||||||
|
content = "\n".join(qmd_lines)
|
||||||
|
|
||||||
|
# Replace multiple consecutive newlines with just two newlines (one blank line)
|
||||||
|
import re
|
||||||
|
|
||||||
|
content = re.sub(r"\n{3,}", "\n\n", content)
|
||||||
|
|
||||||
|
# Ensure single newline at the very end
|
||||||
|
content = content.rstrip("\n") + "\n"
|
||||||
|
|
||||||
|
return content
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
generator = QuartoGenerator()
|
||||||
|
|
||||||
|
print("Generating config reference content...")
|
||||||
|
qmd_content = generator.generate_qmd(AxolotlInputConfig, "Config Reference", True)
|
||||||
|
|
||||||
|
print("Writing to file...")
|
||||||
|
with open("docs/config-reference.qmd", "w", encoding="utf-8") as f:
|
||||||
|
f.write(qmd_content)
|
||||||
|
print("Done!")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
@@ -28,7 +28,7 @@ pad_to_sequence_len: true
|
|||||||
lora_r: 32
|
lora_r: 32
|
||||||
lora_alpha: 16
|
lora_alpha: 16
|
||||||
lora_dropout: 0.05
|
lora_dropout: 0.05
|
||||||
lora_target_modules: 'language_model.model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
|
lora_target_modules: 'model.language_model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
|
||||||
|
|
||||||
wandb_project:
|
wandb_project:
|
||||||
wandb_entity:
|
wandb_entity:
|
||||||
|
|||||||
@@ -30,7 +30,7 @@ pad_to_sequence_len: false
|
|||||||
lora_r: 32
|
lora_r: 32
|
||||||
lora_alpha: 16
|
lora_alpha: 16
|
||||||
lora_dropout: 0.05
|
lora_dropout: 0.05
|
||||||
lora_target_modules: 'language_model.model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
|
lora_target_modules: 'model.language_model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
|
||||||
|
|
||||||
wandb_project:
|
wandb_project:
|
||||||
wandb_entity:
|
wandb_entity:
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ pad_to_sequence_len: false
|
|||||||
lora_r: 32
|
lora_r: 32
|
||||||
lora_alpha: 16
|
lora_alpha: 16
|
||||||
lora_dropout: 0.05
|
lora_dropout: 0.05
|
||||||
lora_target_modules: 'language_model.model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
|
lora_target_modules: 'model.language_model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
|
||||||
|
|
||||||
wandb_project:
|
wandb_project:
|
||||||
wandb_entity:
|
wandb_entity:
|
||||||
|
|||||||
79
examples/llama-3/3b-qat-fsdp2.yaml
Normal file
79
examples/llama-3/3b-qat-fsdp2.yaml
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
base_model: meta-llama/Llama-3.2-3B
|
||||||
|
# Automatically upload checkpoint and final model to HF
|
||||||
|
# hub_model_id: username/custom_model_name
|
||||||
|
|
||||||
|
load_in_8bit: false
|
||||||
|
load_in_4bit: false
|
||||||
|
strict: false
|
||||||
|
|
||||||
|
plugins:
|
||||||
|
- axolotl.integrations.liger.LigerPlugin
|
||||||
|
|
||||||
|
liger_rope: true
|
||||||
|
liger_rms_norm: true
|
||||||
|
liger_glu_activation: true
|
||||||
|
liger_layer_norm: true
|
||||||
|
liger_fused_linear_cross_entropy: true
|
||||||
|
|
||||||
|
datasets:
|
||||||
|
- path: yahma/alpaca-cleaned
|
||||||
|
type: alpaca
|
||||||
|
|
||||||
|
output_dir: ./outputs/qat_out/
|
||||||
|
|
||||||
|
sample_packing: true
|
||||||
|
pad_to_sequence_len: true
|
||||||
|
sequence_len: 512
|
||||||
|
|
||||||
|
flex_attention: true
|
||||||
|
flex_attn_compile_kwargs:
|
||||||
|
dynamic: false
|
||||||
|
mode: max-autotune-no-cudagraphs
|
||||||
|
|
||||||
|
qat:
|
||||||
|
activation_dtype: int8
|
||||||
|
weight_dtype: int4
|
||||||
|
group_size: 32
|
||||||
|
|
||||||
|
wandb_project:
|
||||||
|
wandb_entity:
|
||||||
|
wandb_watch:
|
||||||
|
wandb_name:
|
||||||
|
wandb_log_model:
|
||||||
|
|
||||||
|
gradient_accumulation_steps: 1
|
||||||
|
micro_batch_size: 16
|
||||||
|
num_epochs: 1
|
||||||
|
optimizer: adamw_torch_fused
|
||||||
|
|
||||||
|
cosine_constant_lr_ratio: 0
|
||||||
|
cosine_min_lr_ratio: 1.0
|
||||||
|
learning_rate: 2e-5
|
||||||
|
save_only_model: true
|
||||||
|
bf16: true
|
||||||
|
|
||||||
|
resume_from_checkpoint:
|
||||||
|
logging_steps: 1
|
||||||
|
|
||||||
|
evals_per_epoch: 1
|
||||||
|
saves_per_epoch: 1
|
||||||
|
|
||||||
|
warmup_steps: 10
|
||||||
|
weight_decay: 0.0
|
||||||
|
fsdp:
|
||||||
|
- full_shard
|
||||||
|
- auto_wrap
|
||||||
|
|
||||||
|
fsdp_config:
|
||||||
|
fsdp_version: 2
|
||||||
|
fsdp_offload_params: false
|
||||||
|
fsdp_cpu_ram_efficient_loading: true
|
||||||
|
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||||
|
fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
|
||||||
|
fsdp_state_dict_type: FULL_STATE_DICT
|
||||||
|
fsdp_sharding_strategy: FULL_SHARD
|
||||||
|
fsdp_reshard_after_forward: true
|
||||||
|
fsdp_activation_checkpointing: true
|
||||||
|
|
||||||
|
special_tokens:
|
||||||
|
pad_token: <|end_of_text|>
|
||||||
@@ -5,6 +5,10 @@ tokenizer_type: AutoTokenizer
|
|||||||
# Automatically upload checkpoint and final model to HF
|
# Automatically upload checkpoint and final model to HF
|
||||||
# hub_model_id: username/custom_model_name
|
# hub_model_id: username/custom_model_name
|
||||||
|
|
||||||
|
special_tokens:
|
||||||
|
pad_token: <|finetune_right_pad_id|>
|
||||||
|
eos_token: <|eot_id|>
|
||||||
|
|
||||||
load_in_8bit: true
|
load_in_8bit: true
|
||||||
load_in_4bit: false
|
load_in_4bit: false
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ base_model: NousResearch/Llama-3.2-1B
|
|||||||
datasets:
|
datasets:
|
||||||
- path: teknium/GPT4-LLM-Cleaned
|
- path: teknium/GPT4-LLM-Cleaned
|
||||||
type: alpaca
|
type: alpaca
|
||||||
dataset_prepared_path: last_run_prepared
|
|
||||||
val_set_size: 0.1
|
val_set_size: 0.1
|
||||||
output_dir: ./outputs/lora-out
|
output_dir: ./outputs/lora-out
|
||||||
|
|
||||||
@@ -38,6 +38,7 @@ wandb_log_model:
|
|||||||
gradient_accumulation_steps: 2
|
gradient_accumulation_steps: 2
|
||||||
micro_batch_size: 2
|
micro_batch_size: 2
|
||||||
num_epochs: 1
|
num_epochs: 1
|
||||||
|
|
||||||
optimizer: adamw_8bit
|
optimizer: adamw_8bit
|
||||||
lr_scheduler: cosine
|
lr_scheduler: cosine
|
||||||
learning_rate: 0.0002
|
learning_rate: 0.0002
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ pad_to_sequence_len: false
|
|||||||
lora_r: 32
|
lora_r: 32
|
||||||
lora_alpha: 16
|
lora_alpha: 16
|
||||||
lora_dropout: 0.05
|
lora_dropout: 0.05
|
||||||
lora_target_modules: 'language_model.model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
|
lora_target_modules: 'model.language_model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
|
||||||
|
|
||||||
wandb_project:
|
wandb_project:
|
||||||
wandb_entity:
|
wandb_entity:
|
||||||
|
|||||||
71
examples/magistral/README.md
Normal file
71
examples/magistral/README.md
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# Finetune Magistral Small with Axolotl
|
||||||
|
|
||||||
|
Magistral Small is a 24B-parameter open-source model from MistralAI, available on [HuggingFace](https://huggingface.co/mistralai/Magistral-Small-2506). This guide shows how to fine-tune it with Axolotl on multi-turn conversations with proper masking.
|
||||||
|
|
||||||
|
MistralAI has also released a proprietary medium-sized version called Magistral Medium.
|
||||||
|
|
||||||
|
Thanks to the team at MistralAI for giving us early access to prepare for this release.
|
||||||
|
|
||||||
|
## Getting started
|
||||||
|
|
||||||
|
1. Install Axolotl following the [installation guide](https://docs.axolotl.ai/docs/installation.html). You need to install from main (Magistral support is only available in nightly builds) or use our latest [Docker images](https://docs.axolotl.ai/docs/docker.html).
|
||||||
|
|
||||||
|
Here is an example of how to install from main with pip:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Ensure you have Pytorch installed (Pytorch 2.6.0 recommended)
|
||||||
|
git clone https://github.com/axolotl-ai-cloud/axolotl.git
|
||||||
|
cd axolotl
|
||||||
|
|
||||||
|
pip3 install packaging==23.2 setuptools==75.8.0 wheel ninja
|
||||||
|
pip3 install --no-build-isolation -e '.[flash-attn,mistral]'
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Download the example config:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
axolotl fetch examples
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Run the finetuning example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
axolotl train examples/magistral/magistral-small-qlora.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
This config uses about 24GB VRAM.
|
||||||
|
|
||||||
|
Let us know how it goes. Happy finetuning! 🚀
|
||||||
|
|
||||||
|
### TIPS
|
||||||
|
|
||||||
|
- For inference, the official MistralAI team recommends `top_p: 0.95` and `temperature: 0.7` with `max_tokens: 40960`.
|
||||||
|
- You can run a full finetune by removing `adapter: qlora` and `load_in_4bit: true` from the config.
|
||||||
|
- Read more on how to load your own dataset at [docs](https://docs.axolotl.ai/docs/dataset_loading.html).
|
||||||
|
- The dataset format is the OpenAI Messages format as seen [here](https://docs.axolotl.ai/docs/dataset-formats/conversation.html#chat_template).
|
||||||
|
|
||||||
|
## Optimization Guides
|
||||||
|
|
||||||
|
- [Multi-GPU Training](https://docs.axolotl.ai/docs/multi-gpu.html)
|
||||||
|
- [Multi-Node Training](https://docs.axolotl.ai/docs/multi-node.html)
|
||||||
|
- [LoRA Optimizations](https://docs.axolotl.ai/docs/lora_optims.html)
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
|
||||||
|
At the moment, we only support the `mistral-common` tokenizer for supervised fine-tuning with `type: chat_template`.
|
||||||
|
|
||||||
|
The tokenizer does not work with `dataset.map` when multiprocessing is enabled, so we had to disable it. In addition, we do not support overriding tokens yet.
|
||||||
|
|
||||||
|
## Related Resources
|
||||||
|
|
||||||
|
- [MistralAI Magistral Blog](https://mistral.ai/news/magistral/)
|
||||||
|
- [Axolotl Docs](https://docs.axolotl.ai)
|
||||||
|
- [Axolotl Website](https://axolotl.ai)
|
||||||
|
- [Axolotl GitHub](https://github.com/axolotl-ai-cloud/axolotl)
|
||||||
|
- [Axolotl Discord](https://discord.gg/7m9sfhzaf3)
|
||||||
|
|
||||||
|
|
||||||
|
## Future Work
|
||||||
|
|
||||||
|
- Add parity to Preference Tuning, RL, Multi-modal, etc.
|
||||||
|
- Add parity to other tokenizer configs like overriding tokens.
|
||||||
72 examples/magistral/magistral-small-fsdp-qlora.yaml Normal file
@@ -0,0 +1,72 @@
base_model: mistralai/Magistral-Small-2506

# Enable to use mistral-common tokenizer
tokenizer_use_mistral_common: true

# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name

load_in_8bit: false
load_in_4bit: true

datasets:
  - path: fozziethebeat/alpaca_messages_2k_test
    type: chat_template

dataset_prepared_path: last_run_prepared
val_set_size: 0.1
output_dir: ./outputs/lora-out

adapter: qlora
lora_model_dir:

sequence_len: 2048
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_target_modules:
  - gate_proj
  - down_proj
  - up_proj
  - q_proj
  - v_proj
  - k_proj
  - o_proj

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 1
optimizer: adamw_torch_fused
lr_scheduler: cosine
learning_rate: 0.0002

bf16: auto
tf32: false

gradient_checkpointing:
resume_from_checkpoint:
logging_steps: 1
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 1
saves_per_epoch: 1

fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_activation_checkpointing: true
63 examples/magistral/magistral-small-qlora.yaml Normal file
@@ -0,0 +1,63 @@
base_model: mistralai/Magistral-Small-2506

# Enable to use mistral-common tokenizer
tokenizer_use_mistral_common: true

# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name

load_in_8bit: false
load_in_4bit: true

datasets:
  - path: fozziethebeat/alpaca_messages_2k_test
    type: chat_template

dataset_prepared_path: last_run_prepared
val_set_size: 0.1
output_dir: ./outputs/lora-out

adapter: qlora
lora_model_dir:

sequence_len: 2048
sample_packing: true
pad_to_sequence_len: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_target_modules:
  - gate_proj
  - down_proj
  - up_proj
  - q_proj
  - v_proj
  - k_proj
  - o_proj

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

bf16: auto
tf32: false

gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 1
saves_per_epoch: 1
@@ -27,7 +27,7 @@ pad_to_sequence_len: false
 lora_r: 32
 lora_alpha: 16
 lora_dropout: 0.05
-lora_target_modules: 'language_model.model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
+lora_target_modules: 'model.language_model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'

 wandb_project:
 wandb_entity:
@@ -25,7 +25,7 @@ pad_to_sequence_len: false
 lora_r: 32
 lora_alpha: 16
 lora_dropout: 0.05
-lora_target_modules: 'language_model.model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
+lora_target_modules: 'model.language_model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'

 wandb_project:
 wandb_entity:
@@ -25,7 +25,7 @@ pad_to_sequence_len: false
 lora_r: 32
 lora_alpha: 16
 lora_dropout: 0.05
-lora_target_modules: 'model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
+lora_target_modules: 'model.language_model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'

 wandb_project:
 wandb_entity:
78 examples/qwen3/8b-qat-fsdp2.yml Normal file
@@ -0,0 +1,78 @@
base_model: Qwen/Qwen3-8B
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name

load_in_8bit: false
load_in_4bit: false
strict: false

plugins:
  - axolotl.integrations.liger.LigerPlugin

liger_rope: true
liger_rms_norm: true
liger_glu_activation: true
liger_layer_norm: true
liger_fused_linear_cross_entropy: true

datasets:
  - path: tatsu-lab/alpaca
    type: alpaca

output_dir: ./outputs/qat_out/

sequence_len: 2048
sample_packing: true
flex_attention: true
pad_to_sequence_len: true

flex_attn_compile_kwargs:
  dynamic: false
  mode: max-autotune-no-cudagraphs

qat:
  activation_dtype: int8
  weight_dtype: int4
  group_size: 256
  fake_quant_after_n_steps: 1000

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 1
micro_batch_size: 2
max_steps: 2000
optimizer: adamw_torch_fused
lr_scheduler: cosine
learning_rate: 2e-5

bf16: true
tf32: true

resume_from_checkpoint:
logging_steps: 1

evals_per_epoch: 1
saves_per_epoch: 1

warmup_steps: 10
weight_decay: 0.0
fsdp:
  - full_shard
  - auto_wrap

fsdp_config:
  fsdp_version: 2
  fsdp_offload_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: Qwen3DecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
  fsdp_reshard_after_forward: true
  fsdp_activation_checkpointing: true

special_tokens:
BIN favicon.jpg (binary file not shown; size 4.5 KiB before, 4.7 KiB after)
@@ -6,21 +6,20 @@ triton>=3.0.0
 mamba-ssm==1.2.0.post1
 xformers>=0.0.23.post1
 autoawq==0.2.7.post3
-liger-kernel==0.5.9
+liger-kernel==0.5.10
 # END section

 packaging==23.2

-huggingface_hub==0.31.0
+huggingface_hub==0.32.2
 peft==0.15.2
-transformers==4.51.3
+transformers==4.52.4
 tokenizers>=0.21.1
-accelerate==1.6.0
+accelerate==1.7.0
-datasets==3.5.1
+datasets==3.6.0
-deepspeed>=0.15.4
+deepspeed>=0.17.0
-trl==0.17.0
+trl==0.18.2
-hf_xet==1.1.0
+hf_xet==1.1.2
-hqq==0.2.5

 optimum==1.16.2
 hf_transfer
@@ -63,8 +62,10 @@ langdetect==1.0.9
 immutabledict==4.2.0
 antlr4-python3-runtime==4.13.2

-torchao==0.9.0
+torchao==0.10.0
 schedulefree==1.4.1

 axolotl-contribs-lgpl==0.0.6
 axolotl-contribs-mit==0.0.3
+
+mistral-common==1.6.0
@@ -9,6 +9,8 @@ except ImportError as exc:
     raise ImportError("Install torch via `pip install torch`") from exc
 from packaging.version import Version as V

+USE_UV = "--uv" in sys.argv[1:]
+
 v = V(torch.__version__)

 # no cut-cross-entropy support for torch < 2.4.0
@@ -23,7 +25,9 @@ if cce_spec:
     if not importlib.util.find_spec("cut_cross_entropy.transformers"):
         UNINSTALL_PREFIX = "pip uninstall -y cut-cross-entropy && "

+    UV_PREFIX = "uv " if USE_UV else ""
+
     print(
         UNINSTALL_PREFIX
-        + 'pip install "cut-cross-entropy[transformers] @ git+https://github.com/apple/ml-cross-entropy.git@bad6f7b49c75fdec69471abb71b4cddd0f0c6438"'
+        + f'{UV_PREFIX}pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@a1174ca"'
     )
@@ -11,7 +11,7 @@
 =@# @# #@= #@ =#@@@@#= +#@@= +#@@@@#= .##@@+ @@
 @@@@ @@@@@@@@@@@@@@@@

-Welcome to the axolotl cloud image! If the you've mounted a disk to /workspace and the axolotl directory ie empty, run the following commands:
+Welcome to the axolotl cloud image! If you've mounted a disk to /workspace and the axolotl directory is empty, run the following commands:

 ```
 cd /workspace
@@ -1,11 +1,15 @@
 # noqa
 # pylint: skip-file
+import sys
+
 try:
     import torch
 except ImportError:
     raise ImportError("Install torch via `pip install torch`")
 from packaging.version import Version as V

+use_uv = "--uv" in sys.argv[1:]
+
 v = V(torch.__version__)
 cuda = str(torch.version.cuda)
 try:
@@ -31,6 +35,7 @@ elif v < V("2.6.0"):
 else:
     raise RuntimeError(f"Torch = {v} too new!")
 x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "")
+uv_prefix = "uv " if use_uv else ""
 print(
-    f'pip install unsloth-zoo==2024.12.1 && pip install --no-deps "unsloth[{x}]==2024.12.4"'
+    f'{uv_prefix}pip install unsloth-zoo==2024.12.1 && {uv_prefix}pip install --no-deps "unsloth[{x}]==2024.12.4"'
 )
2 setup.py
@@ -118,7 +118,7 @@ extras_require = {
         "yunchang==0.6.0",
     ],
     "deepspeed": [
-        "deepspeed==0.15.4",
+        "deepspeed==0.17.1",
         "deepspeed-kernels",
     ],
     "mamba-ssm": [
@@ -4,4 +4,4 @@ import pkgutil

 __path__ = pkgutil.extend_path(__path__, __name__)  # Make this a namespace package

-__version__ = "0.10.0.dev0"
+__version__ = "0.11.0.dev"
@@ -28,7 +28,6 @@ class TrainerCliArgs:
     debug: bool = field(default=False)
     debug_text_only: bool = field(default=False)
     debug_num_examples: int = field(default=0)
-    merge_lora: bool = field(default=False)
     prompter: Optional[str] = field(default=None)
     shard: bool = field(default=False)
     main_process_port: Optional[int] = field(default=None)
@@ -89,6 +88,26 @@ class VllmServeCliArgs:
         },
     )

+    enable_reasoning: Optional[bool] = field(
+        default=None,
+    )
+    reasoning_parser: Optional[str] = field(
+        default=None,
+    )
+
+
+@dataclass
+class QuantizeCliArgs:
+    """Dataclass with CLI arguments for `axolotl quantize` command."""
+
+    base_model: Optional[str] = field(default=None)
+    weight_dtype: Optional[str] = field(default=None)
+    activation_dtype: Optional[str] = field(default=None)
+    quantize_embedding: Optional[bool] = field(default=None)
+    group_size: Optional[int] = field(default=None)
+    output_dir: Optional[str] = field(default=None)
+
+
 @dataclass
 class EvaluateCliArgs:
@@ -1,6 +1,5 @@
 """Various checks for Axolotl CLI."""

-import logging
 import os
 from pathlib import Path

@@ -8,7 +7,9 @@ from accelerate.commands.config import config_args
 from huggingface_hub import HfApi
 from huggingface_hub.utils import LocalTokenNotFoundError

-LOG = logging.getLogger(__name__)
+from axolotl.utils.logging import get_logger
+
+LOG = get_logger(__name__)


 def check_accelerate_default_config() -> None:
@@ -82,7 +82,7 @@ class ModalCloud(Cloud):
         return res

     def get_image(self):
-        docker_tag = "main-py3.11-cu124-2.5.1"
+        docker_tag = "main-py3.11-cu124-2.6.0"
         if self.config.docker_tag:
             docker_tag = self.config.docker_tag
         docker_image = f"axolotlai/axolotl:{docker_tag}"
@@ -1,7 +1,6 @@
 """Configuration loading and processing."""

 import json
-import logging
 import os
 import tempfile
 from pathlib import Path
@@ -22,11 +21,12 @@ from axolotl.utils.config import (
     validate_config,
 )
 from axolotl.utils.dict import DictDefault
+from axolotl.utils.logging import get_logger
 from axolotl.utils.mlflow_ import setup_mlflow_env_vars
 from axolotl.utils.trainer import prepare_opinionated_env, prepare_optim_env
 from axolotl.utils.wandb_ import setup_wandb_env_vars

-LOG = logging.getLogger(__name__)
+LOG = get_logger(__name__)


 def check_remote_config(config: Union[str, Path]) -> Union[str, Path]:
@@ -119,12 +119,12 @@ def choose_config(path: Path) -> str:
     )

     if len(yaml_files) == 1:
-        print(f"Using default YAML file '{yaml_files[0]}'")
+        LOG.info(f"Using default YAML file '{yaml_files[0]}'")
         return str(yaml_files[0])

-    print("Choose a YAML file:")
+    LOG.info("Choose a YAML file:")
     for idx, file in enumerate(yaml_files):
-        print(f"{idx + 1}. {file}")
+        LOG.info(f"{idx + 1}. {file}")

     chosen_file = None
     while chosen_file is None:
@@ -133,9 +133,9 @@ def choose_config(path: Path) -> str:
             if 1 <= choice <= len(yaml_files):
                 chosen_file = str(yaml_files[choice - 1])
             else:
-                print("Invalid choice. Please choose a number from the list.")
+                LOG.info("Invalid choice. Please choose a number from the list.")
         except ValueError:
-            print("Invalid input. Please enter a number.")
+            LOG.info("Invalid input. Please enter a number.")

     return chosen_file
@@ -1,6 +1,5 @@
 """CLI to run evaluation on a model."""

-import logging
 import os
 from pathlib import Path
 from typing import Union
@@ -17,8 +16,9 @@ from axolotl.common.datasets import load_datasets, load_preference_datasets
 from axolotl.evaluate import evaluate
 from axolotl.utils import patch_optimized_env
 from axolotl.utils.dict import DictDefault
+from axolotl.utils.logging import get_logger

-LOG = logging.getLogger(__name__)
+LOG = get_logger(__name__)


 def do_evaluate(cfg: DictDefault, cli_args: TrainerCliArgs) -> None:
@@ -1,7 +1,6 @@
 """CLI to run inference on a trained model."""

 import importlib
-import logging
 import sys
 from pathlib import Path
 from threading import Thread
@@ -22,8 +21,9 @@ from axolotl.utils.chat_templates import (
     get_chat_template_from_config,
 )
 from axolotl.utils.dict import DictDefault
+from axolotl.utils.logging import get_logger

-LOG = logging.getLogger(__name__)
+LOG = get_logger(__name__)


 def get_multi_line_input() -> str:
@@ -2,7 +2,6 @@

 # pylint: disable=redefined-outer-name

-import logging
 import os
 import subprocess  # nosec B404
 import tempfile
@@ -17,6 +16,7 @@ import axolotl
 from axolotl.cli.args import (
     EvaluateCliArgs,
     PreprocessCliArgs,
+    QuantizeCliArgs,
     TrainerCliArgs,
     VllmServeCliArgs,
 )
@@ -30,8 +30,11 @@ from axolotl.cli.utils import (
 )
 from axolotl.integrations.lm_eval.cli import lm_eval
 from axolotl.utils import patch_optimized_env
+from axolotl.utils.logging import get_logger
 from axolotl.utils.schemas.config import AxolotlInputConfig

+LOG = get_logger(__name__)
+

 @click.group()
 @click.version_option(version=axolotl.__version__, prog_name="axolotl")
@@ -176,7 +179,7 @@ def train(

         do_cli(config=cfg_file, **kwargs)
     except subprocess.CalledProcessError as exc:
-        logging.error(f"Failed to train/fine-tune config '{cfg_file}': {exc}")
+        LOG.error(f"Failed to train/fine-tune config '{cfg_file}': {exc}")
         if not sweep:
             raise exc

@@ -333,6 +336,16 @@ def vllm_serve(config: str, **cli_args: VllmServeCliArgs):
     do_vllm_serve(config, cli_args)


+@cli.command()
+@click.argument("config", type=click.Path(exists=True, path_type=str))
+@add_options_from_dataclass(QuantizeCliArgs)
+@filter_none_kwargs
+def quantize(config: str, **cli_args: QuantizeCliArgs):
+    from axolotl.cli.quantize import do_quantize
+
+    do_quantize(config, cli_args)
+
+
 @cli.command()
 @click.argument("model", type=click.Path(exists=True, path_type=str))
 @click.argument("output", type=click.Path(exists=False, path_type=str))
@@ -1,20 +1,18 @@
 """CLI to merge a trained LoRA into a base model."""

-import logging
 from pathlib import Path
 from typing import Union

 import fire
-import transformers
 from dotenv import load_dotenv

-from axolotl.cli.args import TrainerCliArgs
 from axolotl.cli.art import print_axolotl_text_art
 from axolotl.cli.config import load_cfg
 from axolotl.cli.utils import load_model_and_tokenizer
 from axolotl.utils.dict import DictDefault
+from axolotl.utils.logging import get_logger

-LOG = logging.getLogger(__name__)
+LOG = get_logger(__name__)


 def do_merge_lora(*, cfg: DictDefault) -> None:
@@ -68,12 +66,6 @@ def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs) -> None:
     Raises:
         ValueError: If target directory for LoRA merged model does not exist.
     """
-    # pylint: disable=duplicate-code
-    parser = transformers.HfArgumentParser(TrainerCliArgs)
-    parsed_cli_args, _ = parser.parse_args_into_dataclasses(
-        return_remaining_strings=True
-    )
-    parsed_cli_args.merge_lora = True

     parsed_cfg = load_cfg(
         config,
@@ -1,7 +1,6 @@
 """CLI to merge sharded FSDP model checkpoints into a single combined checkpoint."""

 import json
-import logging
 import os
 import shutil
 from pathlib import Path
@@ -11,7 +10,6 @@ import fire
 import torch
 import torch.distributed.checkpoint as dist_cp
 import torch.distributed.checkpoint.format_utils as dist_cp_format_utils
-import transformers
 from accelerate.utils import (
     SAFE_WEIGHTS_INDEX_NAME,
     SAFE_WEIGHTS_NAME,
@@ -24,11 +22,11 @@ from huggingface_hub import split_torch_state_dict_into_shards
 from safetensors.torch import save_file as safe_save_file
 from torch.distributed.checkpoint.format_utils import _EmptyStateDictLoadPlanner

-from axolotl.cli.args import TrainerCliArgs
 from axolotl.cli.art import print_axolotl_text_art
 from axolotl.cli.config import load_cfg
+from axolotl.utils.logging import get_logger

-LOG = logging.getLogger(__name__)
+LOG = get_logger(__name__)


 class BFloat16CastPlanner(_EmptyStateDictLoadPlanner):
@@ -197,11 +195,6 @@ def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs):
     """
     # pylint: disable=duplicate-code
     print_axolotl_text_art()
-    parser = transformers.HfArgumentParser(TrainerCliArgs)
-    parsed_cli_args, _ = parser.parse_args_into_dataclasses(
-        return_remaining_strings=True
-    )
-    parsed_cli_args.merge_lora = True
     parsed_cfg = load_cfg(config, **kwargs)

     fsdp_dir = Path(parsed_cfg.output_dir) / "pytorch_model_fsdp_0"
@@ -1,6 +1,5 @@
 """CLI to run preprocessing of a dataset."""

-import logging
 import warnings
 from pathlib import Path
 from typing import Union
@@ -20,9 +19,10 @@ from axolotl.common.const import DEFAULT_DATASET_PREPARED_PATH
 from axolotl.common.datasets import load_datasets, load_preference_datasets
 from axolotl.integrations.base import PluginManager
 from axolotl.utils.dict import DictDefault
+from axolotl.utils.logging import get_logger
 from axolotl.utils.trainer import disable_datasets_caching

-LOG = logging.getLogger(__name__)
+LOG = get_logger(__name__)


 def do_preprocess(cfg: DictDefault, cli_args: PreprocessCliArgs) -> None:
90 src/axolotl/cli/quantize.py Normal file
@@ -0,0 +1,90 @@
"""
CLI to post-training quantize a model using torchao
"""

from pathlib import Path
from typing import Union

from transformers import AutoModelForCausalLM

from axolotl.cli.art import print_axolotl_text_art
from axolotl.cli.config import load_cfg
from axolotl.loaders import load_tokenizer
from axolotl.utils.logging import get_logger
from axolotl.utils.quantization import TorchIntDType, quantize_model_for_ptq

LOG = get_logger(__name__)


def do_quantize(
    config: Union[Path, str],
    cli_args: dict,
):
    """
    Quantizes a model's weights

    Args:
        config (Union[Path, str]): The path to the config file
        cli_args (dict): Additional command-line arguments
    """
    print_axolotl_text_art()

    cfg = load_cfg(config)

    if cfg.qat and cfg.quantization:
        raise ValueError(
            "QAT and quantization cannot be used together. Please specify only one of qat or quantization in your config file."
        )

    if cfg.qat:
        quantize_cfg = cfg.qat
    elif cfg.quantization:
        quantize_cfg = cfg.quantization
    else:
        raise ValueError(
            "No quantization configuration found. Please specify either qat or quantization in your config file."
        )

    model_path = cli_args.get("model_path") or cfg.output_dir
    if weight_dtype := cli_args.get("weight_dtype"):
        weight_dtype = TorchIntDType[weight_dtype]
    else:
        weight_dtype = quantize_cfg.weight_dtype
    if activation_dtype := cli_args.get("activation_dtype"):
        activation_dtype = TorchIntDType[activation_dtype]
    else:
        activation_dtype = quantize_cfg.activation_dtype
    group_size = cli_args.get("group_size") or quantize_cfg.group_size
    quantize_embedding = (
        cli_args.get("quantize_embedding") or quantize_cfg.quantize_embedding
    )
    output_dir = cli_args.get("output_dir") or cfg.output_dir

    LOG.info(f"Loading model from {model_path}...")
    tokenizer = load_tokenizer(cfg)
    model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto")

    LOG.info(
        f"Quantizing model with configuration: \n"
        f"\tweight_dtype: {weight_dtype}\n"
        f"\tactivation_dtype: {activation_dtype}\n"
        f"\tgroup_size: {group_size}\n"
        f"\tquantize_embedding: {quantize_embedding}"
    )

    quantize_model_for_ptq(
        model, weight_dtype, group_size, activation_dtype, quantize_embedding
    )

    LOG.info(f"Saving quantized model to: {str(Path(output_dir) / 'quantized')}...")
    model.save_pretrained(
        str(Path(output_dir) / "quantized"),
        safe_serialization=False,
        progressbar=True,
    )
    tokenizer.save_pretrained(
        str(Path(output_dir) / "quantized"),
        safe_serialization=False,
        progressbar=True,
    )
    LOG.info(f"Quantized model saved to: {str(Path(output_dir) / 'quantized')}...")
@@ -1,7 +1,6 @@
 """CLI to run training on a model."""

 import gc
-import logging
 import os
 from pathlib import Path
 from typing import Union
@@ -22,8 +21,6 @@ from axolotl.utils import patch_optimized_env
 from axolotl.utils.config import normalize_config, resolve_dtype
 from axolotl.utils.dict import DictDefault

-LOG = logging.getLogger(__name__)
-

 def do_train(cfg: DictDefault, cli_args: TrainerCliArgs):
     """
@@ -4,7 +4,6 @@ import concurrent.futures
 import dataclasses
 import hashlib
 import json
-import logging
 from functools import wraps
 from pathlib import Path
 from types import NoneType
@@ -23,8 +22,9 @@ from transformers import (
 from axolotl.loaders import load_processor, load_tokenizer
 from axolotl.loaders.model import ModelLoader
 from axolotl.utils.dict import DictDefault
+from axolotl.utils.logging import get_logger

-LOG = logging.getLogger(__name__)
+LOG = get_logger(__name__)


 def strip_optional_type(field_type: type | str | None):
@@ -2,14 +2,27 @@
 CLI to start the vllm server for online RL
 """

+import os
+from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Union

+import trl
 from trl.scripts.vllm_serve import ScriptArguments

 from axolotl.cli.config import load_cfg

+
+@dataclass
+class AxolotlScriptArguments(ScriptArguments):
+    """
+    Additional arguments for the VLLM server
+    """
+
+    reasoning_parser: str = field(default="", kw_only=True)
+    enable_reasoning: bool | None = field(default=None, kw_only=True)
+
+
 def do_vllm_serve(
     config: Union[Path, str],
     cli_args: dict,
@@ -24,6 +37,7 @@ def do_vllm_serve(
     Returns:
         process_id: the process id of the started VLLM server
     """
+    patch_vllm_worker()
     cfg = load_cfg(config)
     model = cfg.base_model

@@ -43,9 +57,16 @@ def do_vllm_serve(
     enable_prefix_caching = (
         cli_args.get("enable_prefix_caching") or cfg.vllm.enable_prefix_caching
     )
+    reasoning_parser = (
+        cli_args.get("reasoning_parser") or cfg.vllm.reasoning_parser or ""
+    )
+    enable_reasoning = (
+        cli_args.get("enable_reasoning") or cfg.vllm.enable_reasoning or False
+    )

-    vllm_script_args = ScriptArguments(
-        model,
+    # pylint: disable=unexpected-keyword-arg
+    vllm_script_args = AxolotlScriptArguments(
+        model=model,
         tensor_parallel_size=tensor_parallel_size,
         host=host,
         port=port,
@@ -53,5 +74,67 @@ def do_vllm_serve(
         dtype=dtype,
         max_model_len=max_model_len,
         enable_prefix_caching=enable_prefix_caching,
+        reasoning_parser=reasoning_parser,
+        enable_reasoning=enable_reasoning,
     )
     vllm_serve_main(vllm_script_args)


+def patch_vllm_worker():
+    from multiprocessing.connection import Connection
+
+    from vllm import LLM
+
+    def llm_worker(
+        script_args: AxolotlScriptArguments,
+        data_parallel_rank: int,
+        master_port: int,
+        connection: Connection,
+    ) -> None:
+        # Set required environment variables for DP to work with vLLM
+        os.environ["VLLM_DP_RANK"] = str(data_parallel_rank)
+        os.environ["VLLM_DP_RANK_LOCAL"] = str(data_parallel_rank)
+        os.environ["VLLM_DP_SIZE"] = str(script_args.data_parallel_size)
+        os.environ["VLLM_DP_MASTER_PORT"] = str(master_port)
+
+        llm = LLM(
+            model=script_args.model,
+            revision=script_args.revision,
+            tensor_parallel_size=script_args.tensor_parallel_size,
+            gpu_memory_utilization=script_args.gpu_memory_utilization,
+            enforce_eager=script_args.enforce_eager,
+            dtype=script_args.dtype,
+            # Automatic Prefix Caching caches the KV cache of existing queries, so that a new query can
+            # directly reuse the KV cache if it shares the same prefix with one of the existing queries.
+            # This is particularly useful here because we generate completions from the same prompts.
+            enable_prefix_caching=script_args.enable_prefix_caching,
+            kv_cache_dtype=script_args.kv_cache_dtype,
+            max_model_len=script_args.max_model_len,
+            worker_extension_cls="trl.scripts.vllm_serve.WeightSyncWorkerExtension",
+            enable_reasoning=script_args.enable_reasoning,
+            reasoning_parser=script_args.reasoning_parser,
+        )
+
+        # Send ready signal to parent process
+        connection.send({"status": "ready"})
+
+        while True:
+            # Wait for commands from the parent process
+            try:
+                command = connection.recv()
+            except KeyboardInterrupt:
+                llm.collective_rpc(method="close_communicator")
+                break
+
+            # Handle commands
+            if command["type"] in ["call", "fire_and_forget"]:
+                method_name = command["method"]
+                args, kwargs = command.get("args", ()), command.get("kwargs", {})
+                method = getattr(llm, method_name)
+                result = method(*args, **kwargs)
+                if command["type"] == "call":
+                    connection.send(result)
+            elif command["type"] == "shutdown":
+                break
+
+    trl.scripts.vllm_serve.llm_worker = llm_worker
@@ -1,5 +1,3 @@
-"""
-Various shared constants
-"""
+"""Various shared constants"""

 DEFAULT_DATASET_PREPARED_PATH = "last_run_prepared"
@@ -1,23 +1,21 @@
 """Dataset loading utilities."""

-import logging
 import math
 import random
 from dataclasses import dataclass
-from typing import Optional, Union

 from datasets import Dataset

 import axolotl.monkeypatch.data.batch_dataset_fetcher  # pylint: disable=unused-import # noqa: F401
 from axolotl.cli.args import PreprocessCliArgs, TrainerCliArgs
 from axolotl.loaders import load_processor, load_tokenizer
-from axolotl.utils.data import prepare_dataset
-from axolotl.utils.data.rl import load_prepare_preference_datasets
+from axolotl.utils.data import prepare_datasets, prepare_preference_datasets
 from axolotl.utils.dict import DictDefault
+from axolotl.utils.logging import get_logger
 from axolotl.utils.schemas.enums import RLType
 from axolotl.utils.tokenization import check_dataset_labels

-LOG = logging.getLogger(__name__)
+LOG = get_logger(__name__)


 @dataclass
@@ -30,16 +28,7 @@ class TrainDatasetMeta:


 def sample_dataset(dataset: Dataset, num_samples: int) -> Dataset:
-    """
-    Randomly sample `num_samples` samples from `dataset`.
-
-    Args:
-        dataset: Dataset.
-        num_samples: Number of samples to return.
-
-    Returns:
-        Random sample (with replacement) of examples in `dataset`.
-    """
+    """Randomly sample `num_samples` samples with replacement from `dataset`."""
     return dataset.select(
         [random.randrange(0, len(dataset) - 1) for _ in range(num_samples)]  # nosec
     )
@@ -51,44 +40,37 @@ def load_datasets(
     cli_args: PreprocessCliArgs | TrainerCliArgs | None = None,
     debug: bool = False,
 ) -> TrainDatasetMeta:
-    """
-    Loads one or more training or evaluation datasets, calling
-    `axolotl.utils.data.prepare_dataset`. Optionally, logs out debug information.
+    """Loads one or more training or evaluation datasets, calling
+    `axolotl.utils.data.prepare_datasets`. Optionally, logs out debug information.

     Args:
         cfg: Dictionary mapping `axolotl` config keys to values.
         cli_args: Command-specific CLI arguments.
-        debug: Whether to print out tokenization of sample
+        debug: Whether to print out tokenization of sample. This is duplicated in
+            `cfg` and `cli_args`, but is kept due to use in our Colab notebooks.

     Returns:
         Dataclass with fields for training and evaluation datasets and the computed
         `total_num_steps`.
     """
     tokenizer = load_tokenizer(cfg)
     processor = load_processor(cfg, tokenizer=tokenizer) if cfg.processor_type else None
-    preprocess_iterable = (
-        cli_args
-        and hasattr(cli_args, "iterable")
-        and cli_args.iterable is not None
-        and cli_args.iterable
-    )
+    preprocess_iterable = getattr(cli_args, "iterable", False)

-    train_dataset, eval_dataset, total_num_steps, prompters = prepare_dataset(
+    train_dataset, eval_dataset, total_num_steps, prompters = prepare_datasets(
         cfg,
         tokenizer,
         processor=processor,
         preprocess_iterable=preprocess_iterable,
     )

-    if (  # pylint: disable=too-many-boolean-expressions
-        cli_args
-        and (
-            cli_args.debug
-            or cfg.debug
-            or cli_args.debug_text_only
-            or int(cli_args.debug_num_examples) > 0
-        )
-    ) or debug:
+    if (
+        cfg.debug
+        or getattr(cli_args, "debug", False)
+        or getattr(cli_args, "debug_text_only", False)
+        or getattr(cli_args, "debug_num_examples", 0) > 0
+        or debug
+    ):
         LOG.info("check_dataset_labels...")

         num_examples = cli_args.debug_num_examples if cli_args else 1
@@ -113,13 +95,10 @@ def load_datasets(


 def load_preference_datasets(
-    *,
-    cfg: DictDefault,
-    cli_args: Union[PreprocessCliArgs, TrainerCliArgs],
+    *, cfg: DictDefault, cli_args: PreprocessCliArgs | TrainerCliArgs | None = None
 ) -> TrainDatasetMeta:
-    """
-    Loads one or more training or evaluation datasets for RL training using paired
-    preference data, calling `axolotl.utils.data.rl.load_prepare_preference_datasets`.
-    Optionally, logs out debug information.
+    """Loads one or more training or evaluation datasets for RL training using paired
+    preference data, calling `axolotl.utils.data.rl.prepare_preference_datasets`.
+    Optionally, logs out debug information.

     Args:
@@ -130,23 +109,28 @@ def load_preference_datasets(
         Dataclass with fields for training and evaluation datasets and the computed
         `total_num_steps`.
     """
-    train_dataset, eval_dataset = load_prepare_preference_datasets(cfg)
-    total_num_steps: Optional[int] = int(
-        math.ceil(len(train_dataset) * cfg.num_epochs / cfg.batch_size)
-    )
-    if cfg.rl is RLType.GRPO:
-        total_num_steps = None
+    tokenizer = load_tokenizer(cfg)
+    train_dataset, eval_dataset = prepare_preference_datasets(cfg, tokenizer)

-    if cli_args.debug or cfg.debug:
+    total_num_steps: int | None = None
+    if cfg.rl is not RLType.GRPO:
+        total_num_steps = int(
+            math.ceil(len(train_dataset) * cfg.num_epochs / cfg.batch_size)
+        )
+
+    if (cli_args and cli_args.debug) or cfg.debug:
         LOG.info("check_dataset_labels...")

+        num_examples = cli_args.debug_num_examples if cli_args else 1
+        text_only = cli_args.debug_text_only if cli_args else False
+
         tokenizer = load_tokenizer(cfg)
-        train_samples = sample_dataset(train_dataset, cli_args.debug_num_examples)
+        train_samples = sample_dataset(train_dataset, num_examples)
         check_dataset_labels(
-            train_samples,
-            tokenizer,
-            num_examples=cli_args.debug_num_examples,
-            text_only=cli_args.debug_text_only,
+            dataset=train_samples,
+            tokenizer=tokenizer,
+            num_examples=num_examples,
+            text_only=text_only,
             rl_mode=True,
         )
6 src/axolotl/core/builders/__init__.py Normal file
@@ -0,0 +1,6 @@
"""Trainer builder classes"""

from .causal import HFCausalTrainerBuilder
from .rl import HFRLTrainerBuilder

__all__ = ["HFCausalTrainerBuilder", "HFRLTrainerBuilder"]
508
src/axolotl/core/builders/base.py
Normal file
508
src/axolotl/core/builders/base.py
Normal file
@@ -0,0 +1,508 @@
|
|||||||
|
# Copyright 2024 Axolotl AI. All rights reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
"""Base class for trainer builder"""
|
||||||
|
|
||||||
|
import abc
|
||||||
|
import importlib
|
||||||
|
import logging
|
||||||
|
import sys
|
||||||
|
from abc import abstractmethod
|
||||||
|
from contextlib import suppress
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import torch
|
||||||
|
from transformers import (
|
||||||
|
TrainerCallback,
|
||||||
|
)
|
||||||
|
from transformers.training_args import OptimizerNames
|
||||||
|
|
||||||
|
from axolotl.integrations.base import PluginManager
|
||||||
|
from axolotl.monkeypatch.trainer.lr import patch_trainer_get_lr
|
||||||
|
from axolotl.utils import is_comet_available, is_mlflow_available
|
||||||
|
from axolotl.utils.callbacks import (
|
||||||
|
GCCallback,
|
||||||
|
GPUStatsCallback,
|
||||||
|
SaveAxolotlConfigtoWandBCallback,
|
||||||
|
)
|
||||||
|
from axolotl.utils.callbacks.profiler import PytorchProfilerCallback
|
||||||
|
from axolotl.utils.schemas.enums import CustomSupportedOptimizers
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
with suppress(ImportError):
|
||||||
|
import torch._dynamo # pylint: disable=ungrouped-imports
|
||||||
|
|
||||||
|
|
||||||
|
class TrainerBuilderBase(abc.ABC):
|
||||||
|
"""Base class for trainer builder."""
|
||||||
|
|
||||||
|
def __init__(self, cfg, model, tokenizer, processor=None):
|
||||||
|
self.cfg = cfg
|
||||||
|
self.model = model
|
||||||
|
self.tokenizer = tokenizer
|
||||||
|
self.processor = processor
|
||||||
|
|
||||||
|
self._train_dataset = None
|
||||||
|
self._eval_dataset = None
|
||||||
|
self._model_ref = None
|
||||||
|
self._peft_config = None
|
||||||
|
|
||||||
|
# If the model supports tagging, add the axolotl tag.
|
||||||
|
# This makes sure the tag is correctly pushed even if a user calls
|
||||||
|
# model.push_to_hub instead of trainer.push_to_hub.
|
||||||
|
if hasattr(model, "add_model_tags"):
|
||||||
|
model.add_model_tags(["axolotl"])
|
||||||
|
|
||||||
|
patch_trainer_get_lr()
|
||||||
|
|
||||||
|
@property
|
||||||
|
def model_ref(self):
|
||||||
|
return self._model_ref
|
||||||
|
|
||||||
|
@model_ref.setter
|
||||||
|
def model_ref(self, model):
|
||||||
|
self._model_ref = model
|
||||||
|
|
||||||
|
@property
|
||||||
|
def train_dataset(self):
|
||||||
|
return self._train_dataset
|
||||||
|
|
||||||
|
@train_dataset.setter
|
||||||
|
def train_dataset(self, dataset):
|
||||||
|
self._train_dataset = dataset
|
||||||
|
|
||||||
|
@property
|
||||||
|
def eval_dataset(self):
|
||||||
|
return self._eval_dataset
|
||||||
|
|
||||||
|
@eval_dataset.setter
|
||||||
|
def eval_dataset(self, dataset):
|
||||||
|
self._eval_dataset = dataset
|
||||||
|
|
||||||
|
@property
|
||||||
|
def peft_config(self):
|
||||||
|
return self._peft_config
|
||||||
|
|
||||||
|
@peft_config.setter
|
||||||
|
def peft_config(self, peft_config):
|
||||||
|
self._peft_config = peft_config
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def build(self, total_num_steps):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def get_callbacks(self) -> list[TrainerCallback]:
|
||||||
|
callbacks = []
|
||||||
|
|
||||||
|
plugin_manager = PluginManager.get_instance()
|
||||||
|
callbacks.extend(
|
||||||
|
plugin_manager.add_callbacks_pre_trainer(cfg=self.cfg, model=self.model)
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.profiler_steps:
|
||||||
|
callbacks.append(
|
||||||
|
PytorchProfilerCallback(
|
||||||
|
steps_to_profile=self.cfg.profiler_steps,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.gc_steps:
|
||||||
|
callbacks.append(GCCallback(gc_steps=self.cfg.gc_steps))
|
||||||
|
|
||||||
|
if self.cfg.use_wandb:
|
||||||
|
callbacks.append(
|
||||||
|
SaveAxolotlConfigtoWandBCallback(self.cfg.axolotl_config_path)
|
||||||
|
)
|
||||||
|
if self.cfg.use_mlflow and is_mlflow_available():
|
||||||
|
from axolotl.utils.callbacks.mlflow_ import (
|
||||||
|
SaveAxolotlConfigtoMlflowCallback,
|
||||||
|
)
|
||||||
|
|
||||||
|
callbacks.extend(
|
||||||
|
[
|
||||||
|
SaveAxolotlConfigtoMlflowCallback(self.cfg.axolotl_config_path),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
if self.cfg.use_comet and is_comet_available():
|
||||||
|
from axolotl.utils.callbacks.comet_ import SaveAxolotlConfigtoCometCallback
|
||||||
|
|
||||||
|
callbacks.append(
|
||||||
|
SaveAxolotlConfigtoCometCallback(self.cfg.axolotl_config_path)
|
||||||
|
)
|
||||||
|
|
||||||
|
callbacks.append(GPUStatsCallback(cfg=self.cfg))
|
||||||
|
|
||||||
|
return callbacks
|
||||||
|
|
||||||
|
def get_post_trainer_create_callbacks(self, trainer):
|
||||||
|
"""
|
||||||
|
Callbacks added after the trainer is created, usually b/c these need access to the trainer
|
||||||
|
"""
|
||||||
|
callbacks = []
|
||||||
|
if self.cfg.plugins:
|
||||||
|
plugin_manager = PluginManager.get_instance()
|
||||||
|
callbacks.extend(
|
||||||
|
[
|
||||||
|
cb
|
||||||
|
for cb in plugin_manager.add_callbacks_post_trainer(
|
||||||
|
self.cfg, trainer
|
||||||
|
)
|
||||||
|
if cb
|
||||||
|
]
|
||||||
|
)
|
||||||
|
return callbacks
|
||||||
|
|
||||||
|
def hook_pre_create_training_args(self, training_arguments_kwargs):
|
||||||
|
# TODO
|
||||||
|
return training_arguments_kwargs
|
||||||
|
|
||||||
|
def hook_post_create_training_args(self, training_arguments):
|
||||||
|
# TODO
|
||||||
|
return training_arguments
|
||||||
|
|
||||||
|
def hook_pre_create_trainer(self, trainer_kwargs, trainer_cls):
|
||||||
|
# TODO
|
||||||
|
return trainer_kwargs, trainer_cls
|
||||||
|
|
||||||
|
def hook_post_create_trainer(self, trainer):
|
||||||
|
# TODO
|
||||||
|
return trainer
|
||||||
|
|
||||||
|
def _configure_warmup_and_logging(
|
||||||
|
self, total_num_steps: int, training_args_kwargs: dict
|
||||||
|
):
|
||||||
|
warmup_steps = 0
|
||||||
|
warmup_ratio = 0.0
|
||||||
|
if self.cfg.warmup_steps:
|
||||||
|
warmup_steps = self.cfg.warmup_steps
|
||||||
|
elif self.cfg.warmup_ratio:
|
||||||
|
if total_num_steps:
|
||||||
|
warmup_steps = max(int(self.cfg.warmup_ratio * total_num_steps), 0)
|
||||||
|
else:
|
||||||
|
warmup_ratio = self.cfg.warmup_ratio
|
||||||
|
elif total_num_steps:
|
||||||
|
warmup_steps = min(int(0.03 * total_num_steps), 100)
|
||||||
|
else:
|
||||||
|
warmup_ratio = 0.03
|
||||||
|
|
||||||
|
if warmup_steps == 1:
|
||||||
|
warmup_steps = 2
|
||||||
|
|
||||||
|
if self.cfg.logging_steps is not None:
|
||||||
|
training_args_kwargs["logging_steps"] = self.cfg.logging_steps
|
||||||
|
else:
|
||||||
|
training_args_kwargs["logging_steps"] = (
|
||||||
|
500 # transformers defaults to 500
|
||||||
|
if not total_num_steps
|
||||||
|
else max(min(int(0.005 * total_num_steps), 10), 1)
|
||||||
|
)
|
||||||
|
|
||||||
|
training_args_kwargs["warmup_ratio"] = warmup_ratio
|
||||||
|
training_args_kwargs["warmup_steps"] = warmup_steps
|
||||||
|
|
||||||
|
def _configure_precision_settings(self, training_args_kwargs: dict):
|
||||||
|
training_args_kwargs["fp16"] = (self.cfg.fp16 and not self.cfg.bf16) or False
|
||||||
|
training_args_kwargs["tf32"] = self.cfg.tf32
|
||||||
|
if self.cfg.bf16 == "full":
|
||||||
|
training_args_kwargs["bf16_full_eval"] = True
|
||||||
|
else:
|
||||||
|
training_args_kwargs["bf16"] = self.cfg.bf16 or self.cfg.bfloat16
|
||||||
|
|
||||||
|
def _configure_scheduler(self, training_args_kwargs: dict):
|
||||||
|
if self.cfg.lr_scheduler in ["one_cycle", "rex"]:
|
||||||
|
training_args_kwargs["lr_scheduler_type"] = "cosine"
|
||||||
|
training_args_kwargs["alternate_lr_scheduler_type"] = self.cfg.lr_scheduler
|
||||||
|
else:
|
||||||
|
training_args_kwargs["lr_scheduler_type"] = (
|
||||||
|
self.cfg.lr_scheduler if self.cfg.lr_scheduler else "cosine"
|
||||||
|
)
|
||||||
|
training_args_kwargs["lr_scheduler_kwargs"] = (
|
||||||
|
self.cfg.lr_scheduler_kwargs if self.cfg.lr_scheduler_kwargs else {}
|
||||||
|
)
|
||||||
|
|
||||||
|
def _configure_optimizer(self, training_args_kwargs: dict, trainer_kwargs: dict):
|
||||||
|
def _configure_custom_optimizer(
|
||||||
|
training_args_kwargs: dict, trainer_kwargs: dict
|
||||||
|
):
|
||||||
|
# Common optimizer kwargs
|
||||||
|
optimizer_kwargs = {
|
||||||
|
"lr": training_args_kwargs["learning_rate"],
|
||||||
|
"weight_decay": training_args_kwargs["weight_decay"],
|
||||||
|
}
|
||||||
|
|
||||||
|
# Adam-specific kwargs
|
||||||
|
adam_kwargs: dict = {}
|
||||||
|
if training_args_kwargs.get("adam_beta1") and training_args_kwargs.get(
|
||||||
|
"adam_beta2"
|
||||||
|
):
|
||||||
|
adam_kwargs["betas"] = (
|
||||||
|
training_args_kwargs.get("adam_beta1"),
|
||||||
|
training_args_kwargs.get("adam_beta2"),
|
||||||
|
)
|
||||||
|
if training_args_kwargs.get("adam_epsilon"):
|
||||||
|
adam_kwargs["eps"] = training_args_kwargs.get("adam_epsilon")
|
||||||
|
|
||||||
|
if self.cfg.optimizer == "muon":
|
||||||
|
from axolotl.contribs.mit.muon import ( # pylint: disable=no-name-in-module
|
||||||
|
MuonOptimizerFactory,
|
||||||
|
)
|
||||||
|
|
||||||
|
optimizer_cls = MuonOptimizerFactory
|
||||||
|
optimizer_kwargs.update(adam_kwargs)
|
||||||
|
elif self.cfg.optimizer == "optimi_adamw":
|
||||||
|
from optimi import AdamW
|
||||||
|
|
||||||
|
optimizer_kwargs["foreach"] = False
|
||||||
|
optimizer_cls = AdamW
|
||||||
|
optimizer_kwargs.update(adam_kwargs)
|
||||||
|
elif self.cfg.optimizer == "ao_adamw_4bit":
|
||||||
|
# TODO remove 20250401
|
||||||
|
from torchao.prototype.low_bit_optim import AdamW4bit
|
||||||
|
|
||||||
|
optimizer_cls = AdamW4bit
|
||||||
|
optimizer_kwargs.update(adam_kwargs)
|
||||||
|
|
||||||
|
LOG.warning(
|
||||||
|
f"`ao_adamw_4bit` will be deprecated soon. Please use `{OptimizerNames.ADAMW_TORCH_4BIT}` instead."
|
||||||
|
)
|
||||||
|
elif self.cfg.optimizer == "ao_adamw_8bit":
|
||||||
|
from torchao.prototype.low_bit_optim import AdamW8bit
|
||||||
|
|
||||||
|
optimizer_cls = AdamW8bit
|
||||||
|
optimizer_kwargs.update(adam_kwargs)
|
||||||
|
elif self.cfg.optimizer == "ao_adamw_fp8":
|
||||||
|
from torchao.prototype.low_bit_optim import AdamWFp8
|
||||||
|
|
||||||
|
optimizer_cls = AdamWFp8
|
||||||
|
optimizer_kwargs.update(adam_kwargs)
|
||||||
|
elif self.cfg.optimizer == "adopt_adamw":
|
||||||
|
from axolotl.utils.optimizers.adopt import ADOPT
|
||||||
|
|
||||||
|
optimizer_cls = ADOPT
|
||||||
|
adam_kwargs["decouple"] = True
|
||||||
|
optimizer_kwargs.update(adam_kwargs)
|
||||||
|
elif self.cfg.optimizer == "came_pytorch":
|
||||||
|
from came_pytorch import CAME
|
||||||
|
|
||||||
|
optimizer_cls = CAME
|
||||||
|
|
||||||
|
beta1 = training_args_kwargs.get("adam_beta1", 0.9)
|
||||||
|
beta2 = training_args_kwargs.get("adam_beta2", 0.999)
|
||||||
|
beta3 = training_args_kwargs.get("adam_beta3", 0.9999)
|
||||||
|
eps1 = training_args_kwargs.get("adam_epsilon", 1e-30)
|
||||||
|
eps2 = training_args_kwargs.get("adam_epsilon2", 1e-16)
|
||||||
|
adam_kwargs["betas"] = (beta1, beta2, beta3)
|
||||||
|
adam_kwargs["eps"] = (eps1, eps2)
|
||||||
|
|
||||||
|
optimizer_kwargs.update(adam_kwargs)
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
f"Unhandled optimizer: {self.cfg.optimizer}. Please raise an Issue."
|
||||||
|
)
|
||||||
|
|
||||||
|
# Parse any additional optimizer args from config
|
||||||
|
if self.cfg.optim_args:
|
||||||
|
if isinstance(self.cfg.optim_args, dict):
|
||||||
|
optimizer_kwargs.update(self.cfg.optim_args)
|
||||||
|
else:
|
||||||
|
# Parse string format "key1=value1,key2=value2"
|
||||||
|
for mapping in self.cfg.optim_args.replace(" ", "").split(","):
|
||||||
|
key, value = mapping.split("=")
|
||||||
|
optimizer_kwargs[key] = value
|
||||||
|
|
||||||
|
# Note: This is not used in training_args_kwargs, but in trainer_kwargs
|
||||||
|
trainer_kwargs["optimizer_cls_and_kwargs"] = (
|
||||||
|
optimizer_cls,
|
||||||
|
optimizer_kwargs,
|
||||||
|
)
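The string form of `optim_args` accepted here is a comma-separated list of `key=value` pairs; a minimal sketch of that parse (values stay strings, exactly as in the loop above), with the example values being hypothetical:

```python
def parse_optim_args(optim_args):
    """Parse "key1=value1,key2=value2" into a dict; dicts pass through unchanged."""
    if isinstance(optim_args, dict):
        return dict(optim_args)
    return dict(pair.split("=") for pair in optim_args.replace(" ", "").split(","))

optimizer_kwargs = {"lr": 2e-5, "weight_decay": 0.01}
optimizer_kwargs.update(parse_optim_args("eps=1e-8, foreach=False"))
print(optimizer_kwargs)  # {'lr': 2e-05, 'weight_decay': 0.01, 'eps': '1e-8', 'foreach': 'False'}
```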
|
||||||
|
|
||||||
|
# Handle custom optimizer
|
||||||
|
custom_supported_optimizers = [opt.value for opt in CustomSupportedOptimizers]
|
||||||
|
if self.cfg.optimizer in custom_supported_optimizers:
|
||||||
|
_configure_custom_optimizer(training_args_kwargs, trainer_kwargs)
|
||||||
|
else:
|
||||||
|
# Use transformers' optimizer
|
||||||
|
training_args_kwargs["optim"] = self.cfg.optimizer
|
||||||
|
|
||||||
|
# Parse any additional optimizer args from config
|
||||||
|
if self.cfg.optim_args:
|
||||||
|
if isinstance(self.cfg.optim_args, dict):
|
||||||
|
optim_args = ",".join(
|
||||||
|
[f"{key}={value}" for key, value in self.cfg.optim_args.items()]
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
optim_args = self.cfg.optim_args
|
||||||
|
training_args_kwargs["optim_args"] = optim_args
|
||||||
|
|
||||||
|
if (
|
||||||
|
self.cfg.optimizer == "adamw_anyprecision"
|
||||||
|
and Path(self.cfg.torchdistx_path).exists()
|
||||||
|
):
|
||||||
|
sys.path.append(self.cfg.torchdistx_path)
|
||||||
|
importlib.import_module("torchdistx")
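On the non-custom path the same `optim_args` travel in the opposite direction, serialized back into the comma-separated string that the `optim_args` training argument carries; a small sketch, with the optimizer name and values being hypothetical:

```python
def serialize_optim_args(optim_args):
    """Join a dict back into the "key1=value1,key2=value2" string form used above."""
    if isinstance(optim_args, dict):
        return ",".join(f"{key}={value}" for key, value in optim_args.items())
    return optim_args

training_args_kwargs = {"optim": "adamw_torch_fused"}
training_args_kwargs["optim_args"] = serialize_optim_args({"eps": 1e-8, "betas": "(0.9, 0.95)"})
print(training_args_kwargs)  # {'optim': 'adamw_torch_fused', 'optim_args': 'eps=1e-08,betas=(0.9, 0.95)'}
```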
|
||||||
|
|
||||||
|
def _configure_hub_parameters(self, training_args_kwargs: dict):
|
||||||
|
if self.cfg.hub_model_id:
|
||||||
|
training_args_kwargs["hub_model_id"] = self.cfg.hub_model_id
|
||||||
|
training_args_kwargs["push_to_hub"] = True
|
||||||
|
training_args_kwargs["hub_private_repo"] = True
|
||||||
|
training_args_kwargs["hub_always_push"] = True
|
||||||
|
|
||||||
|
if self.cfg.hub_strategy:
|
||||||
|
training_args_kwargs["hub_strategy"] = self.cfg.hub_strategy
|
||||||
|
|
||||||
|
def _configure_save_and_eval_strategy(self, training_args_kwargs: dict):
|
||||||
|
# save_strategy and save_steps
|
||||||
|
if self.cfg.save_steps:
|
||||||
|
training_args_kwargs["save_strategy"] = "steps"
|
||||||
|
training_args_kwargs["save_steps"] = self.cfg.save_steps
|
||||||
|
elif self.cfg.save_strategy:
|
||||||
|
training_args_kwargs["save_strategy"] = self.cfg.save_strategy
|
||||||
|
else:
|
||||||
|
# default to saving each epoch if not defined
|
||||||
|
training_args_kwargs["save_strategy"] = "epoch"
|
||||||
|
|
||||||
|
training_args_kwargs["save_total_limit"] = (
|
||||||
|
self.cfg.save_total_limit if self.cfg.save_total_limit else 4
|
||||||
|
)
|
||||||
|
|
||||||
|
# eval_strategy and eval_steps
|
||||||
|
if not self.eval_dataset and self.cfg.val_set_size == 0:
|
||||||
|
# do not eval if no eval_dataset and val_set_size=0
|
||||||
|
training_args_kwargs["eval_strategy"] = "no"
|
||||||
|
elif self.cfg.eval_steps:
|
||||||
|
training_args_kwargs["eval_strategy"] = "steps"
|
||||||
|
training_args_kwargs["eval_steps"] = self.cfg.eval_steps
|
||||||
|
training_args_kwargs["eval_on_start"] = True
|
||||||
|
elif self.cfg.eval_strategy:
|
||||||
|
training_args_kwargs["eval_strategy"] = self.cfg.eval_strategy
|
||||||
|
training_args_kwargs["eval_on_start"] = True
|
||||||
|
|
||||||
|
def _configure_reporting(self, training_args_kwargs: dict):
|
||||||
|
report_to = []
|
||||||
|
if self.cfg.use_wandb:
|
||||||
|
report_to.append("wandb")
|
||||||
|
if self.cfg.use_mlflow:
|
||||||
|
report_to.append("mlflow")
|
||||||
|
if self.cfg.use_tensorboard:
|
||||||
|
report_to.append("tensorboard")
|
||||||
|
if self.cfg.use_comet:
|
||||||
|
report_to.append("comet_ml")
|
||||||
|
|
||||||
|
training_args_kwargs["report_to"] = report_to
|
||||||
|
|
||||||
|
if self.cfg.use_wandb:
|
||||||
|
training_args_kwargs["run_name"] = self.cfg.wandb_name
|
||||||
|
elif self.cfg.use_mlflow:
|
||||||
|
training_args_kwargs["run_name"] = self.cfg.mlflow_run_name
|
||||||
|
else:
|
||||||
|
training_args_kwargs["run_name"] = None
|
||||||
|
|
||||||
|
def _configure_torch_compile(self, training_args_kwargs: dict):
|
||||||
|
if self.cfg.torch_compile and getattr(torch, "_dynamo", None):
|
||||||
|
torch._dynamo.config.suppress_errors = ( # pylint: disable=protected-access
|
||||||
|
True
|
||||||
|
)
|
||||||
|
training_args_kwargs["torch_compile"] = self.cfg.torch_compile
|
||||||
|
if self.cfg.torch_compile_backend:
|
||||||
|
training_args_kwargs["torch_compile_backend"] = (
|
||||||
|
self.cfg.torch_compile_backend
|
||||||
|
)
|
||||||
|
if self.cfg.torch_compile_mode:
|
||||||
|
training_args_kwargs["torch_compile_mode"] = self.cfg.torch_compile_mode
|
||||||
|
|
||||||
|
def _configure_gradient_checkpointing(self, training_args_kwargs: dict):
|
||||||
|
if self.cfg.gradient_checkpointing:
|
||||||
|
training_args_kwargs["gradient_checkpointing"] = (
|
||||||
|
self.cfg.gradient_checkpointing
|
||||||
|
)
|
||||||
|
if self.cfg.gradient_checkpointing_kwargs is not None:
|
||||||
|
training_args_kwargs["gradient_checkpointing_kwargs"] = (
|
||||||
|
self.cfg.gradient_checkpointing_kwargs
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
training_args_kwargs["gradient_checkpointing_kwargs"] = {
|
||||||
|
"use_reentrant": False
|
||||||
|
}
|
||||||
|
|
||||||
|
def _set_base_training_args(
|
||||||
|
self, total_num_steps
|
||||||
|
) -> tuple[dict[str, Any], dict[str, Any]]:
|
||||||
|
training_args_kwargs: dict[str, Any] = {}
|
||||||
|
trainer_kwargs: dict[str, Any] = {}
|
||||||
|
|
||||||
|
self._configure_warmup_and_logging(total_num_steps, training_args_kwargs)
|
||||||
|
self._configure_precision_settings(training_args_kwargs)
|
||||||
|
self._configure_save_and_eval_strategy(training_args_kwargs)
|
||||||
|
self._configure_gradient_checkpointing(training_args_kwargs)
|
||||||
|
|
||||||
|
# set arg into trainer_args_kwargs with same name if value not None
|
||||||
|
for arg in [
|
||||||
|
# optim/scheduler
|
||||||
|
"adam_beta1",
|
||||||
|
"adam_beta2",
|
||||||
|
"adam_beta3",
|
||||||
|
"adam_epsilon",
|
||||||
|
"adam_epsilon2",
|
||||||
|
"cosine_min_lr_ratio",
|
||||||
|
"cosine_constant_lr_ratio",
|
||||||
|
"optim_target_modules",
|
||||||
|
# trainer
|
||||||
|
"max_grad_norm",
|
||||||
|
"dataloader_num_workers",
|
||||||
|
"dataloader_pin_memory",
|
||||||
|
"dataloader_prefetch_factor",
|
||||||
|
"gradient_accumulation_steps",
|
||||||
|
"learning_rate",
|
||||||
|
"embedding_lr",
|
||||||
|
"embedding_lr_scale",
|
||||||
|
"lr_groups",
|
||||||
|
"loraplus_lr_ratio",
|
||||||
|
"loraplus_lr_embedding",
|
||||||
|
"output_dir",
|
||||||
|
"save_safetensors",
|
||||||
|
"save_only_model",
|
||||||
|
"include_tokens_per_second",
|
||||||
|
"weight_decay",
|
||||||
|
"seed",
|
||||||
|
]:
|
||||||
|
if hasattr(self.cfg, arg) and getattr(self.cfg, arg) is not None:
|
||||||
|
training_args_kwargs[arg] = getattr(self.cfg, arg)
|
||||||
|
|
||||||
|
training_args_kwargs["per_device_train_batch_size"] = self.cfg.micro_batch_size
|
||||||
|
|
||||||
|
if self.cfg.eval_batch_size:
|
||||||
|
training_args_kwargs["per_device_eval_batch_size"] = (
|
||||||
|
self.cfg.eval_batch_size
|
||||||
|
)
|
||||||
|
|
||||||
|
training_args_kwargs["max_steps"] = self.cfg.max_steps or total_num_steps or -1
|
||||||
|
training_args_kwargs["num_train_epochs"] = self.cfg.num_epochs
|
||||||
|
|
||||||
|
if self.cfg.dataset_processes:
|
||||||
|
training_args_kwargs["dataset_num_proc"] = self.cfg.dataset_processes
|
||||||
|
|
||||||
|
# max_length is not used in CausalTrainer
|
||||||
|
if self.cfg.reward_model or self.cfg.rl:
|
||||||
|
training_args_kwargs["max_length"] = self.cfg.sequence_len
|
||||||
|
|
||||||
|
self._configure_reporting(training_args_kwargs)
|
||||||
|
self._configure_hub_parameters(training_args_kwargs)
|
||||||
|
self._configure_scheduler(training_args_kwargs)
|
||||||
|
self._configure_optimizer(training_args_kwargs, trainer_kwargs)
|
||||||
|
self._configure_torch_compile(training_args_kwargs)
|
||||||
|
|
||||||
|
return training_args_kwargs, trainer_kwargs
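The attribute-copy loop near the top of this method only forwards config values that are actually set; a tiny runnable sketch of that filter, where the config object is a stand-in for the real one:

```python
from types import SimpleNamespace

# only non-None config values make it into the training args kwargs
cfg = SimpleNamespace(learning_rate=2e-5, weight_decay=None, max_grad_norm=1.0, seed=42)
training_args_kwargs = {}
for arg in ["learning_rate", "weight_decay", "max_grad_norm", "seed"]:
    if hasattr(cfg, arg) and getattr(cfg, arg) is not None:
        training_args_kwargs[arg] = getattr(cfg, arg)
print(training_args_kwargs)  # {'learning_rate': 2e-05, 'max_grad_norm': 1.0, 'seed': 42}
```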
|
||||||
488
src/axolotl/core/builders/causal.py
Normal file
@@ -0,0 +1,488 @@
|
|||||||
|
"""Builder for causal trainers"""
|
||||||
|
|
||||||
|
import inspect
|
||||||
|
import math
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Type, Union
|
||||||
|
|
||||||
|
import transformers
|
||||||
|
from transformers import (
|
||||||
|
DataCollatorWithFlattening,
|
||||||
|
EarlyStoppingCallback,
|
||||||
|
)
|
||||||
|
from trl.trainer.utils import RewardDataCollatorWithPadding
|
||||||
|
|
||||||
|
from axolotl.core.builders.base import TrainerBuilderBase
|
||||||
|
from axolotl.core.trainers import (
|
||||||
|
AxolotlMambaTrainer,
|
||||||
|
AxolotlPRMTrainer,
|
||||||
|
AxolotlRewardTrainer,
|
||||||
|
AxolotlTrainer,
|
||||||
|
ReLoRATrainer,
|
||||||
|
)
|
||||||
|
from axolotl.integrations.base import PluginManager
|
||||||
|
from axolotl.monkeypatch.multipack import SUPPORTED_MULTIPACK_MODEL_TYPES
|
||||||
|
from axolotl.monkeypatch.relora import ReLoRACallback
|
||||||
|
from axolotl.processing_strategies import get_processing_strategy
|
||||||
|
from axolotl.utils import is_comet_available, is_mlflow_available
|
||||||
|
from axolotl.utils.callbacks import (
|
||||||
|
LossWatchDogCallback,
|
||||||
|
SaveBetterTransformerModelCallback,
|
||||||
|
bench_eval_callback_factory,
|
||||||
|
causal_lm_bench_eval_callback_factory,
|
||||||
|
colab_inference_post_train_callback,
|
||||||
|
log_prediction_callback_factory,
|
||||||
|
)
|
||||||
|
from axolotl.utils.callbacks.lisa import lisa_callback_factory
|
||||||
|
from axolotl.utils.callbacks.qat import QATCallback
|
||||||
|
from axolotl.utils.chat_templates import get_chat_template_from_config
|
||||||
|
from axolotl.utils.collators import (
|
||||||
|
BatchSamplerDataCollatorForSeq2Seq,
|
||||||
|
DataCollatorForSeq2Seq,
|
||||||
|
MambaDataCollator,
|
||||||
|
V2BatchSamplerDataCollatorForSeq2Seq,
|
||||||
|
)
|
||||||
|
from axolotl.utils.collators.mm_chat import MultiModalChatDataCollator
|
||||||
|
from axolotl.utils.logging import get_logger
|
||||||
|
|
||||||
|
LOG = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class HFCausalTrainerBuilder(TrainerBuilderBase):
|
||||||
|
"""
|
||||||
|
Build the HuggingFace training args/trainer for causal models and reward modeling
|
||||||
|
using TRL.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def get_callbacks(self):
|
||||||
|
callbacks = super().get_callbacks()
|
||||||
|
|
||||||
|
if self.cfg.relora_steps:
|
||||||
|
callbacks.append(ReLoRACallback(self.cfg))
|
||||||
|
|
||||||
|
if (
|
||||||
|
hasattr(self.model, "use_bettertransformer")
|
||||||
|
and self.model.use_bettertransformer is True
|
||||||
|
):
|
||||||
|
callbacks.append(SaveBetterTransformerModelCallback())
|
||||||
|
|
||||||
|
# TODO: check if can move to base class
|
||||||
|
if self.cfg.loss_watchdog_threshold is not None:
|
||||||
|
callbacks.append(LossWatchDogCallback(self.cfg))
|
||||||
|
|
||||||
|
if self.cfg.qat:
|
||||||
|
callbacks.append(QATCallback(self.cfg.qat))
|
||||||
|
|
||||||
|
return callbacks
|
||||||
|
|
||||||
|
def get_post_trainer_create_callbacks(self, trainer):
|
||||||
|
callbacks = []
|
||||||
|
if self.cfg.use_wandb and self.cfg.eval_table_size > 0:
|
||||||
|
LogPredictionCallback = log_prediction_callback_factory(
|
||||||
|
trainer, self.tokenizer, "wandb"
|
||||||
|
)
|
||||||
|
callbacks.append(LogPredictionCallback(self.cfg))
|
||||||
|
if (
|
||||||
|
self.cfg.use_mlflow
|
||||||
|
and is_mlflow_available()
|
||||||
|
and self.cfg.eval_table_size > 0
|
||||||
|
):
|
||||||
|
LogPredictionCallback = log_prediction_callback_factory(
|
||||||
|
trainer, self.tokenizer, "mlflow"
|
||||||
|
)
|
||||||
|
callbacks.append(LogPredictionCallback(self.cfg))
|
||||||
|
if self.cfg.use_comet and is_comet_available() and self.cfg.eval_table_size > 0:
|
||||||
|
LogPredictionCallback = log_prediction_callback_factory(
|
||||||
|
trainer, self.tokenizer, "comet_ml"
|
||||||
|
)
|
||||||
|
callbacks.append(LogPredictionCallback(self.cfg))
|
||||||
|
|
||||||
|
if self.cfg.do_bench_eval:
|
||||||
|
callbacks.append(bench_eval_callback_factory(trainer, self.tokenizer))
|
||||||
|
if self.cfg.do_causal_lm_eval:
|
||||||
|
CausalLMBenchEvalCallback = causal_lm_bench_eval_callback_factory(
|
||||||
|
trainer, self.tokenizer
|
||||||
|
)
|
||||||
|
callbacks.append(CausalLMBenchEvalCallback(self.cfg))
|
||||||
|
|
||||||
|
if self.cfg.early_stopping_patience:
|
||||||
|
early_stop_cb = EarlyStoppingCallback(
|
||||||
|
self.cfg.early_stopping_patience,
|
||||||
|
)
|
||||||
|
callbacks.append(early_stop_cb)
|
||||||
|
|
||||||
|
if self.cfg.lisa_step_interval and self.cfg.lisa_n_layers:
|
||||||
|
callbacks.append(lisa_callback_factory(trainer))
|
||||||
|
|
||||||
|
if any("COLAB_" in key for key in os.environ):
|
||||||
|
ColabCallback = colab_inference_post_train_callback(trainer)
|
||||||
|
callbacks.append(ColabCallback(self.cfg))
|
||||||
|
|
||||||
|
callbacks.extend(super().get_post_trainer_create_callbacks(trainer=trainer))
|
||||||
|
return callbacks
|
||||||
|
|
||||||
|
def _get_trainer_cls(self):
|
||||||
|
"""
|
||||||
|
Gets the trainer class for the given configuration.
|
||||||
|
"""
|
||||||
|
if self.cfg.plugins:
|
||||||
|
plugin_manager = PluginManager.get_instance()
|
||||||
|
trainer_cls = plugin_manager.get_trainer_cls(self.cfg)
|
||||||
|
if trainer_cls:
|
||||||
|
return trainer_cls
|
||||||
|
if self.cfg.relora_steps:
|
||||||
|
return ReLoRATrainer
|
||||||
|
if self.cfg.model_config_type == "mamba":
|
||||||
|
return AxolotlMambaTrainer
|
||||||
|
if self.cfg.reward_model:
|
||||||
|
return AxolotlRewardTrainer
|
||||||
|
if self.cfg.process_reward_model:
|
||||||
|
return AxolotlPRMTrainer
|
||||||
|
return AxolotlTrainer
|
||||||
|
|
||||||
|
def build(self, total_num_steps):
|
||||||
|
from axolotl.core.training_args import (
|
||||||
|
AxolotlPRMConfig,
|
||||||
|
AxolotlRewardConfig,
|
||||||
|
AxolotlTrainingArguments,
|
||||||
|
)
|
||||||
|
|
||||||
|
training_arguments_kwargs, trainer_kwargs = self._set_base_training_args(
|
||||||
|
total_num_steps
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.fsdp:
|
||||||
|
training_arguments_kwargs["fsdp"] = self.cfg.fsdp
|
||||||
|
if self.cfg.fsdp_config:
|
||||||
|
training_arguments_kwargs["fsdp_config"] = {
|
||||||
|
k.lstrip("fsdp_"): v for k, v in dict(self.cfg.fsdp_config).items()
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.cfg.adapter == "qlora":
|
||||||
|
training_arguments_kwargs["qlora"] = True
|
||||||
|
|
||||||
|
# deepspeed
|
||||||
|
if self.cfg.deepspeed:
|
||||||
|
training_arguments_kwargs["deepspeed"] = self.cfg.deepspeed
|
||||||
|
|
||||||
|
if self.cfg.lr_quadratic_warmup is not None:
|
||||||
|
training_arguments_kwargs["lr_quadratic_warmup"] = (
|
||||||
|
self.cfg.lr_quadratic_warmup
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.dataloader_drop_last is not None:
|
||||||
|
training_arguments_kwargs["dataloader_drop_last"] = (
|
||||||
|
self.cfg.dataloader_drop_last
|
||||||
|
)
|
||||||
|
elif self.cfg.sample_packing and self.cfg.eval_sample_packing is False:
|
||||||
|
training_arguments_kwargs["dataloader_drop_last"] = True
|
||||||
|
|
||||||
|
if self.cfg.remove_unused_columns is not None:
|
||||||
|
training_arguments_kwargs["remove_unused_columns"] = (
|
||||||
|
self.cfg.remove_unused_columns
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.do_bench_eval:
|
||||||
|
training_arguments_kwargs["do_bench_eval"] = self.cfg.do_bench_eval
|
||||||
|
if self.cfg.bench_dataset:
|
||||||
|
training_arguments_kwargs["bench_dataset"] = self.cfg.bench_dataset
|
||||||
|
if self.cfg.do_causal_lm_eval:
|
||||||
|
training_arguments_kwargs["do_causal_lm_eval"] = self.cfg.do_causal_lm_eval
|
||||||
|
if self.cfg.metric_for_best_model:
|
||||||
|
training_arguments_kwargs["metric_for_best_model"] = (
|
||||||
|
self.cfg.metric_for_best_model
|
||||||
|
)
|
||||||
|
if self.cfg.greater_is_better:
|
||||||
|
training_arguments_kwargs["greater_is_better"] = self.cfg.greater_is_better
|
||||||
|
|
||||||
|
# DDP Config
|
||||||
|
if self.cfg.ddp_timeout:
|
||||||
|
training_arguments_kwargs["ddp_timeout"] = self.cfg.ddp_timeout
|
||||||
|
# see https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html
|
||||||
|
if self.cfg.ddp_bucket_cap_mb:
|
||||||
|
training_arguments_kwargs["ddp_bucket_cap_mb"] = self.cfg.ddp_bucket_cap_mb
|
||||||
|
if self.cfg.ddp_broadcast_buffers is not None:
|
||||||
|
training_arguments_kwargs["ddp_broadcast_buffers"] = (
|
||||||
|
self.cfg.ddp_broadcast_buffers
|
||||||
|
)
|
||||||
|
|
||||||
|
# these are all the "standard" kwargs that are def used
|
||||||
|
training_arguments_kwargs["max_seq_length"] = self.cfg.sequence_len
|
||||||
|
|
||||||
|
if self.cfg.auto_find_batch_size is not None:
|
||||||
|
training_arguments_kwargs["auto_find_batch_size"] = (
|
||||||
|
self.cfg.auto_find_batch_size
|
||||||
|
)
|
||||||
|
|
||||||
|
training_arguments_kwargs["eval_accumulation_steps"] = (
|
||||||
|
self.cfg.gradient_accumulation_steps
|
||||||
|
)
|
||||||
|
|
||||||
|
training_arguments_kwargs["load_best_model_at_end"] = (
|
||||||
|
(
|
||||||
|
self.cfg.load_best_model_at_end is not False
|
||||||
|
or self.cfg.early_stopping_patience
|
||||||
|
)
|
||||||
|
and (
|
||||||
|
(not self.cfg.test_datasets and self.cfg.val_set_size > 0)
|
||||||
|
or (self.cfg.test_datasets and self.cfg.val_set_size == 0)
|
||||||
|
)
|
||||||
|
and self.cfg.save_steps
|
||||||
|
and self.cfg.eval_steps
|
||||||
|
and self.cfg.save_steps % self.cfg.eval_steps == 0
|
||||||
|
) or False
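The `load_best_model_at_end` expression above packs several requirements into one boolean; restated as a predicate for readability (a sketch, with plain arguments standing in for the config):

```python
def wants_load_best_model_at_end(load_best_model_at_end=None, early_stopping_patience=None,
                                 test_datasets=None, val_set_size=0.0,
                                 save_steps=None, eval_steps=None):
    return bool(
        (load_best_model_at_end is not False or early_stopping_patience)
        and ((not test_datasets and val_set_size > 0) or (test_datasets and val_set_size == 0))
        and save_steps
        and eval_steps
        and save_steps % eval_steps == 0
    )

# True: an eval split exists and every save step (200) lines up with an eval step (100)
print(wants_load_best_model_at_end(val_set_size=0.05, save_steps=200, eval_steps=100))
# False: saves (150) do not land on eval boundaries (100)
print(wants_load_best_model_at_end(val_set_size=0.05, save_steps=150, eval_steps=100))
```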
|
||||||
|
|
||||||
|
# handle ddp
|
||||||
|
ddp_find_unused_parameters = None
|
||||||
|
if self.cfg.ddp:
|
||||||
|
ddp_find_unused_parameters = bool(self.cfg.ddp_find_unused_parameters)
|
||||||
|
training_arguments_kwargs["ddp_find_unused_parameters"] = (
|
||||||
|
ddp_find_unused_parameters
|
||||||
|
)
|
||||||
|
|
||||||
|
training_arguments_kwargs["group_by_length"] = self.cfg.group_by_length
|
||||||
|
training_arguments_kwargs["curriculum_sampling"] = self.cfg.curriculum_sampling
|
||||||
|
|
||||||
|
training_arguments_kwargs["sample_packing"] = bool(self.cfg.sample_packing)
|
||||||
|
training_arguments_kwargs["multipack_real_batches"] = (
|
||||||
|
self.cfg.multipack_real_batches
|
||||||
|
if self.cfg.multipack_real_batches is not None
|
||||||
|
else not self.cfg.flash_attention
|
||||||
|
)
|
||||||
|
training_arguments_kwargs["eval_sample_packing"] = bool(
|
||||||
|
self.cfg.eval_sample_packing
|
||||||
|
)
|
||||||
|
if self.cfg.sample_packing_bin_size is not None:
|
||||||
|
training_arguments_kwargs["sample_packing_bin_size"] = (
|
||||||
|
self.cfg.sample_packing_bin_size
|
||||||
|
)
|
||||||
|
if self.cfg.sample_packing_group_size is not None:
|
||||||
|
training_arguments_kwargs["sample_packing_group_size"] = (
|
||||||
|
self.cfg.sample_packing_group_size
|
||||||
|
)
|
||||||
|
if self.cfg.sample_packing_eff_est:
|
||||||
|
training_arguments_kwargs["sample_packing_efficiency"] = (
|
||||||
|
self.cfg.sample_packing_eff_est
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.relora_steps:
|
||||||
|
training_arguments_kwargs["relora_steps"] = self.cfg.relora_steps
|
||||||
|
training_arguments_kwargs["relora_warmup_steps"] = (
|
||||||
|
self.cfg.relora_warmup_steps
|
||||||
|
)
|
||||||
|
if self.cfg.relora_anneal_steps:
|
||||||
|
training_arguments_kwargs["relora_anneal_steps"] = (
|
||||||
|
self.cfg.relora_anneal_steps
|
||||||
|
)
|
||||||
|
if self.cfg.relora_prune_ratio:
|
||||||
|
training_arguments_kwargs["relora_prune_ratio"] = (
|
||||||
|
self.cfg.relora_prune_ratio
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.lisa_step_interval and self.cfg.lisa_n_layers:
|
||||||
|
training_arguments_kwargs["lisa_n_layers"] = self.cfg.lisa_n_layers
|
||||||
|
training_arguments_kwargs["lisa_step_interval"] = (
|
||||||
|
self.cfg.lisa_step_interval
|
||||||
|
)
|
||||||
|
training_arguments_kwargs["lisa_layers_attribute"] = (
|
||||||
|
self.cfg.lisa_layers_attribute
|
||||||
|
)
|
||||||
|
|
||||||
|
training_arguments_kwargs = self.hook_pre_create_training_args(
|
||||||
|
training_arguments_kwargs
|
||||||
|
)
|
||||||
|
training_arguments_kwargs["model_type"] = self.cfg.model_config_type
|
||||||
|
training_arguments_kwargs["pretraining"] = bool(self.cfg.pretraining_dataset)
|
||||||
|
if self.cfg.chat_template:
|
||||||
|
training_arguments_kwargs["chat_template"] = get_chat_template_from_config(
|
||||||
|
cfg=self.cfg,
|
||||||
|
tokenizer=self.tokenizer,
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.neftune_noise_alpha is not None:
|
||||||
|
training_arguments_kwargs["neftune_noise_alpha"] = (
|
||||||
|
self.cfg.neftune_noise_alpha
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.accelerator_config:
|
||||||
|
training_arguments_kwargs["accelerator_config"] = (
|
||||||
|
self.cfg.accelerator_config
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.image_size:
|
||||||
|
training_arguments_kwargs["image_size"] = self.cfg.image_size
|
||||||
|
if self.cfg.image_resize_algorithm:
|
||||||
|
training_arguments_kwargs["image_resize_algorithm"] = (
|
||||||
|
self.cfg.image_resize_algorithm
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.plugins:
|
||||||
|
plugin_manager = PluginManager.get_instance()
|
||||||
|
plugin_training_args = plugin_manager.get_training_args(self.cfg)
|
||||||
|
if plugin_training_args:
|
||||||
|
training_arguments_kwargs.update(plugin_training_args)
|
||||||
|
|
||||||
|
if self.cfg.reward_model:
|
||||||
|
training_args_cls = AxolotlRewardConfig
|
||||||
|
elif self.cfg.process_reward_model:
|
||||||
|
training_args_cls = AxolotlPRMConfig
|
||||||
|
else:
|
||||||
|
training_args_cls = AxolotlTrainingArguments
|
||||||
|
training_args = training_args_cls( # pylint: disable=unexpected-keyword-arg
|
||||||
|
**training_arguments_kwargs,
|
||||||
|
)
|
||||||
|
training_args = self.hook_post_create_training_args(training_args)
|
||||||
|
|
||||||
|
# unset run_name so wandb sets up experiment names
|
||||||
|
if self.cfg.use_wandb and training_args.run_name == training_args.output_dir:
|
||||||
|
training_args.run_name = ( # pylint: disable=attribute-defined-outside-init
|
||||||
|
None
|
||||||
|
)
|
||||||
|
|
||||||
|
data_collator_kwargs = {
|
||||||
|
"padding": True, # True/"longest" is the default
|
||||||
|
}
|
||||||
|
multiple = 64
|
||||||
|
if self.cfg.pad_to_sequence_len:
|
||||||
|
data_collator_kwargs["pad_to_multiple_of"] = multiple * math.ceil(
|
||||||
|
self.cfg.sequence_len / multiple
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# A100 is best at 64, while others at 8. Let's use the larger so we don't have to check
|
||||||
|
# https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html
|
||||||
|
data_collator_kwargs["pad_to_multiple_of"] = multiple
|
||||||
|
|
||||||
|
trainer_cls = self._get_trainer_cls()
|
||||||
|
|
||||||
|
trainer_kwargs, trainer_cls = self.hook_pre_create_trainer(
|
||||||
|
trainer_kwargs, trainer_cls
|
||||||
|
)
|
||||||
|
if eval_data_collator := self.build_collator(
|
||||||
|
training_args, is_eval=True, **data_collator_kwargs
|
||||||
|
):
|
||||||
|
if not (self.cfg.reward_model or self.cfg.process_reward_model):
|
||||||
|
trainer_kwargs["eval_data_collator"] = eval_data_collator
|
||||||
|
if not (self.cfg.reward_model or self.cfg.process_reward_model):
|
||||||
|
trainer_kwargs["bench_data_collator"] = transformers.DataCollatorForSeq2Seq(
|
||||||
|
self.tokenizer,
|
||||||
|
return_tensors="pt",
|
||||||
|
**data_collator_kwargs,
|
||||||
|
)
|
||||||
|
sig = inspect.signature(trainer_cls)
|
||||||
|
if "processing_class" in sig.parameters:
|
||||||
|
trainer_kwargs["processing_class"] = self.tokenizer
|
||||||
|
elif "tokenizer" in sig.parameters:
|
||||||
|
trainer_kwargs["tokenizer"] = self.tokenizer
|
||||||
|
if (
|
||||||
|
trainer_cls not in [AxolotlRewardTrainer, AxolotlPRMTrainer]
|
||||||
|
and self.cfg.datasets is not None
|
||||||
|
):
|
||||||
|
trainer_kwargs["dataset_tags"] = [
|
||||||
|
d["path"] for d in self.cfg.datasets if not Path(d["path"]).is_dir()
|
||||||
|
]
|
||||||
|
trainer = trainer_cls(
|
||||||
|
model=self.model,
|
||||||
|
train_dataset=self.train_dataset,
|
||||||
|
eval_dataset=self.eval_dataset,
|
||||||
|
args=training_args,
|
||||||
|
data_collator=self.build_collator(training_args, **data_collator_kwargs),
|
||||||
|
callbacks=self.get_callbacks(),
|
||||||
|
**trainer_kwargs,
|
||||||
|
)
|
||||||
|
trainer = self.hook_post_create_trainer(trainer)
|
||||||
|
for callback in self.get_post_trainer_create_callbacks(trainer):
|
||||||
|
trainer.add_callback(callback)
|
||||||
|
|
||||||
|
if self.cfg.deepspeed and self.cfg.sample_packing:
|
||||||
|
trainer.accelerator.state.deepspeed_plugin.deepspeed_config[
|
||||||
|
"train_micro_batch_size_per_gpu"
|
||||||
|
] = self.cfg.micro_batch_size
|
||||||
|
|
||||||
|
return trainer
|
||||||
|
|
||||||
|
def build_collator(
|
||||||
|
self,
|
||||||
|
training_args, # type: "AxolotlTrainingArguments" # type: ignore
|
||||||
|
is_eval=False,
|
||||||
|
**kwargs,
|
||||||
|
):
|
||||||
|
if training_args.pretraining:
|
||||||
|
if (
|
||||||
|
self.cfg.pretraining_sample_concatenation is False
|
||||||
|
or self.cfg.micro_batch_size > 1
|
||||||
|
):
|
||||||
|
return DataCollatorForSeq2Seq(self.tokenizer, **kwargs)
|
||||||
|
return None
|
||||||
|
|
||||||
|
if self.cfg.model_config_type == "mamba":
|
||||||
|
return MambaDataCollator(tokenizer=self.tokenizer)
|
||||||
|
|
||||||
|
use_batch_sampler_collator = False
|
||||||
|
if is_eval is False and training_args.sample_packing:
|
||||||
|
use_batch_sampler_collator = True
|
||||||
|
if is_eval and training_args.eval_sample_packing:
|
||||||
|
use_batch_sampler_collator = True
|
||||||
|
|
||||||
|
collator: Type[
|
||||||
|
Union[
|
||||||
|
V2BatchSamplerDataCollatorForSeq2Seq,
|
||||||
|
BatchSamplerDataCollatorForSeq2Seq,
|
||||||
|
DataCollatorForSeq2Seq,
|
||||||
|
DataCollatorWithFlattening,
|
||||||
|
RewardDataCollatorWithPadding,
|
||||||
|
]
|
||||||
|
]
|
||||||
|
collator_args = [self.tokenizer]
|
||||||
|
|
||||||
|
collator_cls_and_kwargs = None
|
||||||
|
if self.cfg.plugins:
|
||||||
|
plugin_manager = PluginManager.get_instance()
|
||||||
|
collator_cls_and_kwargs = plugin_manager.get_collator_cls_and_kwargs(
|
||||||
|
self.cfg, is_eval=is_eval
|
||||||
|
)
|
||||||
|
|
||||||
|
if collator_cls_and_kwargs:
|
||||||
|
collator = collator_cls_and_kwargs[0]
|
||||||
|
if kwargs and isinstance(kwargs, dict):
|
||||||
|
kwargs.update(collator_cls_and_kwargs[1])
|
||||||
|
elif self.cfg.reward_model:
|
||||||
|
collator = RewardDataCollatorWithPadding
|
||||||
|
elif use_batch_sampler_collator:
|
||||||
|
# Use V2BatchSamplerDataCollatorForSeq2Seq for flex attention,
|
||||||
|
# supported multipack models, or non-flash-attention llama
|
||||||
|
if (
|
||||||
|
self.cfg.flex_attention
|
||||||
|
or self.cfg.model_config_type in SUPPORTED_MULTIPACK_MODEL_TYPES
|
||||||
|
or (
|
||||||
|
self.cfg.model_config_type in ["llama"]
|
||||||
|
and self.cfg.flash_attention is not True
|
||||||
|
)
|
||||||
|
):
|
||||||
|
collator = V2BatchSamplerDataCollatorForSeq2Seq
|
||||||
|
else:
|
||||||
|
collator = BatchSamplerDataCollatorForSeq2Seq
|
||||||
|
else:
|
||||||
|
if self.cfg.processor_type and self.processor:
|
||||||
|
collator = MultiModalChatDataCollator
|
||||||
|
kwargs["processing_strategy"] = get_processing_strategy(
|
||||||
|
self.processor,
|
||||||
|
training_args.chat_template,
|
||||||
|
self.cfg.chat_template,
|
||||||
|
image_size=training_args.image_size,
|
||||||
|
image_resize_algorithm=training_args.image_resize_algorithm,
|
||||||
|
)
|
||||||
|
elif self.cfg.batch_flattening:
|
||||||
|
collator = DataCollatorWithFlattening
|
||||||
|
collator_args.pop(0)
|
||||||
|
kwargs.pop("pad_to_multiple_of", None)
|
||||||
|
kwargs.pop("padding", None)
|
||||||
|
else:
|
||||||
|
collator = DataCollatorForSeq2Seq
|
||||||
|
|
||||||
|
kwargs["return_tensors"] = "pt"
|
||||||
|
|
||||||
|
return collator(
|
||||||
|
*collator_args,
|
||||||
|
**kwargs,
|
||||||
|
)
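For reference, the most common outcome of this method is a padded seq2seq collator; a hedged, standalone sketch of that default path using the upstream transformers collator of the same name (the `gpt2` checkpoint is only an example and must be downloadable):

```python
from transformers import AutoTokenizer, DataCollatorForSeq2Seq

tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # gpt2 ships without a pad token

collator = DataCollatorForSeq2Seq(
    tokenizer,
    padding=True,            # pad to the longest sample in the batch
    pad_to_multiple_of=64,   # the non-pad_to_sequence_len branch above
    return_tensors="pt",
)
batch = collator([
    {"input_ids": [1, 2, 3], "labels": [1, 2, 3]},
    {"input_ids": [4, 5], "labels": [4, 5]},
])
print(batch["input_ids"].shape)  # torch.Size([2, 64]) after padding up to a multiple of 64
```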
|
||||||
238
src/axolotl/core/builders/rl.py
Normal file
@@ -0,0 +1,238 @@
|
|||||||
|
"""Builder for RLHF trainers"""
|
||||||
|
|
||||||
|
import inspect
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from axolotl.core.builders.base import TrainerBuilderBase
|
||||||
|
from axolotl.core.trainers import (
|
||||||
|
AxolotlCPOTrainer,
|
||||||
|
AxolotlKTOTrainer,
|
||||||
|
AxolotlORPOTrainer,
|
||||||
|
)
|
||||||
|
from axolotl.core.trainers.dpo import DPOStrategy
|
||||||
|
from axolotl.core.trainers.dpo.args import AxolotlDPOConfig
|
||||||
|
from axolotl.core.trainers.grpo import GRPOStrategy
|
||||||
|
from axolotl.integrations.base import PluginManager
|
||||||
|
from axolotl.loaders.utils import ensure_dtype
|
||||||
|
from axolotl.utils.callbacks.qat import QATCallback
|
||||||
|
from axolotl.utils.logging import get_logger
|
||||||
|
from axolotl.utils.schemas.enums import RLType
|
||||||
|
|
||||||
|
LOG = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class HFRLTrainerBuilder(TrainerBuilderBase):
|
||||||
|
"""Trainer factory class for TRL-based RLHF trainers (e.g. DPO)"""
|
||||||
|
|
||||||
|
def get_callbacks(self):
|
||||||
|
callbacks = super().get_callbacks()
|
||||||
|
|
||||||
|
if self.cfg.qat:
|
||||||
|
callbacks.append(QATCallback(self.cfg.qat))
|
||||||
|
|
||||||
|
return callbacks
|
||||||
|
|
||||||
|
def get_post_trainer_create_callbacks(self, trainer):
|
||||||
|
callbacks = super().get_post_trainer_create_callbacks(trainer=trainer)
|
||||||
|
return callbacks
|
||||||
|
|
||||||
|
def _get_trainer_cls(self, trainer_kwargs: dict):
|
||||||
|
"""
|
||||||
|
Returns trainer_cls and trainer_cls_args
|
||||||
|
"""
|
||||||
|
if self.cfg.plugins:
|
||||||
|
plugin_manager = PluginManager.get_instance()
|
||||||
|
trainer_cls = plugin_manager.get_trainer_cls(self.cfg)
|
||||||
|
trainer_cls_args = [] # type: ignore
|
||||||
|
|
||||||
|
if trainer_cls is not None:
|
||||||
|
return trainer_cls, trainer_cls_args
|
||||||
|
|
||||||
|
trainer_cls = None
|
||||||
|
trainer_cls_args = [self.model]
|
||||||
|
|
||||||
|
if self.cfg.rl is RLType.GRPO:
|
||||||
|
trainer_cls = GRPOStrategy.get_trainer_class(
|
||||||
|
sequence_parallel=self.cfg.sequence_parallel_degree > 1
|
||||||
|
)
|
||||||
|
trainer_cls_args.extend(GRPOStrategy.set_trainer_args(self.cfg))
|
||||||
|
|
||||||
|
trainer_kwargs.update(GRPOStrategy.set_trainer_kwargs(self.cfg))
|
||||||
|
|
||||||
|
elif self.cfg.rl in [RLType.DPO, RLType.IPO]:
|
||||||
|
trainer_cls = DPOStrategy.get_trainer_class()
|
||||||
|
trainer_cls_args.append(self.model_ref)
|
||||||
|
|
||||||
|
elif self.cfg.rl is RLType.ORPO:
|
||||||
|
trainer_cls = AxolotlORPOTrainer
|
||||||
|
elif self.cfg.rl is RLType.KTO:
|
||||||
|
trainer_cls = AxolotlKTOTrainer
|
||||||
|
elif self.cfg.rl is RLType.SIMPO:
|
||||||
|
trainer_cls = AxolotlCPOTrainer
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unsupported RL: {self.cfg.rl}")
|
||||||
|
|
||||||
|
return trainer_cls, trainer_cls_args
|
||||||
|
|
||||||
|
def _build_training_arguments(self, total_num_steps):
|
||||||
|
"""
|
||||||
|
Returns training_args and trainer_kwargs
|
||||||
|
"""
|
||||||
|
from axolotl.core.training_args import (
|
||||||
|
AxolotlCPOConfig,
|
||||||
|
AxolotlKTOConfig,
|
||||||
|
AxolotlORPOConfig,
|
||||||
|
)
|
||||||
|
|
||||||
|
training_args_kwargs, trainer_kwargs = self._set_base_training_args(
|
||||||
|
total_num_steps=total_num_steps
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.remove_unused_columns is not None:
|
||||||
|
training_args_kwargs["remove_unused_columns"] = (
|
||||||
|
self.cfg.remove_unused_columns
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
training_args_kwargs["remove_unused_columns"] = False
|
||||||
|
|
||||||
|
if self.cfg.trl and self.cfg.trl.beta is not None:
|
||||||
|
training_args_kwargs["beta"] = self.cfg.trl.beta
|
||||||
|
elif self.cfg.rl_beta is not None:
|
||||||
|
training_args_kwargs["beta"] = self.cfg.rl_beta
|
||||||
|
elif self.cfg.orpo_alpha is not None:
|
||||||
|
# trl does some odd mapping of alpha to beta to reuse the beta parameter ???
|
||||||
|
training_args_kwargs["beta"] = self.cfg.orpo_alpha
|
||||||
|
|
||||||
|
if self.cfg.rpo_alpha is not None:
|
||||||
|
training_args_kwargs["rpo_alpha"] = self.cfg.rpo_alpha
|
||||||
|
|
||||||
|
if self.cfg.use_wandb:
|
||||||
|
training_args_kwargs["run_name"] = self.cfg.wandb_name
|
||||||
|
|
||||||
|
training_args_cls = None
|
||||||
|
blocklist_args_kwargs = []
|
||||||
|
if self.cfg.rl is RLType.SIMPO:
|
||||||
|
training_args_cls = AxolotlCPOConfig
|
||||||
|
training_args_kwargs["loss_type"] = "simpo"
|
||||||
|
training_args_kwargs["simpo_gamma"] = self.cfg.simpo_gamma
|
||||||
|
if self.cfg.cpo_alpha is not None:
|
||||||
|
training_args_kwargs["cpo_alpha"] = self.cfg.cpo_alpha
|
||||||
|
|
||||||
|
elif self.cfg.rl is RLType.ORPO:
|
||||||
|
training_args_cls = AxolotlORPOConfig
|
||||||
|
if self.cfg.max_prompt_len:
|
||||||
|
training_args_kwargs["max_prompt_length"] = self.cfg.max_prompt_len
|
||||||
|
|
||||||
|
elif self.cfg.rl is RLType.KTO:
|
||||||
|
training_args_cls = AxolotlKTOConfig
|
||||||
|
|
||||||
|
training_args_kwargs["desirable_weight"] = (
|
||||||
|
self.cfg.kto_desirable_weight or 1.0
|
||||||
|
)
|
||||||
|
training_args_kwargs["undesirable_weight"] = (
|
||||||
|
self.cfg.kto_undesirable_weight or 1.0
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.cfg.max_prompt_len:
|
||||||
|
training_args_kwargs["max_prompt_length"] = self.cfg.max_prompt_len
|
||||||
|
|
||||||
|
elif self.cfg.rl is RLType.GRPO:
|
||||||
|
training_args_cls = GRPOStrategy.get_training_args_class()
|
||||||
|
training_args_kwargs.update(GRPOStrategy.set_training_args_kwargs(self.cfg))
|
||||||
|
blocklist_args_kwargs = GRPOStrategy.get_blocklist_args_kwargs()
|
||||||
|
|
||||||
|
elif self.cfg.rl in [RLType.DPO, RLType.IPO]:
|
||||||
|
training_args_cls = AxolotlDPOConfig
|
||||||
|
training_args_kwargs.update(DPOStrategy.set_training_args_kwargs(self.cfg))
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unsupported RL: {self.cfg.rl}")
|
||||||
|
|
||||||
|
for blocklist_key in blocklist_args_kwargs:
|
||||||
|
if blocklist_key in training_args_kwargs:
|
||||||
|
del training_args_kwargs[blocklist_key]
|
||||||
|
|
||||||
|
if self.cfg.plugins:
|
||||||
|
plugin_manager = PluginManager.get_instance()
|
||||||
|
plugin_training_args = plugin_manager.get_training_args(self.cfg)
|
||||||
|
if plugin_training_args:
|
||||||
|
training_args_kwargs.update(plugin_training_args)
|
||||||
|
|
||||||
|
training_args = training_args_cls( # pylint: disable=unexpected-keyword-arg
|
||||||
|
logging_first_step=True,
|
||||||
|
**training_args_kwargs,
|
||||||
|
)
|
||||||
|
|
||||||
|
# unset run_name so wandb sets up experiment names
|
||||||
|
if self.cfg.use_wandb and training_args.run_name == training_args.output_dir:
|
||||||
|
training_args.run_name = ( # pylint: disable=attribute-defined-outside-init
|
||||||
|
None
|
||||||
|
)
|
||||||
|
|
||||||
|
return training_args, trainer_kwargs
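The precedence for the preference-loss `beta` above is `trl.beta`, then `rl_beta`, then `orpo_alpha`; a tiny sketch of that fallback chain, with illustrative argument names:

```python
def resolve_beta(trl_beta=None, rl_beta=None, orpo_alpha=None):
    """First non-None value wins, mirroring the branch above."""
    for value in (trl_beta, rl_beta, orpo_alpha):
        if value is not None:
            return value
    return None

print(resolve_beta(rl_beta=0.1))                 # 0.1
print(resolve_beta(trl_beta=0.05, rl_beta=0.1))  # 0.05
```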
|
||||||
|
|
||||||
|
def build(self, total_num_steps):
|
||||||
|
training_args, trainer_kwargs = self._build_training_arguments(total_num_steps)
|
||||||
|
|
||||||
|
if self.eval_dataset:
|
||||||
|
trainer_kwargs["eval_dataset"] = self.eval_dataset
|
||||||
|
if self.cfg.adapter and self.peft_config and self.cfg.rl is not RLType.GRPO:
|
||||||
|
trainer_kwargs["peft_config"] = self.peft_config
|
||||||
|
if self.cfg.precompute_ref_log_probs is not None:
|
||||||
|
trainer_kwargs["precompute_ref_log_probs"] = (
|
||||||
|
self.cfg.precompute_ref_log_probs
|
||||||
|
)
|
||||||
|
|
||||||
|
trainer_cls, trainer_cls_args = self._get_trainer_cls(trainer_kwargs)
|
||||||
|
|
||||||
|
sig = inspect.signature(trainer_cls)
|
||||||
|
if "tokenizer" in sig.parameters:
|
||||||
|
trainer_kwargs["tokenizer"] = self.tokenizer
|
||||||
|
else:
|
||||||
|
trainer_kwargs["processing_class"] = self.tokenizer
|
||||||
|
|
||||||
|
if self.cfg.datasets is not None and (
|
||||||
|
trainer_cls is DPOStrategy.get_trainer_class()
|
||||||
|
):
|
||||||
|
trainer_kwargs["dataset_tags"] = [
|
||||||
|
d["path"] for d in self.cfg.datasets if not Path(d["path"]).is_dir()
|
||||||
|
]
|
||||||
|
|
||||||
|
trainer_kwargs, trainer_cls = self.hook_pre_create_trainer(
|
||||||
|
trainer_kwargs, trainer_cls
|
||||||
|
)
|
||||||
|
|
||||||
|
trainer = trainer_cls(
|
||||||
|
*trainer_cls_args,
|
||||||
|
args=training_args,
|
||||||
|
train_dataset=self.train_dataset,
|
||||||
|
callbacks=self.get_callbacks(),
|
||||||
|
**trainer_kwargs,
|
||||||
|
)
|
||||||
|
if self.cfg.fsdp:
|
||||||
|
ensure_dtype(trainer.model, dtype=self.cfg.torch_dtype)
|
||||||
|
if self.cfg.rl in [RLType.DPO, RLType.IPO] and trainer.ref_model:
|
||||||
|
ensure_dtype(trainer.ref_model, dtype=self.cfg.torch_dtype)
|
||||||
|
|
||||||
|
trainer = self.hook_post_create_trainer(trainer)
|
||||||
|
for callback in self.get_post_trainer_create_callbacks(trainer):
|
||||||
|
trainer.add_callback(callback)
|
||||||
|
|
||||||
|
return trainer
|
||||||
|
|
||||||
|
|
||||||
|
class HFPPOTrainerBuilder(TrainerBuilderBase):
|
||||||
|
"""
|
||||||
|
HF Factory class for PPO Trainer
|
||||||
|
"""
|
||||||
|
|
||||||
|
def get_callbacks(self):
|
||||||
|
callbacks = super().get_callbacks()
|
||||||
|
return callbacks
|
||||||
|
|
||||||
|
def get_post_trainer_create_callbacks(self, trainer):
|
||||||
|
callbacks = super().get_post_trainer_create_callbacks(trainer=trainer)
|
||||||
|
return callbacks
|
||||||
|
|
||||||
|
def build(self, total_num_steps):
|
||||||
|
# TODO: build PPOConfig
|
||||||
|
raise NotImplementedError("PPO trainer builder is not implemented yet.")
|
||||||
@@ -156,7 +156,6 @@ class Messages(BaseModel):
|
|||||||
len(input_ids) : len(input_ids) + len(pending_input_ids)
|
len(input_ids) : len(input_ids) + len(pending_input_ids)
|
||||||
]
|
]
|
||||||
if new_pending_inputs != pending_input_ids:
|
if new_pending_inputs != pending_input_ids:
|
||||||
# logging.warning("tokenization mismatch from concatenation.")
|
|
||||||
pending_input_ids = new_pending_inputs
|
pending_input_ids = new_pending_inputs
|
||||||
input_ids.extend(pending_input_ids)
|
input_ids.extend(pending_input_ids)
|
||||||
if pending_weight:
|
if pending_weight:
|
||||||
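The slice-and-compare in the hunk above exists because tokenizing a concatenation is not guaranteed to equal concatenating the tokenizations; a hedged demonstration of why (any Hugging Face tokenizer works, `gpt2` is just an example and must be downloadable):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
prefix, suffix = "hello", "world"
joint = tok(prefix + suffix, add_special_tokens=False)["input_ids"]
separate = (
    tok(prefix, add_special_tokens=False)["input_ids"]
    + tok(suffix, add_special_tokens=False)["input_ids"]
)
# BPE merges can cross the boundary, so the two sequences may differ;
# when they do, the code above falls back to the re-tokenized slice.
print(joint, separate, joint == separate)
```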
|
|||||||
File diff suppressed because it is too large
@@ -4,11 +4,10 @@
|
|||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
import os
|
||||||
from collections import defaultdict
|
from collections import defaultdict
|
||||||
from functools import wraps
|
from functools import partial, wraps
|
||||||
from typing import Literal
|
from typing import Callable, Literal, Optional
|
||||||
|
|
||||||
import datasets
|
import datasets
|
||||||
import torch
|
import torch
|
||||||
@@ -26,6 +25,7 @@ from trl.trainer.utils import pad_to_length
|
|||||||
from typing_extensions import override
|
from typing_extensions import override
|
||||||
|
|
||||||
from axolotl.core.trainers.mixins import (
|
from axolotl.core.trainers.mixins import (
|
||||||
|
CheckpointSaveMixin,
|
||||||
OptimizerMixin,
|
OptimizerMixin,
|
||||||
RngLoaderMixin,
|
RngLoaderMixin,
|
||||||
SchedulerMixin,
|
SchedulerMixin,
|
||||||
@@ -34,12 +34,16 @@ from axolotl.core.trainers.utils import (
|
|||||||
sanitize_kwargs_for_ds_tagging,
|
sanitize_kwargs_for_ds_tagging,
|
||||||
sanitize_kwargs_for_tagging,
|
sanitize_kwargs_for_tagging,
|
||||||
)
|
)
|
||||||
|
from axolotl.utils import get_not_null
|
||||||
|
from axolotl.utils.logging import get_logger
|
||||||
from axolotl.utils.samplers import MultipackBatchSampler, get_dataset_lengths
|
from axolotl.utils.samplers import MultipackBatchSampler, get_dataset_lengths
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class AxolotlTrainer(SchedulerMixin, OptimizerMixin, RngLoaderMixin, Trainer):
|
class AxolotlTrainer(
|
||||||
|
SchedulerMixin, OptimizerMixin, RngLoaderMixin, CheckpointSaveMixin, Trainer
|
||||||
|
):
|
||||||
"""Extend the base Trainer for axolotl helpers"""
|
"""Extend the base Trainer for axolotl helpers"""
|
||||||
|
|
||||||
args = None # type: "AxolotlTrainingArguments" # type: ignore[name-defined]
|
args = None # type: "AxolotlTrainingArguments" # type: ignore[name-defined]
|
||||||
@@ -101,7 +105,7 @@ class AxolotlTrainer(SchedulerMixin, OptimizerMixin, RngLoaderMixin, Trainer):
|
|||||||
)
|
)
|
||||||
batch_max_len = train_batch_size * self.args.max_seq_length
|
batch_max_len = train_batch_size * self.args.max_seq_length
|
||||||
|
|
||||||
return MultipackBatchSampler(
|
sampler = MultipackBatchSampler(
|
||||||
base_sampler,
|
base_sampler,
|
||||||
lengths=get_dataset_lengths(dataset),
|
lengths=get_dataset_lengths(dataset),
|
||||||
packing_efficiency_estimate=self.args.sample_packing_efficiency,
|
packing_efficiency_estimate=self.args.sample_packing_efficiency,
|
||||||
@@ -111,9 +115,15 @@ class AxolotlTrainer(SchedulerMixin, OptimizerMixin, RngLoaderMixin, Trainer):
|
|||||||
bin_size=self.args.sample_packing_bin_size,
|
bin_size=self.args.sample_packing_bin_size,
|
||||||
sequential=self.args.sample_packing_sequentially,
|
sequential=self.args.sample_packing_sequentially,
|
||||||
drop_last=True,
|
drop_last=True,
|
||||||
|
num_processes=self.args.dataset_num_proc,
|
||||||
)
|
)
|
||||||
|
|
||||||
def _get_train_sampler(self) -> Sampler | None:
|
len(sampler)
|
||||||
|
return sampler
|
||||||
|
|
||||||
|
def _get_train_sampler(
|
||||||
|
self, train_dataset: Optional[Dataset] = None
|
||||||
|
) -> Optional[Sampler]:
|
||||||
"""
|
"""
|
||||||
Helper method to get the sampler for training. Handles cases for sample packing
|
Helper method to get the sampler for training. Handles cases for sample packing
|
||||||
and curriculum sampling (sequential).
|
and curriculum sampling (sequential).
|
||||||
@@ -137,7 +147,7 @@ class AxolotlTrainer(SchedulerMixin, OptimizerMixin, RngLoaderMixin, Trainer):
|
|||||||
if use_sample_packing:
|
if use_sample_packing:
|
||||||
return self._create_multipack_sampler(
|
return self._create_multipack_sampler(
|
||||||
base_sampler=base_sampler,
|
base_sampler=base_sampler,
|
||||||
dataset=self.train_dataset,
|
dataset=train_dataset,
|
||||||
)
|
)
|
||||||
|
|
||||||
return base_sampler
|
return base_sampler
|
||||||
@@ -150,8 +160,6 @@ class AxolotlTrainer(SchedulerMixin, OptimizerMixin, RngLoaderMixin, Trainer):
|
|||||||
If the dataset is non-empty, a sampler is returned, the type of which
|
If the dataset is non-empty, a sampler is returned, the type of which
|
||||||
depends on the passed training args.
|
depends on the passed training args.
|
||||||
"""
|
"""
|
||||||
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
|
|
||||||
|
|
||||||
# Multipacking enabled if training is enabled and eval is not explicitly disabled
|
# Multipacking enabled if training is enabled and eval is not explicitly disabled
|
||||||
use_multipack = (
|
use_multipack = (
|
||||||
self.args.sample_packing and self.args.eval_sample_packing is not False
|
self.args.sample_packing and self.args.eval_sample_packing is not False
|
||||||
@@ -172,125 +180,93 @@ class AxolotlTrainer(SchedulerMixin, OptimizerMixin, RngLoaderMixin, Trainer):
|
|||||||
|
|
||||||
return base_sampler
|
return base_sampler
|
||||||
|
|
||||||
def _create_dataloader_params(self, is_eval=False, custom_batch_size=None):
|
def _get_dataloader(
|
||||||
"""Create common dataloader parameters for train or eval."""
|
self,
|
||||||
batch_size = custom_batch_size or (
|
dataset: Dataset,
|
||||||
self.args.eval_batch_size if is_eval else self._train_batch_size
|
description: str,
|
||||||
)
|
batch_size: int,
|
||||||
|
sampler_fn: Optional[Callable[[Dataset], torch.utils.data.Sampler]] = None,
|
||||||
|
is_training: bool = False,
|
||||||
|
dataloader_key: Optional[str] = None,
|
||||||
|
) -> DataLoader:
|
||||||
|
"""Create a [`~torch.utils.data.DataLoader`] from the given dataset."""
|
||||||
|
|
||||||
params = {
|
data_collator = self.data_collator if is_training else self.eval_data_collator
|
||||||
|
|
||||||
|
if dataset.column_names and "length" in dataset.column_names:
|
||||||
|
dataset = dataset.remove_columns(["length"])
|
||||||
|
|
||||||
|
if isinstance(dataset, datasets.Dataset):
|
||||||
|
if is_training:
|
||||||
|
if not self.args.sample_packing or self.args.pretraining:
|
||||||
|
dataset = self._remove_unused_columns(
|
||||||
|
dataset, description="training"
|
||||||
|
)
|
||||||
|
elif (
|
||||||
|
not is_training
|
||||||
|
and self.args.sample_packing
|
||||||
|
and self.args.eval_sample_packing is not False
|
||||||
|
):
|
||||||
|
batch_size = (
|
||||||
|
batch_size
|
||||||
|
if self.args.sample_packing
|
||||||
|
else self.args.per_device_eval_batch_size
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
dataset = self._remove_unused_columns(dataset, description=description)
|
||||||
|
else:
|
||||||
|
data_collator = self._get_collator_with_removed_columns(
|
||||||
|
self.data_collator, description=description
|
||||||
|
)
|
||||||
|
|
||||||
|
dataloader_params = {
|
||||||
"batch_size": batch_size,
|
"batch_size": batch_size,
|
||||||
"collate_fn": self.data_collator,
|
"collate_fn": data_collator,
|
||||||
"num_workers": self.args.dataloader_num_workers,
|
"num_workers": self.args.dataloader_num_workers,
|
||||||
"pin_memory": self.args.dataloader_pin_memory,
|
"pin_memory": self.args.dataloader_pin_memory,
|
||||||
|
"persistent_workers": self.args.dataloader_persistent_workers,
|
||||||
}
|
}
|
||||||
|
|
||||||
# Add persistent workers only for training
|
|
||||||
if not is_eval and hasattr(self.args, "dataloader_persistent_workers"):
|
|
||||||
params["persistent_workers"] = self.args.dataloader_persistent_workers
|
|
||||||
|
|
||||||
# Add prefetch factor if specified
|
|
||||||
if self.args.dataloader_prefetch_factor:
|
|
||||||
params["prefetch_factor"] = self.args.dataloader_prefetch_factor
|
|
||||||
|
|
||||||
return params
|
|
||||||
|
|
||||||
def _prepare_dataloader(
|
|
||||||
self, dataset, sampler, is_eval=False, custom_batch_size=None
|
|
||||||
):
|
|
||||||
"""Prepare a dataloader with the given dataset and sampler."""
|
|
||||||
# Get base parameters
|
|
||||||
dataloader_params = self._create_dataloader_params(is_eval, custom_batch_size)
|
|
||||||
|
|
||||||
# Add sampler configuration
|
|
||||||
if not isinstance(dataset, torch.utils.data.IterableDataset):
|
if not isinstance(dataset, torch.utils.data.IterableDataset):
|
||||||
if isinstance(sampler, BatchSampler):
|
dataloader_params["drop_last"] = get_not_null(
|
||||||
# batch_size and batch_sampler are mutually exclusive
|
self.args.dataloader_drop_last, True
|
||||||
dataloader_params["batch_sampler"] = sampler
|
)
|
||||||
del dataloader_params["batch_size"]
|
if sampler_fn is not None:
|
||||||
else:
|
sampler = sampler_fn(dataset)
|
||||||
dataloader_params["sampler"] = sampler
|
if isinstance(sampler, BatchSampler):
|
||||||
dataloader_params["drop_last"] = self.args.dataloader_drop_last
|
# batch_size and batch_sampler are mutually exclusive
|
||||||
|
dataloader_params["batch_sampler"] = sampler
|
||||||
if not is_eval:
|
del dataloader_params["batch_size"]
|
||||||
dataloader_params["worker_init_fn"] = seed_worker
|
del dataloader_params["drop_last"]
|
||||||
|
else:
|
||||||
# Create the dataloader
|
dataloader_params["sampler"] = sampler
|
||||||
dataloader = DataLoader(dataset, **dataloader_params)
|
|
||||||
|
|
||||||
|
dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor
|
||||||
|
if is_training:
|
||||||
|
dataloader_params["worker_init_fn"] = partial(
|
||||||
|
seed_worker,
|
||||||
|
num_workers=self.args.dataloader_num_workers,
|
||||||
|
rank=self.args.process_index,
|
||||||
|
)
|
||||||
if self.args.sample_packing and (
|
if self.args.sample_packing and (
|
||||||
(not is_eval and not self.args.pretraining)
|
(is_training and not self.args.pretraining)
|
||||||
or (is_eval and self.args.eval_sample_packing is not False)
|
or (not is_training and self.args.eval_sample_packing is not False)
|
||||||
):
|
):
|
||||||
self.accelerator.even_batches = False
|
self.accelerator.even_batches = False
|
||||||
|
|
||||||
return self.accelerator.prepare_data_loader(dataloader)
|
dataloader = DataLoader(dataset, **dataloader_params)
|
||||||
|
|
||||||
def get_train_dataloader(self) -> DataLoader:
|
# Accelerator.free_memory() will destroy the references, so
|
||||||
"""Get dataloader for training"""
|
# we need to store the non-prepared version for eval dataloaders.
|
||||||
train_dataset = self.train_dataset
|
# fmt: off
|
||||||
data_collator = self.data_collator # type: ignore
|
if dataloader_key is not None and self.args.dataloader_persistent_workers:
|
||||||
|
if hasattr(self, "_eval_dataloaders"):
|
||||||
|
self._eval_dataloaders[dataloader_key] = dataloader # type: ignore # pylint: disable=access-member-before-definition
|
||||||
|
else:
|
||||||
|
self._eval_dataloaders = {dataloader_key: dataloader} # pylint: disable=attribute-defined-outside-init
|
||||||
|
# fmt: on
|
||||||
|
|
||||||
# Handle dataset preprocessing
|
return self.accelerator.prepare(dataloader)
|
||||||
if isinstance(train_dataset, datasets.Dataset):
|
|
||||||
if self.args.sample_packing and not self.args.pretraining:
|
|
||||||
train_dataset = train_dataset.remove_columns(["length"])
|
|
||||||
if not self.args.sample_packing or self.args.pretraining:
|
|
||||||
train_dataset = self._remove_unused_columns(
|
|
||||||
train_dataset, description="training"
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self.data_collator = self._get_collator_with_removed_columns( # pylint: disable=attribute-defined-outside-init
|
|
||||||
data_collator,
|
|
||||||
description="training",
|
|
||||||
)
|
|
||||||
|
|
||||||
# Get sampler and create dataloader
|
|
||||||
sampler = self._get_train_sampler()
|
|
||||||
return self._prepare_dataloader(train_dataset, sampler, is_eval=False)
|
|
||||||
|
|
||||||
def get_eval_dataloader(self, eval_dataset: Dataset | None = None) -> DataLoader:
|
|
||||||
"""Get dataloader for evaluation"""
|
|
||||||
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
|
|
||||||
|
|
||||||
# Handle special case: sample packing is enabled but eval_sample_packing is False
|
|
||||||
if self.args.sample_packing and self.args.eval_sample_packing is False:
|
|
||||||
self.data_collator = ( # pylint: disable=attribute-defined-outside-init
|
|
||||||
self.eval_data_collator
|
|
||||||
)
|
|
||||||
if "length" in eval_dataset.column_names:
|
|
||||||
eval_dataset = eval_dataset.remove_columns(["length"])
|
|
||||||
dataloader = super().get_eval_dataloader(eval_dataset)
|
|
||||||
self.data_collator = ( # pylint: disable=attribute-defined-outside-init
|
|
||||||
self.train_data_collator
|
|
||||||
)
|
|
||||||
|
|
||||||
return dataloader
|
|
||||||
|
|
||||||
if self.args.sample_packing and self.args.eval_sample_packing is not False:
|
|
||||||
# Get appropriate data collator
|
|
||||||
self.data_collator = ( # pylint: disable=attribute-defined-outside-init
|
|
||||||
self.eval_data_collator
|
|
||||||
if hasattr(self, "eval_data_collator") and self.eval_data_collator
|
|
||||||
else self.data_collator
|
|
||||||
)
|
|
||||||
if "length" in eval_dataset.column_names:
|
|
||||||
eval_dataset = eval_dataset.remove_columns(["length"])
|
|
||||||
|
|
||||||
# Use eval_batch_size for sample packing, per_device_eval_batch_size otherwise
|
|
||||||
batch_size = (
|
|
||||||
self.args.eval_batch_size
|
|
||||||
if self.args.sample_packing
|
|
||||||
else self.args.per_device_eval_batch_size
|
|
||||||
)
|
|
||||||
sampler = self._get_eval_sampler(eval_dataset)
|
|
||||||
dataloader = self._prepare_dataloader(
|
|
||||||
eval_dataset, sampler, is_eval=True, custom_batch_size=batch_size
|
|
||||||
)
|
|
||||||
|
|
||||||
return dataloader
|
|
||||||
|
|
||||||
return super().get_eval_dataloader(eval_dataset)
|
|
||||||
|
|
||||||
def _get_bench_sampler(
|
def _get_bench_sampler(
|
||||||
self, bench_dataset: Dataset
|
self, bench_dataset: Dataset
|
||||||
|
|||||||
@@ -22,10 +22,19 @@ class DPOStrategy:
|
|||||||
training_args_kwargs = {}
|
training_args_kwargs = {}
|
||||||
if cfg.rl is RLType.IPO:
|
if cfg.rl is RLType.IPO:
|
||||||
training_args_kwargs["loss_type"] = "ipo"
|
training_args_kwargs["loss_type"] = "ipo"
|
||||||
training_args_kwargs["max_length"] = cfg.sequence_len
|
# Label smoothing is not compatible with IPO
|
||||||
|
if cfg.rl is RLType.DPO and cfg.dpo_label_smoothing:
|
||||||
|
training_args_kwargs["label_smoothing"] = cfg.dpo_label_smoothing
|
||||||
training_args_kwargs["max_completion_length"] = None
|
training_args_kwargs["max_completion_length"] = None
|
||||||
|
training_args_kwargs["max_length"] = cfg.sequence_len
|
||||||
training_args_kwargs["max_prompt_length"] = cfg.sequence_len
|
training_args_kwargs["max_prompt_length"] = cfg.sequence_len
|
||||||
training_args_kwargs["generate_during_eval"] = cfg.use_wandb
|
training_args_kwargs["generate_during_eval"] = cfg.use_wandb
|
||||||
if cfg.dpo_use_weighting is not None:
|
if cfg.dpo_use_weighting is not None:
|
||||||
training_args_kwargs["use_weighting"] = cfg.dpo_use_weighting
|
training_args_kwargs["use_weighting"] = cfg.dpo_use_weighting
|
||||||
|
if cfg.dpo_padding_free is not None:
|
||||||
|
training_args_kwargs["padding_free"] = cfg.dpo_padding_free
|
||||||
|
if cfg.dpo_norm_loss is not None:
|
||||||
|
training_args_kwargs["dpo_norm_loss"] = cfg.dpo_norm_loss
|
||||||
|
if cfg.dpo_use_logits_to_keep is not None:
|
||||||
|
training_args_kwargs["use_logits_to_keep"] = cfg.dpo_use_logits_to_keep
|
||||||
return training_args_kwargs
|
return training_args_kwargs
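A quick sketch of what this returns for an IPO run versus a DPO run with label smoothing; plain strings stand in for the RLType enum, the config object is a stand-in, and only a subset of the fields set above is modelled:

```python
from types import SimpleNamespace

def dpo_args(cfg):
    out = {}
    if cfg.rl == "ipo":
        out["loss_type"] = "ipo"
    # label smoothing is only forwarded for plain DPO, not IPO
    if cfg.rl == "dpo" and cfg.dpo_label_smoothing:
        out["label_smoothing"] = cfg.dpo_label_smoothing
    out.update(
        max_completion_length=None,
        max_length=cfg.sequence_len,
        max_prompt_length=cfg.sequence_len,
    )
    return out

print(dpo_args(SimpleNamespace(rl="ipo", dpo_label_smoothing=0.1, sequence_len=2048)))
# {'loss_type': 'ipo', 'max_completion_length': None, 'max_length': 2048, 'max_prompt_length': 2048}
print(dpo_args(SimpleNamespace(rl="dpo", dpo_label_smoothing=0.1, sequence_len=2048)))
# {'label_smoothing': 0.1, 'max_completion_length': None, 'max_length': 2048, 'max_prompt_length': 2048}
```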
|
||||||
|
|||||||
@@ -14,3 +14,5 @@ class AxolotlDPOConfig(AxolotlTrainingMixins, DPOConfig):
     """
     DPO config for DPO training
     """
+
+    dpo_norm_loss: bool | None = False
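Note: the strategy and config hunks above only forward the DPO-specific options that are explicitly set on the axolotl config. A minimal sketch of that mapping, using a plain dict in place of the real config object (the helper name build_dpo_kwargs and the sample values are illustrative, not from this changeset):

def build_dpo_kwargs(cfg: dict) -> dict:
    # Mirrors the strategy logic above: only forward options that are explicitly set.
    kwargs = {"max_length": cfg["sequence_len"], "max_prompt_length": cfg["sequence_len"]}
    if cfg.get("dpo_label_smoothing"):
        kwargs["label_smoothing"] = cfg["dpo_label_smoothing"]
    for key in ("dpo_use_weighting", "dpo_padding_free", "dpo_use_logits_to_keep", "dpo_norm_loss"):
        if cfg.get(key) is not None:
            # dpo_norm_loss keeps its prefix; the others map to the un-prefixed TRL kwarg names
            target = key if key == "dpo_norm_loss" else key.removeprefix("dpo_")
            kwargs[target] = cfg[key]
    return kwargs

print(build_dpo_kwargs({"sequence_len": 2048, "dpo_norm_loss": True, "dpo_padding_free": True}))
# {'max_length': 2048, 'max_prompt_length': 2048, 'dpo_norm_loss': True, 'padding_free': True}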
@@ -5,65 +5,31 @@ from functools import wraps
 from typing import Any, Dict, Union

 import torch
-from peft.optimizers import create_loraplus_optimizer
 from torch import nn
-from transformers import Trainer
-from transformers.utils import is_sagemaker_mp_enabled
 from trl import DPOTrainer

 from axolotl.core.trainers.mixins import RngLoaderMixin, SchedulerMixin
+from axolotl.core.trainers.mixins.optimizer import OptimizerInitMixin, OptimizerMixin
 from axolotl.core.trainers.utils import (
     sanitize_kwargs_for_ds_tagging,
     sanitize_kwargs_for_tagging,
 )

-if is_sagemaker_mp_enabled():
-    import smdistributed.modelparallel.torch as smp
-

-class AxolotlDPOTrainer(RngLoaderMixin, SchedulerMixin, DPOTrainer):
+class AxolotlDPOTrainer(
+    RngLoaderMixin, SchedulerMixin, OptimizerMixin, OptimizerInitMixin, DPOTrainer
+):
     """Extend the base DPOTrainer for axolotl helpers."""

     tag_names = ["axolotl", "dpo"]

     def __init__(self, *args, dataset_tags=None, **kwargs):
         super().__init__(*args, **kwargs)

         self.dataset_tags = dataset_tags
         self.optimizer = None
         self.model_accepts_loss_kwargs = False

-    def create_optimizer(self):
-        # pylint: disable=duplicate-code
-        if self.args.loraplus_lr_ratio is None:
-            return super().create_optimizer()
-
-        opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
-        if self.optimizer is None:  # pylint: disable=access-member-before-definition
-            optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(
-                self.args,
-                opt_model,
-            )
-
-            loraplus_lr_ratio = getattr(self.args, "loraplus_lr_ratio", None)
-            if loraplus_lr_ratio:
-                print("Using lora+")
-            loraplus_lr_embedding = getattr(self.args, "loraplus_lr_embedding", None)
-            # pylint: disable=duplicate-code
-            self.optimizer = create_loraplus_optimizer(  # pylint: disable=attribute-defined-outside-init
-                opt_model,
-                optimizer_cls,
-                loraplus_lr_ratio=loraplus_lr_ratio,
-                loraplus_lr_embedding=loraplus_lr_embedding,
-                **optimizer_kwargs,
-            )
-
-        if is_sagemaker_mp_enabled():
-            self.optimizer = smp.DistributedOptimizer(  # pylint: disable=attribute-defined-outside-init
-                self.optimizer
-            )
-
-        return self.optimizer
-
     @wraps(DPOTrainer.push_to_hub)
     def push_to_hub(self, *args, **kwargs) -> str:
         """
@@ -117,3 +83,20 @@ class AxolotlDPOTrainer(RngLoaderMixin, SchedulerMixin, DPOTrainer):
         gc.collect()
         torch.cuda.empty_cache()
         return loss
+
+    def concatenated_forward(
+        self,
+        model: nn.Module,
+        batch: dict[str, Union[list, torch.LongTensor]],
+        is_ref_model: bool = False,
+    ) -> dict[str, torch.Tensor]:
+        if self.args.dpo_norm_loss:
+            # fmt: off
+            loss_type: str = self.loss_type  # type: ignore[has-type] # pylint: disable=access-member-before-definition
+            # fmt: on
+            # concatenated_forward handles avg token logprob for ipo case already
+            self.loss_type = "ipo"  # pylint: disable=attribute-defined-outside-init
+            res = super().concatenated_forward(model, batch, is_ref_model=is_ref_model)
+            self.loss_type = loss_type  # pylint: disable=attribute-defined-outside-init
+            return res
+        return super().concatenated_forward(model, batch, is_ref_model=is_ref_model)
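Note: the dpo_norm_loss path above relies on the fact that TRL's concatenated_forward averages per-token log-probabilities when loss_type is "ipo" (as the inline comment in the hunk says), so the override simply swaps loss_type around the parent call. A rough, self-contained sketch of what length normalisation changes, with illustrative tensor values:

import torch

# per-token log-probs for two completions of different lengths, with padding masked out
token_logps = torch.tensor([[-0.5, -1.0, -0.2, 0.0], [-0.3, -0.1, 0.0, 0.0]])
mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]], dtype=torch.bool)

summed = (token_logps * mask).sum(-1)       # default: sum of token log-probs (length-sensitive)
normalized = summed / mask.sum(-1)          # "norm loss": per-token average (length-invariant)
print(summed, normalized)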
@@ -2,7 +2,6 @@

 import importlib
 import inspect
-import logging
 from typing import Any

 from trl.trainer.grpo_trainer import RewardFunc
@@ -13,9 +12,10 @@ from axolotl.core.trainers.grpo.trainer import (
     AxolotlGRPOTrainer,
 )
 from axolotl.utils.dict import DictDefault
+from axolotl.utils.logging import get_logger
 from axolotl.utils.schemas.trl import TRLConfig

-LOG = logging.getLogger(__name__)
+LOG = get_logger(__name__)


 class GRPOStrategy:
@@ -69,6 +69,9 @@ class GRPOStrategy:
         grpo_args_kwargs["log_completions"] = trl.log_completions
         grpo_args_kwargs["num_completions_to_print"] = trl.num_completions_to_print

+        if cfg.sequence_parallel_degree > 1:
+            grpo_args_kwargs["sequence_parallel_degree"] = cfg.sequence_parallel_degree
+
         if trl.reward_weights:
             grpo_args_kwargs["reward_weights"] = trl.reward_weights

@@ -106,7 +109,9 @@ class GRPOStrategy:
         return grpo_args_kwargs

     @classmethod
-    def set_trainer_args(cls, cfg: DictDefault) -> list[Any]:
+    def set_trainer_args(
+        cls, cfg: DictDefault
+    ) -> list[Any]:  # pylint: disable=unused-argument
         trainer_args = []
         if cfg.trl and cfg.trl.reward_funcs:
             reward_funcs = []
@@ -123,6 +128,7 @@ class GRPOStrategy:
             trainer_kwargs["reward_processing_classes"] = (
                 cfg.trl.reward_processing_classes
             )
+
         return trainer_kwargs

     @classmethod
@@ -132,7 +138,7 @@ class GRPOStrategy:

     @classmethod
     def get_blocklist_args_kwargs(cls) -> list[str]:
-        return ["dataset_num_proc"]
+        return ["dataset_num_proc", "max_length"]

     @classmethod
     def get_reward_func(cls, reward_func_fqn: str) -> RewardFunc:
@@ -167,4 +173,4 @@ class GRPOStrategy:
         LOG.info(
             f"Reward function {reward_func_fqn} is a pre-trained model path - if this is unexpected, please check the reward function path."
         )
-        return reward_func
+        return reward_func_fqn
@@ -12,3 +12,5 @@ from axolotl.core.training_args import AxolotlTrainingMixins
 @dataclass
 class AxolotlGRPOConfig(AxolotlTrainingMixins, GRPOConfig):
     """Axolotl GRPO Config for GRPO training"""
+
+    sequence_parallel_degree: int | None = None
@@ -3,6 +3,7 @@
 # pylint: disable=too-many-lines,duplicate-code,protected-access,no-member

 import warnings
+from functools import partial
 from typing import Any

 import datasets
@@ -43,6 +44,7 @@ from trl.trainer.utils import pad

 from axolotl.core.trainers.grpo.sampler import SequenceParallelRepeatRandomSampler
 from axolotl.core.trainers.mixins import RngLoaderMixin, SchedulerMixin
+from axolotl.core.trainers.mixins.optimizer import OptimizerInitMixin, OptimizerMixin
 from axolotl.monkeypatch.ring_attn import get_ring_attn_group

 if is_peft_available():
@@ -50,11 +52,49 @@ if is_peft_available():
     from peft import PeftConfig


-class AxolotlGRPOTrainer(RngLoaderMixin, SchedulerMixin, GRPOTrainer):
+class AxolotlGRPOTrainer(
+    RngLoaderMixin, SchedulerMixin, OptimizerMixin, OptimizerInitMixin, GRPOTrainer
+):
     """Extend the base GRPOTrainer for axolotl helpers"""

     _tag_names = ["trl", "grpo", "axolotl"]

+    def get_train_dataloader(self):
+        if self.train_dataset is None:
+            raise ValueError("Trainer: training requires a train_dataset.")
+
+        train_dataset = self.train_dataset
+        data_collator = self.data_collator
+        if isinstance(train_dataset, datasets.Dataset):
+            train_dataset = self._remove_unused_columns(
+                train_dataset, description="training"
+            )
+        else:
+            data_collator = self._get_collator_with_removed_columns(
+                data_collator, description="training"
+            )
+
+        dataloader_params = {
+            "batch_size": self._train_batch_size
+            * self.args.steps_per_generation,  # < this is the change
+            "collate_fn": data_collator,
+            "num_workers": self.args.dataloader_num_workers,
+            "pin_memory": self.args.dataloader_pin_memory,
+            "persistent_workers": self.args.dataloader_persistent_workers,
+        }
+
+        if not isinstance(train_dataset, torch.utils.data.IterableDataset):
+            dataloader_params["sampler"] = self._get_train_sampler()
+            dataloader_params["drop_last"] = self.args.dataloader_drop_last
+            dataloader_params["worker_init_fn"] = partial(
+                seed_worker,
+                num_workers=self.args.dataloader_num_workers,
+                rank=self.args.process_index,
+            )
+            dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor
+
+        return self.accelerator.prepare(DataLoader(train_dataset, **dataloader_params))
+
+
 class AxolotlGRPOSequenceParallelTrainer(AxolotlGRPOTrainer):
     """Extend the base GRPOTrainer for sequence parallelism handling"""
@@ -77,6 +117,7 @@ class AxolotlGRPOSequenceParallelTrainer(AxolotlGRPOTrainer):
             torch.optim.Optimizer | None, torch.optim.lr_scheduler.LambdaLR | None
         ] = (None, None),
         peft_config: "PeftConfig | None" = None,
+        optimizer_cls_and_kwargs: tuple[type, dict] | None = None,
     ):
         # First call the superclass constructor with all arguments
         super().__init__(
@@ -90,6 +131,7 @@ class AxolotlGRPOSequenceParallelTrainer(AxolotlGRPOTrainer):
             callbacks=callbacks,
             optimizers=optimizers,
             peft_config=peft_config,
+            optimizer_cls_and_kwargs=optimizer_cls_and_kwargs,
         )

         # Get number of SP groups (number of processes divided by SP degree)
@@ -131,6 +173,13 @@ class AxolotlGRPOSequenceParallelTrainer(AxolotlGRPOTrainer):
                 f"the valid values for the number of generations are: {possible_values}."
             )

+        self.sp_group = None
+        self.rank = dist.get_rank()
+        self.world_size = dist.get_world_size()
+        self.local_rank = 0
+        self.local_world_size = 1
+
+    def train(self, *args, **kwargs):
         # Initialize the SP group
         self.sp_group = get_ring_attn_group()
         self.rank = dist.get_rank()
@@ -138,6 +187,8 @@ class AxolotlGRPOSequenceParallelTrainer(AxolotlGRPOTrainer):
         self.local_rank = dist.get_rank(group=self.sp_group)
         self.local_world_size = dist.get_world_size(group=self.sp_group)

+        return super().train(*args, **kwargs)
+
     def _get_train_sampler(self) -> Sampler:
         effective_batch_size = (
             self.args.per_device_train_batch_size
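Note: the overridden get_train_dataloader above appears to differ from the stock TRL dataloader only in the batch_size entry, so each dataloader fetch serves a whole generation round of optimisation steps. A quick sanity check of that arithmetic (the numbers are illustrative, not from this changeset):

per_device_train_batch_size = 4
steps_per_generation = 8  # optimisation steps served by one generation round

rows_per_dataloader_fetch = per_device_train_batch_size * steps_per_generation
print(rows_per_dataloader_fetch)  # 32 rows handed to the trainer per fetch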
@@ -3,6 +3,7 @@
 # pylint: disable=unused-import
 # flake8: noqa

+from .checkpoints import CheckpointSaveMixin
 from .optimizer import OptimizerMixin
 from .rng_state_loader import RngLoaderMixin
 from .scheduler import SchedulerMixin
src/axolotl/core/trainers/mixins/checkpoints.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+"""Custom handling to not fail training if fsdp optimizer is not savable"""
+
+from transformers import Trainer
+
+from axolotl.utils.logging import get_logger
+
+LOG = get_logger(__name__)
+
+
+class CheckpointSaveMixin(Trainer):
+    """Mixin to handle saving the optimizer and scheduler if they are not savable."""
+
+    def _save_optimizer_and_scheduler(self, output_dir):
+        try:
+            super()._save_optimizer_and_scheduler(output_dir)
+        except NotImplementedError as exc:
+            LOG.warning(
+                f"Trainer does not support saving optimizer and scheduler: {exc}\n"
+                "Optimizer and scheduler states were not saved - resuming from checkpoints "
+                "for this training run will not be possible."
+            )
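Note: CheckpointSaveMixin only wraps the parent's _save_optimizer_and_scheduler and downgrades a NotImplementedError to a warning. A self-contained sketch of the same cooperative-super pattern, using made-up class names (BaseSaver / SafeSaveMixin / DemoTrainer are illustrative, not from this changeset):

class BaseSaver:
    def _save_optimizer_and_scheduler(self, output_dir):
        raise NotImplementedError("optimizer state not savable under this wrapper")

class SafeSaveMixin:
    # same shape as CheckpointSaveMixin: try the parent, warn instead of crashing
    def _save_optimizer_and_scheduler(self, output_dir):
        try:
            super()._save_optimizer_and_scheduler(output_dir)
        except NotImplementedError as exc:
            print(f"warning: skipping optimizer/scheduler save: {exc}")

class DemoTrainer(SafeSaveMixin, BaseSaver):
    pass

DemoTrainer()._save_optimizer_and_scheduler("/tmp/ckpt")  # warns instead of raising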
@@ -1,18 +1,17 @@
 """Module for Axolotl trainer optimizer mixin"""

-import logging
-
 from peft.optimizers import create_loraplus_optimizer
 from torch import nn
 from transformers.trainer import Trainer
 from transformers.utils import is_sagemaker_mp_enabled

 from axolotl.integrations.base import BaseOptimizerFactory
+from axolotl.utils.logging import get_logger

 if is_sagemaker_mp_enabled():
     import smdistributed.modelparallel.torch as smp

-LOG = logging.getLogger(__name__)
+LOG = get_logger(__name__)


 class OptimizerMixin(Trainer):
@@ -199,3 +198,20 @@ class OptimizerMixin(Trainer):
             )

         return self.optimizer
+
+
+class OptimizerInitMixin:
+    """
+    Mixin to handle common optimizer initialization logic for Trainers (mostly TRL) that do not
+    accept optimizer_cls_and_kwargs as kwarg in constructor.
+    """
+
+    def __init__(self, *args, **kwargs):
+        optimizer_cls_and_kwargs = kwargs.pop("optimizer_cls_and_kwargs", None)
+        super().__init__(*args, **kwargs)
+        if (
+            optimizer_cls_and_kwargs
+            and self.optimizer_cls_and_kwargs is None
+            and self.optimizer is None
+        ):
+            self.optimizer_cls_and_kwargs = optimizer_cls_and_kwargs
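Note: OptimizerInitMixin pops optimizer_cls_and_kwargs out of **kwargs before a constructor that has no such parameter runs, then stashes it on the instance. A self-contained sketch of the pattern with stand-in classes (LegacyTrainer / InitMixin / DemoTrainer are illustrative names, not the real TRL classes):

class LegacyTrainer:
    # stands in for a trainer whose __init__ has no optimizer_cls_and_kwargs parameter
    def __init__(self, **kwargs):
        self.optimizer = None
        self.optimizer_cls_and_kwargs = None
        self.extra = kwargs

class InitMixin:
    def __init__(self, *args, **kwargs):
        optimizer_cls_and_kwargs = kwargs.pop("optimizer_cls_and_kwargs", None)
        super().__init__(*args, **kwargs)
        if optimizer_cls_and_kwargs and self.optimizer_cls_and_kwargs is None and self.optimizer is None:
            self.optimizer_cls_and_kwargs = optimizer_cls_and_kwargs

class DemoTrainer(InitMixin, LegacyTrainer):
    pass

trainer = DemoTrainer(optimizer_cls_and_kwargs=(object, {"lr": 1e-4}))
print(trainer.optimizer_cls_and_kwargs)  # the popped value survives on the instance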
@@ -6,7 +6,6 @@ See https://github.com/huggingface/transformers/pull/37162
 TODO: Remove when upstream added PR to release
 """

-import logging
 import os
 import random

@@ -17,7 +16,9 @@ from transformers.trainer import safe_globals
 from transformers.trainer_pt_utils import set_rng_state_for_device
 from transformers.training_args import ParallelMode

-LOG = logging.getLogger(__name__)
+from axolotl.utils.logging import get_logger
+
+LOG = get_logger(__name__)


 class RngLoaderMixin(Trainer):
@@ -1,12 +1,11 @@
 """Module for Axolotl trainer scheduler mixin"""

-import logging
-
 import torch
 from torch.optim.lr_scheduler import LRScheduler, OneCycleLR
 from transformers.trainer import Trainer

 from axolotl.integrations.base import PluginManager
+from axolotl.utils.logging import get_logger
 from axolotl.utils.schedulers import (
     RexLR,
     get_cosine_schedule_with_min_lr,
@@ -14,7 +13,7 @@ from axolotl.utils.schedulers import (
     get_cosine_schedule_with_warmup_decay_constant,
 )

-LOG = logging.getLogger(__name__)
+LOG = get_logger(__name__)


 class SchedulerMixin(Trainer):
@@ -80,13 +79,15 @@ class SchedulerMixin(Trainer):
                 self.lr_scheduler = RexLR(
                     optimizer=optimizer,
                     max_lr=self.args.learning_rate,
-                    min_lr=0 if not use_cosine_min_lr else (self.args.learning_rate * self.args.cosine_min_lr_ratio),
+                    min_lr=0 if not use_cosine_min_lr else (
+                        self.args.learning_rate * self.args.cosine_min_lr_ratio),
                     total_steps=num_training_steps,
                     num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
                 )
             elif use_cosine_quadratic:
                 if use_cosine_min_lr:
-                    LOG.warning("Both cosine quadratic warmup and min lr detected. Using quadratic warmup.")
+                    LOG.warning(
+                        "Both cosine quadratic warmup and min lr detected. Using quadratic warmup.")

                 self.lr_scheduler = get_cosine_schedule_with_quadratic_warmup(  # pylint: disable=attribute-defined-outside-init
                     optimizer,
@@ -115,9 +116,11 @@ class SchedulerMixin(Trainer):
             return super().create_scheduler(num_training_steps, optimizer=optimizer)
         else:
             if use_cosine_quadratic:
-                LOG.warning("axolotl's cosine scheduler with quadratic warmup not used (e.g., because of deepspeed).")
+                LOG.warning(
+                    "axolotl's cosine scheduler with quadratic warmup not used (e.g., because of deepspeed).")

             if use_cosine_min_lr:
-                LOG.warning("axolotl's cosine scheduler with min lr not used (e.g., because of deepspeed).")
+                LOG.warning(
+                    "axolotl's cosine scheduler with min lr not used (e.g., because of deepspeed).")

         return self.lr_scheduler  # type: ignore
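Note: the RexLR reflow above only changes line wrapping; if that reading is right, the scheduler floor works out to min_lr = learning_rate * cosine_min_lr_ratio when the ratio is set, and 0 otherwise. A one-line check with illustrative numbers:

learning_rate, cosine_min_lr_ratio = 2e-4, 0.1
min_lr = 0 if cosine_min_lr_ratio is None else learning_rate * cosine_min_lr_ratio
print(min_lr)  # 2e-05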
@@ -1,7 +1,5 @@
 """Module for TRL PPO trainer"""

-from typing import Literal, Union
-
 import torch
 from tqdm import tqdm
 from trl import (
@@ -14,6 +12,7 @@ from trl import (
 )

 from axolotl.core.trainers.mixins import RngLoaderMixin
+from axolotl.core.trainers.mixins.optimizer import OptimizerInitMixin, OptimizerMixin
 from axolotl.core.trainers.mixins.scheduler import SchedulerMixin


@@ -75,87 +74,19 @@ class TRLPPOTrainer(PPOTrainer):
     )


-class AxolotlORPOTrainer(RngLoaderMixin, SchedulerMixin, ORPOTrainer):
+class AxolotlORPOTrainer(
+    RngLoaderMixin, SchedulerMixin, OptimizerMixin, OptimizerInitMixin, ORPOTrainer
+):
     """
     Extend the base ORPOTrainer for axolotl helpers
     """

     tag_names = ["axolotl", "orpo"]

-    def get_batch_loss_metrics(self, model, batch: dict[str, Union[list, torch.LongTensor]], train_eval: Literal["train", "eval"] = "train"):
-        """Compute the ORPO loss and other metrics for the given batch of inputs for train or test."""
-
-        # TODO remove once https://github.com/huggingface/trl/pull/3069 is included in a trl release
-        metrics = {}
-        forward_output = self.concatenated_forward(model, batch)
-        (policy_chosen_logps, policy_rejected_logps, policy_chosen_logits, policy_rejected_logits, policy_nll_loss) = forward_output[:5]
-        if self.aux_loss_enabled:
-            aux_loss = forward_output[5]
-
-        losses, chosen_rewards, rejected_rewards, log_odds_ratio, log_odds_chosen = self.odds_ratio_loss(policy_chosen_logps, policy_rejected_logps)
-        # full ORPO loss
-        loss = policy_nll_loss - losses.mean()
-
-        reward_accuracies = (chosen_rewards > rejected_rewards).float()
-
-        prefix = "eval_" if train_eval == "eval" else ""
-        metrics[f"{prefix}rewards/chosen"] = self.accelerator.gather_for_metrics(chosen_rewards).mean()
-        metrics[f"{prefix}rewards/rejected"] = self.accelerator.gather_for_metrics(rejected_rewards).mean()
-        metrics[f"{prefix}rewards/accuracies"] = self.accelerator.gather_for_metrics(reward_accuracies).mean()
-        metrics[f"{prefix}rewards/margins"] = self.accelerator.gather_for_metrics(chosen_rewards - rejected_rewards).mean()
-        metrics[f"{prefix}logps/rejected"] = self.accelerator.gather_for_metrics(policy_rejected_logps).detach().mean()
-        metrics[f"{prefix}logps/chosen"] = self.accelerator.gather_for_metrics(policy_chosen_logps).detach().mean()
-        metrics[f"{prefix}logits/rejected"] = self.accelerator.gather_for_metrics(policy_rejected_logits.detach().mean()).mean()
-        metrics[f"{prefix}logits/chosen"] = self.accelerator.gather_for_metrics(policy_chosen_logits.detach().mean()).mean()
-        metrics[f"{prefix}nll_loss"] = self.accelerator.gather_for_metrics(policy_nll_loss).detach().mean()
-        metrics[f"{prefix}log_odds_ratio"] = self.accelerator.gather_for_metrics(log_odds_ratio).detach().mean()
-        metrics[f"{prefix}log_odds_chosen"] = self.accelerator.gather_for_metrics(log_odds_chosen).detach().mean()
-        for k, v in metrics.items():
-            metrics[k] = v.item()
-        if self.aux_loss_enabled:
-            loss += self.aux_loss_coef * aux_loss
-
-        return loss, metrics
-
-
-class AxolotlKTOTrainer(RngLoaderMixin, SchedulerMixin, KTOTrainer):
+
+class AxolotlKTOTrainer(
+    RngLoaderMixin, SchedulerMixin, OptimizerMixin, OptimizerInitMixin, KTOTrainer
+):
     """
     Extend the base KTOTrainer for axolotl helpers
     """
@@ -163,89 +94,19 @@ class AxolotlKTOTrainer(RngLoaderMixin, SchedulerMixin, KTOTrainer):
     tag_names = ["axolotl", "kto"]


-class AxolotlCPOTrainer(RngLoaderMixin, SchedulerMixin, CPOTrainer):
+class AxolotlCPOTrainer(
+    RngLoaderMixin, SchedulerMixin, OptimizerMixin, OptimizerInitMixin, CPOTrainer
+):
     """
     Extend the base CPOTrainer for axolotl helpers
     """

     tag_names = ["axolotl", "cpo"]

-    def get_batch_loss_metrics(self, model, batch: dict[str, Union[list, torch.LongTensor]], train_eval: Literal["train", "eval"] = "train"):
-        """Compute the CPO loss and other metrics for the given batch of inputs for train or test."""
-        metrics = {}
-
-        forward_output = self.concatenated_forward(model, batch)
-        (policy_chosen_logps, policy_rejected_logps, policy_chosen_logits, policy_rejected_logits, policy_nll_loss) = forward_output[:5]
-        if self.aux_loss_enabled:
-            aux_loss = forward_output[5]
-
-        losses, chosen_rewards, rejected_rewards = self.cpo_loss(policy_chosen_logps, policy_rejected_logps)
-
-        loss = losses.mean() + self.cpo_alpha * policy_nll_loss
-        reward_accuracies = (chosen_rewards > rejected_rewards).float()
-
-        prefix = "eval_" if train_eval == "eval" else ""
-        metrics[f"{prefix}rewards/chosen"] = self.accelerator.gather_for_metrics(chosen_rewards).mean().item()
-        metrics[f"{prefix}rewards/rejected"] = self.accelerator.gather_for_metrics(rejected_rewards).mean().item()
-        metrics[f"{prefix}rewards/accuracies"] = self.accelerator.gather_for_metrics(reward_accuracies).mean().item()
-        metrics[f"{prefix}rewards/margins"] = self.accelerator.gather_for_metrics(chosen_rewards - rejected_rewards).mean().item()
-        metrics[f"{prefix}logps/rejected"] = self.accelerator.gather_for_metrics(policy_rejected_logps).detach().mean().item()
-        metrics[f"{prefix}logps/chosen"] = self.accelerator.gather_for_metrics(policy_chosen_logps).detach().mean().item()
-        metrics[f"{prefix}logits/rejected"] = self.accelerator.gather_for_metrics(policy_rejected_logits.detach().mean()).mean().item()
-        metrics[f"{prefix}logits/chosen"] = self.accelerator.gather_for_metrics(policy_chosen_logits.detach().mean()).mean().item()
-        metrics[f"{prefix}nll_loss"] = self.accelerator.gather_for_metrics(policy_nll_loss).detach().mean().item()
-
-        if self.aux_loss_enabled:
-            loss += self.aux_loss_coef * aux_loss
-
-        return loss, metrics
-
-
-class AxolotlRewardTrainer(RngLoaderMixin, SchedulerMixin, RewardTrainer):
+
+class AxolotlRewardTrainer(
+    RngLoaderMixin, SchedulerMixin, OptimizerMixin, OptimizerInitMixin, RewardTrainer
+):
     """
     Extend the base RewardTrainer for axolotl helpers
     """
@@ -253,7 +114,9 @@ class AxolotlRewardTrainer(RngLoaderMixin, SchedulerMixin, RewardTrainer):
     tag_names = ["axolotl", "reward"]


-class AxolotlPRMTrainer(RngLoaderMixin, SchedulerMixin, PRMTrainer):
+class AxolotlPRMTrainer(
+    RngLoaderMixin, SchedulerMixin, OptimizerMixin, OptimizerInitMixin, PRMTrainer
+):
     """
     Extend the base trl.PRMTrainer for axolotl helpers
     """
@@ -2,244 +2,17 @@
 extra axolotl specific training args
 """

-from dataclasses import dataclass, field
-from typing import Optional
-
-from PIL.Image import Resampling
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Optional, Type
+
 from transformers import TrainingArguments
 from trl import CPOConfig, KTOConfig, ORPOConfig, PRMConfig, RewardConfig

+from axolotl.integrations.config import merge_training_args
+
+AxolotlTrainingMixins: Type = merge_training_args()

-@dataclass
-class AxolotlTrainingMixins:
-    """
-    Mixin class for the Axolotl training args.
-    """
-
-    # pylint: disable=duplicate-code
-    model_type: Optional[str] = field(default=None, metadata={"help": "HF model configuration model_type."})
-    lr_quadratic_warmup: bool = field(default=False, metadata={"help": "Use quadratic warmup for cosine scheduling."})
-    pretraining: bool = field(default=False, metadata={"help": "Indicates to trainer whether we are doing continued pretraining."})
-    sample_packing: bool = field(default=False, metadata={"help": "Use sample packing for efficient training."})
-    sample_packing_sequentially: bool = field(default=False, metadata={"help": "Use next-fit sample packing that preserves the order of samples coming from the sampler. Use in combination with curriculum_sampling for fully sequential packing."})
-    multipack_real_batches: bool = field(default=False, metadata={"help": "Use real batches for efficient training."})
-    eval_sample_packing: Optional[bool] = field(default=None, metadata={"help": "Use sample packing for efficient evals."})
-    sample_packing_efficiency: float = field(default=1.0, metadata={"help": "Sample packing efficiency for calculating batch length."})
-    sample_packing_bin_size: int = field(default=200, metadata={"help": "The max number of samples that packed sample can contain after packing. Increase for better packing."})
-    sample_packing_group_size: int = field(default=100000, metadata={"help": "The number of samples to group together for packing. Increase for better packing."})
-    max_seq_length: int = field(default=2048, metadata={"help": "The maximum sequence length the model can handle"})
-    relora_steps: Optional[int] = field(default=None, metadata={"help": "how often to reset for ReLoRA"})
-    relora_warmup_steps: Optional[int] = field(default=None, metadata={"help": "how many warmup steps to take after reset for ReLoRA"})
-    relora_anneal_steps: Optional[int] = field(default=None, metadata={"help": "how many warmup steps to take after reset for ReLoRA"})
-    relora_prune_ratio: Optional[float] = field(default=0.9, metadata={"help": "prune ratio for magnitude pruning of the optimizer"})
-    bench_split: Optional[str] = field(default="eval", metadata={"help": "The benchmark split to run on"})
-    bench_dataset: Optional[str] = field(default="pharaouk/dharma-1/dharma_1_mini.json", metadata={"help": "Benchmark dataset to use: options are `mmlu-zs`, `mmlu-fs`, or the full path to the dataset file"})
-    do_bench_eval: Optional[bool] = field(default=False, metadata={"help": "Whether to run the Benchmark evaluation."})
-    do_causal_lm_eval: Optional[bool] = field(default=False, metadata={"help": "Whether to run the Causal LM evaluation."})
-    max_bench_samples: Optional[int] = field(default=None, metadata={"help": "If set, only evaluates on `max_bench_samples` of the benchmark dataset."})
-    bench_source_max_len: int = field(default=2048, metadata={"help": "Maximum source sequence length for bench."})
-    dataloader_prefetch_factor: Optional[int] = field(default=None, metadata={"help": "prefetch_factor argument to the dataloader"})
-    cosine_min_lr_ratio: Optional[float] = field(default=None, metadata={"help": "Minimum learning rate is min_lr_ratio * learning_rate"})
-    cosine_constant_lr_ratio: Optional[float] = field(default=None, metadata={"help": "Starting constant learning rate step is cosine_constant_lr_ratio * max_steps"})
-    loraplus_lr_ratio: Optional[float] = field(default=None, metadata={"help": "loraplus learning rate ratio lr_B / lr_A."})
-    loraplus_lr_embedding: Optional[float] = field(default=1e-6, metadata={"help": "loraplus learning rate for lora embedding layers."})
-    embedding_lr_scale: Optional[float] = field(default=None, metadata={"help": "Scale the learning rate for the embedding layers."})
-    lr_groups: Optional[list[dict]] = field(default=None, metadata={"help": "Specify learning rate groups for with different LRs."})
-    embedding_lr: Optional[float] = field(default=None, metadata={"help": "absolute learning rate for the embedding layers."})
-    qlora: bool = field(default=False, metadata={"help": "whether this is a qlora training"})
-    orpo_alpha: Optional[float] = field(default=None)
-    lisa_n_layers: Optional[int] = field(default=None, metadata={"help": "the number of activate layers in LISA"})
-    lisa_step_interval: Optional[int] = field(default=None, metadata={"help": "how often to switch layers in LISA"})
-    lisa_layers_attribute: Optional[str] = field(default=None, metadata={"help": "path under the model to access the layers"})
-    curriculum_sampling: Optional[bool] = field(default=None, metadata={"help": "whether to use sequential sampling for curriculum learning"})
-    alternate_optimizer: Optional[str] = field(default=None, metadata={"help": "workaround to pass an alternate optimizer to the HF trainer"})
-    alternate_lr_scheduler_type: Optional[str] = field(default=None, metadata={"help": "workaround to pass an alternate lr scheduler to the HF trainer"})
-    chat_template: Optional[str] = field(default=None, metadata={"help": "Chat template converting chat messages to text"})
-
-    kd_ce_alpha: Optional[float] = field(default=None, metadata={"help": "The alpha scaling parameter for SFT cross entropy loss when using KD"})
-    kd_alpha: Optional[float] = field(default=1.0, metadata={"help": "The alpha scaling parameter for KD loss"})
-    kd_temperature: Optional[float] = field(default=1.0, metadata={"help": "the temperature parameter for KL divergence loss when using KD"})
-    kd_zscore_base_temp: Optional[float] = field(default=None, metadata={"help": "the base temperature parameter for KL divergence with z-score when using KD"})
-    kd_top_k_before_softmax: Optional[bool] = field(default=None, metadata={"help": "Whether to apply top_k_before_softmax to the logits when using KD"})
-
-    adam_beta3: Optional[float] = field(default=None, metadata={"help": "The beta3 hyperparameter used in some optimizers such as CAME"})
-    adam_epsilon2: Optional[float] = field(default=None, metadata={"help": "The epsilon2 hyperparameter used in some optimizers such as CAME"})
-
-    # multi-modal section
-    image_size: int | tuple[int, int] | None = field(default=None, metadata={"help": "The size of the image to resize to"})
-    image_resize_algorithm: Resampling | None = field(default=None, metadata={"help": "The algorithm to use for image resizing"})
-    # end of multi-modal section
-
-
 @dataclass
src/axolotl/core/training_args_base.py (new file, 224 lines)
@@ -0,0 +1,224 @@
+"""
+Base Axolotl Training Mixins shared across various trainer configs
+"""
+
+from dataclasses import dataclass, field
+from typing import Optional
+
+from PIL.Image import Resampling
+
+
+@dataclass
+class AxolotlTrainingMixins:
+    """
+    Mixin class for the Axolotl training args.
+    """
+
+    # pylint: disable=duplicate-code
+    model_type: Optional[str] = field(default=None, metadata={"help": "HF model configuration model_type."})
+    lr_quadratic_warmup: bool = field(default=False, metadata={"help": "Use quadratic warmup for cosine scheduling."})
+    pretraining: bool = field(default=False, metadata={"help": "Indicates to trainer whether we are doing continued pretraining."})
+    sample_packing: bool = field(default=False, metadata={"help": "Use sample packing for efficient training."})
+    sample_packing_sequentially: bool = field(default=False, metadata={"help": "Use next-fit sample packing that preserves the order of samples coming from the sampler. Use in combination with curriculum_sampling for fully sequential packing."})
+    multipack_real_batches: bool = field(default=False, metadata={"help": "Use real batches for efficient training."})
+    eval_sample_packing: Optional[bool] = field(default=None, metadata={"help": "Use sample packing for efficient evals."})
+    sample_packing_efficiency: float = field(default=1.0, metadata={"help": "Sample packing efficiency for calculating batch length."})
+    sample_packing_bin_size: int = field(default=200, metadata={"help": "The max number of samples that packed sample can contain after packing. Increase for better packing."})
+    sample_packing_group_size: int = field(default=100000, metadata={"help": "The number of samples to group together for packing. Increase for better packing."})
+    max_seq_length: int = field(default=2048, metadata={"help": "The maximum sequence length the model can handle"})
+    dataset_num_proc: int | None = field(default=None, metadata={"help": "The number of processes to use for data processing"})
+    relora_steps: Optional[int] = field(default=None, metadata={"help": "how often to reset for ReLoRA"})
+    relora_warmup_steps: Optional[int] = field(default=None, metadata={"help": "how many warmup steps to take after reset for ReLoRA"})
+    relora_anneal_steps: Optional[int] = field(default=None, metadata={"help": "how many warmup steps to take after reset for ReLoRA"})
+    relora_prune_ratio: Optional[float] = field(default=0.9, metadata={"help": "prune ratio for magnitude pruning of the optimizer"})
+    bench_split: Optional[str] = field(default="eval", metadata={"help": "The benchmark split to run on"})
+    bench_dataset: Optional[str] = field(default="pharaouk/dharma-1/dharma_1_mini.json", metadata={"help": "Benchmark dataset to use: options are `mmlu-zs`, `mmlu-fs`, or the full path to the dataset file"})
+    do_bench_eval: Optional[bool] = field(default=False, metadata={"help": "Whether to run the Benchmark evaluation."})
+    do_causal_lm_eval: Optional[bool] = field(default=False, metadata={"help": "Whether to run the Causal LM evaluation."})
+    max_bench_samples: Optional[int] = field(default=None, metadata={"help": "If set, only evaluates on `max_bench_samples` of the benchmark dataset."})
+    bench_source_max_len: int = field(default=2048, metadata={"help": "Maximum source sequence length for bench."})
+    dataloader_prefetch_factor: Optional[int] = field(default=None, metadata={"help": "prefetch_factor argument to the dataloader"})
+    cosine_min_lr_ratio: Optional[float] = field(default=None, metadata={"help": "Minimum learning rate is min_lr_ratio * learning_rate"})
+    cosine_constant_lr_ratio: Optional[float] = field(default=None, metadata={"help": "Starting constant learning rate step is cosine_constant_lr_ratio * max_steps"})
+    loraplus_lr_ratio: Optional[float] = field(default=None, metadata={"help": "loraplus learning rate ratio lr_B / lr_A."})
+    loraplus_lr_embedding: Optional[float] = field(default=1e-6, metadata={"help": "loraplus learning rate for lora embedding layers."})
+    embedding_lr_scale: Optional[float] = field(default=None, metadata={"help": "Scale the learning rate for the embedding layers."})
+    lr_groups: Optional[list[dict]] = field(default=None, metadata={"help": "Specify learning rate groups for with different LRs."})
+    embedding_lr: Optional[float] = field(default=None, metadata={"help": "absolute learning rate for the embedding layers."})
+    qlora: bool = field(default=False, metadata={"help": "whether this is a qlora training"})
+    orpo_alpha: Optional[float] = field(default=None)
+    lisa_n_layers: Optional[int] = field(default=None, metadata={"help": "the number of activate layers in LISA"})
+    lisa_step_interval: Optional[int] = field(default=None, metadata={"help": "how often to switch layers in LISA"})
+    lisa_layers_attribute: Optional[str] = field(default=None, metadata={"help": "path under the model to access the layers"})
+    curriculum_sampling: Optional[bool] = field(default=None, metadata={"help": "whether to use sequential sampling for curriculum learning"})
+    alternate_lr_scheduler_type: Optional[str] = field(default=None, metadata={"help": "workaround to pass an alternate lr scheduler to the HF trainer"})
+    chat_template: Optional[str] = field(default=None, metadata={"help": "Chat template converting chat messages to text"})
+
+    # kd_ce_alpha: Optional[float] = field(default=None, metadata={"help": "The alpha scaling parameter for SFT cross entropy loss when using KD"})
+    # kd_alpha: Optional[float] = field(default=1.0, metadata={"help": "The alpha scaling parameter for KD loss"})
+    # kd_temperature: Optional[float] = field(default=1.0, metadata={"help": "the temperature parameter for KL divergence loss when using KD"})
+
+    adam_beta3: Optional[float] = field(default=None, metadata={"help": "The beta3 hyperparameter used in some optimizers such as CAME"})
+    adam_epsilon2: Optional[float] = field(default=None, metadata={"help": "The epsilon2 hyperparameter used in some optimizers such as CAME"})
+
+    # multi-modal section
+    image_size: int | tuple[int, int] | None = field(default=None, metadata={"help": "The size of the image to resize to"})
+    image_resize_algorithm: Resampling | None = field(default=None, metadata={"help": "The algorithm to use for image resizing"})
+    # end of multi-modal section
@@ -1,12 +1,12 @@
 """Module containing Dataset functionality"""

-import logging
 import os
-from typing import List, Optional, Union

 import torch
 from datasets import Dataset, IterableDataset

+from axolotl.utils.logging import get_logger
+
 from .prompt_tokenizers import PromptTokenizingStrategy

 # We want this to be a wrapper for an existing dataset that we have loaded
@@ -15,25 +15,25 @@ from .prompt_tokenizers import PromptTokenizingStrategy
 # let's check to ensure we don't truncate an item in the middle, we'll use
 # the collators later on to pad the datasets

-LOG = logging.getLogger("axolotl")
+LOG = get_logger(__name__)


 class TokenizedPromptDataset(Dataset):
-    """
-    Dataset that returns tokenized prompts from a stream of text files.
+    """Dataset that returns tokenized prompts from a stream of text files.
+
     Args:
-        prompt_tokenizer (PromptTokenizingStrategy): The prompt tokenizing method for processing the data.
-        dataset (dataset.Dataset): Dataset with text files.
-        process_count (int): Number of processes to use for tokenizing.
-        keep_in_memory (bool): Whether to keep the tokenized dataset in memory.
+        prompt_tokenizer: The prompt tokenizing method for processing the data.
+        dataset: Dataset with text files.
+        process_count: Number of processes to use for tokenizing.
+        keep_in_memory: Whether to keep the tokenized dataset in memory.
     """

     def __init__(  # pylint: disable=super-init-not-called
         self,
         prompt_tokenizer: PromptTokenizingStrategy,
         dataset: Dataset,
-        process_count: Optional[int] = None,
-        keep_in_memory: Optional[bool] = False,
+        process_count: int | None = None,
+        keep_in_memory: bool | None = False,
         **kwargs,
     ):
         self.prompt_tokenizer = prompt_tokenizer
@@ -48,6 +48,13 @@ class TokenizedPromptDataset(Dataset):
         features = dataset.features.keys()
         num_proc = min(64, self.process_count if self.process_count else os.cpu_count())

+        # Disable multiprocessing if the tokenizer doesn't support it (e.g., mistral_common)
+        if not getattr(self.prompt_tokenizer, "supports_multiprocessing", True):
+            LOG.info(
+                "Disabling multiprocessing for tokenizer as it doesn't support it (e.g., mistral_common)"
+            )
+            num_proc = 1
+
         map_kwargs = {}
         if self.prompt_tokenizer.supports_batched:
             map_kwargs["batched"] = True
@@ -75,14 +82,14 @@ class TokenizedPromptDataset(Dataset):

 def wrap_dataset_for_tokenized_prompt(
     prompt_tokenizer: PromptTokenizingStrategy,
-    dataset: Union[Dataset, IterableDataset],
+    dataset: Dataset | IterableDataset,
     **kwargs,
 ):
     if isinstance(dataset, IterableDataset):
         map_kwargs = {}
         if prompt_tokenizer.supports_batched:
             map_kwargs["batched"] = True
-        features = dataset.features.keys()
+        features = list(dataset.features.keys())
         return dataset.map(
             prompt_tokenizer.tokenize_prompt,
             remove_columns=features,
@@ -93,12 +100,13 @@ def wrap_dataset_for_tokenized_prompt(

 # TODO this isn't the best since it can't interleave datasets
 class ConstantLengthDataset(IterableDataset):
-    """
-    Iterable dataset that returns constant length chunks of tokens from stream of text files.
-    Args:
-        tokenizer (Tokenizer): The processor used for processing the data.
-        dataset (dataset.Dataset): Dataset with text files.
-        seq_length (int): Length of token sequences to return.
+    """Iterable dataset that returns constant length chunks of tokens from stream of
+    text files.
+
+    Args:
+        tokenizer: The processor used for processing the data.
+        dataset: Dataset with text files.
+        seq_length: Length of token sequences to return.
     """

     def __init__(  # pylint: disable=super-init-not-called
@@ -109,7 +117,7 @@ class ConstantLengthDataset(IterableDataset):
     ):
         self.tokenizer = tokenizer
         self.concat_token_id = tokenizer.eos_token_id
-        self.datasets: List[IterableDataset] = datasets
+        self.datasets: list[IterableDataset] = datasets
         self.seq_length = seq_length

         vocab_size = len(tokenizer.get_vocab())
@@ -173,7 +181,10 @@ class ConstantLengthDataset(IterableDataset):
                     }
                 else:
                     LOG.warning(
-                        f"dropping batch due to tensor size mismatch input_ids: {input_ids.size()}, labels: {labels.size()}, attention_mask: {attention_mask.size()}"
+                        "Dropping batch due to tensor size mismatch "
+                        f"input_ids: {input_ids.size()}, "
+                        f"labels: {labels.size()}, "
+                        f"attention_mask: {attention_mask.size()}"
                     )
                     buffer = {
                         "input_ids": [],
@@ -7,7 +7,6 @@ from pathlib import Path
 from typing import Dict, Optional

 import torch
-from accelerate.logging import get_logger
 from datasets import Dataset
 from transformers.trainer import Trainer

@@ -17,6 +16,7 @@ from axolotl.train import (
 )
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.distributed import cleanup_distributed
+from axolotl.utils.logging import get_logger
 from axolotl.utils.trainer import setup_trainer

 project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
@@ -22,7 +22,7 @@ from __future__ import annotations

 import collections
 import importlib
-import logging
+import traceback
 from typing import TYPE_CHECKING, Callable, OrderedDict, Union

 from peft import PeftModel

@@ -31,6 +31,9 @@ from torch.optim.lr_scheduler import LRScheduler
 from transformers import PreTrainedModel, Trainer

 from axolotl.utils.dict import DictDefault
+from axolotl.utils.logging import get_logger
+
+LOG = get_logger(__name__)

 if TYPE_CHECKING:
     from axolotl.common.datasets import TrainDatasetMeta
@@ -39,31 +42,39 @@ if TYPE_CHECKING:
 class BasePlugin:
     """Base class for all plugins. Defines the interface for plugin methods.

-    Methods:
-        register(cfg): Registers the plugin with the given configuration.
-        load_datasets(cfg): Loads and preprocesses the dataset for training.
-        pre_model_load(cfg): Performs actions before the model is loaded.
-        post_model_build(cfg, model): Performs actions after the model is loaded, but
+    A plugin is a reusable, modular, and self-contained piece of code that extends
+    the functionality of Axolotl. Plugins can be used to integrate third-party models,
+    modify the training process, or add new features.
+
+    To create a new plugin, you need to inherit from the BasePlugin class and
+    implement the required methods.
+
+    Note:
+        Plugin methods include:
+        - register(cfg): Registers the plugin with the given configuration.
+        - load_datasets(cfg): Loads and preprocesses the dataset for training.
+        - pre_model_load(cfg): Performs actions before the model is loaded.
+        - post_model_build(cfg, model): Performs actions after the model is loaded, but
             before LoRA adapters are applied.
-        pre_lora_load(cfg, model): Performs actions before LoRA weights are loaded.
-        post_lora_load(cfg, model): Performs actions after LoRA weights are loaded.
-        post_model_load(cfg, model): Performs actions after the model is loaded,
+        - pre_lora_load(cfg, model): Performs actions before LoRA weights are loaded.
+        - post_lora_load(cfg, model): Performs actions after LoRA weights are loaded.
+        - post_model_load(cfg, model): Performs actions after the model is loaded,
             inclusive of any adapters.
-        post_trainer_create(cfg, trainer): Performs actions after the trainer is
+        - post_trainer_create(cfg, trainer): Performs actions after the trainer is
             created.
-        create_optimizer(cfg, trainer): Creates and returns an optimizer for training.
-        create_lr_scheduler(cfg, trainer, optimizer, num_training_steps): Creates and
+        - create_optimizer(cfg, trainer): Creates and returns an optimizer for training.
+        - create_lr_scheduler(cfg, trainer, optimizer, num_training_steps): Creates and
            returns a learning rate scheduler.
-        add_callbacks_pre_trainer(cfg, model): Adds callbacks to the trainer before
+        - add_callbacks_pre_trainer(cfg, model): Adds callbacks to the trainer before
            training.
-        add_callbacks_post_trainer(cfg, trainer): Adds callbacks to the trainer after
+        - add_callbacks_post_trainer(cfg, trainer): Adds callbacks to the trainer after
            training.
     """

     def __init__(self):
         """Initializes the BasePlugin."""

-    def register(self, cfg):  # pylint: disable=unused-argument
+    def register(self, cfg: DictDefault):  # pylint: disable=unused-argument
         """Registers the plugin with the given configuration.

         Args:
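A minimal sketch of what a plugin built on this interface could look like; the module and class names are invented for illustration and are not part of this diff:

# my_package/plugin.py (hypothetical)
from axolotl.integrations.base import BasePlugin


class MyLoggingPlugin(BasePlugin):
    """Toy plugin that implements a couple of the hooks listed above."""

    def pre_model_load(self, cfg):
        # Runs before the base model is instantiated.
        print(f"about to load base model: {cfg.base_model}")

    def add_callbacks_post_trainer(self, cfg, trainer):
        # Return a list of extra callbacks to attach once the trainer exists.
        return []

Such a plugin would typically be enabled from the YAML config via the `plugins:` list, e.g. `plugins: ["my_package.plugin.MyLoggingPlugin"]`.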
@@ -73,6 +84,11 @@ class BasePlugin:
     def get_input_args(self) -> str | None:
         """Returns a pydantic model for the plugin's input arguments."""

+    def get_training_args_mixin(self) -> str | None:
+        """
+        Returns a dataclass model for the plugin's training arguments.
+        """
+
     def load_datasets(
         self, cfg: DictDefault, preprocess: bool = False
     ) -> Union["TrainDatasetMeta", None]:
@@ -148,6 +164,31 @@ class BasePlugin:
             trainer: The trainer object for training.
         """

+    def get_training_args(self, cfg: DictDefault):  # pylint: disable=unused-argument
+        """
+        Returns custom training arguments to set on TrainingArgs.
+
+        Args:
+            cfg: The global axolotl configuration.
+
+        Returns:
+            object: dict containing the training arguments.
+        """
+
+    def get_collator_cls_and_kwargs(
+        self, cfg: DictDefault, is_eval: bool = False
+    ):  # pylint: disable=unused-argument
+        """
+        Returns a custom class for the collator.
+
+        Args:
+            cfg: The global axolotl configuration.
+            is_eval: Whether this is an eval split.
+
+        Returns:
+            class: The class for the collator.
+        """
+
     # pylint: disable=unused-argument
     def create_optimizer(self, cfg: DictDefault, trainer: Trainer) -> Optimizer | None:
         """Creates and returns an optimizer for training.
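A hedged sketch of how a plugin might fill in these two new hooks; the collator class and the specific argument values are made up for illustration:

from dataclasses import dataclass

from axolotl.integrations.base import BasePlugin


@dataclass
class MyPackedCollator:
    # Hypothetical stand-in collator; a real one would batch and pad features.
    max_length: int = 2048

    def __call__(self, features):
        return features


class MyPackingPlugin(BasePlugin):
    def get_training_args(self, cfg):
        # The returned dict is merged into the TrainingArguments kwargs.
        return {"dataloader_drop_last": True}

    def get_collator_cls_and_kwargs(self, cfg, is_eval=False):
        # The manager uses the first plugin that returns a (class, kwargs) pair.
        return MyPackedCollator, {"max_length": cfg.sequence_len}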
@@ -268,17 +309,18 @@ def load_plugin(plugin_name: str) -> BasePlugin:
     return plugin


-class PluginManager:
+class PluginManager:  # pylint: disable=too-many-public-methods
     """The `PluginManager` class is responsible for loading and managing plugins. It
     should be a singleton so it can be accessed from anywhere in the codebase.

     Attributes:
         plugins: A list of loaded plugins.

-    Methods:
-        get_instance(): Static method to get the singleton instance of `PluginManager`.
-        register(plugin_name: str): Registers a new plugin by its name.
-        pre_model_load(cfg): Calls the pre_model_load method of all registered plugins.
+    Note:
+        Key methods include:
+        - get_instance(): Static method to get the singleton instance of `PluginManager`.
+        - register(plugin_name: str): Registers a new plugin by its name.
+        - pre_model_load(cfg): Calls the pre_model_load method of all registered plugins.
     """

     plugins: OrderedDict[str, BasePlugin] = collections.OrderedDict()
@@ -322,12 +364,15 @@ class PluginManager:
             ImportError: If the plugin module cannot be imported.
         """
         try:
-            logging.info(f"Attempting to load plugin: {plugin_name}")
+            LOG.info(f"Attempting to load plugin: {plugin_name}")
             plugin = load_plugin(plugin_name)
             self.plugins[plugin_name] = plugin
-            logging.info(f"Plugin loaded successfully: {plugin_name}")
-        except ImportError:
-            logging.error(f"Failed to load plugin: {plugin_name}")
+            LOG.info(f"Plugin loaded successfully: {plugin_name}")
+        except ImportError as exc:
+            LOG.error(f"Failed to load plugin: {plugin_name}")
+            # print stacktrace
+            traceback.print_exc()
+            print(f"Error: {exc}")

     def get_input_args(self) -> list[str]:
         """Returns a list of Pydantic classes for all registered plugins' input arguments.'
@@ -342,6 +387,20 @@ class PluginManager:
                 input_args.append(input_args_from_plugin)
         return input_args

+    def get_training_args_mixin(self):
+        """
+        Returns a list of dataclasses for all registered plugins' training args mixins'
+
+        Returns:
+            list[str]: A list of dataclsses
+        """
+        training_args = []
+        for plugin in self.plugins.values():
+            training_args_from_plugin = plugin.get_training_args_mixin()
+            if training_args_from_plugin is not None:
+                training_args.append(training_args_from_plugin)
+        return training_args
+
     def load_datasets(
         self, cfg: DictDefault, preprocess: bool = False
     ) -> Union["TrainDatasetMeta", None]:
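The contract assumed here is that each plugin returns the dotted import path of a dataclass mixin, and the manager simply collects the non-None values. A small sketch under that assumption (module and class names are hypothetical):

from dataclasses import dataclass, field

from axolotl.integrations.base import BasePlugin


@dataclass
class MyPluginTrainingArgsMixin:
    my_plugin_window_size: int = field(
        default=512, metadata={"help": "Example plugin-specific training argument."}
    )


class MyPlugin(BasePlugin):
    def get_training_args_mixin(self) -> str | None:
        return "my_package.args.MyPluginTrainingArgsMixin"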
@@ -431,6 +490,42 @@ class PluginManager:
                 return trainer_cls
         return None

+    def get_training_args(self, cfg):
+        """
+        Calls the get_training_args method of all registered plugins and returns the combined training arguments.
+
+        Parameters:
+            cfg (dict): The configuration for the plugins.
+
+        Returns:
+            object: The training arguments
+        """
+        training_args_kwargs = {}
+        for plugin in self.plugins.values():
+            training_args = plugin.get_training_args(cfg)
+            if training_args is not None:
+                training_args_kwargs.update(training_args)
+
+        return training_args_kwargs
+
+    def get_collator_cls_and_kwargs(self, cfg, is_eval=False):
+        """
+        Calls the get_collator_cls_and_kwargs method of all registered plugins and returns the first non-None collator class.
+
+        Parameters:
+            cfg (dict): The configuration for the plugins.
+            is_eval (bool): Whether this is an eval split.
+
+        Returns:
+            object: The collator class, or None if none was found.
+        """
+        for plugin in self.plugins.values():
+            collator = plugin.get_collator_cls_and_kwargs(cfg, is_eval=is_eval)
+            if collator is not None:
+                collator_cls, collator_kwargs = collator
+                return collator_cls, collator_kwargs
+        return None
+
     def post_trainer_create(self, cfg: DictDefault, trainer: Trainer):
         """Calls the `post_trainer_create` method of all registered plugins.

@@ -534,7 +629,6 @@ class PluginManager:

         Args:
             cfg: The configuration for the plugins.
-            model: The loaded model.
         """
         for plugin in self.plugins.values():
             plugin.post_train_unload(cfg)
@@ -16,7 +16,7 @@ Module to handle merging the plugins' input arguments with the base configuratio
 This was moved here to prevent circular imports.
 """

-from typing import Any, Dict, List
+from typing import Any, Dict, List, Type

 from axolotl.utils.schemas.config import (
     AxolotlConfigWCapabilities as AxolotlConfigWCapabilitiesBase,
@@ -61,3 +61,43 @@ def merge_input_args():
         ]
         return AxolotlConfigWCapabilities, AxolotlInputConfig
     return AxolotlConfigWCapabilitiesBase, AxolotlInputConfigBase
+
+
+def merge_training_args() -> Type:
+    """
+    Merges training arguments from registered plugins with the base TrainingArguments.
+
+    This function retrieves the training arguments from registered plugins using the PluginManager.
+    It then dynamically creates new classes, AxolotlTrainingMixins,
+    that inherit from the base configurations and include the training arguments from the plugins.
+
+    Returns:
+        tuple: A tuple containing the newly created classes, AxolotlTrainingMixins.
+    """
+    # pylint: disable=duplicate-code
+    from axolotl.core.training_args_base import (
+        AxolotlTrainingMixins as AxolotlTrainingMixinsBase,
+    )
+    from axolotl.integrations.base import PluginManager
+
+    plugin_manager = PluginManager.get_instance()
+    training_args_mixins: List[str] = plugin_manager.get_training_args_mixin()
+    mixin_classes = []
+    dynamic_input = ""
+    for plugin_args in training_args_mixins:
+        plugin_module, plugin_cls = plugin_args.rsplit(".", 1)
+        dynamic_input += f"from {plugin_module} import {plugin_cls}\n"
+        mixin_classes.append(plugin_cls)
+    if dynamic_input:
+        dynamic_input += f"class AxolotlTrainingMixins(AxolotlTrainingMixinsBase, {', '.join(mixin_classes)}):\n pass\n"
+
+        namespace: Dict[Any, Any] = {}
+        local_vars = {"AxolotlTrainingMixinsBase": AxolotlTrainingMixinsBase}
+        exec(  # pylint: disable=exec-used  # nosec B102
+            dynamic_input, {**globals(), **local_vars}, namespace
+        )
+        AxolotlTrainingMixins = namespace[  # pylint: disable=invalid-name
+            "AxolotlTrainingMixins"
+        ]
+        return AxolotlTrainingMixins
+    return AxolotlTrainingMixinsBase
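To make the exec-based construction above concrete: for a single registered plugin whose get_training_args_mixin() returns "my_package.args.MyPluginTrainingArgsMixin" (a hypothetical path), the dynamically built source is equivalent to writing this statically:

from axolotl.core.training_args_base import AxolotlTrainingMixins as AxolotlTrainingMixinsBase
from my_package.args import MyPluginTrainingArgsMixin  # hypothetical plugin mixin


class AxolotlTrainingMixins(AxolotlTrainingMixinsBase, MyPluginTrainingArgsMixin):
    pass

With no plugins registered (dynamic_input stays empty), merge_training_args() simply returns the base AxolotlTrainingMixins class unchanged.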
Some files were not shown because too many files have changed in this diff.