Compare commits: sp-rl-v3...fa3-hopper (108 commits)

| SHA1 |
|---|
| 9bdf4b1c23 |
| d6f64a3684 |
| 0735454782 |
| bb6464c4c6 |
| 323a9cb153 |
| b22150751f |
| 8c4bc59bfc |
| a064f1c9b4 |
| fb5ef6d445 |
| 34b68ddaae |
| 9a3d0c919b |
| bd34d0b861 |
| 37220ab90a |
| e1b74d710b |
| 79daf5b934 |
| ddd7c55576 |
| 65c6c98a76 |
| 4ef2e8293f |
| c126d5cd04 |
| 9b0be4f15c |
| a27b909c5c |
| 6cb07b9d12 |
| 288653adb6 |
| 3a5b495a74 |
| f661858fc4 |
| c837c4a424 |
| c9797de6bb |
| 8f8a7afb05 |
| 86472715da |
| c0a0c7534c |
| 7fa1089cea |
| 80304c26a7 |
| 67c4ea9c7c |
| 526ddb886d |
| f34eef546a |
| c7b6790614 |
| 47e0e71bc8 |
| 0f3587174d |
| 25e6c5f9bd |
| 32f51bca35 |
| 9daa04da90 |
| 0d71b0aa5f |
| 63aaccf85b |
| ff0fe767c8 |
| 8e4158cc0b |
| cd84325253 |
| 0b140fef83 |
| e4cfebe995 |
| a6cac5dd32 |
| b71c0e3447 |
| ddaebf8309 |
| 679743087a |
| f720b6e72d |
| a980618fd0 |
| 54960d4de0 |
| ed922796b7 |
| 3dd9c3bf3f |
| 0ba7d362fa |
| e4f73bc98e |
| bcb59c70e2 |
| 6a3e6f8c53 |
| fee3c13bb5 |
| 996fc124e5 |
| e963990ad7 |
| c3f2b1c5c2 |
| 6ba5c0ed2c |
| 24ff5f53f8 |
| 5e949eaa07 |
| 89ca14d9a0 |
| 8446b4ad28 |
| fc79606b6d |
| baeb00231b |
| 2413688b08 |
| 5bb1f3da56 |
| a21b9cc472 |
| 41a1ec0c95 |
| ecac731922 |
| 742fef4200 |
| a39caf8824 |
| 07e4f2e25b |
| c7d07de6b4 |
| 6565ae85d8 |
| 80b4edb4a7 |
| fedbcc0254 |
| 8175896ada |
| 14d670dbf0 |
| 2d77165dc0 |
| 63b17e3109 |
| 1178a15ede |
| c513487d1a |
| dda95e6c40 |
| 7099343c56 |
| 5000cb3fe7 |
| 170cdb5be9 |
| 5d182a1056 |
| 40f4ea23ab |
| f1df73a798 |
| 8b33ae1c4f |
| dc4da4a7e2 |
| f9c7c3bb72 |
| caf5cb63ea |
| 5dba5c82a8 |
| e3c9d541a7 |
| 9eba0ad118 |
| 53dbf97d85 |
| 2c2563bc34 |
| 5cb3398460 |
| ae1c7ace63 |
.github/workflows/base.yml (17 changes)

@@ -22,12 +22,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: "124"
-            cuda_version: 12.4.1
-            cudnn_version: ""
-            python_version: "3.11"
-            pytorch: 2.4.1
-            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
           - cuda: "124"
             cuda_version: 12.4.1
             cudnn_version: ""
@@ -53,11 +47,18 @@ jobs:
             pytorch: 2.7.0
             torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
           - cuda: "128"
-            cuda_version: 12.6.3
+            cuda_version: 12.8.1
             cudnn_version: ""
             python_version: "3.11"
             pytorch: 2.7.0
             torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
+          - cuda: "126"
+            cuda_version: 12.6.3
+            cudnn_version: ""
+            python_version: "3.11"
+            pytorch: 2.6.0
+            suffix: "-hopper"
+            torch_cuda_arch_list: "9.0+PTX"
           - cuda: "128"
             cuda_version: 12.8.1
             cudnn_version: ""
@@ -93,7 +94,7 @@ jobs:
           context: .
           file: ${{ matrix.pytorch == 'nightly' && './docker/Dockerfile-base-nightly' || matrix.pytorch == 'next' && './docker/Dockerfile-base-next' || './docker/Dockerfile-base' }}
           push: ${{ github.event_name != 'pull_request' }}
-          tags: ${{ steps.metadata.outputs.tags }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+          tags: ${{ steps.metadata.outputs.tags }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}${{ matrix.suffix || '' }}
           labels: ${{ steps.metadata.outputs.labels }}
           build-args: |
             CUDA_VERSION=${{ matrix.cuda_version }}
.github/workflows/main.yml (25 changes)

@@ -15,16 +15,11 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
             pytorch: 2.5.1
-            axolotl_extras: vllm
+            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
@@ -35,7 +30,12 @@ jobs:
             cuda_version: 12.6.3
             python_version: "3.11"
             pytorch: 2.7.0
-            axolotl_extras: vllm
+            axolotl_extras:
+          - cuda: 128
+            cuda_version: 12.8.1
+            python_version: "3.11"
+            pytorch: 2.7.0
+            axolotl_extras:
     runs-on: axolotl-gpu-runner
     steps:
       - name: Checkout
@@ -67,6 +67,7 @@ jobs:
             CUDA=${{ matrix.cuda }}
             PYTORCH_VERSION=${{ matrix.pytorch }}
             AXOLOTL_ARGS=${{ matrix.axolotl_args }}
+            AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}
           file: ./docker/Dockerfile
           push: ${{ github.event_name != 'pull_request' }}
           tags: |
@@ -82,11 +83,6 @@ jobs:
     strategy:
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
@@ -103,6 +99,11 @@ jobs:
             python_version: "3.11"
             pytorch: 2.7.0
             axolotl_extras:
+          - cuda: 128
+            cuda_version: 12.8.1
+            python_version: "3.11"
+            pytorch: 2.7.0
+            axolotl_extras:
     runs-on: axolotl-gpu-runner
     steps:
       - name: Checkout
.github/workflows/multi-gpu-e2e.yml (20 changes)

@@ -3,11 +3,13 @@ name: docker-multigpu-tests-biweekly
 on:
   pull_request:
     paths:
-      - 'tests/e2e/multigpu/*.py'
+      - 'tests/e2e/multigpu/**.py'
       - 'requirements.txt'
       - 'setup.py'
      - 'pyproject.toml'
       - '.github/workflows/multi-gpu-e2e.yml'
+      - 'src/axolotl/core/trainers/mixins/sequence_parallel.py'
+      - 'src/axolotl/utils/distributed.py'
   workflow_dispatch:
   schedule:
     - cron: '0 0 * * 1,4' # Runs at 00:00 UTC every monday & thursday
@@ -30,28 +32,25 @@ jobs:
             pytorch: 2.6.0
             axolotl_extras: vllm
             num_gpus: 2
-            nightly_build: "true"
-          - cuda: 124
-            cuda_version: 12.4.1
+          - cuda: 126
+            cuda_version: 12.6.3
             python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras: # no vllm support for 2.4.1
+            pytorch: 2.6.0
+            axolotl_extras:
+            suffix: "-hopper"
             num_gpus: 2
-            nightly_build: "true"
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
             pytorch: 2.5.1
-            axolotl_extras: vllm
+            axolotl_extras:
             num_gpus: 2
-            nightly_build: "true"
           - cuda: 126
             cuda_version: 12.6.3
             python_version: "3.11"
             pytorch: 2.7.0
             axolotl_extras:
             num_gpus: 2
-            nightly_build: "true"
     runs-on: [self-hosted, modal]
     timeout-minutes: 120
     steps:
@@ -73,7 +72,6 @@ jobs:
           echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
           echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
           echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
-          echo "NIGHTLY_BUILD=${{ matrix.nightly_build }}" >> $GITHUB_ENV
           echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
       - name: Run tests job on Modal
         run: |
.github/workflows/nightlies.yml (10 changes)

@@ -12,11 +12,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
@@ -70,11 +65,6 @@ jobs:
     strategy:
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
.github/workflows/preview-docs.yml (new file, 61 lines)

name: Preview
on:
  workflow_dispatch:
  pull_request:
    types: [opened, synchronize, reopened]

    # Run the workflow only when one of these files changes
    paths:
      - '**/*.md'   # any Markdown file
      - '**/*.qmd'  # any Quarto file
      - '_quarto.yaml'

permissions:
  checks: write
  contents: write
  deployments: write
  issues: write
  discussions: write
  pages: write
  pull-requests: write
  statuses: write

jobs:
  preview:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository
        uses: actions/checkout@v4

      - name: Set up Quarto
        uses: quarto-dev/quarto-actions/setup@v2

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install dependencies
        run: |
          python3 -m pip install jupyter quartodoc
          python3 -m pip install -e . --no-deps

      - name: Build autodoc
        run: quartodoc build

      - name: Quarto render
        run: quarto render

      - name: Netlify Publish
        uses: nwtgck/actions-netlify@v3.0
        with:
          publish-dir: './_site'
          enable-pull-request-comment: true
          enable-github-deployment: true
          github-token: ${{ secrets.GITHUB_TOKEN }}
          deploy-message: "Deployed On Netlify"
          github-deployment-environment: 'preview'
          github-deployment-description: 'Preview Deployment'
        env:
          NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }}
          NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }}
.github/workflows/tests-nightly.yml (96 changes)

@@ -18,15 +18,102 @@ jobs:
     env:
       SKIP: no-commit-to-branch

+  preload-cache:
+    name: Preload HF cache
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python_version: ["3.11"]
+        pytorch_version: ["2.6.0"]
+    timeout-minutes: 20
+
+    env:
+      AXOLOTL_IS_CI_CACHE_PRELOAD: "1"
+
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v4
+
+      - name: Restore HF cache
+        id: hf-cache-restore
+        uses: actions/cache/restore@v4
+        with:
+          path: |
+            /home/runner/.cache/huggingface/hub/datasets--*
+            /home/runner/.cache/huggingface/hub/models--*
+          key: ${{ runner.os }}-hf-hub-cache-v2
+
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python_version }}
+          cache: 'pip' # caching pip dependencies
+
+      - name: upgrade pip
+        run: |
+          pip3 install --upgrade pip
+          pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
+
+      - name: Install PyTorch
+        run: |
+          pip3 install torch==${{ matrix.pytorch_version }}
+
+      - name: Install dependencies
+        run: |
+          pip3 show torch
+          pip3 install --no-build-isolation -U -e .
+          python scripts/unsloth_install.py | sh
+          python scripts/cutcrossentropy_install.py | sh
+          pip3 install -r requirements-dev.txt -r requirements-tests.txt
+
+      - name: Make sure PyTorch version wasn't clobbered
+        run: |
+          python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"
+
+      - name: Ensure axolotl CLI was installed
+        run: |
+          axolotl --help
+
+      - name: Pre-Download dataset fixture
+        run: |
+          huggingface-cli download --repo-type=dataset axolotl-ai-internal/axolotl-oss-dataset-fixtures
+
+      - name: Run tests
+        run: |
+          pytest -v tests/conftest.py
+
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v5
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ./coverage.xml
+          flags: unittests,pytorch-${{ matrix.pytorch_version }}
+          fail_ci_if_error: false
+
+      - name: cleanup pip cache
+        run: |
+          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
+
+      - name: Save HF cache
+        id: hf-cache
+        uses: actions/cache/save@v4
+        with:
+          path: |
+            /home/runner/.cache/huggingface/hub/datasets--*
+            /home/runner/.cache/huggingface/hub/models--*
+          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
+
   pytest:
     name: PyTest
     runs-on: ubuntu-latest
+    needs: [preload-cache]
     strategy:
       fail-fast: false
       max-parallel: 2
       matrix:
         python_version: ["3.11"]
-        pytorch_version: ["2.4.1", "2.5.1", "2.6.0"]
+        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
     timeout-minutes: 20

     steps:
@@ -106,13 +193,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            num_gpus: 1
-            axolotl_extras:
-            nightly_build: "true"
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
.github/workflows/tests.yml (220 changes)

@@ -27,6 +27,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

+env:
+  TRANSFORMERS_IS_CI: "yes"
+
 jobs:
   pre-commit:
     name: pre-commit
@@ -41,29 +44,127 @@ jobs:
     env:
       SKIP: no-commit-to-branch

+  # preload-cache:
+  #   name: Preload HF cache
+  #   runs-on: ubuntu-latest
+  #   strategy:
+  #     fail-fast: false
+  #     matrix:
+  #       python_version: ["3.11"]
+  #       pytorch_version: ["2.6.0"]
+  #   timeout-minutes: 20
+  #
+  #   env:
+  #     AXOLOTL_IS_CI_CACHE_PRELOAD: "1"
+  #
+  #   steps:
+  #     - name: Check out repository code
+  #       uses: actions/checkout@v4
+  #
+  #     - name: Restore HF cache
+  #       id: hf-cache-restore
+  #       uses: actions/cache/restore@v4
+  #       with:
+  #         path: |
+  #           /home/runner/.cache/huggingface/hub/datasets--*
+  #           /home/runner/.cache/huggingface/hub/models--*
+  #         key: ${{ runner.os }}-hf-hub-cache-v2
+  #
+  #     - name: Restore Cache from S3
+  #       id: hf-cache-restore-s3
+  #       run: |
+  #         mkdir -p /home/runner/.cache/huggingface/hub
+  #         curl -L https://d1dttdx32dkk5p.cloudfront.net/hf-cache.tar.zst | tar -xf - -C /home/runner/.cache/huggingface/hub/ --use-compress-program unzstd
+  #
+  #     - name: Setup Python
+  #       uses: actions/setup-python@v5
+  #       with:
+  #         python-version: ${{ matrix.python_version }}
+  #         cache: 'pip' # caching pip dependencies
+  #
+  #     - name: upgrade pip
+  #       run: |
+  #         pip3 install --upgrade pip
+  #         pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
+  #
+  #     - name: Install PyTorch
+  #       run: |
+  #         pip3 install torch==${{ matrix.pytorch_version }}
+  #
+  #     - name: Install dependencies
+  #       run: |
+  #         pip3 show torch
+  #         pip3 install --no-build-isolation -U -e .
+  #         python scripts/unsloth_install.py | sh
+  #         python scripts/cutcrossentropy_install.py | sh
+  #         pip3 install -r requirements-dev.txt -r requirements-tests.txt
+  #
+  #     - name: Make sure PyTorch version wasn't clobbered
+  #       run: |
+  #         python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"
+  #
+  #     - name: Ensure axolotl CLI was installed
+  #       run: |
+  #         axolotl --help
+  #
+  #     - name: Pre-Download dataset fixture
+  #       run: |
+  #         huggingface-cli download --repo-type=dataset axolotl-ai-internal/axolotl-oss-dataset-fixtures
+  #
+  #     - name: Run tests
+  #       run: |
+  #         pytest -v tests/conftest.py
+  #
+  #     - name: Upload coverage to Codecov
+  #       uses: codecov/codecov-action@v5
+  #       with:
+  #         token: ${{ secrets.CODECOV_TOKEN }}
+  #         files: ./coverage.xml
+  #         flags: unittests,pytorch-${{ matrix.pytorch_version }}
+  #         fail_ci_if_error: false
+  #
+  #     - name: cleanup pip cache
+  #       run: |
+  #         find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
+  #
+  #     - name: Save HF cache
+  #       id: hf-cache
+  #       uses: actions/cache/save@v4
+  #       with:
+  #         path: |
+  #           /home/runner/.cache/huggingface/hub/datasets--*
+  #           /home/runner/.cache/huggingface/hub/models--*
+  #         key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
+
   pytest:
     name: PyTest
     runs-on: ubuntu-latest
+    # needs: [preload-cache]
     strategy:
       fail-fast: false
-      max-parallel: 2
       matrix:
         python_version: ["3.11"]
-        pytorch_version: ["2.4.1", "2.5.1", "2.6.0", "2.7.0"]
+        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
     timeout-minutes: 20

     steps:
       - name: Check out repository code
         uses: actions/checkout@v4

-      - name: Restore HF cache
-        id: hf-cache-restore
-        uses: actions/cache/restore@v4
-        with:
-          path: |
-            /home/runner/.cache/huggingface/hub/datasets--*
-            /home/runner/.cache/huggingface/hub/models--*
-          key: ${{ runner.os }}-hf-hub-cache-v2
+      # - name: Restore HF cache
+      #   id: hf-cache-restore
+      #   uses: actions/cache/restore@v4
+      #   with:
+      #     path: |
+      #       /home/runner/.cache/huggingface/hub/datasets--*
+      #       /home/runner/.cache/huggingface/hub/models--*
+      #     key: ${{ runner.os }}-hf-hub-cache-v2

+      - name: Restore Cache from S3
+        id: hf-cache-restore-s3
+        run: |
+          mkdir -p /home/runner/.cache/huggingface/hub
+          curl -L https://d1dttdx32dkk5p.cloudfront.net/hf-cache.tar.zst | tar -xf - -C /home/runner/.cache/huggingface/hub/ --use-compress-program unzstd
+
       - name: Setup Python
         uses: actions/setup-python@v5
@@ -118,38 +219,35 @@ jobs:
         run: |
           find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;

-      - name: Save HF cache
-        id: hf-cache
-        uses: actions/cache/save@v4
-        with:
-          path: |
-            /home/runner/.cache/huggingface/hub/datasets--*
-            /home/runner/.cache/huggingface/hub/models--*
-          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
-
   pytest-sdist:
     name: PyTest from Source Dist
     runs-on: ubuntu-latest
+    # needs: [preload-cache]
     strategy:
       fail-fast: false
-      max-parallel: 1
       matrix:
         python_version: ["3.11"]
-        pytorch_version: ["2.4.1", "2.5.1", "2.6.0"]
+        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
     timeout-minutes: 20

     steps:
       - name: Check out repository code
         uses: actions/checkout@v4

-      - name: Restore HF cache
-        id: hf-cache-restore
-        uses: actions/cache/restore@v4
-        with:
-          path: |
-            /home/runner/.cache/huggingface/hub/datasets--*
-            /home/runner/.cache/huggingface/hub/models--*
-          key: ${{ runner.os }}-hf-hub-cache-v2
+      # - name: Restore HF cache
+      #   id: hf-cache-restore
+      #   uses: actions/cache/restore@v4
+      #   with:
+      #     path: |
+      #       /home/runner/.cache/huggingface/hub/datasets--*
+      #       /home/runner/.cache/huggingface/hub/models--*
+      #     key: ${{ runner.os }}-hf-hub-cache-v2

+      - name: Restore Cache from S3
+        id: hf-cache-restore-s3
+        run: |
+          mkdir -p /home/runner/.cache/huggingface/hub
+          curl -L https://d1dttdx32dkk5p.cloudfront.net/hf-cache.tar.zst | tar -xf - -C /home/runner/.cache/huggingface/hub/ --use-compress-program unzstd
+
       - name: Setup Python
         uses: actions/setup-python@v5
@@ -196,16 +294,8 @@ jobs:
         run: |
           find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;

-      - name: Save HF cache
-        id: hf-cache
-        uses: actions/cache/save@v4
-        with:
-          path: |
-            /home/runner/.cache/huggingface/hub/datasets--*
-            /home/runner/.cache/huggingface/hub/models--*
-          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
-
   docker-e2e-tests-1st:
+    # Run this job first as a gate for running the remainder of the test matrix
     if: ${{ ! contains(github.event.commits[0].message, '[skip e2e]') && github.repository_owner == 'axolotl-ai-cloud' }}
     # this job needs to be run on self-hosted GPU runners...
     runs-on: [self-hosted, modal]
@@ -252,6 +342,8 @@ jobs:
     # this job needs to be run on self-hosted GPU runners...
     runs-on: [self-hosted, modal]
     timeout-minutes: 90
+    # Only run the remainder of the matrix if the first e2e check passed;
+    # this is to save on wasted compute costs for known failures that get caught in the first run
     needs: [pre-commit, pytest, docker-e2e-tests-1st]

     strategy:
@@ -261,21 +353,27 @@ jobs:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
-            pytorch: 2.4.1
+            pytorch: 2.6.0
             num_gpus: 1
-            axolotl_extras:
+            axolotl_extras: llmcompressor
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
             pytorch: 2.5.1
             num_gpus: 1
-            axolotl_extras: vllm
+            axolotl_extras:
           - cuda: 126
             cuda_version: 12.6.3
             python_version: "3.11"
             pytorch: 2.7.0
             num_gpus: 1
             axolotl_extras:
+          - cuda: 128
+            cuda_version: 12.8.1
+            python_version: "3.11"
+            pytorch: 2.7.0
+            num_gpus: 1
+            axolotl_extras:
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -300,3 +398,43 @@ jobs:
       - name: Run tests job on Modal
         run: |
           modal run cicd.e2e_tests
+
+  docker-e2e-cleanup:
+    runs-on: [self-hosted, modal]
+    timeout-minutes: 90
+    needs: [docker-e2e-tests]
+
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - cuda: 124
+            cuda_version: 12.4.1
+            python_version: "3.11"
+            pytorch: 2.6.0
+            num_gpus: 1
+            axolotl_extras: vllm
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Install Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+      - name: Install Modal
+        run: |
+          python -m pip install --upgrade pip
+          pip install modal==0.71.8 jinja2
+      - name: Update env vars
+        run: |
+          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
+          echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
+          echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
+          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
+          echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
+          echo "MODAL_IMAGE_BUILDER_VERSION=2024.10" >> $GITHUB_ENV
+          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
+          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
+      - name: Run tests job on Modal
+        run: |
+          modal run cicd.cleanup
.runpod/.gitignore (new file, 161 lines)

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
pod/scripts/config.yaml
.runpod/Dockerfile (new file, 18 lines)

FROM axolotlai/axolotl-cloud:main-py3.11-cu124-2.6.0

COPY .runpod/requirements.txt /requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip \
    python3 -m pip install --upgrade pip && \
    python3 -m pip install --upgrade -r /requirements.txt

# Environment settings
ARG BASE_VOLUME="/runpod-volume"
ENV BASE_VOLUME=$BASE_VOLUME
ENV HF_DATASETS_CACHE="${BASE_VOLUME}/huggingface-cache/datasets"
ENV HUGGINGFACE_HUB_CACHE="${BASE_VOLUME}/huggingface-cache/hub"
ENV TRANSFORMERS_CACHE="${BASE_VOLUME}/huggingface-cache/hub"

COPY .runpod/src /src

WORKDIR /src
CMD ["python3", "/src/handler.py"]
.runpod/README.md (new file, 335 lines)

<h1>LLM Post Training - Full fine-tune, LoRA, QLoRA, etc. for Llama/Mistral/Gemma and more</h1>

# Configuration Options

This document outlines all available configuration options for training models. The configuration can be provided as a JSON request.

## Usage

You can provide these configuration options:

1. As a JSON request body:

```json
{
  "input": {
    "user_id": "user",
    "model_id": "model-name",
    "run_id": "run-id",
    "credentials": {
      "wandb_api_key": "", # add your Weights & Biases key. TODO: you will be able to set this in environment variables.
      "hf_token": ""       # add your HF token. TODO: you will be able to set this in environment variables.
    },
    "args": {
      "base_model": "NousResearch/Llama-3.2-1B",
      // ... other options
    }
  }
}
```

## Configuration Options

### Model Configuration

| Option | Description | Default |
|---|---|---|
| `base_model` | Path to the base model (local or HuggingFace) | Required |
| `base_model_config` | Configuration path for the base model | Same as base_model |
| `revision_of_model` | Specific model revision from HuggingFace hub | Latest |
| `tokenizer_config` | Custom tokenizer configuration path | Optional |
| `model_type` | Type of model to load | AutoModelForCausalLM |
| `tokenizer_type` | Type of tokenizer to use | AutoTokenizer |
| `hub_model_id` | Repository ID where the model will be pushed on Hugging Face Hub (format: username/repo-name) | Optional |

## Model Family Identification

| Option | Default | Description |
|---|---|---|
| `is_falcon_derived_model` | `false` | Whether model is Falcon-based |
| `is_llama_derived_model` | `false` | Whether model is LLaMA-based |
| `is_qwen_derived_model` | `false` | Whether model is Qwen-based |
| `is_mistral_derived_model` | `false` | Whether model is Mistral-based |

## Model Configuration Overrides

| Option | Default | Description |
|---|---|---|
| `overrides_of_model_config.rope_scaling.type` | `"linear"` | RoPE scaling type (linear/dynamic) |
| `overrides_of_model_config.rope_scaling.factor` | `1.0` | RoPE scaling factor |

### Model Loading Options

| Option | Description | Default |
|---|---|---|
| `load_in_8bit` | Load model in 8-bit precision | false |
| `load_in_4bit` | Load model in 4-bit precision | false |
| `bf16` | Use bfloat16 precision | false |
| `fp16` | Use float16 precision | false |
| `tf32` | Use tensor float 32 precision | false |

## Memory and Device Settings

| Option | Default | Description |
|---|---|---|
| `gpu_memory_limit` | `"20GiB"` | GPU memory limit |
| `lora_on_cpu` | `false` | Load LoRA on CPU |
| `device_map` | `"auto"` | Device mapping strategy |
| `max_memory` | `null` | Max memory per device |

## Training Hyperparameters

| Option | Default | Description |
|---|---|---|
| `gradient_accumulation_steps` | `1` | Gradient accumulation steps |
| `micro_batch_size` | `2` | Batch size per GPU |
| `eval_batch_size` | `null` | Evaluation batch size |
| `num_epochs` | `4` | Number of training epochs |
| `warmup_steps` | `100` | Warmup steps |
| `warmup_ratio` | `0.05` | Warmup ratio |
| `learning_rate` | `0.00003` | Learning rate |
| `lr_quadratic_warmup` | `false` | Quadratic warmup |
| `logging_steps` | `null` | Logging frequency |
| `eval_steps` | `null` | Evaluation frequency |
| `evals_per_epoch` | `null` | Evaluations per epoch |
| `save_strategy` | `"epoch"` | Checkpoint saving strategy |
| `save_steps` | `null` | Saving frequency |
| `saves_per_epoch` | `null` | Saves per epoch |
| `save_total_limit` | `null` | Maximum checkpoints to keep |
| `max_steps` | `null` | Maximum training steps |

### Dataset Configuration

```yaml
datasets:
  - path: vicgalle/alpaca-gpt4  # HuggingFace dataset or TODO: You will be able to add the local path.
    type: alpaca                # Format type (alpaca, gpteacher, oasst, etc.)
    ds_type: json               # Dataset type
    data_files: path/to/data    # Source data files
    train_on_split: train       # Dataset split to use
```

## Chat Template Settings

| Option | Default | Description |
|---|---|---|
| `chat_template` | `"tokenizer_default"` | Chat template type |
| `chat_template_jinja` | `null` | Custom Jinja template |
| `default_system_message` | `"You are a helpful assistant."` | Default system message |

## Dataset Processing

| Option | Default | Description |
|---|---|---|
| `dataset_prepared_path` | `"data/last_run_prepared"` | Path for prepared dataset |
| `push_dataset_to_hub` | `""` | Push dataset to HF hub |
| `dataset_processes` | `4` | Number of preprocessing processes |
| `dataset_keep_in_memory` | `false` | Keep dataset in memory |
| `shuffle_merged_datasets` | `true` | Shuffle merged datasets |
| `dataset_exact_deduplication` | `true` | Deduplicate datasets |

## LoRA Configuration

| Option | Default | Description |
|---|---|---|
| `adapter` | `"lora"` | Adapter type (lora/qlora) |
| `lora_model_dir` | `""` | Directory with pretrained LoRA |
| `lora_r` | `8` | LoRA attention dimension |
| `lora_alpha` | `16` | LoRA alpha parameter |
| `lora_dropout` | `0.05` | LoRA dropout |
| `lora_target_modules` | `["q_proj", "v_proj"]` | Modules to apply LoRA |
| `lora_target_linear` | `false` | Target all linear modules |
| `peft_layers_to_transform` | `[]` | Layers to transform |
| `lora_modules_to_save` | `[]` | Modules to save |
| `lora_fan_in_fan_out` | `false` | Fan in/out structure |

## Optimization Settings

| Option | Default | Description |
|---|---|---|
| `train_on_inputs` | `false` | Train on input prompts |
| `group_by_length` | `false` | Group by sequence length |
| `gradient_checkpointing` | `false` | Use gradient checkpointing |
| `early_stopping_patience` | `3` | Early stopping patience |

## Learning Rate Scheduling

| Option | Default | Description |
|---|---|---|
| `lr_scheduler` | `"cosine"` | Scheduler type |
| `lr_scheduler_kwargs` | `{}` | Scheduler parameters |
| `cosine_min_lr_ratio` | `null` | Minimum LR ratio |
| `cosine_constant_lr_ratio` | `null` | Constant LR ratio |
| `lr_div_factor` | `null` | LR division factor |

## Optimizer Settings

| Option | Default | Description |
|---|---|---|
| `optimizer` | `"adamw_hf"` | Optimizer choice |
| `optim_args` | `{}` | Optimizer arguments |
| `optim_target_modules` | `[]` | Target modules |
| `weight_decay` | `null` | Weight decay |
| `adam_beta1` | `null` | Adam beta1 |
| `adam_beta2` | `null` | Adam beta2 |
| `adam_epsilon` | `null` | Adam epsilon |
| `max_grad_norm` | `null` | Gradient clipping |

## Attention Implementations

| Option | Default | Description |
|---|---|---|
| `flash_optimum` | `false` | Use better transformers |
| `xformers_attention` | `false` | Use xformers |
| `flash_attention` | `false` | Use flash attention |
| `flash_attn_cross_entropy` | `false` | Flash attention cross entropy |
| `flash_attn_rms_norm` | `false` | Flash attention RMS norm |
| `flash_attn_fuse_qkv` | `false` | Fuse QKV operations |
| `flash_attn_fuse_mlp` | `false` | Fuse MLP operations |
| `sdp_attention` | `false` | Use scaled dot product |
| `s2_attention` | `false` | Use shifted sparse attention |

## Tokenizer Modifications

| Option | Default | Description |
|---|---|---|
| `special_tokens` | - | Special tokens to add/modify |
| `tokens` | `[]` | Additional tokens |

## Distributed Training

| Option | Default | Description |
|---|---|---|
| `fsdp` | `null` | FSDP configuration |
| `fsdp_config` | `null` | FSDP config options |
| `deepspeed` | `null` | Deepspeed config path |
| `ddp_timeout` | `null` | DDP timeout |
| `ddp_bucket_cap_mb` | `null` | DDP bucket capacity |
| `ddp_broadcast_buffers` | `null` | DDP broadcast buffers |

<details>
<summary><h3>Example Configuration Request:</h3></summary>

Here's a complete example for fine-tuning a LLaMA model using LoRA:

```json
{
  "input": {
    "user_id": "user",
    "model_id": "llama-test",
    "run_id": "test-run",
    "credentials": {
      "wandb_api_key": "",
      "hf_token": ""
    },
    "args": {
      "base_model": "NousResearch/Llama-3.2-1B",
      "load_in_8bit": false,
      "load_in_4bit": false,
      "strict": false,
      "datasets": [
        {
          "path": "teknium/GPT4-LLM-Cleaned",
          "type": "alpaca"
        }
      ],
      "dataset_prepared_path": "last_run_prepared",
      "val_set_size": 0.1,
      "output_dir": "./outputs/lora-out",
      "adapter": "lora",
      "sequence_len": 2048,
      "sample_packing": true,
      "eval_sample_packing": true,
      "pad_to_sequence_len": true,
      "lora_r": 16,
      "lora_alpha": 32,
      "lora_dropout": 0.05,
      "lora_target_modules": [
        "gate_proj",
        "down_proj",
        "up_proj",
        "q_proj",
        "v_proj",
        "k_proj",
        "o_proj"
      ],
      "gradient_accumulation_steps": 2,
      "micro_batch_size": 2,
      "num_epochs": 1,
      "optimizer": "adamw_8bit",
      "lr_scheduler": "cosine",
      "learning_rate": 0.0002,
      "train_on_inputs": false,
      "group_by_length": false,
      "bf16": "auto",
      "tf32": false,
      "gradient_checkpointing": true,
      "logging_steps": 1,
      "flash_attention": true,
      "loss_watchdog_threshold": 5,
      "loss_watchdog_patience": 3,
      "warmup_steps": 10,
      "evals_per_epoch": 4,
      "saves_per_epoch": 1,
      "weight_decay": 0,
      "hub_model_id": "runpod/llama-fr-lora",
      "wandb_name": "test-run-1",
      "wandb_project": "test-run-1",
      "wandb_entity": "axo-test",
      "special_tokens": {
        "pad_token": "<|end_of_text|>"
      }
    }
  }
}
```

</details>

### Advanced Features

#### Wandb Integration

- `wandb_project`: Project name for Weights & Biases
- `wandb_entity`: Team name in W&B
- `wandb_watch`: Monitor model with W&B
- `wandb_name`: Name of the W&B run
- `wandb_run_id`: ID for the W&B run

#### Performance Optimization

- `sample_packing`: Enable efficient sequence packing
- `eval_sample_packing`: Use sequence packing during evaluation
- `torch_compile`: Enable PyTorch 2.0 compilation
- `flash_attention`: Use Flash Attention implementation
- `xformers_attention`: Use xFormers attention implementation

### Available Optimizers

The following optimizers are supported:

- `adamw_hf`: HuggingFace's AdamW implementation
- `adamw_torch`: PyTorch's AdamW
- `adamw_torch_fused`: Fused AdamW implementation
- `adamw_torch_xla`: XLA-optimized AdamW
- `adamw_apex_fused`: NVIDIA Apex fused AdamW
- `adafactor`: Adafactor optimizer
- `adamw_anyprecision`: Anyprecision AdamW
- `adamw_bnb_8bit`: 8-bit AdamW from bitsandbytes
- `lion_8bit`: 8-bit Lion optimizer
- `lion_32bit`: 32-bit Lion optimizer
- `sgd`: Stochastic Gradient Descent
- `adagrad`: Adagrad optimizer

## Notes

- Set `load_in_8bit: true` or `load_in_4bit: true` for memory-efficient training
- Enable `flash_attention: true` for faster training on modern GPUs
- Use `gradient_checkpointing: true` to reduce memory usage
- Adjust `micro_batch_size` and `gradient_accumulation_steps` based on your GPU memory

For more detailed information, please refer to the [documentation](https://axolotl-ai-cloud.github.io/axolotl/docs/config.html).

### Errors:

- If you face any issues with Flash Attention 2, delete your worker and restart.
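For orientation, a request body like the ones above would typically be submitted to the worker over RunPod's generic serverless HTTP API. The sketch below is only an illustration under assumptions: the endpoint ID, the `RUNPOD_API_KEY` environment variable, and the trimmed-down `args` are placeholders, and the `/v2/<endpoint_id>/run` route is RunPod's standard async-submit path rather than anything defined in this change.

```python
import os
import requests

# Hypothetical values; substitute your own RunPod endpoint ID and API key.
ENDPOINT_ID = "your-endpoint-id"
API_KEY = os.environ["RUNPOD_API_KEY"]

# A trimmed-down version of the README's example request body.
payload = {
    "input": {
        "user_id": "user",
        "model_id": "llama-test",
        "run_id": "test-run",
        "credentials": {"wandb_api_key": "", "hf_token": ""},
        "args": {
            "base_model": "NousResearch/Llama-3.2-1B",
            "datasets": [{"path": "teknium/GPT4-LLM-Cleaned", "type": "alpaca"}],
            "adapter": "lora",
            "num_epochs": 1,
        },
    }
}

# Queue the fine-tuning job asynchronously on the serverless endpoint.
response = requests.post(
    f"https://api.runpod.ai/v2/{ENDPOINT_ID}/run",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json=payload,
    timeout=30,
)
response.raise_for_status()
print(response.json())  # response includes a job id that can be polled for status
```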
.runpod/hub.json (new file, 93 lines)

{
  "title": "Axolotl Fine-Tuning",
  "description": "Serverless fine-tuning of open-source LLMs with Axolotl. Supports LoRA, QLoRA, DPO, and more using Hugging Face models and datasets.",
  "type": "serverless",
  "category": "language",
  "iconUrl": "https://avatars.githubusercontent.com/u/167502477",
  "config": {
    "runsOn": "GPU",
    "containerDiskInGb": 200,
    "gpuCount": 1,
    "allowedCudaVersions": ["12.8", "12.7", "12.6", "12.5", "12.4"],
    "presets": [],
    "env": [
      {
        "key": "TOKENIZER",
        "input": {
          "name": "Tokenizer",
          "type": "string",
          "description": "Name or path of the Hugging Face tokenizer to use.",
          "default": "",
          "advanced": true
        }
      },
      {
        "key": "MAX_NUM_SEQS",
        "input": {
          "name": "Max Num Seqs",
          "type": "number",
          "description": "Maximum number of sequences per iteration.",
          "default": 256,
          "advanced": true
        }
      },
      {
        "key": "DISABLE_LOG_STATS",
        "input": {
          "name": "Disable Log Stats",
          "type": "boolean",
          "description": "Disable logging statistics.",
          "default": false,
          "trueValue": "true",
          "falseValue": "false"
        }
      },
      {
        "key": "LOAD_FORMAT",
        "input": {
          "name": "Load Format",
          "type": "string",
          "description": "The format of the model weights to load.",
          "default": "auto",
          "options": [
            { "label": "auto", "value": "auto" },
            { "label": "pt", "value": "pt" },
            { "label": "safetensors", "value": "safetensors" },
            { "label": "npcache", "value": "npcache" },
            { "label": "dummy", "value": "dummy" },
            { "label": "tensorizer", "value": "tensorizer" },
            { "label": "bitsandbytes", "value": "bitsandbytes" }
          ],
          "advanced": true
        }
      }
    ]
  }
}
.runpod/requirements.txt (new file, 7 lines)

# Required Python packages get listed here, one per line.
# Recommended to lock the version number to avoid unexpected changes.

# You can also install packages from a git repository, e.g.:
# git+https://github.com/runpod/runpod-python.git
# To learn more, see https://pip.pypa.io/en/stable/reference/requirements-file-format/
runpod~=1.7.0
577  .runpod/src/config/config.yaml  Normal file
@@ -0,0 +1,577 @@
# # This is the huggingface model that contains *.pt, *.safetensors, or *.bin files
# # This can also be a relative path to a model on disk
# base_model: ./llama-7b-hf
# # You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)
# base_model_ignore_patterns:
# # If the base_model repo on hf hub doesn't include configuration .json files,
# # You can set that here, or leave this empty to default to base_model
# base_model_config: ./llama-7b-hf
# # You can specify to choose a specific model revision from huggingface hub
# model_revision:
# # Optional tokenizer configuration override in case you want to use a different tokenizer
# # than the one defined in the base model
# tokenizer_config:
# # If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too
# model_type: AutoModelForCausalLM
# # Corresponding tokenizer for the model AutoTokenizer is a good choice
# tokenizer_type: AutoTokenizer
# # Trust remote code for untrusted source
# trust_remote_code:
# # use_fast option for tokenizer loading from_pretrained, default to True
# tokenizer_use_fast:
# # Whether to use the legacy tokenizer setting, defaults to True
# tokenizer_legacy:
# # Resize the model embeddings when new tokens are added to multiples of 32
# # This is reported to improve training speed on some models
# resize_token_embeddings_to_32x:

# # Used to identify which model the model is based on
# is_falcon_derived_model:
# is_llama_derived_model:
# # Please note that if you set this to true, `padding_side` will be set to "left" by default
# is_mistral_derived_model:
# is_qwen_derived_model:

# # optional overrides to the base model configuration
# model_config:
#   # RoPE Scaling https://github.com/huggingface/transformers/pull/24653
#   rope_scaling:
#     type: # linear | dynamic
#     factor: # float


# # Whether you are training a 4-bit GPTQ quantized model
# gptq: true
# gptq_groupsize: 128 # group size
# gptq_model_v1: false # v1 or v2

# # This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer
# load_in_8bit: true
# # Use bitsandbytes 4 bit
# load_in_4bit:

# # Use CUDA bf16
# bf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere
# # Use CUDA fp16
# fp16: true
# # Use CUDA tf32
# tf32: true # require >=ampere

# # No AMP (automatic mixed precision)
# bfloat16: true # require >=ampere
# float16: true

# # A list of one or more datasets to finetune the model with
# datasets:
#   # HuggingFace dataset repo | s3://,gs:// path | "json" for local dataset, make sure to fill data_files
#   - path: vicgalle/alpaca-gpt4
#     # The type of prompt to use for training. [alpaca, sharegpt, gpteacher, oasst, reflection]
#     type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>
#     ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file
#     data_files: # Optional[str] path to source data files
#     shards: # Optional[int] number of shards to split data into
#     name: # Optional[str] name of dataset configuration to load
#     train_on_split: train # Optional[str] name of dataset split to load from

#     # Optional[str] fastchat conversation type, only used with type: sharegpt
#     conversation: # Options (see Conversation 'name'): https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
#     field_human: # Optional[str]. Human key to use for conversation.
#     field_model: # Optional[str]. Assistant key to use for conversation.

#   # Custom user prompt
#   - path: repo
#     type:
#       # The below are defaults. only set what's needed.
#       system_prompt: ""
#       system_format: "{system}"
#       field_system: system
#       field_instruction: instruction
#       field_input: input
#       field_output: output

#       # Customizable to be single line or multi-line
#       # 'format' can include {input}
#       format: |-
#         User: {instruction} {input}
#         Assistant:
#       # 'no_input_format' cannot include {input}
#       no_input_format: "{instruction} "

#     # For `completion` datasets only, uses the provided field instead of `text` column
#     field:

# # Axolotl attempts to save the dataset as an arrow after packing the data together so
# # subsequent training attempts load faster, relative path
# dataset_prepared_path: data/last_run_prepared
# # Push prepared dataset to hub
# push_dataset_to_hub: # repo path
# # The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`
# # if not set.
# dataset_processes: # defaults to os.cpu_count() if not set
# # push checkpoints to hub
# hub_model_id: # repo path to push finetuned model
# # how to push checkpoints to hub
# # https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy
# hub_strategy:
# # Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets
# # Required to be true when used in combination with `push_dataset_to_hub`
# hf_use_auth_token: # boolean
# # How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval.
# val_set_size: 0.04
# # Num shards for whole dataset
# dataset_shard_num:
# # Index of shard to use for whole dataset
# dataset_shard_idx:

# # The maximum length of an input to train with, this should typically be less than 2048
# # as most models have a token/context limit of 2048
# sequence_len: 2048
# # Pad inputs so each step uses constant sized buffers
# # This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently
# pad_to_sequence_len:
# # Max sequence length to concatenate training samples together up to
# # Inspired by StackLLaMA. see https://huggingface.co/blog/stackllama#supervised-fine-tuning
# # FutureWarning: This will soon be DEPRECATED
# max_packed_sequence_len: 1024
# # Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true'
# sample_packing:
# # Set to 'false' if getting errors during eval with sample_packing on.
# eval_sample_packing:
# # You can set these packing optimizations AFTER starting a training at least once.
# # The trainer will provide recommended values for these values.
# sample_packing_eff_est:
# total_num_tokens:

# # If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model
# adapter: lora
# # If you already have a lora model trained that you want to load, put that here.
# # This means after training, if you want to test the model, you should set this to the value of `lora_out_dir`.
# lora_model_dir:

# # LoRA hyperparameters
# # For more details about the following options, see:
# # https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2
# lora_r: 8
# lora_alpha: 16
# lora_dropout: 0.05
# lora_target_modules:
#   - q_proj
#   - v_proj
# #  - k_proj
# #  - o_proj
# #  - gate_proj
# #  - down_proj
# #  - up_proj
# lora_target_linear: # If true, will target all linear layers

# # If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.
# # For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.
# # `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.
# # https://github.com/huggingface/peft/issues/334#issuecomment-1561727994
# lora_modules_to_save:
# #  - embed_tokens
# #  - lm_head

# # Once you complete training, the model will be saved to the following directory.
# # If you merge the adapter to the base model, a subdirectory `merged` will be created under this directory.
# # Make sure `lora_model_dir` points to this directory if you want to use the trained model.
# lora_out_dir:
# lora_fan_in_fan_out: false

# # ReLoRA configuration
# # Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed
# relora_steps: # Number of steps per ReLoRA restart
# relora_warmup_steps: # Number of per-restart warmup steps
# relora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings

# # wandb configuration if you're using it
# wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb
# wandb_project: # Your wandb project name
# wandb_entity: # A wandb Team name if using a Team
# wandb_watch:
# wandb_run_id: # Set the name of your wandb run
# wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training

# # Where to save the full-finetuned model to
# output_dir: ./completed-model

# # Whether to use torch.compile and which backend to use
# torch_compile: # bool
# torch_compile_backend: # Optional[str]

# # Training hyperparameters

# # If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.
# gradient_accumulation_steps: 1
# # The number of samples to include in each batch. This is the number of samples sent to each GPU.
# micro_batch_size: 2
# eval_batch_size:
# num_epochs: 4
# warmup_steps: 100 # cannot use with warmup_ratio
# warmup_ratio: 0.05 # cannot use with warmup_steps
# learning_rate: 0.00003
# lr_quadratic_warmup:
# logging_steps:
# save_strategy: # Set to `no` to skip checkpoint saves
# save_steps: # Leave empty to save at each epoch
# eval_steps: # Leave empty to eval at each epoch, integers for every N steps. decimal for fraction of total steps
# save_total_limit: # Checkpoints saved at a time
# # Maximum number of iterations to train for. It precedes num_epochs which means that
# # if both are set, num_epochs will not be guaranteed.
# # e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps
# max_steps:

# eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0
# eval_table_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128

# # Save model as safetensors (require safetensors package)
# save_safetensors:

# # Whether to mask out or include the human's prompt from the training labels
# train_on_inputs: false
# # Group similarly sized data to minimize padding.
# # May be slower to start, as it must download and sort the entire dataset.
# # Note that training loss may have an oscillating pattern with this enabled.
# group_by_length: false

# # Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing
# gradient_checkpointing: false

# # Stop training after this many evaluation losses have increased in a row
# # https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback
# early_stopping_patience: 3

# # Specify a scheduler and kwargs to use with the optimizer
# lr_scheduler: # 'one_cycle' | 'log_sweep' | empty for cosine
# lr_scheduler_kwargs:

# # For one_cycle optim
# lr_div_factor: # Learning rate div factor

# # For log_sweep optim
# log_sweep_min_lr:
# log_sweep_max_lr:

# # Specify optimizer
# # Valid values are driven by the Transformers OptimizerNames class, see:
# # https://github.com/huggingface/transformers/blob/95b374952dc27d8511541d6f5a4e22c9ec11fb24/src/transformers/training_args.py#L134
# #
# # Note that not all optimizers may be available in your environment, ex: 'adamw_anyprecision' is part of
# # torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used
# # in the examples/ for your model and fine-tuning use case.
# #
# # Valid values for 'optimizer' include:
# # - adamw_hf
# # - adamw_torch
# # - adamw_torch_fused
# # - adamw_torch_xla
# # - adamw_apex_fused
# # - adafactor
# # - adamw_anyprecision
# # - sgd
# # - adagrad
# # - adamw_bnb_8bit
# # - lion_8bit
# # - lion_32bit
# # - paged_adamw_32bit
# # - paged_adamw_8bit
# # - paged_lion_32bit
# # - paged_lion_8bit
# optimizer:
# # Specify weight decay
# weight_decay:
# # adamw hyperparams
# adam_beta1:
# adam_beta2:
# adam_epsilon:
# # Gradient clipping max norm
# max_grad_norm:

# # Augmentation techniques
# # NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings
# # currently only supported on Llama and Mistral
# noisy_embedding_alpha:

# # Whether to use bettertransformers
# flash_optimum:
# # Whether to use xformers attention patch https://github.com/facebookresearch/xformers:
# xformers_attention:
# # Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:
# flash_attention:
# flash_attn_cross_entropy: # Whether to use flash-attention cross entropy implementation - advanced use only
# flash_attn_rms_norm: # Whether to use flash-attention rms norm implementation - advanced use only
# flash_attn_fuse_qkv: # Whether to fuse QKV into a single operation
# flash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation
# # Whether to use scaled-dot-product attention
# # https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
# sdp_attention:
# # Landmark attention (only llama)
# landmark_attention:
# # xpos RoPE see https://github.com/kaiokendev/cutoff-len-is-context-len/blob/main/util/xpos_rope_llama_monkey_patch.py
# # LLaMA only
# xpos_rope:

# # Resume from a specific checkpoint dir
# resume_from_checkpoint:
# # If resume_from_checkpoint isn't set and you simply want it to start where it left off.
# # Be careful with this being turned on between different models.
# auto_resume_from_checkpoints: false

# # Don't mess with this, it's here for accelerate and torchrun
# local_rank:

# # Add or change special tokens.
# # If you add tokens here, you don't need to add them to the `tokens` list.
# special_tokens:
#   # bos_token: "<s>"
#   # eos_token: "</s>"
#   # unk_token: "<unk>"

# # Add extra tokens.
# tokens:

# # FSDP
# fsdp:
# fsdp_config:

# # Deepspeed config path. e.g., deepspeed/zero3.json
# deepspeed:

# # Advanced DDP Arguments
# ddp_timeout:
# ddp_bucket_cap_mb:
# ddp_broadcast_buffers:

# # Path to torch distx for optim 'adamw_anyprecision'
# torchdistx_path:

# # Set to HF dataset for type: 'completion' for streaming instead of pre-tokenize
# pretraining_dataset:

# # Debug mode
# debug:

# # Seed
# seed:

# # Allow overwriting the yml config from the cli
# strict:

base_model: ${BASE_MODEL}
base_model_ignore_patterns: ${BASE_MODEL_IGNORE_PATTERNS}
base_model_config: ${BASE_MODEL_CONFIG}
revision_of_model: ${REVISION_OF_MODEL}
tokenizer_config: ${TOKENIZER_CONFIG}
model_type: ${MODEL_TYPE}
tokenizer_type: ${TOKENIZER_TYPE}
trust_remote_code: ${TRUST_REMOTE_CODE}
tokenizer_use_fast: ${TOKENIZER_USE_FAST}
tokenizer_legacy: ${TOKENIZER_LEGACY}
resize_token_embeddings_to_32x: ${RESIZE_TOKEN_EMBEDDINGS_TO_32X}

is_falcon_derived_model: ${IS_FALCON_DERIVED_MODEL}
is_llama_derived_model: ${IS_LLAMA_DERIVED_MODEL}
is_qwen_derived_model: ${IS_QWEN_DERIVED_MODEL}
is_mistral_derived_model: ${IS_MISTRAL_DERIVED_MODEL}

overrides_of_model_config:
  rope_scaling:
    type: ${ROPE_SCALING_TYPE}
    factor: ${ROPE_SCALING_FACTOR}

bnb_config_kwargs:
  llm_int8_has_fp16_weight: ${BNB_LLM_INT8_HAS_FP16_WEIGHT}
  bnb_4bit_quant_type: ${BNB_4BIT_QUANT_TYPE}
  bnb_4bit_use_double_quant: ${BNB_4BIT_USE_DOUBLE_QUANT}

gptq: ${GPTQ}
load_in_8bit: ${LOAD_IN_8BIT}
load_in_4bit: ${LOAD_IN_4BIT}
bf16: ${BF16}
fp16: ${FP16}
tf32: ${TF32}
bfloat16: ${BFLOAT16}
float16: ${FLOAT16}

gpu_memory_limit: ${GPU_MEMORY_LIMIT}
lora_on_cpu: ${LORA_ON_CPU}

datasets:
  - path: ${DATASET_PATH}
    type: ${DATASET_TYPE}
    ds_type: ${DATASET_DS_TYPE}
    data_files: ${DATASET_DATA_FILES}
    shards: ${DATASET_SHARDS}
    name: ${DATASET_NAME}
    train_on_split: ${DATASET_TRAIN_ON_SPLIT}
    revision: ${DATASET_REVISION}
    trust_remote_code: ${DATASET_TRUST_REMOTE_CODE}

rl: ${RL}
dpo_use_weighting: ${DPO_USE_WEIGHTING}

chat_template: ${CHAT_TEMPLATE}
chat_template_jinja: ${CHAT_TEMPLATE_JINJA}
default_system_message: ${DEFAULT_SYSTEM_MESSAGE}
dataset_prepared_path: ${DATASET_PREPARED_PATH}
push_dataset_to_hub: ${PUSH_DATASET_TO_HUB}
dataset_processes: ${DATASET_PROCESSES}
dataset_keep_in_memory: ${DATASET_KEEP_IN_MEMORY}
hub_model_id: ${HUB_MODEL_ID}
hub_strategy: ${HUB_STRATEGY}
hf_use_auth_token: ${HF_USE_AUTH_TOKEN}
val_set_size: ${VAL_SET_SIZE}
dataset_shard_num: ${DATASET_SHARD_NUM}
dataset_shard_idx: ${DATASET_SHARD_IDX}

sequence_len: ${SEQUENCE_LEN}
pad_to_sequence_len: ${PAD_TO_SEQUENCE_LEN}
sample_packing: ${SAMPLE_PACKING}
eval_sample_packing: ${EVAL_SAMPLE_PACKING}
sample_packing_eff_est: ${SAMPLE_PACKING_EFF_EST}
total_num_tokens: ${TOTAL_NUM_TOKENS}
sample_packing_group_size: ${SAMPLE_PACKING_GROUP_SIZE}
sample_packing_bin_size: ${SAMPLE_PACKING_BIN_SIZE}

batch_flattening: ${BATCH_FLATTENING}
device_map: ${DEVICE_MAP}
max_memory: ${MAX_MEMORY}

adapter: ${ADAPTER}
lora_model_dir: ${LORA_MODEL_DIR}

lora_r: ${LORA_R}
lora_alpha: ${LORA_ALPHA}
lora_dropout: ${LORA_DROPOUT}
lora_target_modules:
  - ${LORA_TARGET_MODULES}
lora_target_linear: ${LORA_TARGET_LINEAR}
peft_layers_to_transform: ${PEFT_LAYERS_TO_TRANSFORM}
lora_modules_to_save: ${LORA_MODULES_TO_SAVE}
lora_fan_in_fan_out: ${LORA_FAN_IN_FAN_OUT}

loraplus_lr_ratio: ${LORAPLUS_LR_RATIO}
loraplus_lr_embedding: ${LORAPLUS_LR_EMBEDDING}

peft:
  loftq_config:
    loftq_bits: ${LOFTQ_BITS}

relora_steps: ${RELORA_STEPS}
relora_warmup_steps: ${RELORA_WARMUP_STEPS}
relora_anneal_steps: ${RELORA_ANNEAL_STEPS}
relora_prune_ratio: ${RELORA_PRUNE_RATIO}
relora_cpu_offload: ${RELORA_CPU_OFFLOAD}

wandb_mode: ${WANDB_MODE}
wandb_project: ${WANDB_PROJECT}
wandb_entity: ${WANDB_ENTITY}
wandb_watch: ${WANDB_WATCH}
wandb_name: ${WANDB_NAME}
wandb_run_id: ${WANDB_RUN_ID}
wandb_log_model: ${WANDB_LOG_MODEL}

mlflow_tracking_uri: ${MLFLOW_TRACKING_URI}
mlflow_experiment_name: ${MLFLOW_EXPERIMENT_NAME}
mlflow_run_name: ${MLFLOW_RUN_NAME}
hf_mlflow_log_artifacts: ${HF_MLFLOW_LOG_ARTIFACTS}

use_comet: ${USE_COMET}
comet_api_key: ${COMET_API_KEY}
comet_workspace: ${COMET_WORKSPACE}
comet_project_name: ${COMET_PROJECT_NAME}
comet_experiment_key: ${COMET_EXPERIMENT_KEY}
comet_mode: ${COMET_MODE}
comet_online: ${COMET_ONLINE}
comet_experiment_config: ${COMET_EXPERIMENT_CONFIG}

output_dir: ${OUTPUT_DIR}

torch_compile: ${TORCH_COMPILE}
torch_compile_backend: ${TORCH_COMPILE_BACKEND}

gradient_accumulation_steps: ${GRADIENT_ACCUMULATION_STEPS}
micro_batch_size: ${MICRO_BATCH_SIZE}
eval_batch_size: ${EVAL_BATCH_SIZE}
num_epochs: ${NUM_EPOCHS}
warmup_steps: ${WARMUP_STEPS}
warmup_ratio: ${WARMUP_RATIO}
learning_rate: ${LEARNING_RATE}
lr_quadratic_warmup: ${LR_QUADRATIC_WARMUP}
logging_steps: ${LOGGING_STEPS}
eval_steps: ${EVAL_STEPS}
evals_per_epoch: ${EVALS_PER_EPOCH}
save_strategy: ${SAVE_STRATEGY}
save_steps: ${SAVE_STEPS}
saves_per_epoch: ${SAVES_PER_EPOCH}
save_total_limit: ${SAVE_TOTAL_LIMIT}
max_steps: ${MAX_STEPS}

eval_table_size: ${EVAL_TABLE_SIZE}
eval_max_new_tokens: ${EVAL_MAX_NEW_TOKENS}
eval_causal_lm_metrics: ${EVAL_CAUSAL_LM_METRICS}

profiler_steps: ${PROFILER_STEPS}
loss_watchdog_threshold: ${LOSS_WATCHDOG_THRESHOLD}
loss_watchdog_patience: ${LOSS_WATCHDOG_PATIENCE}

save_safetensors: ${SAVE_SAFETENSORS}
train_on_inputs: ${TRAIN_ON_INPUTS}
group_by_length: ${GROUP_BY_LENGTH}
gradient_checkpointing: ${GRADIENT_CHECKPOINTING}
early_stopping_patience: ${EARLY_STOPPING_PATIENCE}

lr_scheduler: ${LR_SCHEDULER}
lr_scheduler_kwargs: ${LR_SCHEDULER_KWARGS}
cosine_min_lr_ratio: ${COSINE_MIN_LR_RATIO}
cosine_constant_lr_ratio: ${COSINE_CONSTANT_LR_RATIO}
lr_div_factor: ${LR_DIV_FACTOR}

optimizer: ${OPTIMIZER}
optim_args: ${OPTIM_ARGS}
optim_target_modules: ${OPTIM_TARGET_MODULES}
weight_decay: ${WEIGHT_DECAY}
adam_beta1: ${ADAM_BETA1}
adam_beta2: ${ADAM_BETA2}
adam_epsilon: ${ADAM_EPSILON}
max_grad_norm: ${MAX_GRAD_NORM}

neftune_noise_alpha: ${NEFTUNE_NOISE_ALPHA}

flash_optimum: ${FLASH_OPTIMUM}
xformers_attention: ${XFORMERS_ATTENTION}
flash_attention: ${FLASH_ATTENTION}
flash_attn_cross_entropy: ${FLASH_ATTN_CROSS_ENTROPY}
flash_attn_rms_norm: ${FLASH_ATTN_RMS_NORM}
flash_attn_fuse_qkv: ${FLASH_ATTN_FUSE_QKV}
flash_attn_fuse_mlp: ${FLASH_ATTN_FUSE_MLP}
sdp_attention: ${SDP_ATTENTION}
s2_attention: ${S2_ATTENTION}
resume_from_checkpoint: ${RESUME_FROM_CHECKPOINT}
auto_resume_from_checkpoints: ${AUTO_RESUME_FROM_CHECKPOINTS}

local_rank: ${LOCAL_RANK}

special_tokens:
  bos_token: ${SPECIAL_TOKEN_BOS}
  eos_token: ${SPECIAL_TOKEN_EOS}
  unk_token: ${SPECIAL_TOKEN_UNK}
  pad_token: ${SPECIAL_TOKEN_PAD}

tokens: ${TOKENS}

fsdp: ${FSDP}
fsdp_config: ${FSDP_CONFIG}
deepspeed: ${DEEPSPEED}

ddp_timeout: ${DDP_TIMEOUT}
ddp_bucket_cap_mb: ${DDP_BUCKET_CAP_MB}
ddp_broadcast_buffers: ${DDP_BROADCAST_BUFFERS}

torchdistx_path: ${TORCHDISTX_PATH}
pretraining_dataset: ${PRETRAINING_DATASET}
debug: ${DEBUG}
seed: ${SEED}
strict: ${STRICT}
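The `${...}` placeholders in the file above are not valid YAML values on their own; presumably they are filled in from environment variables (see `set_config_env_vars` in `.runpod/src/utils.py` below) before axolotl reads the config. A minimal sketch of such an expansion step, assuming a plain `${NAME}` to `os.environ["NAME"]` substitution; the `render_config` helper is hypothetical and not part of this branch:

# Hypothetical helper (not part of this branch): expand ${NAME} placeholders in
# the template above from environment variables before axolotl reads the file.
import os
import re


def render_config(template_path: str, output_path: str) -> None:
    with open(template_path, "r", encoding="utf-8") as fin:
        text = fin.read()
    # Substitute each ${NAME} with os.environ["NAME"], or "" when unset.
    rendered = re.sub(
        r"\$\{([A-Z0-9_]+)\}",
        lambda match: os.environ.get(match.group(1), ""),
        text,
    )
    with open(output_path, "w", encoding="utf-8") as fout:
        fout.write(rendered)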
66  .runpod/src/handler.py  Normal file
@@ -0,0 +1,66 @@
"""
Runpod serverless entrypoint handler
"""

import os

import runpod
import yaml
from huggingface_hub._login import login
from train import train
from utils import get_output_dir

BASE_VOLUME = os.environ.get("BASE_VOLUME", "/runpod-volume")
if not os.path.exists(BASE_VOLUME):
    os.makedirs(BASE_VOLUME)

logger = runpod.RunPodLogger()


async def handler(job):
    runpod_job_id = job["id"]
    inputs = job["input"]
    run_id = inputs.get("run_id", "default_run_id")
    args = inputs.get("args", {})

    # Set output directory
    output_dir = os.path.join(BASE_VOLUME, get_output_dir(run_id))
    args["output_dir"] = output_dir

    # First save args to a temporary config file
    config_path = "/workspace/test_config.yaml"

    # Add run_name and job_id to args before saving
    args["run_name"] = run_id
    args["runpod_job_id"] = runpod_job_id

    yaml_data = yaml.dump(args, default_flow_style=False)
    with open(config_path, "w", encoding="utf-8") as file:
        file.write(yaml_data)

    # Handle credentials
    credentials = inputs.get("credentials", {})

    if "wandb_api_key" in credentials:
        os.environ["WANDB_API_KEY"] = credentials["wandb_api_key"]
    if "hf_token" in credentials:
        os.environ["HF_TOKEN"] = credentials["hf_token"]

    if os.environ.get("HF_TOKEN"):
        login(token=os.environ["HF_TOKEN"])
    else:
        logger.info("No HF_TOKEN provided. Skipping login.")

    logger.info("Starting Training.")
    async for result in train(config_path):  # Pass the config path instead of args
        logger.info(result)
    logger.info("Training Complete.")

    # Cleanup
    if "WANDB_API_KEY" in os.environ:
        del os.environ["WANDB_API_KEY"]
    if "HF_TOKEN" in os.environ:
        del os.environ["HF_TOKEN"]


runpod.serverless.start({"handler": handler, "return_aggregate_stream": True})
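A rough local smoke test for the handler above, run from `.runpod/src`; this assumes the trailing `runpod.serverless.start(...)` call were guarded behind an `if __name__ == "__main__":` check so the import has no side effects, and the job payload simply mirrors the shape of `test_input.json`:

# Illustrative only (not part of this branch); exercises handler() with a fake job.
import asyncio

from handler import handler

fake_job = {
    "id": "local-test-job",
    "input": {
        "run_id": "llama-test",
        "credentials": {},
        "args": {"base_model": "HuggingFaceTB/SmolLM2-135M", "max_steps": 20},
    },
}

asyncio.run(handler(fake_job))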
61  .runpod/src/test_input.json  Normal file
@@ -0,0 +1,61 @@
{
  "input": {
    "user_id": "user",
    "model_id": "llama-test",
    "run_id": "llama-test",
    "credentials": {
      "wandb_api_key": "",
      "hf_token": ""
    },
    "args": {
      "base_model": "NousResearch/Meta-Llama-3-8B",
      "model_type": "LlamaForCausalLM",
      "tokenizer_type": "AutoTokenizer",
      "load_in_8bit": true,
      "load_in_4bit": false,
      "strict": false,
      "datasets": [
        {
          "path": "mhenrichsen/alpaca_2k_test",
          "type": "alpaca"
        }
      ],
      "val_set_size": 0.05,
      "output_dir": "./outputs/lora-out",
      "sequence_len": 4096,
      "sample_packing": true,
      "eval_sample_packing": false,
      "pad_to_sequence_len": true,
      "adapter": "lora",
      "lora_r": 32,
      "lora_alpha": 16,
      "lora_dropout": 0.05,
      "lora_target_linear": true,
      "lora_modules_to_save": [
        "embed_tokens",
        "lm_head"
      ],
      "gradient_accumulation_steps": 4,
      "micro_batch_size": 2,
      "num_epochs": 1,
      "optimizer": "adamw_bnb_8bit",
      "lr_scheduler": "cosine",
      "learning_rate": 0.0002,
      "train_on_inputs": false,
      "group_by_length": false,
      "bf16": "auto",
      "tf32": false,
      "gradient_checkpointing": true,
      "logging_steps": 1,
      "flash_attention": true,
      "warmup_steps": 1,
      "evals_per_epoch": 1,
      "eval_max_new_tokens": 128,
      "saves_per_epoch": 1,
      "weight_decay": 0.0,
      "special_tokens": {
        "pad_token": "<|end_of_text|>"
      }
    }
  }
}
45  .runpod/src/train.py  Normal file
@@ -0,0 +1,45 @@
"""
Runpod train entrypoint
"""

import asyncio


async def train(config_path: str, gpu_id: str = "0", preprocess: bool = True):
    """
    Run preprocessing (if enabled) and training with the given config file
    :param config_path: Path to the YAML config file
    :param gpu_id: GPU ID to use (default: "0")
    :param preprocess: Whether to run preprocessing (default: True)

    """
    # First check if preprocessing is needed
    if preprocess:
        # Preprocess command
        preprocess_cmd = (
            f"CUDA_VISIBLE_DEVICES={gpu_id} axolotl preprocess {config_path}"
        )
        process = await asyncio.create_subprocess_shell(
            preprocess_cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT,
        )

        if process.stdout is not None:
            async for line in process.stdout:
                yield f"Preprocessing: {line.decode().strip()}"
        await process.wait()
        yield "Preprocessing completed."
    else:
        yield "Skipping preprocessing step."

    # Training command
    train_cmd = f"axolotl train {config_path}"
    process = await asyncio.create_subprocess_shell(
        train_cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT
    )

    if process.stdout is not None:
        async for line in process.stdout:
            yield f"Training: {line.decode().strip()}"
    await process.wait()
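Because `train()` is an async generator, callers consume it with `async for`, which is how the handler streams log lines back to Runpod. A small illustrative driver; the config path is only an example and `axolotl` must be installed and on PATH for the subprocesses to start:

# Illustrative driver for the async generator above (not part of this branch).
import asyncio

from train import train


async def main():
    async for line in train("config/updated_config.yaml", gpu_id="0"):
        print(line)


asyncio.run(main())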
89  .runpod/src/utils.py  Normal file
@@ -0,0 +1,89 @@
"""
Runpod launcher utils
"""

import os

import yaml


def get_output_dir(run_id):
    path = f"fine-tuning/{run_id}"
    return path


def make_valid_config(input_args):
    """
    Creates and saves updated config file, returns the path to the new config
    :param input_args: dict of input args
    :return: str, path to the updated config file
    """
    # Load default config
    with open("config/config.yaml", "r", encoding="utf-8") as fin:
        all_args = yaml.safe_load(fin)

    if not input_args:
        print("No args provided, using defaults")
    else:
        all_args.update(input_args)

    # Create updated config path
    updated_config_path = "config/updated_config.yaml"

    # Save updated config to new file
    with open(updated_config_path, "w", encoding="utf-8") as f:
        yaml.dump(all_args, f)

    return updated_config_path


def set_config_env_vars(args: dict):
    """
    Convert API arguments into environment variables.
    Handles nested dictionaries, lists, and special values.

    Args:
        args (dict): The arguments dictionary from the API request
    """

    def process_value(value):
        """Convert Python values to string format for environment variables"""
        if value is None:
            return ""
        if isinstance(value, bool):
            return str(value).lower()
        if isinstance(value, (list, dict)):
            return str(value)
        return str(value)

    def set_env_vars(data, prefix=""):
        """Recursively set environment variables from nested dictionary"""
        for key, value in data.items():
            env_key = prefix + key.upper()

            # Handle special cases
            if isinstance(value, dict):
                # For nested dictionaries (like special_tokens)
                set_env_vars(value, f"{env_key}_")
            elif isinstance(value, list):
                # Handle list of dictionaries (like datasets)
                if value and isinstance(value[0], dict):
                    for i, item in enumerate(value):
                        set_env_vars(item, f"{env_key}_{i}_")
                else:
                    # For simple lists (like lora_target_modules)
                    os.environ[env_key] = process_value(value)
            else:
                # Handle all other cases
                os.environ[env_key] = process_value(value)

    # Clear any existing related environment variables
    # This prevents old values from persisting
    for key in list(os.environ.keys()):
        if key.startswith(
            ("BASE_MODEL", "MODEL_TYPE", "TOKENIZER_TYPE", "DATASET", "LORA_", "WANDB_")
        ):
            del os.environ[key]

    # Set new environment variables
    set_env_vars(args)
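For reference, a short sketch of what `set_config_env_vars` produces for a nested `args` dict; the input values are made up, but the resulting variable names follow the code above (booleans are lowercased, nested dicts get an underscore-joined prefix, lists of dicts get an index):

# Example of the flattening performed by set_config_env_vars (illustrative).
import os

from utils import set_config_env_vars

set_config_env_vars(
    {
        "base_model": "HuggingFaceTB/SmolLM2-135M",
        "load_in_4bit": True,
        "special_tokens": {"pad_token": "<|endoftext|>"},
        "datasets": [{"path": "mhenrichsen/alpaca_2k_test", "type": "alpaca"}],
    }
)

print(os.environ["BASE_MODEL"])                # HuggingFaceTB/SmolLM2-135M
print(os.environ["LOAD_IN_4BIT"])              # true
print(os.environ["SPECIAL_TOKENS_PAD_TOKEN"])  # <|endoftext|>
print(os.environ["DATASETS_0_PATH"])           # mhenrichsen/alpaca_2k_test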
86  .runpod/test-input.json  Normal file
@@ -0,0 +1,86 @@
{
  "input": {
    "name": "quick_smoke_test_sft",
    "user_id": "user",
    "model_id": "llama-test",
    "run_id": "llama-test",
    "credentials": {
      "wandb_api_key": "",
      "hf_token": ""
    },
    "args": {
      "base_model": "HuggingFaceTB/SmolLM2-135M",
      "model_type": "AutoModelForCausalLM",
      "tokenizer_type": "AutoTokenizer",
      "load_in_4bit": true,
      "strict": false,
      "datasets": [
        {
          "path": "mhenrichsen/alpaca_2k_test",
          "type": "alpaca",
          "split": "train[:10%]"
        }
      ],
      "val_set_size": 0.02,
      "output_dir": "./outputs/lora-out",
      "sequence_len": 4096,
      "sample_packing": true,
      "eval_sample_packing": false,
      "pad_to_sequence_len": true,
      "adapter": "qlora",
      "lora_r": 32,
      "lora_alpha": 64,
      "lora_dropout": 0.05,
      "lora_target_linear": true,
      "lora_modules_to_save": [
        "embed_tokens",
        "lm_head"
      ],
      "gradient_accumulation_steps": 2,
      "micro_batch_size": 1,
      "num_epochs": 1,
      "optimizer": "adamw_torch_fused",
      "lr_scheduler": "cosine",
      "learning_rate": 0.0002,
      "train_on_inputs": false,
      "group_by_length": false,
      "bf16": "auto",
      "tf32": true,
      "gradient_checkpointing": true,
      "logging_steps": 1,
      "flash_attention": true,
      "warmup_steps": 1,
      "evals_per_epoch": 1,
      "eval_max_new_tokens": 128,
      "saves_per_epoch": 1,
      "weight_decay": 0.0,
      "special_tokens": {
        "pad_token": "<|endoftext|>"
      },
      "max_steps": 20
    },
    "timeout": 100000
  },
  "config": {
    "gpuTypeId": "NVIDIA GeForce RTX 4090",
    "gpuCount": 1,
    "containerDiskInGb": 200,
    "env": [
      {
        "key": "TOKENIZER",
        "value": ""
      },
      {
        "key": "DISABLE_LOG_STATS",
        "value": "true"
      }
    ],
    "allowedCudaVersions": [
      "12.8",
      "12.7",
      "12.6",
      "12.5",
      "12.4"
    ]
  }
}
90  .runpod/tests.json  Normal file
@@ -0,0 +1,90 @@
{
  "tests": [
    {
      "name": "quick_smoke_test_sft",
      "input": {
        "user_id": "user",
        "model_id": "llama-test",
        "run_id": "llama-test",
        "credentials": {
          "wandb_api_key": "",
          "hf_token": ""
        },
        "args": {
          "base_model": "HuggingFaceTB/SmolLM2-135M",
          "model_type": "AutoModelForCausalLM",
          "tokenizer_type": "AutoTokenizer",
          "load_in_4bit": true,
          "strict": false,
          "datasets": [
            {
              "path": "mhenrichsen/alpaca_2k_test",
              "type": "alpaca",
              "split": "train[:10%]"
            }
          ],
          "val_set_size": 0.02,
          "output_dir": "./outputs/lora-out",
          "sequence_len": 4096,
          "sample_packing": true,
          "eval_sample_packing": false,
          "pad_to_sequence_len": true,
          "adapter": "qlora",
          "lora_r": 32,
          "lora_alpha": 64,
          "lora_dropout": 0.05,
          "lora_target_linear": true,
          "lora_modules_to_save": [
            "embed_tokens",
            "lm_head"
          ],
          "gradient_accumulation_steps": 2,
          "micro_batch_size": 1,
          "num_epochs": 1,
          "optimizer": "adamw_torch_fused",
          "lr_scheduler": "cosine",
          "learning_rate": 0.0002,
          "train_on_inputs": false,
          "group_by_length": false,
          "bf16": "auto",
          "tf32": true,
          "gradient_checkpointing": true,
          "logging_steps": 1,
          "flash_attention": true,
          "warmup_steps": 1,
          "evals_per_epoch": 1,
          "eval_max_new_tokens": 128,
          "saves_per_epoch": 1,
          "weight_decay": 0.0,
          "special_tokens": {
            "pad_token": "<|endoftext|>"
          },
          "max_steps": 20
        }
      },
      "timeout": 100000
    }
  ],
  "config": {
    "gpuTypeId": "NVIDIA GeForce RTX 4090",
    "gpuCount": 1,
    "containerDiskInGb": 200,
    "env": [
      {
        "key": "TOKENIZER",
        "value": ""
      },
      {
        "key": "DISABLE_LOG_STATS",
        "value": "true"
      }
    ],
    "allowedCudaVersions": [
      "12.8",
      "12.7",
      "12.6",
      "12.5",
      "12.4"
    ]
  }
}
20  _quarto.yml
@@ -48,8 +48,23 @@ quartodoc:
     contents:
       - core.trainers.base
       - core.trainers.trl
+      - core.trainers.mamba
+      - core.trainers.relora
       - core.trainers.dpo.trainer
       - core.trainers.grpo.trainer
+      - core.trainers.grpo.sampler
+      - core.trainers.utils
+  - title: Mixins
+    desc: Mixin classes for augmenting trainers
+    contents:
+      - core.trainers.mixins.optimizer
+      - core.trainers.mixins.rng_state_loader
+      - core.trainers.mixins.scheduler
+      - core.trainers.mixins.sequence_parallel
+  - title: Context Managers
+    desc: Context managers for altering trainer behaviors
+    contents:
+      - utils.ctx_managers.sequence_parallel
   - title: Prompt Strategies
     desc: Prompt formatting strategies
     contents:
@@ -86,7 +101,7 @@ quartodoc:
       - kernels.swiglu
       - kernels.quantize
       - kernels.utils
-  - title: MonkeyPatches
+  - title: Monkey Patches
     desc: Runtime patches for model optimizations
     contents:
       - monkeypatch.llama_attn_hijack_flash
@@ -124,7 +139,8 @@ quartodoc:
       - utils.optimizers.adopt
       - utils.data.pretraining
       - utils.data.sft
-      - utils.gradient_checkpointing.unsloth
+      - utils.gradient_checkpointing.offload_cpu
+      - utils.gradient_checkpointing.offload_disk
   - title: Schemas
     desc: Pydantic data models for Axolotl config
     contents:
@@ -32,6 +32,11 @@ RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
     fi

 RUN pip install packaging==23.2 setuptools==75.8.0
+RUN if [ "$PYTORCH_VERSION" = "2.6.0" ] && [ "$CUDA" = "126" ] ; then \
+        curl -L -O https://d1dttdx32dkk5p.cloudfront.net/fa3/cu${CUDA}/torch-${PYTORCH_VERSION}/flash_attn_3-3.0.0b1-cp311-cp311-linux_x86_64.whl; \
+        pip3 install --no-cache-dir flash_attn_3-3.0.0b1-cp311-cp311-linux_x86_64.whl; \
+        rm flash_attn_3-3.0.0b1-cp311-cp311-linux_x86_64.whl; \
+    fi
 RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
         pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
     else \
0  cicd/__init__.py  Normal file

@@ -18,7 +18,7 @@ pytest -v --durations=10 \
     --cov-append

 # Run patched tests excluding lora kernels with coverage append
-pytest -v --durations=10 \
+pytest --full-trace -vvv --durations=10 \
     --ignore=tests/e2e/patched/lora_kernels \
     /workspace/axolotl/tests/e2e/patched \
     --cov=axolotl \
@@ -52,4 +52,4 @@ pytest -v --durations=10 \
     --cov-append \
     --cov-report=xml:e2e-coverage.xml

-codecov upload-process -t $CODECOV_TOKEN -f e2e-coverage.xml -F e2e,pytorch-${PYTORCH_VERSION}
+codecov upload-process -t $CODECOV_TOKEN -f e2e-coverage.xml -F e2e,pytorch-${PYTORCH_VERSION} || true
19  cicd/cleanup.py  Normal file
@@ -0,0 +1,19 @@
"""Modal app to run axolotl GPU cleanup"""

from .single_gpu import VOLUME_CONFIG, app, cicd_image, run_cmd


@app.function(
    image=cicd_image,
    timeout=60 * 60,
    cpu=8.0,
    memory=131072,
    volumes=VOLUME_CONFIG,
)
def cleanup():
    run_cmd("./cicd/cleanup.sh", "/workspace/axolotl")


@app.local_entrypoint()
def main():
    cleanup.remote()
6  cicd/cleanup.sh  Executable file
@@ -0,0 +1,6 @@
#!/bin/bash
set -e

# cleanup old cache files for datasets processing and intermediate mappings
find /workspace/data/huggingface-cache/hub/datasets -name "cache-*" -type f -mtime +1 -exec rm {} \;
find /workspace/data/huggingface-cache/hub/datasets -name "*.lock" -type f -mtime +1 -exec rm {} \;
@@ -1,75 +1,12 @@
 """Modal app to run axolotl GPU tests"""

-# pylint: disable=duplicate-code
-
-import os
-import pathlib
-import tempfile
-
-import jinja2
-import modal
-from jinja2 import select_autoescape
-from modal import App, Image
-
-cicd_path = pathlib.Path(__file__).parent.resolve()
-
-template_loader = jinja2.FileSystemLoader(searchpath=cicd_path)
-template_env = jinja2.Environment(
-    loader=template_loader, autoescape=select_autoescape()
-)
-df_template = template_env.get_template("Dockerfile.jinja")
-
-df_args = {
-    "AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", ""),
-    "AXOLOTL_ARGS": os.environ.get("AXOLOTL_ARGS", ""),
-    "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.4.1"),
-    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.11-cu121-2.4.1"),
-    "CUDA": os.environ.get("CUDA", "121"),
-    "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
-    "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
-    "NIGHTLY_BUILD": os.environ.get("NIGHTLY_BUILD", ""),
-    "CODECOV_TOKEN": os.environ.get("CODECOV_TOKEN", ""),
-    "HF_HOME": "/workspace/data/huggingface-cache/hub",
-}
-
-dockerfile_contents = df_template.render(**df_args)
-
-temp_dir = tempfile.mkdtemp()
-with open(pathlib.Path(temp_dir) / "Dockerfile", "w", encoding="utf-8") as f:
-    f.write(dockerfile_contents)
-
-cicd_image = Image.from_dockerfile(
-    pathlib.Path(temp_dir) / "Dockerfile",
-    context_mount=None,
-    force_build=True,
-    gpu="A10G",
-).env(df_args)
-
-app = App("Axolotl CI/CD", secrets=[])
-
-hf_cache_volume = modal.Volume.from_name(
-    "axolotl-ci-hf-hub-cache", create_if_missing=True
-)
-VOLUME_CONFIG = {
-    "/workspace/data/huggingface-cache/hub": hf_cache_volume,
-}
-
-N_GPUS = int(os.environ.get("N_GPUS", 1))
-GPU_CONFIG = modal.gpu.L40S(count=N_GPUS)
-
-
-def run_cmd(cmd: str, run_folder: str):
-    import subprocess  # nosec
-
-    # Propagate errors from subprocess.
-    if exit_code := subprocess.call(cmd.split(), cwd=run_folder):  # nosec
-        exit(exit_code)  # pylint: disable=consider-using-sys-exit
-
-
+from .single_gpu import GPU_CONFIG, VOLUME_CONFIG, app, cicd_image, run_cmd
+
+
 @app.function(
     image=cicd_image,
     gpu=GPU_CONFIG,
-    timeout=60 * 60,
+    timeout=90 * 60,  # 90 min
     cpu=8.0,
     memory=131072,
     volumes=VOLUME_CONFIG,

@@ -70,7 +70,7 @@ def run_cmd(cmd: str, run_folder: str):
     image=cicd_image,
     gpu=GPU_CONFIG,
     timeout=90 * 60,
-    cpu=8.0,
+    cpu=16.0,
     memory=131072 * N_GPUS,
     volumes=VOLUME_CONFIG,
 )

@@ -20,4 +20,4 @@ pytest -v --durations=10 -n1 /workspace/axolotl/tests/e2e/multigpu/patched/ \
     --cov-report=xml:multigpu-coverage.xml

 # Upload coverage to Codecov
-codecov upload-process -t $CODECOV_TOKEN -f multigpu-coverage.xml -F multigpu,docker-tests,pytorch-${PYTORCH_VERSION}
+codecov upload-process -t "${CODECOV_TOKEN}" -f multigpu-coverage.xml -F multigpu,docker-tests,pytorch-${PYTORCH_VERSION} || true
66  cicd/single_gpu.py  Normal file
@@ -0,0 +1,66 @@
"""Modal app to run axolotl GPU tests"""

# pylint: disable=duplicate-code

import os
import pathlib
import tempfile

import jinja2
import modal
from jinja2 import select_autoescape
from modal import App, Image

cicd_path = pathlib.Path(__file__).parent.resolve()

template_loader = jinja2.FileSystemLoader(searchpath=cicd_path)
template_env = jinja2.Environment(
    loader=template_loader, autoescape=select_autoescape()
)
df_template = template_env.get_template("Dockerfile.jinja")

df_args = {
    "AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", ""),
    "AXOLOTL_ARGS": os.environ.get("AXOLOTL_ARGS", ""),
    "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.4.1"),
    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.11-cu121-2.4.1"),
    "CUDA": os.environ.get("CUDA", "121"),
    "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
    "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
    "NIGHTLY_BUILD": os.environ.get("NIGHTLY_BUILD", ""),
    "CODECOV_TOKEN": os.environ.get("CODECOV_TOKEN", ""),
    "HF_HOME": "/workspace/data/huggingface-cache/hub",
}

dockerfile_contents = df_template.render(**df_args)

temp_dir = tempfile.mkdtemp()
with open(pathlib.Path(temp_dir) / "Dockerfile", "w", encoding="utf-8") as f:
    f.write(dockerfile_contents)

cicd_image = Image.from_dockerfile(
    pathlib.Path(temp_dir) / "Dockerfile",
    context_mount=None,
    force_build=True,
    gpu="A10G",
).env(df_args)

app = App("Axolotl CI/CD", secrets=[])

hf_cache_volume = modal.Volume.from_name(
    "axolotl-ci-hf-hub-cache", create_if_missing=True
)
VOLUME_CONFIG = {
    "/workspace/data/huggingface-cache/hub": hf_cache_volume,
}

N_GPUS = int(os.environ.get("N_GPUS", 1))
GPU_CONFIG = modal.gpu.L40S(count=N_GPUS)


def run_cmd(cmd: str, run_folder: str):
    import subprocess  # nosec

    # Propagate errors from subprocess.
    if exit_code := subprocess.call(cmd.split(), cwd=run_folder):  # nosec
        exit(exit_code)  # pylint: disable=consider-using-sys-exit
@@ -1,5 +1,7 @@
 codecov:
   require_ci_to_pass: yes
+  notify:
+    wait_for_ci: true

 coverage:
   precision: 2
@@ -17,7 +19,7 @@ coverage:
       if_no_uploads: error
       if_not_found: success
       if_ci_failed: error
-      only_pulls: false
+      only_pulls: true
       flags: null
       paths: null
     patch:
@@ -1,5 +1,5 @@
-ARG CUDA_VERSION="11.8.0"
-ARG CUDNN_VERSION="8"
+ARG CUDA_VERSION="12.4.1"
+ARG CUDNN_VERSION=""
 ARG UBUNTU_VERSION="22.04"
 ARG MAX_JOBS=4

@@ -7,16 +7,16 @@ FROM nvidia/cuda:$CUDA_VERSION-cudnn$CUDNN_VERSION-devel-ubuntu$UBUNTU_VERSION A
 ENV PATH="/root/miniconda3/bin:${PATH}"

-ARG PYTHON_VERSION="3.10"
-ARG PYTORCH_VERSION="2.1.2"
-ARG CUDA="118"
+ARG PYTHON_VERSION="3.11"
+ARG PYTORCH_VERSION="2.5.1"
+ARG CUDA="124"
 ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"

 ENV PYTHON_VERSION=$PYTHON_VERSION
 ENV TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST

 RUN apt-get update \
-    && apt-get install -y wget git build-essential ninja-build git-lfs libaio-dev pkg-config && rm -rf /var/lib/apt/lists/* \
+    && apt-get install -y wget git build-essential ninja-build git-lfs libaio-dev pkg-config curl && rm -rf /var/lib/apt/lists/* \
     && wget \
         https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
     && mkdir /root/.conda \
@@ -38,6 +38,10 @@ RUN git lfs install --skip-repo && \
     # The base image ships with `pydantic==1.8.2` which is not working
     pip3 install -U --no-cache-dir pydantic==1.10.10

-RUN if [ "$PYTORCH_VERSION" = "2.7.0" ] ; then \
+RUN if [ "$TORCH_CUDA_ARCH_LIST" = "9.0+PTX" ] ; then \
+        curl -L -O https://d1dttdx32dkk5p.cloudfront.net/fa3/cu${CUDA}/torch-${PYTORCH_VERSION}/flash_attn_3-3.0.0b1-cp311-cp311-linux_x86_64.whl; \
+        pip3 install --no-cache-dir flash_attn_3-3.0.0b1-cp311-cp311-linux_x86_64.whl; \
+        rm flash_attn_3-3.0.0b1-cp311-cp311-linux_x86_64.whl; \
+    elif [ "$PYTORCH_VERSION" = "2.7.0" ] ; then \
         pip3 install flash-attn==2.7.4.post1; \
     fi
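The prebuilt FlashAttention-3 wheel above is gated on TORCH_CUDA_ARCH_LIST="9.0+PTX", i.e. Hopper-only base images. A small, purely illustrative runtime check that mirrors that gate, assuming PyTorch is installed; the `is_hopper` helper is not part of this branch:

# Purely illustrative counterpart to the "9.0+PTX" build gate above: only treat
# the FlashAttention-3 wheel as usable on Hopper-class (SM90 or newer) GPUs.
import torch


def is_hopper(device: int = 0) -> bool:
    if not torch.cuda.is_available():
        return False
    major, _minor = torch.cuda.get_device_capability(device)
    return major >= 9


print("FA3-capable GPU:", is_hopper())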
|
|||||||
@@ -32,6 +32,8 @@ tokenizer_legacy:
|
|||||||
resize_token_embeddings_to_32x:
|
resize_token_embeddings_to_32x:
|
||||||
# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.
|
# Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.
|
||||||
shrink_embeddings:
|
shrink_embeddings:
|
||||||
|
# Optional[bool] Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs
|
||||||
|
embeddings_skip_upcast:
|
||||||
# Whether to load the model with randomly initialized weights. Useful for
|
# Whether to load the model with randomly initialized weights. Useful for
|
||||||
# pre-training a model from scratch or debugging purposes.
|
# pre-training a model from scratch or debugging purposes.
|
||||||
random_init_weights:
|
random_init_weights:
|
||||||
@@ -73,11 +75,12 @@ load_in_8bit: true
load_in_4bit:

# Use CUDA bf16
-bf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere
+bf16: true # bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection. require >=ampere
# Use CUDA fp16
fp16: true
# Use CUDA tf32
tf32: true # require >=ampere
+# Note: if bf16 is set to 'auto', and fp16 is set to true, we will prefer the explicit fp16 setting

# No AMP (automatic mixed precision)
bfloat16: true # require >=ampere
@@ -154,6 +157,10 @@ datasets:
# Key containing the messages (default: "messages")
field_messages: messages

+# Key containing the system message (default: "system")
+# If the system message is not present in the dataset sample, it will be loaded from the field_system property.
+field_system: system

# Mapping of properties from the input dataset to the chat template.
# (default: message_property_mappings={'role':'role', 'content':'content'})
# If a property exists in the template but not in this mapping, the system will attempt
@@ -180,10 +187,14 @@ datasets:
# adding a system turn with empty content.
drop_system_message:

+# Optional[bool]. (for Qwen3 template only) Whether to split the assistant content based on a reasoning trace inside delimited tags
+# See example at `docs/dataset-formats/conversation.qmd`
+split_thinking:

# IMPORTANT: The following fields determine which parts of the conversation to train on.
# Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train
# See examples at `docs/dataset-formats/conversation.qmd`
-# Note: If the below 4 fields are set to empty, defaults to training only on the last message.
+# Note: If the below 5 fields are empty, defaults to training only on the last message.

# Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.
roles_to_train: ["assistant"] # default
@@ -192,7 +203,13 @@ datasets:
# - turn (default): train on the EOS token at the end of each trainable turn
# - last: train on the last EOS token in the conversation
# TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.
-train_on_eos: last
+train_on_eos: turn
+# Optional[str]. Which EOT (End-of-Turn) tokens to train on in the conversation. Possible values are:
+# - all: train on all EOT tokens
+# - turn: train on the EOT token at the end of each trainable turn
+# - last: train on the last EOT token in the conversation
+# If not specified, defaults to the value of train_on_eos for backward compatibility.
+train_on_eot:
# The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.
message_field_training: training
# The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn.
@@ -275,8 +292,17 @@ process_reward_model:
chat_template: tokenizer_default
# custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.
chat_template_jinja: null
-# Changes the default system message. Currently only supports chatml.
-default_system_message: You are a helpful assistant. Please give a long and detailed answer.
+# Optional[List[str]]. Custom EOT (End-of-Turn) tokens to mask/unmask during training.
+# These tokens mark the boundaries between conversation turns.
+# For example: ["/INST", "</s>", "[/SYSTEM_PROMPT]"]
+# If not specified, defaults to just the model's eos_token.
+# This is useful for templates that use multiple delimiter tokens.
+eot_tokens:
+# - "</s>"
+# - "[/INST]"
+# - "[/SYSTEM_PROMPT]"
+# Changes the default system message
+default_system_message: You are a helpful assistant. Please give a long and detailed answer. # Currently only supports chatml.
# Axolotl attempts to save the dataset as an arrow after packing the data together so
# subsequent training attempts load faster, relative path
dataset_prepared_path: data/last_run_prepared
@@ -479,6 +505,7 @@ save_strategy: # Set to `"no"` to skip checkpoint saves, `"epoch"` at end of eac
save_steps: # Leave empty to save at each epoch, integer for every N steps. float for fraction of total steps
saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps
save_total_limit: # Checkpoints saved at a time
+save_only_model: # Save only the model weights, skipping the optimizer. Using this means you can't resume from checkpoints.
# Maximum number of iterations to train for. It precedes num_epochs which means that
# if both are set, num_epochs will not be guaranteed.
# e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps
@@ -512,7 +539,7 @@ train_on_inputs: false
# Note that training loss may have an oscillating pattern with this enabled.
group_by_length: false

-# Whether to use gradient checkpointing. Available options are: true, false, "offload".
+# Whether to use gradient checkpointing. Available options are: true, false, "offload", "offload_disk".
# https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing
gradient_checkpointing: false
# additional kwargs to pass to the trainer for gradient checkpointing
@@ -524,7 +551,7 @@ gradient_checkpointing: false
early_stopping_patience: 3

# Specify a scheduler and kwargs to use with the optimizer
-lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine
+lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | 'linear' | 'cosine_with_restarts' | 'polynomial' | 'constant' | 'constant_with_warmup' | 'inverse_sqrt' | 'reduce_lr_on_plateau' | 'cosine_with_min_lr' | 'warmup_stable_decay' | empty for cosine
lr_scheduler_kwargs:
cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr
cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)
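As an illustration, a sketch of selecting one of the newly listed schedulers; the `min_lr_rate` kwarg below is an assumption based on the transformers scheduler rather than something documented here:

```yaml
lr_scheduler: cosine_with_min_lr
lr_scheduler_kwargs:
  min_lr_rate: 0.1  # assumed kwarg: decay to 10% of the peak learning rate
```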
@@ -586,6 +613,7 @@ lr_div_factor: # Learning rate div factor
# - optimi_adamw
# - ao_adamw_8bit
# - ao_adamw_fp8
+# - came_pytorch
optimizer:
# Dictionary of arguments to pass to the optimizer
optim_args:
@@ -605,7 +633,9 @@ weight_decay:
# adamw hyperparams
adam_beta1:
adam_beta2:
+adam_beta3: # only used for CAME Optimizer
adam_epsilon:
+adam_epsilon2: # only used for CAME Optimizer
# Gradient clipping max norm
max_grad_norm:
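For example, a minimal sketch of selecting the CAME optimizer with its extra hyperparameters (the values shown are placeholders, not tuned recommendations):

```yaml
optimizer: came_pytorch
adam_beta1: 0.9
adam_beta2: 0.999
adam_beta3: 0.9999    # only read by the CAME optimizer
adam_epsilon: 1e-30
adam_epsilon2: 1e-16  # only read by the CAME optimizer
```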
@@ -661,8 +691,10 @@ special_tokens:
# unk_token: "<unk>"
# pad_token: "[PAD]"

-# Add extra tokens.
+# Optional[list[str]]. Add extra tokens to the tokenizer.
tokens:
+# - "<|startoftext|>"
+# - "<|endoftext|>"

# Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer.
# Only works for tokens that are not part of the base vocab (aka are added_tokens).
@@ -49,7 +49,8 @@ sections = [
    ("Knowledge Distillation (KD)", "kd"),
    ("Liger Kernels", "liger"),
    ("Language Model Evaluation Harness (LM Eval)", "lm_eval"),
-    ("Spectrum", "spectrum")
+    ("Spectrum", "spectrum"),
+    ("LLMCompressor", "llm_compressor")
]

for section_name, folder_name in sections:
@@ -4,18 +4,6 @@ description: Conversation format for supervised fine-tuning.
order: 3
---

-## sharegpt
-
-::: {.callout-important}
-ShareGPT is deprecated!. Please see [chat_template](#chat_template) section below.
-:::
-
-## pygmalion
-
-```{.json filename="data.jsonl"}
-{"conversations": [{"role": "...", "value": "..."}]}
-```
-
## chat_template

Chat Template strategy uses a jinja2 template that converts a list of messages into a prompt. Support using tokenizer's template, a supported template, or custom jinja2.
@@ -64,7 +52,7 @@ We recommend checking the below examples for other usecases.

### Examples

-1. Using the default chat template in the tokenizer_config.json on OpenAI messages format, training on only last message.
+1. (Legacy) Using the default chat template in the tokenizer_config.json on OpenAI messages format, training on only last message.

```yaml
datasets:
@@ -109,10 +97,55 @@ datasets:
```

::: {.callout-important}
-Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.
+Please make sure that your `tokenizer.eos_token` is the same as the EOS (End-of-Sequence) token in the template. Otherwise, set `eos_token` under `special_tokens: `.
:::

-5. (Advanced) Using fine-grained control over tokens and turns to train in a conversation
+5. If you are using a template that has a different EOT (End-of-Turn) token from the EOS token, or multiple EOT tokens (like Mistral V7 Tekken), set the `eot_tokens: ` config. The handling of EOT tokens follows `train_on_eos: `, which defaults to `turn`.
+
+```yaml
+eot_tokens:
+  - "[/INST]"
+  # - "[/SYSTEM_PROMPT]"
+
+datasets:
+  - path: ...
+    type: chat_template
+
+# optional
+train_on_eot: turn # defaults read from train_on_eos (which defaults to turn)
+```
+
+::: {.callout-tip}
+See [config documentation](../config.qmd) for detailed explanations of the "turn", "last", and "all" options for training on tokens.
+:::
+
+::: {.callout-note}
+Using `eot_tokens` requires each token that exists in `chat_template` to be a single token in the tokenizer. Otherwise, the tokenizer will split the token and cause unexpected behavior.
+
+You can add those tokens as new tokens under `tokens: ` or (recommended) override unused added_tokens via `added_tokens_overrides: `. See [config](../config.qmd) for more details.
+:::
+6. Continuing from the previous example, if you want to train on the EOT token of every trainable turn but only on the last EOS token, set `train_on_eos: last`.
+
+```yaml
+eot_tokens:
+  - "[/INST]"
+  # ...
+
+datasets:
+  - path: ...
+    type: chat_template
+
+train_on_eos: last
+train_on_eot: turn
+```
+
+::: {.callout-tip}
+If the EOS token only appears at the end of a prompt, `train_on_eos: last` is equivalent to `train_on_eos: turn`. Therefore, you can generally leave both at their defaults and omit them.
+:::
+
+7. (Advanced) Using fine-grained control over tokens and turns to train in a conversation

For a data sample that looks like:
@@ -162,3 +195,43 @@ datasets:
::: {.callout-tip}
It is not necessary to set both `message_field_training` and `message_field_training_detail` at once.
:::
+
+8. (For Qwen3 template only) Enable reasoning split, where the reasoning is split from the content and passed as a separate field into the template.
+
+```yaml
+datasets:
+  - path: ...
+    type: chat_template
+    chat_template: qwen3
+    split_thinking: true
+```
+
+For example, the content can look like:
+
+```json
+{
+  "content": "<think>Some thinking outputs</think>Output after thinking."
+}
+```
+
+After the split, it will look like:
+
+```json
+{
+  "reasoning_content": "Some thinking outputs",
+  "content": "Output after thinking..."
+}
+```
+
+## sharegpt
+
+::: {.callout-important}
+ShareGPT is deprecated! Please see the [chat_template](#chat_template) section.
+:::
+
+## pygmalion
+
+```{.json filename="data.jsonl"}
+{"conversations": [{"role": "...", "value": "..."}]}
+```
@@ -28,6 +28,8 @@ main-base-py{python_version}-cu{cuda_version}-{pytorch_version}

Tags examples:

+- `main-base-py3.11-cu128-2.7.0`
+- `main-base-py3.11-cu126-2.7.0`
- `main-base-py3.11-cu124-2.6.0`
- `main-base-py3.11-cu124-2.5.1`
- `main-base-py3.11-cu124-2.4.1`
@@ -50,7 +52,7 @@ Link: [Docker Hub](https://hub.docker.com/r/axolotlai/axolotl)
# on push to main
main-py{python_version}-cu{cuda_version}-{pytorch_version}

-# latest main (currently torch 2.5.1, python 3.11, cuda 12.4)
+# latest main (currently torch 2.6.0, python 3.11, cuda 12.4)
main-latest

# nightly build
@@ -68,6 +70,7 @@ There may be some extra tags appended to the image, like `-vllm` which installs

Tags examples:

+- `main-py3.11-cu126-2.7.0`
- `main-py3.11-cu124-2.6.0`
- `main-py3.11-cu124-2.5.1`
- `main-py3.11-cu124-2.4.1`
docs/faq.qmd
@@ -73,10 +73,40 @@ description: Frequently asked questions

> A: This is likely an empty turn.

-**Q: The EOS/EOT token is incorrectly being masked or not being masked.**
+**Q: The EOS token is incorrectly being masked or not being masked / `EOS token __ not found in chat template`.**

-> A: This is because of the mismatch between `tokenizer.eos_token` and EOS/EOT token in template. Please make sure to set `eos_token` under `special_tokens` to the same EOS/EOT token as in template.
+> A: There can be two reasons:
+
+> 1. There is a mismatch between `tokenizer.eos_token` and the EOS token in the template. Please make sure to set `eos_token: ` under `special_tokens: ` to the same EOS token as in the template.
+
+> 2. The EOS token is not in the template. Please check if your template is correct. As an example, the `phi_35` template does not use its dedicated EOS token `<|endoftext|>` at the end.

**Q: "`chat_template` choice is `tokenizer_default` but tokenizer's `chat_template` is null. Please add a `chat_template` in tokenizer config"**

> A: This is because the tokenizer does not have a chat template. Please add a chat template in the tokenizer config. See [chat_template](dataset-formats/conversation.qmd#chat-template) for more details.

+**Q: The EOT token(s) are incorrectly being masked or not being masked / `EOT token __ not found in chat template`.**
+
+> A: There can be two reasons:
+
+> 1. The EOT token is different from the EOS token and was not specified under `eot_tokens: `. Please set `eot_tokens: ` to the same EOT token(s) as in the template.
+
+> 2. There is more than one EOT token per turn in the template. Please raise an issue with examples, as we recognize this as an edge case.
+
+**Q: `EOT token encoding failed. Please check if the token is valid and can be encoded.`**
+
+> A: There could be some issue with the tokenizer or unicode encoding. Please raise an issue with examples of the EOT token & tokenizer causing the issue.
+
+**Q: `EOT token __ is encoded as multiple tokens.`**
+
+> A: This is because the EOT token is encoded as multiple tokens, which can cause unexpected behavior. Please add it under `tokens: ` or (recommended) override unused added_tokens via `added_tokens_overrides: `.
+
+**Q: `Conflict between train_on_eos and train_on_eot. eos_token is in eot_tokens and train_on_eos != train_on_eot`**
+
+> A: This happens when the EOS token is listed in `eot_tokens: ` but `train_on_eos: ` and `train_on_eot: ` do not match, so one setting would override the other. Please ensure that `train_on_eos: ` and `train_on_eot: ` are the same, or remove the EOS token from `eot_tokens: `.
+
+**Q: If `eot_tokens: ` is not provided, what happens?**
+
+> A: If `eot_tokens: ` is not provided, the default behavior is the same as before. EOS tokens used to delimit turns are masked/unmasked depending on whether the turn is trainable.
+
+> Internally, `eot_tokens: tokenizer.eos_token` and `train_on_eot: train_on_eos` (which defaults to `turn`). This transition helps clarify the naming and behavior of EOT/EOS tokens.
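As a minimal sketch of the settings referenced in the answers above (the token strings are placeholders; use the ones your chat template actually emits):

```yaml
special_tokens:
  eos_token: "<|eot_id|>"  # placeholder; must match the EOS token in the template

eot_tokens:
  - "[/INST]"              # placeholder; only needed when the turn delimiter differs from eos_token
```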
@@ -104,7 +104,7 @@ the `alpaca` dataset format, which has the following format:
Please see our [Dataset Formats](dataset-formats) for more dataset formats and how to
format them.

-2. Prepare your JSONL data in the specified format (in this case, the expected `alpaca
+2. Prepare your JSONL data in the specified format (in this case, the expected `alpaca`
format):

```json
@@ -120,6 +120,12 @@ axolotl train my_training.yml

## Common Tasks {#sec-common-tasks}

+::: {.callout-tip}
+
+The same yaml file is used for training, inference, and merging.
+
+:::
+
### Testing Your Model {#sec-testing}

After training, test your model:
@@ -128,6 +134,16 @@ After training, test your model:
axolotl inference my_training.yml --lora-model-dir="./outputs/lora-out"
```

+More details can be found in [Inference](inference.qmd).
+
+### Using a UI {#sec-ui}
+
+Launch a Gradio interface:
+
+```bash
+axolotl inference my_training.yml --lora-model-dir="./outputs/lora-out" --gradio
+```
+
### Preprocessing Data {#sec-preprocessing}

For large datasets, preprocess first:
@@ -136,14 +152,22 @@ For large datasets, preprocess first:
axolotl preprocess my_training.yml
```

-### Using a UI {#sec-ui}
+Please make sure to set `dataset_prepared_path: ` in your config to choose where the prepared dataset is saved.

-Launch a Gradio interface:
+More details can be found in [Dataset Preprocessing](dataset_preprocessing.qmd).
+
+### Merging LoRA weights {#sec-merging-lora}
+
+To merge the LoRA weights back into the base model, run:

```bash
-axolotl inference my_training.yml --lora-model-dir="./outputs/lora-out" --gradio
+axolotl merge-lora my_training.yml --lora-model-dir="./outputs/lora-out"
```

+The merged model will be saved in the `{output_dir}/merged` directory.
+
+More details can be found in [Merging LoRA weights](inference.qmd#sec-merging).
+
## Next Steps {#sec-next-steps}

Now that you have the basics, you might want to:
@@ -156,6 +180,7 @@ Now that you have the basics, you might want to:
Check our other guides for details on these topics:

- [Configuration Guide](config.qmd) - Full configuration options
+- [Dataset Loading](dataset-loading.qmd) - Loading datasets from various sources
- [Dataset Formats](dataset-formats) - Working with different data formats
- [Multi-GPU Training](multi-gpu.qmd)
- [Multi-Node Training](multi-node.qmd)
@@ -164,7 +164,7 @@ Here is an example of a multi-modal dataset:
{
    "role": "user",
    "content": [
-        {"type": "image", "image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"},
+        {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"},
        {"type": "text", "text": "Describe this image in detail."}
    ]
},
@@ -502,9 +502,7 @@ The input format is a simple JSON input with customizable fields based on the ab
Check out our [GRPO cookbook](https://github.com/axolotl-ai-cloud/axolotl-cookbook/tree/main/grpo#training-an-r1-style-large-language-model-using-grpo).
:::

-If you have multiple GPUs available, we reccomend using `vLLM` with the `GRPOTrainer` to significantly speedup trajectory generation during training.
-First, launch a `vLLM` server using `trl vllm-serve` - you may use a config file or CLI overrides to configure your vLLM server. In this example, we're
-using 4 GPUs - 2 for training, and 2 for vLLM:
+In the latest GRPO implementation, `vLLM` is used to significantly speed up trajectory generation during training. In this example, we're using 4 GPUs - 2 for training, and 2 for vLLM:

::: {.callout-important}
Make sure you've installed the correct version of vLLM by including it as an extra when installing axolotl, e.g. `pip install axolotl[vllm]`.
@@ -539,6 +537,10 @@ Your `vLLM` instance will now attempt to spin up, and it's time to kick off trai
CUDA_VISIBLE_DEVICES=0,1 axolotl train grpo.yaml --num-processes 2
```

+::: {.callout-note}
+Due to TRL's implementation with vLLM, the vLLM instance must use the last N GPUs instead of the first N GPUs. This is why in the example above, we use `CUDA_VISIBLE_DEVICES=2,3` for the vLLM instance.
+:::
+
#### Reward functions

GRPO uses custom reward functions and transformations. Please have them ready locally.
@@ -3,8 +3,6 @@ title: Sequence Parallelism
description: Train with long sequences split across multiple GPUs.
---

-# Sequence Parallelism
-
Sequence parallelism is a technique that splits sequences across multiple GPUs,
allowing you to train with very long sequences that wouldn't fit on a single GPU. Each
GPU processes a different portion of the sequence, and the results are aggregated
@@ -27,7 +25,7 @@ To enable sequence parallelism, add the following to your configuration file:
sequence_parallel_degree: 4  # Split sequences across 4 GPUs
# Optional; strides across the key dimension. Larger values use more memory but should make training faster.
heads_k_stride: 1
-# Optional; one of "varlen_llama3", "batch_ring", "batch_zigzag", "batch_stripe". Defaults to
+# Optional; one of "varlen_llama3" or "batch_ring". Defaults to
# "varlen_llama3" when `sample_packing: true`, and "batch_ring" otherwise.
ring_attn_func:
```
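For instance, a minimal sketch combining these options with sample packing (the sequence length and parallel degree are illustrative):

```yaml
sequence_len: 32768
sample_packing: true
sequence_parallel_degree: 4    # split each sequence across 4 GPUs
heads_k_stride: 1
ring_attn_func: varlen_llama3  # matches the default when sample_packing is enabled
```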
77
examples/llama-3/sparse-finetuning.yaml
Normal file
77
examples/llama-3/sparse-finetuning.yaml
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
base_model: neuralmagic/Sparse-Llama-3.1-8B-2of4
|
||||||
|
|
||||||
|
plugins:
|
||||||
|
- axolotl.integrations.llm_compressor.LLMCompressorPlugin
|
||||||
|
|
||||||
|
load_in_8bit: false
|
||||||
|
load_in_4bit: false
|
||||||
|
strict: false
|
||||||
|
|
||||||
|
datasets:
|
||||||
|
- path: tatsu-lab/alpaca
|
||||||
|
type: alpaca
|
||||||
|
dataset_prepared_path: last_run_prepared
|
||||||
|
val_set_size: 0.05
|
||||||
|
output_dir: ./outputs/out
|
||||||
|
|
||||||
|
sequence_len: 4096
|
||||||
|
sample_packing: true
|
||||||
|
pad_to_sequence_len: true
|
||||||
|
eval_sample_packing: false
|
||||||
|
|
||||||
|
wandb_project:
|
||||||
|
wandb_entity:
|
||||||
|
wandb_watch:
|
||||||
|
wandb_name:
|
||||||
|
wandb_log_model:
|
||||||
|
|
||||||
|
gradient_accumulation_steps: 8
|
||||||
|
micro_batch_size: 1
|
||||||
|
num_epochs: 1
|
||||||
|
optimizer: paged_adamw_8bit
|
||||||
|
lr_scheduler: cosine
|
||||||
|
learning_rate: 2e-5
|
||||||
|
|
||||||
|
train_on_inputs: false
|
||||||
|
group_by_length: false
|
||||||
|
bf16: auto
|
||||||
|
fp16:
|
||||||
|
tf32: false
|
||||||
|
|
||||||
|
gradient_checkpointing: true
|
||||||
|
gradient_checkpointing_kwargs:
|
||||||
|
use_reentrant: false
|
||||||
|
early_stopping_patience:
|
||||||
|
resume_from_checkpoint:
|
||||||
|
logging_steps: 1
|
||||||
|
xformers_attention:
|
||||||
|
flash_attention: true
|
||||||
|
|
||||||
|
warmup_steps: 100
|
||||||
|
evals_per_epoch: 2
|
||||||
|
eval_table_size:
|
||||||
|
saves_per_epoch: 1
|
||||||
|
debug:
|
||||||
|
deepspeed:
|
||||||
|
weight_decay: 0.0
|
||||||
|
fsdp:
|
||||||
|
fsdp_config:
|
||||||
|
special_tokens:
|
||||||
|
pad_token: <|end_of_text|>
|
||||||
|
|
||||||
|
llmcompressor:
|
||||||
|
recipe:
|
||||||
|
finetuning_stage:
|
||||||
|
finetuning_modifiers:
|
||||||
|
ConstantPruningModifier:
|
||||||
|
targets: [
|
||||||
|
're:.*q_proj.weight',
|
||||||
|
're:.*k_proj.weight',
|
||||||
|
're:.*v_proj.weight',
|
||||||
|
're:.*o_proj.weight',
|
||||||
|
're:.*gate_proj.weight',
|
||||||
|
're:.*up_proj.weight',
|
||||||
|
're:.*down_proj.weight',
|
||||||
|
]
|
||||||
|
start: 0
|
||||||
|
save_compressed: true
|
||||||
@@ -34,3 +34,5 @@ We provide a script to delinearize Llama 4 linearized models into regular Huggin
```bash
axolotl delinearize-llama4 --model path/to/model_dir --output path/to/output_dir
```
+
+Note: This only works with the non-quantized linearized model. If you have an adapter, merge it with the *non-quantized linearized* model before delinearizing.
@@ -10,7 +10,6 @@ plugins:
liger_glu_activation: true
liger_rms_norm: true
liger_layer_norm: true
-cut_cross_entropy: true

llama4_linearized_experts: true # needed with custom linearized experts model
load_in_4bit: true
341
examples/orpheus/README.md
Normal file
341
examples/orpheus/README.md
Normal file
@@ -0,0 +1,341 @@
|
|||||||
|
# Finetuning LLMs to output audio
|
||||||
|
|
||||||
|
In this example, we finetune canopylabs/orpheus-3b-0.1-pretrained (a LLaMA 3.2 3B model) to output audio.
|
||||||
|
|
||||||
|
The `finetune.yml` with the current settings will run on any Nvidia GPU with 45GB of VRAM or more. If you adjust the batch size, it can easily run on a GPU with less than 24GB of VRAM.
|
||||||
|
|
||||||
|
## Dataset pre-processing for pre-training
|
||||||
|
If you are adding another voice in English, please jump ahead to finetuning pre-processing.
|
||||||
|
|
||||||
|
For this to work, we need to preprocess our dataset. Since we are expecting to output audio, we will need to add tokens to the tokenizer.
|
||||||
|
|
||||||
|
The code below downloads the SNAC model, adds the correct tokens, and uploads the final dataset.
|
||||||
|
|
||||||
|
```python
|
||||||
|
import torch
|
||||||
|
from snac import SNAC
|
||||||
|
from datasets import load_dataset
|
||||||
|
from huggingface_hub import snapshot_download
|
||||||
|
from datasets import load_dataset
|
||||||
|
import random
|
||||||
|
import torchaudio.transforms as T
|
||||||
|
from transformers import AutoTokenizer
|
||||||
|
import os
|
||||||
|
|
||||||
|
my_original_dataset_name = "<huggingface-id-of-dataset-that-we-want-to-preprocess>"
|
||||||
|
name_to_push_dataset_to = "<huggingface-id-of-where-to-save-dataset>"
|
||||||
|
|
||||||
|
dsn = my_original_dataset_name
|
||||||
|
|
||||||
|
snapshot_download(
|
||||||
|
repo_id=dsn,
|
||||||
|
repo_type="dataset",
|
||||||
|
revision="main",
|
||||||
|
max_workers=64,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
ds = load_dataset(dsn, split="train")
|
||||||
|
ds_sample_rate = ds[0]["audio"]["sampling_rate"]
|
||||||
|
|
||||||
|
model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")
|
||||||
|
model = model.to("cuda")  # keep the SNAC model on the same device as the waveform below
|
||||||
|
|
||||||
|
def tokenise_audio(waveform):
|
||||||
|
waveform = torch.from_numpy(waveform).unsqueeze(0)
|
||||||
|
waveform = waveform.to(dtype=torch.float32)
|
||||||
|
resample_transform = T.Resample(orig_freq=ds_sample_rate, new_freq=24000)
|
||||||
|
waveform = resample_transform(waveform)
|
||||||
|
|
||||||
|
waveform = waveform.unsqueeze(0).to("cuda")
|
||||||
|
|
||||||
|
#generate the codes from snac
|
||||||
|
with torch.inference_mode():
|
||||||
|
codes = model.encode(waveform)
|
||||||
|
|
||||||
|
all_codes = []
|
||||||
|
for i in range(codes[0].shape[1]):
|
||||||
|
all_codes.append(codes[0][0][i].item()+128266)
|
||||||
|
all_codes.append(codes[1][0][2*i].item()+128266+4096)
|
||||||
|
all_codes.append(codes[2][0][4*i].item()+128266+(2*4096))
|
||||||
|
all_codes.append(codes[2][0][(4*i)+1].item()+128266+(3*4096))
|
||||||
|
all_codes.append(codes[1][0][(2*i)+1].item()+128266+(4*4096))
|
||||||
|
all_codes.append(codes[2][0][(4*i)+2].item()+128266+(5*4096))
|
||||||
|
all_codes.append(codes[2][0][(4*i)+3].item()+128266+(6*4096))
|
||||||
|
|
||||||
|
|
||||||
|
return all_codes
|
||||||
|
|
||||||
|
def add_codes(example):
|
||||||
|
# Always initialize codes_list to None
|
||||||
|
codes_list = None
|
||||||
|
|
||||||
|
try:
|
||||||
|
answer_audio = example.get("audio")
|
||||||
|
# If there's a valid audio array, tokenise it
|
||||||
|
if answer_audio and "array" in answer_audio:
|
||||||
|
audio_array = answer_audio["array"]
|
||||||
|
codes_list = tokenise_audio(audio_array)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Skipping row due to error: {e}")
|
||||||
|
# Keep codes_list as None if we fail
|
||||||
|
example["codes_list"] = codes_list
|
||||||
|
|
||||||
|
return example
|
||||||
|
|
||||||
|
ds = ds.map(add_codes, remove_columns=["audio"])
|
||||||
|
|
||||||
|
#@title Load Tokenizer
|
||||||
|
tokeniser_length = 128256
|
||||||
|
start_of_text = 128000
|
||||||
|
end_of_text = 128009
|
||||||
|
|
||||||
|
start_of_speech = tokeniser_length + 1
|
||||||
|
end_of_speech = tokeniser_length + 2
|
||||||
|
|
||||||
|
start_of_human = tokeniser_length + 3
|
||||||
|
end_of_human = tokeniser_length + 4
|
||||||
|
|
||||||
|
start_of_ai = tokeniser_length + 5
|
||||||
|
end_of_ai = tokeniser_length + 6
|
||||||
|
pad_token = tokeniser_length + 7
|
||||||
|
|
||||||
|
audio_tokens_start = tokeniser_length + 10
|
||||||
|
|
||||||
|
tokenizer_name = "canopylabs/orpheus-3b-0.1-pretrained"
|
||||||
|
|
||||||
|
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
|
||||||
|
num_proc = os.cpu_count() - 2
|
||||||
|
|
||||||
|
ds = ds.filter(lambda x: x["codes_list"] is not None)
|
||||||
|
ds = ds.filter(lambda x: len(x["codes_list"]) > 0)
|
||||||
|
|
||||||
|
#@title Create Input Ids
|
||||||
|
def remove_duplicate_frames(example):
|
||||||
|
vals = example["codes_list"]
|
||||||
|
if len(vals) % 7 != 0:
|
||||||
|
raise ValueError("Input list length must be divisible by 7")
|
||||||
|
|
||||||
|
result = vals[:7]
|
||||||
|
|
||||||
|
removed_frames = 0
|
||||||
|
|
||||||
|
for i in range(7, len(vals), 7):
|
||||||
|
current_first = vals[i]
|
||||||
|
previous_first = result[-7]
|
||||||
|
|
||||||
|
if current_first != previous_first:
|
||||||
|
result.extend(vals[i:i+7])
|
||||||
|
else:
|
||||||
|
removed_frames += 1
|
||||||
|
|
||||||
|
example["codes_list"] = result
|
||||||
|
|
||||||
|
return example
|
||||||
|
|
||||||
|
ds = ds.map(remove_duplicate_frames, num_proc=num_proc)
|
||||||
|
|
||||||
|
|
||||||
|
def create_input_ids(example):
|
||||||
|
    text_ids = tokenizer.encode(example["text"], add_special_tokens=True)
|
||||||
|
text_ids.append(end_of_text)
|
||||||
|
example["text_tokens"] = text_ids
|
||||||
|
input_ids = (
|
||||||
|
[start_of_human]
|
||||||
|
+ example["text_tokens"]
|
||||||
|
+ [end_of_human]
|
||||||
|
+ [start_of_ai]
|
||||||
|
+ [start_of_speech]
|
||||||
|
+ example["codes_list"]
|
||||||
|
+ [end_of_speech]
|
||||||
|
+ [end_of_ai]
|
||||||
|
)
|
||||||
|
example["input_ids"] = input_ids
|
||||||
|
example["labels"] = input_ids
|
||||||
|
example["attention_mask"] = [1] * len(input_ids)
|
||||||
|
|
||||||
|
return example
|
||||||
|
|
||||||
|
ds = ds.map(create_input_ids, num_proc=num_proc, remove_columns=["text", "codes_list"])
|
||||||
|
|
||||||
|
#@title Remove unnecessary columns
|
||||||
|
columns_to_keep = ["input_ids", "labels", "attention_mask"]
|
||||||
|
columns_to_remove = [col for col in ds.column_names if col not in columns_to_keep]
|
||||||
|
|
||||||
|
ds = ds.remove_columns(columns_to_remove)
|
||||||
|
|
||||||
|
ds.push_to_hub(name_to_push_dataset_to)
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Finetune pre-processing
|
||||||
|
Use this code to add a new voice.
|
||||||
|
|
||||||
|
```python
|
||||||
|
import torch
|
||||||
|
from snac import SNAC
|
||||||
|
from datasets import load_dataset
|
||||||
|
from huggingface_hub import snapshot_download
|
||||||
|
from datasets import load_dataset
|
||||||
|
import random
|
||||||
|
import torchaudio.transforms as T
|
||||||
|
from transformers import AutoTokenizer
|
||||||
|
import os
|
||||||
|
|
||||||
|
my_original_dataset_name = "<huggingface-id-of-dataset-that-we-want-to-preprocess>"
|
||||||
|
name_to_push_dataset_to = "<huggingface-id-of-where-to-save-dataset>"
|
||||||
|
|
||||||
|
dsn = my_original_dataset_name
|
||||||
|
|
||||||
|
snapshot_download(
|
||||||
|
repo_id=dsn,
|
||||||
|
repo_type="dataset",
|
||||||
|
revision="main",
|
||||||
|
max_workers=64,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
ds = load_dataset(dsn, split="train")
|
||||||
|
ds_sample_rate = ds[0]["audio"]["sampling_rate"]
|
||||||
|
|
||||||
|
model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")
|
||||||
|
model = model.to("cuda")  # keep the SNAC model on the same device as the waveform below
|
||||||
|
|
||||||
|
def tokenise_audio(waveform):
|
||||||
|
waveform = torch.from_numpy(waveform).unsqueeze(0)
|
||||||
|
waveform = waveform.to(dtype=torch.float32)
|
||||||
|
resample_transform = T.Resample(orig_freq=ds_sample_rate, new_freq=24000)
|
||||||
|
waveform = resample_transform(waveform)
|
||||||
|
|
||||||
|
waveform = waveform.unsqueeze(0).to("cuda")
|
||||||
|
|
||||||
|
#generate the codes from snac
|
||||||
|
with torch.inference_mode():
|
||||||
|
codes = model.encode(waveform)
|
||||||
|
|
||||||
|
all_codes = []
|
||||||
|
for i in range(codes[0].shape[1]):
|
||||||
|
all_codes.append(codes[0][0][i].item()+128266)
|
||||||
|
all_codes.append(codes[1][0][2*i].item()+128266+4096)
|
||||||
|
all_codes.append(codes[2][0][4*i].item()+128266+(2*4096))
|
||||||
|
all_codes.append(codes[2][0][(4*i)+1].item()+128266+(3*4096))
|
||||||
|
all_codes.append(codes[1][0][(2*i)+1].item()+128266+(4*4096))
|
||||||
|
all_codes.append(codes[2][0][(4*i)+2].item()+128266+(5*4096))
|
||||||
|
all_codes.append(codes[2][0][(4*i)+3].item()+128266+(6*4096))
|
||||||
|
|
||||||
|
|
||||||
|
return all_codes
|
||||||
|
|
||||||
|
def add_codes(example):
|
||||||
|
# Always initialize codes_list to None
|
||||||
|
codes_list = None
|
||||||
|
|
||||||
|
try:
|
||||||
|
answer_audio = example.get("audio")
|
||||||
|
# If there's a valid audio array, tokenise it
|
||||||
|
if answer_audio and "array" in answer_audio:
|
||||||
|
audio_array = answer_audio["array"]
|
||||||
|
codes_list = tokenise_audio(audio_array)
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Skipping row due to error: {e}")
|
||||||
|
# Keep codes_list as None if we fail
|
||||||
|
example["codes_list"] = codes_list
|
||||||
|
|
||||||
|
return example
|
||||||
|
|
||||||
|
ds = ds.map(add_codes, remove_columns=["audio"])
|
||||||
|
|
||||||
|
#@title Load Tokenizer
|
||||||
|
tokeniser_length = 128256
|
||||||
|
start_of_text = 128000
|
||||||
|
end_of_text = 128009
|
||||||
|
|
||||||
|
start_of_speech = tokeniser_length + 1
|
||||||
|
end_of_speech = tokeniser_length + 2
|
||||||
|
|
||||||
|
start_of_human = tokeniser_length + 3
|
||||||
|
end_of_human = tokeniser_length + 4
|
||||||
|
|
||||||
|
start_of_ai = tokeniser_length + 5
|
||||||
|
end_of_ai = tokeniser_length + 6
|
||||||
|
pad_token = tokeniser_length + 7
|
||||||
|
|
||||||
|
audio_tokens_start = tokeniser_length + 10
|
||||||
|
|
||||||
|
tokenizer_name = "canopylabs/orpheus-3b-0.1-pretrained"
|
||||||
|
|
||||||
|
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
|
||||||
|
num_proc = os.cpu_count() - 2
|
||||||
|
|
||||||
|
ds = ds.filter(lambda x: x["codes_list"] is not None)
|
||||||
|
ds = ds.filter(lambda x: len(x["codes_list"]) > 0)
|
||||||
|
|
||||||
|
#@title Create Input Ids
|
||||||
|
def remove_duplicate_frames(example):
|
||||||
|
vals = example["codes_list"]
|
||||||
|
if len(vals) % 7 != 0:
|
||||||
|
raise ValueError("Input list length must be divisible by 7")
|
||||||
|
|
||||||
|
result = vals[:7]
|
||||||
|
|
||||||
|
removed_frames = 0
|
||||||
|
|
||||||
|
for i in range(7, len(vals), 7):
|
||||||
|
current_first = vals[i]
|
||||||
|
previous_first = result[-7]
|
||||||
|
|
||||||
|
if current_first != previous_first:
|
||||||
|
result.extend(vals[i:i+7])
|
||||||
|
else:
|
||||||
|
removed_frames += 1
|
||||||
|
|
||||||
|
example["codes_list"] = result
|
||||||
|
|
||||||
|
return example
|
||||||
|
|
||||||
|
ds = ds.map(remove_duplicate_frames, num_proc=num_proc)
|
||||||
|
|
||||||
|
tok_info = '''*** HERE you can modify the text prompt
|
||||||
|
i.e. if you wanted a multispeaker model like canopylabs/orpheus-3b-0.1-ft, you can pass:
|
||||||
|
f"{example["source"]}: {example["text"]}", as is passed.
|
||||||
|
'''
|
||||||
|
print(tok_info)
|
||||||
|
|
||||||
|
def create_input_ids(example):
|
||||||
|
text_ids = tokenizer.encode(f"{example['speaker_id']}: {example['text']}", add_special_tokens=True)
|
||||||
|
text_ids.append(end_of_text)
|
||||||
|
example["text_tokens"] = text_ids
|
||||||
|
input_ids = (
|
||||||
|
[start_of_human]
|
||||||
|
+ example["text_tokens"]
|
||||||
|
+ [end_of_human]
|
||||||
|
+ [start_of_ai]
|
||||||
|
+ [start_of_speech]
|
||||||
|
+ example["codes_list"]
|
||||||
|
+ [end_of_speech]
|
||||||
|
+ [end_of_ai]
|
||||||
|
)
|
||||||
|
example["input_ids"] = input_ids
|
||||||
|
example["labels"] = input_ids
|
||||||
|
example["attention_mask"] = [1] * len(input_ids)
|
||||||
|
|
||||||
|
return example
|
||||||
|
|
||||||
|
ds = ds.map(create_input_ids, num_proc=num_proc, remove_columns=["text", "codes_list"])
|
||||||
|
|
||||||
|
#@title Remove unnecessary columns
|
||||||
|
columns_to_keep = ["input_ids", "labels", "attention_mask"]
|
||||||
|
columns_to_remove = [col for col in ds.column_names if col not in columns_to_keep]
|
||||||
|
|
||||||
|
ds = ds.remove_columns(columns_to_remove)
|
||||||
|
|
||||||
|
ds.push_to_hub(name_to_push_dataset_to)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Training
|
||||||
|
After preprocessing is done, fill out the blanks in `finetune.yml` and simply run `axolotl train finetune.yml`.
|
||||||
|
|
||||||
|
## Inference
|
||||||
|
For inference, please refer to the original [orpheus github](https://github.com/canopyai/Orpheus-TTS/tree/main).
|
||||||
52
examples/orpheus/finetune.yml
Normal file
52
examples/orpheus/finetune.yml
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
base_model: canopylabs/orpheus-3b-0.1-pretrained
|
||||||
|
|
||||||
|
hub_model_id: <your-hub-model-id>
|
||||||
|
|
||||||
|
plugins:
|
||||||
|
- axolotl.integrations.liger.LigerPlugin
|
||||||
|
liger_rope: true
|
||||||
|
liger_rms_norm: true
|
||||||
|
liger_glu_activation: true
|
||||||
|
liger_fused_linear_cross_entropy: true
|
||||||
|
|
||||||
|
datasets:
|
||||||
|
- path: <your-hf-dataset-id>
|
||||||
|
type: # leave empty to load pre-tokenized
|
||||||
|
dataset_prepared_path: last_run_prepared
|
||||||
|
val_set_size: 0.01
|
||||||
|
output_dir: ./outputs/out
|
||||||
|
|
||||||
|
sequence_len: 8192
|
||||||
|
sample_packing: true
|
||||||
|
pad_to_sequence_len: true
|
||||||
|
|
||||||
|
wandb_project:
|
||||||
|
wandb_entity:
|
||||||
|
wandb_watch:
|
||||||
|
wandb_name:
|
||||||
|
wandb_log_model:
|
||||||
|
|
||||||
|
gradient_accumulation_steps: 8
|
||||||
|
micro_batch_size: 4
|
||||||
|
num_epochs: 3
|
||||||
|
optimizer: adamw_torch_fused
|
||||||
|
lr_scheduler: cosine
|
||||||
|
learning_rate: 2e-5
|
||||||
|
|
||||||
|
bf16: auto
|
||||||
|
tf32: false
|
||||||
|
|
||||||
|
gradient_checkpointing: true
|
||||||
|
gradient_checkpointing_kwargs:
|
||||||
|
use_reentrant: false
|
||||||
|
resume_from_checkpoint:
|
||||||
|
logging_steps: 1
|
||||||
|
flash_attention: true
|
||||||
|
|
||||||
|
warmup_steps: 20
|
||||||
|
evals_per_epoch: 5
|
||||||
|
saves_per_epoch: 5
|
||||||
|
weight_decay: 0.05
|
||||||
|
|
||||||
|
special_tokens:
|
||||||
|
pad_token: <custom_token_7>
|
||||||
69
examples/qwen3/32b-qlora.yaml
Normal file
69
examples/qwen3/32b-qlora.yaml
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
base_model: Qwen/Qwen3-32B
|
||||||
|
# Automatically upload checkpoint and final model to HF
|
||||||
|
# hub_model_id: username/custom_model_name
|
||||||
|
|
||||||
|
plugins:
|
||||||
|
- axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
|
||||||
|
strict: false
|
||||||
|
|
||||||
|
chat_template: qwen3
|
||||||
|
datasets:
|
||||||
|
- path: mlabonne/FineTome-100k
|
||||||
|
type: chat_template
|
||||||
|
split: train[:20%]
|
||||||
|
field_messages: conversations
|
||||||
|
message_property_mappings:
|
||||||
|
role: from
|
||||||
|
content: value
|
||||||
|
val_set_size: 0.0
|
||||||
|
output_dir: ./outputs/out
|
||||||
|
dataset_prepared_path: last_run_prepared
|
||||||
|
|
||||||
|
sequence_len: 2048
|
||||||
|
sample_packing: true
|
||||||
|
eval_sample_packing: true
|
||||||
|
pad_to_sequence_len: true
|
||||||
|
|
||||||
|
load_in_4bit: true
|
||||||
|
adapter: qlora
|
||||||
|
lora_r: 16
|
||||||
|
lora_alpha: 32
|
||||||
|
lora_target_modules:
|
||||||
|
- q_proj
|
||||||
|
- k_proj
|
||||||
|
- v_proj
|
||||||
|
- o_proj
|
||||||
|
- down_proj
|
||||||
|
- up_proj
|
||||||
|
lora_mlp_kernel: true
|
||||||
|
lora_qkv_kernel: true
|
||||||
|
lora_o_kernel: true
|
||||||
|
|
||||||
|
wandb_project:
|
||||||
|
wandb_entity:
|
||||||
|
wandb_watch:
|
||||||
|
wandb_name:
|
||||||
|
wandb_log_model:
|
||||||
|
|
||||||
|
gradient_accumulation_steps: 2
|
||||||
|
micro_batch_size: 1
|
||||||
|
num_epochs: 1
|
||||||
|
optimizer: adamw_torch_4bit
|
||||||
|
lr_scheduler: cosine
|
||||||
|
learning_rate: 0.0002
|
||||||
|
|
||||||
|
bf16: auto
|
||||||
|
tf32: true
|
||||||
|
|
||||||
|
gradient_checkpointing: offload
|
||||||
|
gradient_checkpointing_kwargs:
|
||||||
|
use_reentrant: false
|
||||||
|
resume_from_checkpoint:
|
||||||
|
logging_steps: 1
|
||||||
|
flash_attention: true
|
||||||
|
|
||||||
|
warmup_steps: 10
|
||||||
|
evals_per_epoch: 4
|
||||||
|
saves_per_epoch: 1
|
||||||
|
weight_decay: 0.0
|
||||||
|
special_tokens:
|
||||||
68
examples/qwen3/qlora-fsdp.yaml
Normal file
68
examples/qwen3/qlora-fsdp.yaml
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
base_model: Qwen/Qwen3-8B
|
||||||
|
# Automatically upload checkpoint and final model to HF
|
||||||
|
# hub_model_id: username/custom_model_name
|
||||||
|
|
||||||
|
load_in_8bit: false
|
||||||
|
load_in_4bit: true
|
||||||
|
strict: false
|
||||||
|
|
||||||
|
datasets:
|
||||||
|
- path: tatsu-lab/alpaca
|
||||||
|
type: alpaca
|
||||||
|
dataset_prepared_path:
|
||||||
|
val_set_size: 0.05
|
||||||
|
output_dir: ./outputs/out
|
||||||
|
|
||||||
|
sequence_len: 2048
|
||||||
|
sample_packing: true
|
||||||
|
eval_sample_packing: true
|
||||||
|
pad_to_sequence_len: true
|
||||||
|
|
||||||
|
adapter: qlora
|
||||||
|
lora_model_dir:
|
||||||
|
lora_r: 32
|
||||||
|
lora_alpha: 64
|
||||||
|
lora_dropout: 0.05
|
||||||
|
lora_target_linear: true
|
||||||
|
|
||||||
|
wandb_project:
|
||||||
|
wandb_entity:
|
||||||
|
wandb_watch:
|
||||||
|
wandb_name:
|
||||||
|
wandb_log_model:
|
||||||
|
|
||||||
|
gradient_accumulation_steps: 4
|
||||||
|
micro_batch_size: 1
|
||||||
|
num_epochs: 1
|
||||||
|
optimizer: adamw_torch_fused
|
||||||
|
lr_scheduler: cosine
|
||||||
|
learning_rate: 0.0002
|
||||||
|
|
||||||
|
bf16: auto
|
||||||
|
tf32: true
|
||||||
|
|
||||||
|
gradient_checkpointing: true
|
||||||
|
gradient_checkpointing_kwargs:
|
||||||
|
use_reentrant: false
|
||||||
|
resume_from_checkpoint:
|
||||||
|
logging_steps: 1
|
||||||
|
flash_attention: true
|
||||||
|
|
||||||
|
warmup_steps: 10
|
||||||
|
evals_per_epoch: 4
|
||||||
|
saves_per_epoch: 1
|
||||||
|
weight_decay: 0.0
|
||||||
|
fsdp:
|
||||||
|
- full_shard
|
||||||
|
- auto_wrap
|
||||||
|
fsdp_config:
|
||||||
|
fsdp_limit_all_gathers: true
|
||||||
|
fsdp_sync_module_states: true
|
||||||
|
fsdp_offload_params: true
|
||||||
|
fsdp_use_orig_params: false
|
||||||
|
fsdp_cpu_ram_efficient_loading: true
|
||||||
|
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||||
|
fsdp_transformer_layer_cls_to_wrap: Qwen3DecoderLayer
|
||||||
|
fsdp_state_dict_type: FULL_STATE_DICT
|
||||||
|
fsdp_sharding_strategy: FULL_SHARD
|
||||||
|
special_tokens:
|
||||||
@@ -6,19 +6,20 @@ triton>=3.0.0
mamba-ssm==1.2.0.post1
xformers>=0.0.23.post1
autoawq==0.2.7.post3
-liger-kernel==0.5.8
+liger-kernel==0.5.9
# END section

packaging==23.2

-peft==0.15.1
+huggingface_hub==0.31.0
+peft==0.15.2
transformers==4.51.3
tokenizers>=0.21.1
accelerate==1.6.0
-datasets==3.5.0
+datasets==3.5.1
deepspeed>=0.15.4
-trl==0.16.1
+trl==0.17.0
-hf_xet==1.0.0
+hf_xet==1.1.0
hqq==0.2.5

optimum==1.16.2
setup.py
@@ -67,13 +67,13 @@ def parse_requirements(extras_require_map):
    if (major, minor) >= (2, 7):
        _install_requires.pop(_install_requires.index(xformers_version))
        # _install_requires.append("xformers==0.0.29.post3")  # xformers seems to be hard pinned to 2.6.0
-        extras_require_map["vllm"] = ["vllm==0.8.3"]
+        extras_require_map["vllm"] = ["vllm==0.8.5.post1"]
    elif (major, minor) >= (2, 6):
        _install_requires.pop(_install_requires.index(xformers_version))
        _install_requires.append(
            "xformers==0.0.29.post2"
        )  # vllm needs post2 w torch 2.6
-        extras_require_map["vllm"] = ["vllm==0.8.3"]
+        extras_require_map["vllm"] = ["vllm==0.8.5.post1"]
    elif (major, minor) >= (2, 5):
        _install_requires.pop(_install_requires.index(xformers_version))
        if patch == 0:
@@ -142,6 +142,7 @@ extras_require = {
        "apollo-torch",
        "lomo-optim==0.1.1",
        "torch-optimi==0.2.1",
+        "came_pytorch==0.1.3",
    ],
    "ray": [
        "ray[train]",
@@ -149,6 +150,9 @@ extras_require = {
    "vllm": [
        "vllm==0.7.2",
    ],
+    "llmcompressor": [
+        "llmcompressor==0.5.1",
+    ],
}

install_requires, dependency_links, extras_require_build = parse_requirements(
@@ -4,4 +4,4 @@ import pkgutil

__path__ = pkgutil.extend_path(__path__, __name__)  # Make this a namespace package

-__version__ = "0.8.0"
+__version__ = "0.10.0.dev0"
@@ -2,4 +2,7 @@

import os

+from axolotl.logging_config import configure_logging
+
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+
+configure_logging()
@@ -82,6 +82,12 @@ class VllmServeCliArgs:
            "hardware support this feature."
        },
    )
+    serve_module: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": "Module to serve. If not set, the default module will be used."
+        },
+    )


@dataclass
@@ -16,8 +16,15 @@ AXOLOTL_LOGO = """
 @@@@ @@@@@@@@@@@@@@@@
 """

+HAS_PRINTED_LOGO = False


 def print_axolotl_text_art():
 """Prints axolotl ASCII art."""

+global HAS_PRINTED_LOGO  # pylint: disable=global-statement
+if HAS_PRINTED_LOGO:
+return
 if is_main_process():
+HAS_PRINTED_LOGO = True
 print(AXOLOTL_LOGO)
@@ -8,9 +8,6 @@ from accelerate.commands.config import config_args
 from huggingface_hub import HfApi
 from huggingface_hub.utils import LocalTokenNotFoundError

-from axolotl.logging_config import configure_logging

-configure_logging()
 LOG = logging.getLogger(__name__)

@@ -5,6 +5,7 @@ import logging
 import os
 import tempfile
 from pathlib import Path
+from tempfile import NamedTemporaryFile
 from typing import Union
 from urllib.parse import urlparse

@@ -152,7 +153,15 @@ def prepare_plugins(cfg: DictDefault):
 plugin_manager.register(plugin_name)


-def load_cfg(config: Union[str, Path] = Path("examples/"), **kwargs) -> DictDefault:
+def plugin_set_cfg(cfg: DictDefault):
+if cfg.get("plugins"):
+plugin_manager = PluginManager.get_instance()
+plugin_manager.cfg = cfg
+
+
+def load_cfg(
+config: str | Path | DictDefault = Path("examples/"), **kwargs
+) -> DictDefault:
 """
 Loads the `axolotl` configuration stored at `config`, validates it, and performs
 various setup.
@@ -164,13 +173,24 @@ def load_cfg(config: Union[str, Path] = Path("examples/"), **kwargs) -> DictDefa
 Returns:
 `DictDefault` mapping configuration keys to values.
 """
-config = check_remote_config(config)
+if isinstance(config, (str, Path)):
-if Path(config).is_dir():
+config = check_remote_config(config)
-config = choose_config(Path(config))
+if Path(config).is_dir():
+config = choose_config(Path(config))

 # Load the config from the yaml file
 with open(config, encoding="utf-8") as file:
 cfg: DictDefault = DictDefault(yaml.safe_load(file))

+cfg.axolotl_config_path = config
+else:
+cfg = config
+with NamedTemporaryFile(
+mode="w", delete=False, suffix=".yml", prefix="axolotl_config_"
+) as temp_file:
+temp_file.write(yaml.dump(config.to_dict()))
+temp_file.close()
+cfg.axolotl_config_path = temp_file.name

 # If there are any options passed in the cli, if it is something that seems valid
 # from the yaml, then overwrite the value
@@ -184,8 +204,6 @@ def load_cfg(config: Union[str, Path] = Path("examples/"), **kwargs) -> DictDefa
 else:
 cfg[k] = kwargs[k]

-cfg.axolotl_config_path = config

 try:
 device_props = torch.cuda.get_device_properties("cuda")
 gpu_version = "sm_" + str(device_props.major) + str(device_props.minor)
@@ -213,5 +231,6 @@ def load_cfg(config: Union[str, Path] = Path("examples/"), **kwargs) -> DictDefa
 setup_wandb_env_vars(cfg)
 setup_mlflow_env_vars(cfg)
 setup_comet_env_vars(cfg)
+plugin_set_cfg(cfg)

 return cfg
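With the hunks above, `load_cfg` accepts either a path or an in-memory `DictDefault`; in the latter case the config is dumped to a temporary YAML file so `cfg.axolotl_config_path` still points at a file on disk. A minimal sketch of the two call shapes follows; the path and config keys are placeholders, not a complete, validated config.

```python
# Sketch of the call shapes load_cfg now supports, per the hunks above.
from pathlib import Path

from axolotl.cli.config import load_cfg
from axolotl.utils.dict import DictDefault

# 1) existing behavior: load and validate a YAML config from disk
cfg = load_cfg(Path("examples/config.yml"))  # placeholder path

# 2) new behavior: pass an already-built DictDefault; it is written to a
#    NamedTemporaryFile and axolotl_config_path is set to that temp file
cfg = load_cfg(DictDefault({"base_model": "my-org/my-base-model"}))  # placeholder keys
```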
@@ -1,6 +1,7 @@
 """CLI to run evaluation on a model."""

 import logging
+import os
 from pathlib import Path
 from typing import Union

@@ -14,6 +15,7 @@ from axolotl.cli.checks import check_accelerate_default_config, check_user_token
 from axolotl.cli.config import load_cfg
 from axolotl.common.datasets import load_datasets, load_preference_datasets
 from axolotl.evaluate import evaluate
+from axolotl.utils import patch_optimized_env
 from axolotl.utils.dict import DictDefault

 LOG = logging.getLogger(__name__)
@@ -29,10 +31,14 @@ def do_evaluate(cfg: DictDefault, cli_args: TrainerCliArgs) -> None:
 cfg: Dictionary mapping `axolotl` config keys to values.
 cli_args: CLI arguments.
 """
+# Enable expandable segments for cuda allocation to improve VRAM usage
+patch_optimized_env()

 # pylint: disable=duplicate-code
 print_axolotl_text_art()
 check_accelerate_default_config()
-check_user_token()
+if int(os.getenv("LOCAL_RANK", "0")) == 0:
+check_user_token()

 if cfg.rl:
 dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
@@ -28,9 +28,8 @@ from axolotl.cli.utils import (
 fetch_from_github,
 filter_none_kwargs,
 )
-from axolotl.cli.vllm_serve import do_vllm_serve
 from axolotl.integrations.lm_eval.cli import lm_eval
-from axolotl.utils import set_pytorch_cuda_alloc_conf
+from axolotl.utils import patch_optimized_env
 from axolotl.utils.schemas.config import AxolotlInputConfig


@@ -56,6 +55,8 @@ def preprocess(config: str, cloud: Optional[str] = None, **kwargs) -> None:
 kwargs: Additional keyword arguments which correspond to CLI args or `axolotl`
 config options.
 """
+patch_optimized_env()

 if cloud:
 from axolotl.cli.cloud import do_cli_preprocess

@@ -101,7 +102,7 @@ def train(
 config options.
 """
 # Enable expandable segments for cuda allocation to improve VRAM usage
-set_pytorch_cuda_alloc_conf()
+patch_optimized_env()

 if "use_ray" in kwargs and kwargs["use_ray"]:
 accelerate = False
@@ -327,6 +328,8 @@ def fetch(directory: str, dest: Optional[str]) -> None:
 @add_options_from_dataclass(VllmServeCliArgs)
 @filter_none_kwargs
 def vllm_serve(config: str, **cli_args: VllmServeCliArgs):
+from axolotl.cli.vllm_serve import do_vllm_serve

 do_vllm_serve(config, cli_args)

@@ -18,6 +18,7 @@ from axolotl.cli.checks import check_accelerate_default_config, check_user_token
 from axolotl.cli.config import load_cfg
 from axolotl.common.const import DEFAULT_DATASET_PREPARED_PATH
 from axolotl.common.datasets import load_datasets, load_preference_datasets
+from axolotl.integrations.base import PluginManager
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.trainer import disable_datasets_caching

@@ -47,7 +48,10 @@ def do_preprocess(cfg: DictDefault, cli_args: PreprocessCliArgs) -> None:
 cfg.dataset_prepared_path = DEFAULT_DATASET_PREPARED_PATH

 with disable_datasets_caching():
-if cfg.rl:
+plugin_manager = PluginManager.get_instance()
+if plugin_manager.load_datasets(cfg, preprocess=True):
+pass
+elif cfg.rl:
 load_preference_datasets(cfg=cfg, cli_args=cli_args)
 else:
 load_datasets(cfg=cfg, cli_args=cli_args)
@@ -1,5 +1,6 @@
 """CLI to run training on a model."""

+import gc
 import logging
 import os
 from pathlib import Path
@@ -17,7 +18,7 @@ from axolotl.cli.config import load_cfg
 from axolotl.common.datasets import load_datasets, load_preference_datasets
 from axolotl.integrations.base import PluginManager
 from axolotl.train import train
-from axolotl.utils import set_pytorch_cuda_alloc_conf
+from axolotl.utils import patch_optimized_env
 from axolotl.utils.config import normalize_config, resolve_dtype
 from axolotl.utils.dict import DictDefault

@@ -35,21 +36,27 @@ def do_train(cfg: DictDefault, cli_args: TrainerCliArgs):
 cli_args: Training-specific CLI arguments.
 """
 # Enable expandable segments for cuda allocation to improve VRAM usage
-set_pytorch_cuda_alloc_conf()
+patch_optimized_env()

 print_axolotl_text_art()
 check_accelerate_default_config()
 if int(os.getenv("LOCAL_RANK", "0")) == 0:
 check_user_token()

-if cfg.rl:
+plugin_manager = PluginManager.get_instance()
-dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
+dataset_meta = plugin_manager.load_datasets(cfg, preprocess=False)
-else:
+if not dataset_meta:
-dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
+if cfg.rl:
+dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
+else:
+dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

 model, tokenizer, trainer = train(cfg=cfg, dataset_meta=dataset_meta)

 del model, tokenizer, trainer

+gc.collect()

 plugin_manager = PluginManager.get_instance()
 plugin_manager.post_train_unload(cfg)

@@ -20,11 +20,9 @@ from transformers import (
 ProcessorMixin,
 )

-from axolotl.logging_config import configure_logging
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.models import load_model, load_processor, load_tokenizer

-configure_logging()
 LOG = logging.getLogger(__name__)

@@ -6,7 +6,6 @@ from pathlib import Path
 from typing import Union

 from trl.scripts.vllm_serve import ScriptArguments
-from trl.scripts.vllm_serve import main as vllm_serve_main

 from axolotl.cli.config import load_cfg

@@ -28,6 +27,9 @@ def do_vllm_serve(
 cfg = load_cfg(config)
 model = cfg.base_model

+serve_module = cli_args.get("serve_module", "trl.scripts.vllm_serve")
+vllm_serve_main = getattr(__import__(serve_module, fromlist=["main"]), "main")

 tensor_parallel_size = (
 cli_args.get("tensor_parallel_size") or cfg.vllm.tensor_parallel_size
 )
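The hunk above makes the vLLM serve entrypoint pluggable: instead of importing `trl.scripts.vllm_serve.main` at module import time, `do_vllm_serve` resolves the module named by the new `serve_module` CLI arg. Below is a small sketch of that lookup written with `importlib`; the override module name is hypothetical.

```python
# Equivalent of the getattr(__import__(...)) lookup in the hunk above.
import importlib


def resolve_serve_main(serve_module: str = "trl.scripts.vllm_serve"):
    """Return the `main` callable exposed by the requested serve module."""
    return importlib.import_module(serve_module).main


vllm_serve_main = resolve_serve_main()  # default TRL entrypoint
# vllm_serve_main = resolve_serve_main("my_plugin.vllm_serve")  # hypothetical override
```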
@@ -11,5 +11,6 @@ MOE_ARCH_BLOCK = {
 ],
 "mixtral": "MixtralSparseMoeBlock",
 "qwen2_moe": "Qwen2MoeSparseMoeBlock",
+"qwen3_moe": "Qwen3MoeSparseMoeBlock",
 "deepseek_v2": "DeepseekV2MoE",
 }
@@ -14,6 +14,7 @@ from axolotl.utils.data import prepare_dataset
 from axolotl.utils.data.rl import load_prepare_preference_datasets
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.models import load_processor, load_tokenizer
+from axolotl.utils.schemas.enums import RLType
 from axolotl.utils.tokenization import check_dataset_labels

 LOG = logging.getLogger(__name__)
@@ -47,7 +48,8 @@ def sample_dataset(dataset: Dataset, num_samples: int) -> Dataset:
 def load_datasets(
 *,
 cfg: DictDefault,
-cli_args: Union[PreprocessCliArgs, TrainerCliArgs],
+cli_args: PreprocessCliArgs | TrainerCliArgs | None = None,
+debug: bool = False,
 ) -> TrainDatasetMeta:
 """
 Loads one or more training or evaluation datasets, calling
@@ -56,6 +58,7 @@ def load_datasets(
 Args:
 cfg: Dictionary mapping `axolotl` config keys to values.
 cli_args: Command-specific CLI arguments.
+debug: Whether to print out tokenization of sample

 Returns:
 Dataclass with fields for training and evaluation datasets and the computed
@@ -64,7 +67,8 @@ def load_datasets(
 tokenizer = load_tokenizer(cfg)
 processor = load_processor(cfg, tokenizer=tokenizer) if cfg.processor_type else None
 preprocess_iterable = (
-hasattr(cli_args, "iterable")
+cli_args
+and hasattr(cli_args, "iterable")
 and cli_args.iterable is not None
 and cli_args.iterable
 )
@@ -76,20 +80,25 @@ def load_datasets(
 preprocess_iterable=preprocess_iterable,
 )

-if (
+if (  # pylint: disable=too-many-boolean-expressions
-cli_args.debug
+cli_args
-or cfg.debug
+and (
-or cli_args.debug_text_only
+cli_args.debug
-or int(cli_args.debug_num_examples) > 0
+or cfg.debug
-):
+or cli_args.debug_text_only
+or int(cli_args.debug_num_examples) > 0
+)
+) or debug:
 LOG.info("check_dataset_labels...")

-train_samples = sample_dataset(train_dataset, cli_args.debug_num_examples)
+num_examples = cli_args.debug_num_examples if cli_args else 1
+text_only = cli_args.debug_text_only if cli_args else False
+train_samples = sample_dataset(train_dataset, num_examples)
 check_dataset_labels(
 train_samples,
 tokenizer,
-num_examples=cli_args.debug_num_examples,
+num_examples=num_examples,
-text_only=cli_args.debug_text_only,
+text_only=text_only,
 )

 LOG.info("printing prompters...")
@@ -125,7 +134,7 @@ def load_preference_datasets(
 total_num_steps: Optional[int] = int(
 math.ceil(len(train_dataset) * cfg.num_epochs / cfg.batch_size)
 )
-if cfg.rl == "grpo":
+if cfg.rl is RLType.GRPO:
 total_num_steps = None

 if cli_args.debug or cfg.debug:
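Following the `load_datasets` changes above, the function can now be called without CLI args (for example from a plugin code path), and the new `debug` flag triggers the label check on a single sample. A minimal usage sketch follows; the config path is a placeholder and the config is assumed to already be valid.

```python
# Sketch of calling load_datasets with the new optional arguments.
from axolotl.cli.config import load_cfg
from axolotl.common.datasets import load_datasets

cfg = load_cfg("config.yml")  # placeholder path to a valid axolotl config

dataset_meta = load_datasets(cfg=cfg)              # cli_args no longer required
dataset_meta = load_datasets(cfg=cfg, debug=True)  # checks labels on one sample
```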
@@ -21,6 +21,7 @@ import importlib.util
 import inspect
 import logging
 import math
+import os
 import sys
 from abc import abstractmethod
 from pathlib import Path
@@ -60,6 +61,7 @@ from axolotl.core.training_args import (
 from axolotl.integrations.base import PluginManager
 from axolotl.monkeypatch.multipack import SUPPORTED_MULTIPACK_MODEL_TYPES
 from axolotl.monkeypatch.relora import ReLoRACallback
+from axolotl.monkeypatch.trainer.lr import patch_trainer_get_lr
 from axolotl.processing_strategies import get_processing_strategy
 from axolotl.utils import is_comet_available, is_mlflow_available
 from axolotl.utils.callbacks import (
@@ -71,6 +73,7 @@ from axolotl.utils.callbacks import (
 SaveBetterTransformerModelCallback,
 bench_eval_callback_factory,
 causal_lm_bench_eval_callback_factory,
+colab_inference_post_train_callback,
 log_prediction_callback_factory,
 )
 from axolotl.utils.callbacks.lisa import lisa_callback_factory
@@ -84,7 +87,7 @@ from axolotl.utils.collators import (
 )
 from axolotl.utils.collators.mm_chat import MultiModalChatDataCollator
 from axolotl.utils.models import ensure_dtype
-from axolotl.utils.schemas.enums import CustomSupportedOptimizers
+from axolotl.utils.schemas.enums import CustomSupportedOptimizers, RLType

 try:
 import torch._dynamo  # pylint: disable=ungrouped-imports
@@ -114,6 +117,8 @@ class TrainerBuilderBase(abc.ABC):
 if hasattr(model, "add_model_tags"):
 model.add_model_tags(["axolotl"])

+patch_trainer_get_lr()

 @property
 def model_ref(self):
 return self._model_ref
@@ -165,6 +170,9 @@ class TrainerBuilderBase(abc.ABC):
 )
 )

+if self.cfg.gc_steps:
+callbacks.append(GCCallback(gc_steps=self.cfg.gc_steps))

 if self.cfg.use_wandb:
 callbacks.append(
 SaveAxolotlConfigtoWandBCallback(self.cfg.axolotl_config_path)
@@ -246,9 +254,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
 if self.cfg.loss_watchdog_threshold is not None:
 callbacks.append(LossWatchDogCallback(self.cfg))

-if self.cfg.gc_steps:
-callbacks.append(GCCallback(gc_steps=self.cfg.gc_steps))

 return callbacks

 def get_post_trainer_create_callbacks(self, trainer):
@@ -290,6 +295,10 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
 if self.cfg.lisa_step_interval and self.cfg.lisa_n_layers:
 callbacks.append(lisa_callback_factory(trainer))

+if any("COLAB_" in key for key in os.environ):
+ColabCallback = colab_inference_post_train_callback(trainer)
+callbacks.append(ColabCallback(self.cfg))

 callbacks.extend(super().get_post_trainer_create_callbacks(trainer=trainer))
 return callbacks

@@ -344,7 +353,7 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
 training_arguments_kwargs["warmup_steps"] = warmup_steps
 training_arguments_kwargs["logging_steps"] = logging_steps

-if self.cfg.seed:
+if self.cfg.seed is not None:
 training_arguments_kwargs["seed"] = self.cfg.seed

 if self.cfg.gradient_checkpointing:
@@ -378,8 +387,12 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
 training_arguments_kwargs["adam_beta1"] = self.cfg.adam_beta1
 if self.cfg.adam_beta2:
 training_arguments_kwargs["adam_beta2"] = self.cfg.adam_beta2
+if self.cfg.adam_beta3:
+training_arguments_kwargs["adam_beta3"] = self.cfg.adam_beta3
 if self.cfg.adam_epsilon:
 training_arguments_kwargs["adam_epsilon"] = self.cfg.adam_epsilon
+if self.cfg.adam_epsilon2:
+training_arguments_kwargs["adam_epsilon2"] = self.cfg.adam_epsilon2
 if self.cfg.max_grad_norm:
 training_arguments_kwargs["max_grad_norm"] = self.cfg.max_grad_norm

@@ -485,7 +498,7 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):

 # these are all the "standard" kwargs that are def used
 training_arguments_kwargs["max_steps"] = (
-total_num_steps if self.cfg.max_steps else -1
+self.cfg.max_steps if self.cfg.max_steps else -1
 )
 training_arguments_kwargs["max_seq_length"] = self.cfg.sequence_len
 training_arguments_kwargs["per_device_train_batch_size"] = (
@@ -538,8 +551,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
 report_to = []
 if self.cfg.use_wandb:
 report_to.append("wandb")
-if self.cfg.wandb_name:
-training_arguments_kwargs["run_name"] = self.cfg.wandb_name
 if self.cfg.use_mlflow:
 report_to.append("mlflow")
 if self.cfg.use_tensorboard:
@@ -699,6 +710,20 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
 optimizer_cls = ADOPT
 adam_kwargs["decouple"] = True
 optimizer_kwargs.update(adam_kwargs)
+elif self.cfg.optimizer == "came_pytorch":
+from came_pytorch import CAME

+optimizer_cls = CAME

+beta1 = training_arguments_kwargs.get("adam_beta1", 0.9)
+beta2 = training_arguments_kwargs.get("adam_beta2", 0.999)
+beta3 = training_arguments_kwargs.get("adam_beta3", 0.9999)
+eps1 = training_arguments_kwargs.get("adam_epsilon", 1e-30)
+eps2 = training_arguments_kwargs.get("adam_epsilon2", 1e-16)
+adam_kwargs["betas"] = (beta1, beta2, beta3)
+adam_kwargs["eps"] = (eps1, eps2)

+optimizer_kwargs.update(adam_kwargs)

 # Parse any additional optimizer args from config
 if self.cfg.optim_args:
@@ -798,14 +823,15 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
 data_collator_kwargs = {
 "padding": True,  # True/"longest" is the default
 }
+multiple = 64
 if self.cfg.pad_to_sequence_len:
-data_collator_kwargs["pad_to_multiple_of"] = 64 * math.ceil(
+data_collator_kwargs["pad_to_multiple_of"] = multiple * math.ceil(
-self.cfg.sequence_len / 64
+self.cfg.sequence_len / multiple
 )
 else:
 # A100 is best at 64, while others at 8. Let's use the larger so we don't have to check
 # https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html
-data_collator_kwargs["pad_to_multiple_of"] = 64
+data_collator_kwargs["pad_to_multiple_of"] = multiple

 if self.cfg.reward_model:
 data_collator_kwargs["max_length"] = self.cfg.sequence_len
@@ -932,9 +958,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
 collator = DataCollatorForSeq2Seq

 kwargs["return_tensors"] = "pt"
-if issubclass(collator, DataCollatorForSeq2Seq):
-kwargs["sequence_parallel_degree"] = training_args.sequence_parallel_degree
-kwargs["ring_attn_func"] = training_args.ring_attn_func

 return collator(
 *collator_args,
@@ -1014,6 +1037,10 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
 training_args_kwargs["dataloader_prefetch_factor"] = (
 self.cfg.dataloader_prefetch_factor
 )

+if self.cfg.seed is not None:
+training_args_kwargs["seed"] = self.cfg.seed

 if self.cfg.gradient_checkpointing:
 training_args_kwargs["gradient_checkpointing"] = (
 self.cfg.gradient_checkpointing
@@ -1037,6 +1064,8 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
 # default to saving each epoch if not defined
 training_args_kwargs["save_strategy"] = "epoch"

+training_args_kwargs["save_only_model"] = self.cfg.save_only_model

 if self.cfg.dataset_processes:
 training_args_kwargs["dataset_num_proc"] = self.cfg.dataset_processes

@@ -1051,9 +1080,16 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
 if self.cfg.rpo_alpha is not None:
 training_args_kwargs["rpo_alpha"] = self.cfg.rpo_alpha

+if self.cfg.use_wandb:
+training_args_kwargs["run_name"] = self.cfg.wandb_name

+training_args_kwargs["sequence_parallel_degree"] = (
+self.cfg.sequence_parallel_degree
+)

 training_args_cls = None
 blocklist_args_kwargs = []
-if self.cfg.rl == "simpo":
+if self.cfg.rl is RLType.SIMPO:
 training_args_cls = AxolotlCPOConfig
 training_args_kwargs["loss_type"] = "simpo"
 training_args_kwargs["max_length"] = self.cfg.sequence_len
@@ -1061,13 +1097,13 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
 if self.cfg.cpo_alpha is not None:
 training_args_kwargs["cpo_alpha"] = self.cfg.cpo_alpha

-elif self.cfg.rl == "orpo":
+elif self.cfg.rl is RLType.ORPO:
 training_args_cls = AxolotlORPOConfig
 training_args_kwargs["max_length"] = self.cfg.sequence_len
 if self.cfg.max_prompt_len:
 training_args_kwargs["max_prompt_length"] = self.cfg.max_prompt_len

-elif self.cfg.rl == "kto":
+elif self.cfg.rl is RLType.KTO:
 training_args_cls = AxolotlKTOConfig

 training_args_kwargs["desirable_weight"] = (
@@ -1081,14 +1117,14 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
 if self.cfg.max_prompt_len:
 training_args_kwargs["max_prompt_length"] = self.cfg.max_prompt_len

-elif self.cfg.rl == "grpo":
+elif self.cfg.rl is RLType.GRPO:
 training_args_cls = GRPOStrategy.get_training_args_class()
 training_args_kwargs.update(GRPOStrategy.set_training_args_kwargs(self.cfg))
 blocklist_args_kwargs = GRPOStrategy.get_blocklist_args_kwargs()

 else:
 training_args_cls = AxolotlDPOConfig
-if self.cfg.rl == "ipo":
+if self.cfg.rl is RLType.IPO:
 training_args_kwargs["loss_type"] = "ipo"
 training_args_kwargs["max_length"] = self.cfg.sequence_len
 training_args_kwargs["max_completion_length"] = None
@@ -1121,71 +1157,84 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
 **training_args_kwargs,
 )

+# unset run_name so wandb sets up experiment names
+if self.cfg.use_wandb and training_args.run_name == training_args.output_dir:
+training_args.run_name = (  # pylint: disable=attribute-defined-outside-init
+None
+)

 return training_args

 def build(self, total_num_steps):
 training_args = self.build_training_arguments(total_num_steps)
-dpo_trainer_kwargs = {}
+trainer_kwargs = {}
-if self.cfg.rl == "ipo":
+if self.cfg.rl is RLType.IPO:
 if self.cfg.dpo_label_smoothing:
-dpo_trainer_kwargs["label_smoothing"] = self.cfg.dpo_label_smoothing
+trainer_kwargs["label_smoothing"] = self.cfg.dpo_label_smoothing
 if self.eval_dataset:
-dpo_trainer_kwargs["eval_dataset"] = self.eval_dataset
+trainer_kwargs["eval_dataset"] = self.eval_dataset
 if self.cfg.adapter and self.peft_config:
-dpo_trainer_kwargs["peft_config"] = self.peft_config
+if self.cfg.rl is not RLType.GRPO:
+trainer_kwargs["peft_config"] = self.peft_config
 if self.cfg.precompute_ref_log_probs is not None:
-dpo_trainer_kwargs["precompute_ref_log_probs"] = (
+trainer_kwargs["precompute_ref_log_probs"] = (
 self.cfg.precompute_ref_log_probs
 )
-if self.cfg.rl == "grpo":
+if self.cfg.rl is RLType.GRPO:
-trainer_cls = GRPOStrategy.get_trainer_class()
+trainer_cls = GRPOStrategy.get_trainer_class(
+sequence_parallel=self.cfg.sequence_parallel_degree > 1
+)
 trainer_cls_args = [self.model]
 trainer_cls_args.extend(GRPOStrategy.set_trainer_args(self.cfg))
-dpo_trainer_kwargs.update(GRPOStrategy.set_trainer_kwargs(self.cfg))
+trainer_kwargs.update(GRPOStrategy.set_trainer_kwargs(self.cfg))
-elif self.cfg.rl in ["dpo", "ipo"]:
+elif self.cfg.rl in [RLType.DPO, RLType.IPO]:
 trainer_cls = DPOStrategy.get_trainer_class()
 trainer_cls_args = [self.model, self.model_ref]
-elif self.cfg.rl == "orpo":
+elif self.cfg.rl is RLType.ORPO:
 trainer_cls = AxolotlORPOTrainer
 trainer_cls_args = [self.model]
-elif self.cfg.rl in ["kto"]:
+elif self.cfg.rl is RLType.KTO:
 trainer_cls = AxolotlKTOTrainer
 trainer_cls_args = [self.model]
-elif self.cfg.rl in ["simpo"]:
+elif self.cfg.rl is RLType.SIMPO:
 trainer_cls = AxolotlCPOTrainer
 trainer_cls_args = [self.model]
 else:
 raise ValueError(f"Unsupported RL: {self.cfg.rl}")

+if self.cfg.plugins:
+plugin_manager = PluginManager.get_instance()
+trainer_cls = plugin_manager.get_trainer_cls(self.cfg)

 sig = inspect.signature(trainer_cls)
 if "tokenizer" in sig.parameters.keys():
-dpo_trainer_kwargs["tokenizer"] = self.tokenizer
+trainer_kwargs["tokenizer"] = self.tokenizer
 else:
-dpo_trainer_kwargs["processing_class"] = self.tokenizer
+trainer_kwargs["processing_class"] = self.tokenizer

 if self.cfg.datasets is not None and (
 trainer_cls is DPOStrategy.get_trainer_class()
 ):
-dpo_trainer_kwargs["dataset_tags"] = [
+trainer_kwargs["dataset_tags"] = [
 d["path"] for d in self.cfg.datasets if not Path(d["path"]).is_dir()
 ]
-dpo_trainer = trainer_cls(
+trainer = trainer_cls(
 *trainer_cls_args,
 args=training_args,
 train_dataset=self.train_dataset,
 callbacks=self.get_callbacks(),
-**dpo_trainer_kwargs,
+**trainer_kwargs,
 )
 if self.cfg.fsdp:
-ensure_dtype(dpo_trainer.model, dtype=self.cfg.torch_dtype)
+ensure_dtype(trainer.model, dtype=self.cfg.torch_dtype)
-if self.cfg.rl in ["dpo", "ipo"] and dpo_trainer.ref_model:
+if self.cfg.rl in [RLType.DPO, RLType.IPO] and trainer.ref_model:
-ensure_dtype(dpo_trainer.ref_model, dtype=self.cfg.torch_dtype)
+ensure_dtype(trainer.ref_model, dtype=self.cfg.torch_dtype)

-dpo_trainer = self.hook_post_create_trainer(dpo_trainer)
+trainer = self.hook_post_create_trainer(trainer)
-for callback in self.get_post_trainer_create_callbacks(dpo_trainer):
+for callback in self.get_post_trainer_create_callbacks(trainer):
-dpo_trainer.add_callback(callback)
+trainer.add_callback(callback)

-return dpo_trainer
+return trainer


 class HFPPOTrainerBuilder(TrainerBuilderBase):
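The new `came_pytorch` optimizer branch above maps `adam_beta1/2/3` and `adam_epsilon`/`adam_epsilon2` onto CAME's three betas and two eps terms. Below is a minimal sketch of the optimizer that branch ends up constructing, using the same defaults as the `.get(...)` fallbacks; the model and learning rate are placeholders.

```python
# Sketch of the CAME optimizer construction wired up in the hunk above.
import torch.nn as nn
from came_pytorch import CAME

model = nn.Linear(16, 16)  # stand-in module
optimizer = CAME(
    model.parameters(),
    lr=2e-4,                     # placeholder learning rate
    betas=(0.9, 0.999, 0.9999),  # adam_beta1, adam_beta2, adam_beta3 defaults
    eps=(1e-30, 1e-16),          # adam_epsilon, adam_epsilon2 defaults
)
```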
@@ -5,7 +5,7 @@

 from .base import AxolotlTrainer
 from .dpo.trainer import AxolotlDPOTrainer
-from .grpo.trainer import AxolotlGRPOTrainer
+from .grpo.trainer import AxolotlGRPOSequenceParallelTrainer, AxolotlGRPOTrainer
 from .mamba import AxolotlMambaTrainer
 from .relora import ReLoRATrainer
 from .trl import (
@@ -114,6 +114,8 @@ class AxolotlTrainer(
 packing_efficiency_estimate=self.args.sample_packing_efficiency,
 batch_max_len=batch_max_len,
 batch_size=batch_size,
+group_size=self.args.sample_packing_group_size,
+bin_size=self.args.sample_packing_bin_size,
 sequential=self.args.sample_packing_sequentially,
 drop_last=True,
 )
@@ -1,14 +1,11 @@
-"""
+"""DPO Specific Strategy for training"""
-DPO Specific Strategy for training
-"""

 from axolotl.core.trainers.dpo.trainer import AxolotlDPOTrainer
+from axolotl.utils.schemas.enums import RLType


 class DPOStrategy:
-"""
+"""Strategy for DPO training"""
-Strategy for DPO training
-"""

 @classmethod
 def get_trainer_class(cls):
@@ -23,7 +20,7 @@ class DPOStrategy:
 @classmethod
 def set_training_args_kwargs(cls, cfg):
 training_args_kwargs = {}
-if cfg.rl == "ipo":
+if cfg.rl is RLType.IPO:
 training_args_kwargs["loss_type"] = "ipo"
 training_args_kwargs["max_length"] = cfg.sequence_len
 training_args_kwargs["max_completion_length"] = None
@@ -3,15 +3,29 @@ DPO trainer for axolotl
 """

 import gc
+import random
 from functools import wraps
-from typing import Any, Dict, Union
+from typing import Any, Dict, Optional, Union

+import pandas as pd
 import torch
+import wandb
+from accelerate import PartialState
+from datasets import Dataset, IterableDataset
 from peft.optimizers import create_loraplus_optimizer
 from torch import nn
-from transformers import Trainer
+from torch.utils.data import DataLoader
+from transformers import (
+BaseImageProcessor,
+FeatureExtractionMixin,
+PreTrainedTokenizerBase,
+ProcessorMixin,
+Trainer,
+)
+from transformers.trainer_utils import EvalLoopOutput
 from transformers.utils import is_sagemaker_mp_enabled
-from trl import DPOTrainer
+from trl import DPOConfig, DPOTrainer, maybe_apply_chat_template, maybe_extract_prompt
+from trl.trainer.utils import log_table_to_comet_experiment

 from axolotl.core.trainers.mixins import RngLoaderMixin, SchedulerMixin
 from axolotl.core.trainers.utils import (
@@ -81,6 +95,64 @@ class AxolotlDPOTrainer(RngLoaderMixin, SchedulerMixin, DPOTrainer):

 return super().push_to_hub(*args, **kwargs)

+# TODO: remove this once https://github.com/huggingface/trl/pull/3377 is in a release
+def _prepare_dataset(
+self,
+dataset: Union[Dataset, IterableDataset],
+processing_class: Union[
+PreTrainedTokenizerBase,
+BaseImageProcessor,
+FeatureExtractionMixin,
+ProcessorMixin,
+],
+args: DPOConfig,
+dataset_name: str,
+) -> Union[Dataset, IterableDataset]:
+# Build the kwargs for the `map` function
+map_kwargs: Dict[str, Any] = {"writer_batch_size": 10}
+if isinstance(dataset, Dataset):  # IterableDataset does not support num_proc
+map_kwargs["num_proc"] = args.dataset_num_proc

+with PartialState().main_process_first():
+# Extract prompt if needed
+if isinstance(
+dataset, Dataset
+):  # `IterableDataset.map` does not support `desc`
+map_kwargs["desc"] = f"Extracting prompt in {dataset_name} dataset"
+dataset = dataset.map(maybe_extract_prompt, **map_kwargs)

+# Apply the chat template if needed
+if isinstance(
+dataset, Dataset
+):  # `IterableDataset.map` does not support `desc`
+map_kwargs["desc"] = f"Applying chat template to {dataset_name} dataset"
+dataset = dataset.map(
+maybe_apply_chat_template,
+fn_kwargs={"tokenizer": processing_class, "tools": args.tools},
+**map_kwargs,
+)

+# Tokenize the dataset
+if isinstance(
+dataset, Dataset
+):  # `IterableDataset.map` does not support `desc`
+map_kwargs["desc"] = f"Tokenizing {dataset_name} dataset"

+dataset = dataset.map(
+self.tokenize_row if not self.is_vision_model else self.process_row,
+remove_columns=["chosen", "rejected"],
+fn_kwargs={
+"processing_class": processing_class,
+"max_prompt_length": args.max_prompt_length,
+"max_completion_length": args.max_completion_length,
+# for enc-dec, we add the special tokens ([bos_token] + prompt + [eos_token]; completion + [eos_token])
+"add_special_tokens": False,
+},
+**map_kwargs,
+)

+return dataset

 @staticmethod
 def tokenize_row(
 features,
@@ -105,12 +177,8 @@ class AxolotlDPOTrainer(RngLoaderMixin, SchedulerMixin, DPOTrainer):
 # dpo trainer may incorrectly prepend the bos_token_id to the dpo outputs
 if res["chosen_input_ids"][0] == processing_class.bos_token_id:
 res["chosen_input_ids"] = res["chosen_input_ids"][1:]
-res["chosen_labels"] = res["chosen_labels"][1:]
-res["chosen_attention_mask"] = res["chosen_attention_mask"][1:]
 if res["rejected_input_ids"][0] == processing_class.bos_token_id:
 res["rejected_input_ids"] = res["rejected_input_ids"][1:]
-res["rejected_labels"] = res["rejected_labels"][1:]
-res["rejected_attention_mask"] = res["rejected_attention_mask"][1:]

 return res

@@ -124,3 +192,69 @@ class AxolotlDPOTrainer(RngLoaderMixin, SchedulerMixin, DPOTrainer):
 gc.collect()
 torch.cuda.empty_cache()
 return loss

+# TODO: remove this once https://github.com/huggingface/trl/pull/3377 is in a release
+def evaluation_loop(
+self,
+dataloader: DataLoader,
+description: str,
+prediction_loss_only: Optional[bool] = None,
+ignore_keys: Optional[list[str]] = None,
+metric_key_prefix: str = "eval",
+) -> EvalLoopOutput:
+"""
+Overriding built-in evaluation loop to store metrics for each batch.
+Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

+Works both with or without labels.
+"""

+# Sample and save to game log if requested (for one batch to save time)
+if self.generate_during_eval:
+# Generate random indices within the range of the total number of samples
+num_samples = len(dataloader.dataset)
+random_indices = random.sample(
+range(num_samples), k=self.args.eval_batch_size
+)

+# Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
+random_batch_dataset = dataloader.dataset.select(random_indices)
+random_batch = self.data_collator(random_batch_dataset)
+random_batch = self._prepare_inputs(random_batch)

+policy_output_decoded, ref_output_decoded = (
+self.generate_from_model_and_ref(self.model, random_batch)
+)

+table = pd.DataFrame(
+columns=["Prompt", "Policy", "Ref Model"],
+data=[
+[prompt, pol[len(prompt) :], ref[len(prompt) :]]
+for prompt, pol, ref in zip(
+random_batch_dataset["prompt"],
+policy_output_decoded,
+ref_output_decoded,
+)
+],
+)
+if "wandb" in self.args.report_to and self.accelerator.is_main_process:
+wandb.log({"game_log": wandb.Table(data=table)})

+if "comet_ml" in self.args.report_to:
+log_table_to_comet_experiment(
+name="game_log.csv",
+table=table,
+)

+# Base evaluation
+initial_output = super(  # pylint: disable=bad-super-call
+DPOTrainer, self
+).evaluation_loop(
+dataloader,
+description,
+prediction_loss_only,
+ignore_keys,
+metric_key_prefix,
+)

+return initial_output
@@ -1,37 +1,41 @@
|
|||||||
"""
|
"""GRPO Specific Strategy for training"""
|
||||||
GRPO Specific Strategy for training
|
|
||||||
"""
|
|
||||||
|
|
||||||
import importlib
|
import importlib
|
||||||
import inspect
|
import inspect
|
||||||
import logging
|
import logging
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
from trl.trainer.grpo_trainer import RewardFunc
|
from trl.trainer.grpo_trainer import RewardFunc
|
||||||
|
|
||||||
from axolotl.core.trainers.grpo.trainer import AxolotlGRPOTrainer
|
from axolotl.core.trainers.grpo.args import AxolotlGRPOConfig
|
||||||
|
from axolotl.core.trainers.grpo.trainer import (
|
||||||
|
AxolotlGRPOSequenceParallelTrainer,
|
||||||
|
AxolotlGRPOTrainer,
|
||||||
|
)
|
||||||
|
from axolotl.utils.dict import DictDefault
|
||||||
from axolotl.utils.schemas.trl import TRLConfig
|
from axolotl.utils.schemas.trl import TRLConfig
|
||||||
|
|
||||||
LOG = logging.getLogger("axolotl")
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class GRPOStrategy:
|
class GRPOStrategy:
|
||||||
"""
|
"""Strategy for GRPO training"""
|
||||||
Strategy for GRPO training
|
|
||||||
"""
|
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def get_trainer_class(cls):
|
def get_trainer_class(
|
||||||
|
cls, sequence_parallel: bool
|
||||||
|
    ) -> type[AxolotlGRPOTrainer] | type[AxolotlGRPOSequenceParallelTrainer]:
        if sequence_parallel:
            return AxolotlGRPOSequenceParallelTrainer
        return AxolotlGRPOTrainer

    @classmethod
    def get_training_args_class(cls) -> type[AxolotlGRPOConfig]:
        from axolotl.core.trainers.grpo.args import AxolotlGRPOConfig

        return AxolotlGRPOConfig

    @classmethod
    def set_training_args_kwargs(cls, cfg: DictDefault) -> dict[str, Any]:
        grpo_args_kwargs: dict[str, Any] = {}

        if not hasattr(cfg, "trl") or not cfg.trl:
            return grpo_args_kwargs

@@ -40,8 +44,8 @@ class GRPOStrategy:
        if trl.use_vllm:
            grpo_args_kwargs["use_vllm"] = trl.use_vllm
            grpo_args_kwargs["vllm_server_host"] = trl.vllm_server_host or trl.vllm.host  # type: ignore[attr-defined]
            grpo_args_kwargs["vllm_server_port"] = trl.vllm_server_port or trl.vllm.port  # type: ignore[attr-defined]
            if trl.vllm_server_timeout:
                grpo_args_kwargs["vllm_server_timeout"] = trl.vllm_server_timeout
            if trl.vllm_guided_decoding_regex:

@@ -63,6 +67,7 @@ class GRPOStrategy:
        grpo_args_kwargs["max_completion_length"] = trl.max_completion_length
        grpo_args_kwargs["log_completions"] = trl.log_completions
        grpo_args_kwargs["num_completions_to_print"] = trl.num_completions_to_print

        if trl.reward_weights:
            grpo_args_kwargs["reward_weights"] = trl.reward_weights

@@ -70,6 +75,13 @@ class GRPOStrategy:
        if trl.scale_rewards is not None:
            grpo_args_kwargs["scale_rewards"] = trl.scale_rewards

        if trl.loss_type is not None:
            grpo_args_kwargs["loss_type"] = trl.loss_type
        if trl.mask_truncated_completions is not None:
            grpo_args_kwargs["mask_truncated_completions"] = (
                trl.mask_truncated_completions
            )

        if trl.temperature is not None:
            grpo_args_kwargs["temperature"] = trl.temperature
        if trl.top_p is not None:

@@ -85,21 +97,27 @@ class GRPOStrategy:
        grpo_args_kwargs["num_iterations"] = trl.num_iterations
        if trl.epsilon is not None:
            grpo_args_kwargs["epsilon"] = trl.epsilon
        if trl.epsilon_high is not None:
            grpo_args_kwargs["epsilon_high"] = trl.epsilon_high

        if trl.use_liger_loss is not None:
            grpo_args_kwargs["use_liger_loss"] = trl.use_liger_loss

        return grpo_args_kwargs

    @classmethod
    def set_trainer_args(cls, cfg: DictDefault) -> list[Any]:
        trainer_args = []
        if cfg.trl and cfg.trl.reward_funcs:
            reward_funcs = []
            for reward_func_fqn in cfg.trl.reward_funcs:
                reward_funcs.append(cls.get_reward_func(reward_func_fqn))
            trainer_args.append(reward_funcs)

        return trainer_args

    @classmethod
    def set_trainer_kwargs(cls, cfg: DictDefault) -> dict[str, Any]:
        trainer_kwargs = {}
        if cfg.trl and cfg.trl.reward_processing_classes:
            trainer_kwargs["reward_processing_classes"] = (

@@ -113,7 +131,7 @@ class GRPOStrategy:
        return None

    @classmethod
    def get_blocklist_args_kwargs(cls) -> list[str]:
        return ["dataset_num_proc"]

    @classmethod

@@ -124,18 +142,20 @@ class GRPOStrategy:
        Args:
            reward_func_fqn (str): Fully qualified name of the reward function (e.g. r1_grpo.gsm8k_transform),
                or a HF hub path to the reward model.

        Returns:
            RewardFunc: A callable that accepts prompts and completions and returns rewards,
                or a path to a reward model.

        Raises:
            ValueError: If the reward function does not accept at least two arguments.
        """
        try:
            # use importlib to dynamically load the reward function from the module
            reward_func_module_name = reward_func_fqn.split(".")[-1]
            reward_func_module = importlib.import_module(
                ".".join(reward_func_fqn.split(".")[:-1])
            )
            reward_func = getattr(reward_func_module, reward_func_module_name)
            if not len(inspect.signature(reward_func).parameters) >= 2:
                raise ValueError(
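# Illustrative sketch (not part of the diff): the same importlib technique that
# `get_reward_func` above uses to resolve a fully-qualified reward function name.
# The module "my_rewards" and function "exact_match" are hypothetical placeholders.
import importlib
import inspect


def resolve_reward_func(reward_func_fqn: str):
    module_path, func_name = reward_func_fqn.rsplit(".", 1)
    module = importlib.import_module(module_path)
    func = getattr(module, func_name)
    if len(inspect.signature(func).parameters) < 2:
        raise ValueError("reward functions must accept at least (prompts, completions)")
    return func


# resolve_reward_func("my_rewards.exact_match") would import my_rewards and return exact_match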
@@ -11,6 +11,4 @@ from axolotl.core.training_args import AxolotlTrainingMixins


@dataclass
class AxolotlGRPOConfig(AxolotlTrainingMixins, GRPOConfig):
    """Axolotl GRPO Config for GRPO training"""
172	src/axolotl/core/trainers/grpo/sampler.py	Normal file
@@ -0,0 +1,172 @@
"""Repeat random sampler (similar to the one implemented in
https://github.com/huggingface/trl/blob/main/trl/trainer/grpo_trainer.py) that adds
sequence parallelism functionality; i.e., duplicating data across ranks in the same
sequence parallel group.
"""

from typing import Iterator, Sized

import torch
from torch.utils.data import Sampler


class SequenceParallelRepeatRandomSampler(Sampler):
    """Sampler for GRPO training with sequence parallelism.

    This sampler ensures:
    - Ranks in the same sequence parallel (SP) group receive identical data.
    - Each index is repeated multiple times for sampling different completions.
    - Entire batches are repeated for reuse in multiple updates.
    - Data is properly distributed across SP groups.

    In the table below, the values represent dataset indices. Each SP group has
    `sequence_parallel_degree = 2` GPUs working together on the same data. There are 2
    SP groups (SP0 and SP1), with `world_size = 4` total GPUs.

                                       Sequence Parallel Groups
                                  |       SP0       |       SP1       |
                                  | GPU 0  | GPU 1  | GPU 2  | GPU 3  |
                  global_step  step        <---> mini_repeat_count=3
                                            <----------> batch_size=2 per SP group
    grad_accum=2 ▲   ▲   0      0    [0 0 0 1 1 1]   [2 2 2 3 3 3]   <- SP groups get different data
                 ▼   |   0      1    [0 0 0 1 1 1]   [2 2 2 3 3 3]   <- Same data for each SP group GPU
                     |
                     |   1      2    [0 0 0 1 1 1]   [2 2 2 3 3 3]   <- Repeat same indices for iterations
    num_iterations=2 ▼   1      3    [0 0 0 1 1 1]   [2 2 2 3 3 3]   <- When using gradient accumulation

                         2      4    [4 4 4 5 5 5]   [6 6 6 7 7 7]   <- New batch of data indices
                         2      5    [4 4 4 5 5 5]   [6 6 6 7 7 7]
                         ...

    Args:
        dataset: Dataset to sample from.
        mini_repeat_count: How many times to repeat each sample immediately.
        world_size: Total number of processes.
        rank: Rank of current process.
        batch_size: Number of samples per batch.
        repeat_count: How many times to repeat the full sampling process.
        sequence_parallel_degree: Number of ranks in a sequence parallel group.
        shuffle: Whether to shuffle the dataset.
        seed: Random seed for shuffling.
        drop_last: Whether to drop the last incomplete batch.
    """

    def __init__(
        self,
        dataset: Sized,
        mini_repeat_count: int,
        world_size: int,
        rank: int,
        batch_size: int = 1,
        repeat_count: int = 1,
        sequence_parallel_degree: int = 1,
        shuffle: bool = True,
        seed: int = 0,
        drop_last: bool = False,
    ):
        self.dataset = dataset
        self.mini_repeat_count = mini_repeat_count
        self.batch_size = batch_size
        self.repeat_count = repeat_count
        self.shuffle = shuffle
        self.seed = seed
        self.drop_last = drop_last
        self.epoch = 0

        self.world_size = world_size
        self.rank = rank

        # Sequence parallelism parameters
        self.sequence_parallel_degree = sequence_parallel_degree
        self.num_sp_groups = world_size // sequence_parallel_degree
        self.sp_group_id = rank // sequence_parallel_degree

        # Adjust dataset size for distributed sampling
        self.num_samples = len(self.dataset)
        self.total_size = self.num_samples

        # Calculate effective number of samples per SP group
        if (
            self.drop_last
            and self.total_size % (self.num_sp_groups * self.batch_size) != 0
        ):
            # Drop last incomplete batch if drop_last is True
            self.num_samples_per_sp_group = (
                self.total_size // self.batch_size // self.num_sp_groups
            ) * self.batch_size
        else:
            # Round up to include last batch if drop_last is False
            self.num_samples_per_sp_group = (
                (self.total_size + self.batch_size * self.num_sp_groups - 1)
                // (self.batch_size * self.num_sp_groups)
                * self.batch_size
            )

        if shuffle:
            self.generator = torch.Generator()
            self.generator.manual_seed(seed)

    def __iter__(self) -> Iterator[int]:
        """Creates iterator over dataset indices.

        Returns:
            Iterator that yields indices into the dataset.
        """
        # Deterministically shuffle based on epoch and seed
        if self.shuffle:
            indices = torch.randperm(
                self.num_samples, generator=self.generator
            ).tolist()
        else:
            indices = list(range(self.num_samples))

        # Add extra samples to make it evenly divisible by batch_size
        if len(indices) % self.batch_size != 0:
            padding = indices[: self.batch_size - len(indices) % self.batch_size]
            indices += padding

        # Subsample based on SP group ID
        # Each SP group gets distinct batches of data
        batch_indices = []
        for i in range(0, len(indices), self.batch_size * self.num_sp_groups):
            start_idx = i + self.sp_group_id * self.batch_size
            end_idx = min(start_idx + self.batch_size, len(indices))
            if start_idx < len(indices):
                for j in range(self.batch_size):
                    if start_idx + j < end_idx:
                        batch_indices.append(indices[start_idx + j])

        # Make sure batch_indices is exactly batch_size * num_batches_per_sp_group
        if self.drop_last:
            num_batches_per_sp_group = self.num_samples_per_sp_group // self.batch_size
            target_len = self.batch_size * num_batches_per_sp_group
            if len(batch_indices) > target_len:
                batch_indices = batch_indices[:target_len]

        # Apply the GRPO repeat pattern
        final_indices = []
        for _ in range(self.repeat_count):
            for idx in batch_indices:
                for _ in range(self.mini_repeat_count):
                    final_indices.append(idx)

        return iter(final_indices)

    def __len__(self) -> int:
        """Returns the total length of the iterable including repetitions.

        Returns:
            Total number of samples.
        """
        # Total length including all repetitions
        return (
            self.num_samples_per_sp_group * self.mini_repeat_count * self.repeat_count
        )

    def set_epoch(self, epoch: int) -> None:
        """Sets the epoch for this sampler.

        Args:
            epoch: Epoch number to use for shuffling.
        """
        self.epoch = epoch
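# Illustrative sketch (not part of the diff): the index pattern the sampler above
# produces for one SP group, in plain Python. With batch_indices [0, 1],
# mini_repeat_count=3 (generations per prompt) and repeat_count=2 (num_iterations x
# gradient accumulation), every rank in the SP group iterates
# [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1], matching the table in the docstring.
def repeat_pattern(batch_indices, mini_repeat_count, repeat_count):
    final_indices = []
    for _ in range(repeat_count):
        for idx in batch_indices:
            final_indices.extend([idx] * mini_repeat_count)
    return final_indices


assert repeat_pattern([0, 1], 3, 2) == [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]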
@@ -1,69 +1,653 @@
"""Axolotl GRPO trainers (with and without sequence parallelism handling)"""

# pylint: disable=too-many-lines,duplicate-code,protected-access,no-member

import warnings
from typing import Any

import datasets
import torch
import torch.distributed as dist
import torch.utils.data
from accelerate.utils import (
    broadcast_object_list,
    gather,
    gather_object,
    is_peft_available,
)
from datasets import Dataset, IterableDataset
from torch import nn
from torch.utils.data import (
    BatchSampler,
    DataLoader,
    Sampler,
)
from transformers import (
    PreTrainedModel,
    PreTrainedTokenizerBase,
    Trainer,
    TrainerCallback,
)
from transformers.trainer_utils import seed_worker
from trl import GRPOTrainer
from trl.data_utils import (
    apply_chat_template,
    is_conversational,
    maybe_apply_chat_template,
)
from trl.extras.profiling import profiling_context
from trl.models import unwrap_model_for_generation
from trl.trainer.grpo_config import GRPOConfig
from trl.trainer.grpo_trainer import RewardFunc, nanstd
from trl.trainer.utils import pad

from axolotl.core.trainers.grpo.sampler import SequenceParallelRepeatRandomSampler
from axolotl.core.trainers.mixins import RngLoaderMixin, SchedulerMixin
from axolotl.monkeypatch.attention.ring_attn.patch import get_ring_attn_group

if is_peft_available():
    # pylint: disable=unused-import
    from peft import PeftConfig


class AxolotlGRPOTrainer(RngLoaderMixin, SchedulerMixin, GRPOTrainer):
    """Extend the base GRPOTrainer for axolotl helpers"""

    _tag_names = ["trl", "grpo", "axolotl"]


class AxolotlGRPOSequenceParallelTrainer(AxolotlGRPOTrainer):
    """Extend the base GRPOTrainer for sequence parallelism handling"""

    def __init__(
        self,
        model: str | PreTrainedModel,
        reward_funcs: RewardFunc | list[RewardFunc],
        args: GRPOConfig | None = None,
        train_dataset: Dataset | IterableDataset | None = None,
        eval_dataset: (
            Dataset | IterableDataset | dict[str, Dataset | IterableDataset] | None
        ) = None,
        processing_class: PreTrainedTokenizerBase | None = None,
        reward_processing_classes: (
            PreTrainedTokenizerBase | list[PreTrainedTokenizerBase] | None
        ) = None,
        callbacks: list[TrainerCallback] | None = None,
        optimizers: tuple[
            torch.optim.Optimizer | None, torch.optim.lr_scheduler.LambdaLR | None
        ] = (None, None),
        peft_config: "PeftConfig | None" = None,
    ):
        # First call the superclass constructor with all arguments
        super().__init__(
            model=model,
            reward_funcs=reward_funcs,
            args=args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            processing_class=processing_class,
            reward_processing_classes=reward_processing_classes,
            callbacks=callbacks,
            optimizers=optimizers,
            peft_config=peft_config,
        )

        # Get number of SP groups (number of processes divided by SP degree)
        num_processes = self.accelerator.num_processes
        num_sp_groups = num_processes // self.args.sequence_parallel_degree

        # Calculate batch size per SP group (not per process)
        sp_group_batch_size = self.args.per_device_train_batch_size * num_sp_groups
        possible_values = [
            n_gen
            for n_gen in range(2, sp_group_batch_size + 1)
            if (sp_group_batch_size) % n_gen == 0
        ]

        if self.num_generations not in possible_values:
            raise ValueError(
                f"The batch size per SP group ({num_sp_groups} x "
                f"{self.args.per_device_train_batch_size}) must be evenly divisible by "
                f"the number of generations per prompt ({self.num_generations}). Given "
                "the current configuration, the valid values for the number of "
                f"generations are: {possible_values}."
            )

        if self.args.eval_strategy != "no":
            # If sequence parallelism is enabled, calculate batch size per SP group
            sp_group_eval_batch_size = args.per_device_eval_batch_size * num_sp_groups  # type: ignore[union-attr]
            possible_values = [
                n_gen
                for n_gen in range(2, sp_group_eval_batch_size + 1)
                if (sp_group_eval_batch_size) % n_gen == 0
            ]

            if self.num_generations not in possible_values:
                raise ValueError(
                    f"With sequence parallelism (degree {self.args.sequence_parallel_degree}), "
                    f"the eval batch size per SP group ({num_sp_groups} x {self.args.per_device_eval_batch_size}) "
                    f"must be evenly divisible by the number of generations per prompt "
                    f"({self.num_generations}). Given the current eval batch size, "
                    f"the valid values for the number of generations are: {possible_values}."
                )

        # Initialize the SP group
        self.sp_group = get_ring_attn_group()
        self.rank = dist.get_rank()
        self.world_size = dist.get_world_size()
        self.local_rank = dist.get_rank(group=self.sp_group)
        self.local_world_size = dist.get_world_size(group=self.sp_group)
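# Illustrative sketch (not part of the diff): the divisibility check performed in
# __init__ above. The numbers below are assumed placeholders. With 8 processes,
# sequence_parallel_degree=2 and per_device_train_batch_size=4, there are 4 SP groups,
# the batch per SP group is 16, and num_generations must be one of its divisors.
num_processes = 8
sequence_parallel_degree = 2
per_device_train_batch_size = 4

num_sp_groups = num_processes // sequence_parallel_degree          # 4
sp_group_batch_size = per_device_train_batch_size * num_sp_groups  # 16
possible_values = [n for n in range(2, sp_group_batch_size + 1) if sp_group_batch_size % n == 0]
assert possible_values == [2, 4, 8, 16]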
    def _get_train_sampler(self) -> Sampler:
        effective_batch_size = (
            self.args.per_device_train_batch_size
            * self.world_size
            * self.args.gradient_accumulation_steps
        )

        return SequenceParallelRepeatRandomSampler(
            dataset=self.train_dataset,
            mini_repeat_count=self.num_generations,
            world_size=self.world_size,
            rank=self.rank,
            batch_size=effective_batch_size
            // self.num_generations
            // self.args.sequence_parallel_degree,
            repeat_count=self.num_iterations * self.args.gradient_accumulation_steps,
            sequence_parallel_degree=self.args.sequence_parallel_degree,
            shuffle=True,
            seed=self.args.seed,
            drop_last=True,
        )
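# Illustrative sketch (not part of the diff): how the sampler batch size above works
# out for an assumed configuration; the concrete numbers are placeholders.
world_size = 8
per_device_train_batch_size = 4
gradient_accumulation_steps = 2
num_generations = 8
sequence_parallel_degree = 2

effective_batch_size = per_device_train_batch_size * world_size * gradient_accumulation_steps  # 64
sampler_batch_size = effective_batch_size // num_generations // sequence_parallel_degree       # 4
assert (effective_batch_size, sampler_batch_size) == (64, 4)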
    def _create_dataloader_params(self, is_eval=False, custom_batch_size=None):
        """Create common dataloader parameters for train or eval."""
        batch_size = custom_batch_size or (
            self.args.eval_batch_size if is_eval else self._train_batch_size
        )

        params = {
            "batch_size": batch_size,
            "collate_fn": self.data_collator,
            "num_workers": self.args.dataloader_num_workers,
            "pin_memory": self.args.dataloader_pin_memory,
        }

        # Add persistent workers only for training
        if not is_eval and hasattr(self.args, "dataloader_persistent_workers"):
            params["persistent_workers"] = self.args.dataloader_persistent_workers

        # Add prefetch factor if specified
        if self.args.dataloader_prefetch_factor:
            params["prefetch_factor"] = self.args.dataloader_prefetch_factor

        return params

    def _prepare_dataloader(
        self, dataset, sampler, is_eval=False, custom_batch_size=None
    ):
        """Prepare a dataloader with the given dataset and sampler."""
        # Get base parameters
        dataloader_params = self._create_dataloader_params(is_eval, custom_batch_size)

        # Add sampler configuration
        if not isinstance(dataset, torch.utils.data.IterableDataset):
            if isinstance(sampler, BatchSampler):
                # batch_size and batch_sampler are mutually exclusive
                dataloader_params["batch_sampler"] = sampler
                del dataloader_params["batch_size"]
            else:
                dataloader_params["sampler"] = sampler
                dataloader_params["drop_last"] = self.args.dataloader_drop_last

            if not is_eval:
                dataloader_params["worker_init_fn"] = seed_worker

        # Create the dataloader
        dataloader = DataLoader(dataset, **dataloader_params)

        if self.args.sample_packing and (
            (not is_eval and not self.args.pretraining)
            or (is_eval and self.args.eval_sample_packing is not False)
        ):
            self.accelerator.even_batches = False

        # Return unprepared dataloader if using sequence parallelism
        # TODO(djsaunde): We might be able to use `accelerate`'s dataloader preparation
        # if we use `dispatch_batches` and `slice_fn_for_dispatch` properly (i.e.,
        # slice each batch along the sequence dimension).
        if self.args.sequence_parallel_degree > 1:
            return dataloader

        # Otherwise prepare with accelerator
        return self.accelerator.prepare_data_loader(dataloader)

    def get_train_dataloader(self) -> DataLoader:
        """Get dataloader for training"""
        train_dataset = self.train_dataset
        # pylint: disable=access-member-before-definition
        data_collator = self.data_collator  # type: ignore

        # Handle dataset preprocessing
        if isinstance(train_dataset, datasets.Dataset):
            # Add debug print before any modifications
            if self.args.sample_packing and not self.args.pretraining:
                train_dataset = train_dataset.remove_columns(["length"])
            if not self.args.sample_packing or self.args.pretraining:
                train_dataset = self._remove_unused_columns(
                    train_dataset, description="training"
                )
        else:
            self.data_collator = self._get_collator_with_removed_columns(  # pylint: disable=attribute-defined-outside-init
                data_collator,
                description="training",
            )

        # Get sampler and create dataloader
        sampler = self._get_train_sampler()
        dataloader = self._prepare_dataloader(train_dataset, sampler, is_eval=False)

        return dataloader
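# Illustrative sketch (not part of the diff): torch's DataLoader treats `batch_size`
# and `batch_sampler` as mutually exclusive, which is why the code above deletes
# "batch_size" from the params when a BatchSampler is supplied.
from torch.utils.data import BatchSampler, DataLoader, SequentialSampler

data = list(range(10))
batch_sampler = BatchSampler(SequentialSampler(data), batch_size=4, drop_last=False)
# DataLoader(data, batch_size=4, batch_sampler=batch_sampler) would raise a ValueError;
# passing only one of the two is valid:
loader = DataLoader(data, batch_sampler=batch_sampler)
assert [len(batch) for batch in loader] == [4, 4, 2]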
    def _generate_and_score_completions(
        self, inputs: list[dict[str, torch.Tensor | Any]]
    ) -> dict[str, torch.Tensor | Any]:
        device = self.accelerator.device
        mode = "eval" if self.control.should_evaluate else "train"

        prompts = [x["prompt"] for x in inputs]
        prompts_text = [
            maybe_apply_chat_template(example, self.processing_class)["prompt"]
            for example in inputs
        ]
        prompt_inputs = self.processing_class(
            text=prompts_text,
            return_tensors="pt",
            padding=True,
            padding_side="left",
            add_special_tokens=False,
        )
        prompt_inputs = Trainer._prepare_inputs(self, prompt_inputs)
        prompt_ids, prompt_mask = (
            prompt_inputs["input_ids"],
            prompt_inputs["attention_mask"],
        )

        if self.max_prompt_length is not None:
            prompt_ids = prompt_ids[:, -self.max_prompt_length :]
            prompt_mask = prompt_mask[:, -self.max_prompt_length :]

        # Generate completions using either vLLM or regular generation
        if self.args.use_vllm:
            # First, have main process load weights if needed
            # pylint: disable=access-member-before-definition
            if self.state.global_step != self._last_loaded_step:  # type: ignore[has-type]
                self._move_model_to_vllm()
                # pylint: disable=attribute-defined-outside-init
                self._last_loaded_step = self.state.global_step

            # Generate completions using vLLM: gather all prompts and use them in a single call in the main process
            all_prompts_text = gather_object(prompts_text)
            if self.accelerator.is_main_process:
                if self.args.sequence_parallel_degree > 1:
                    # Calculate sequence parallel group information
                    world_size = self.accelerator.num_processes
                    sequence_parallel_degree = self.args.sequence_parallel_degree
                    num_sp_groups = world_size // sequence_parallel_degree

                    # Since processes in the same SP group have the same prompts, we need to ensure
                    # we only take one copy of each prompt from each SP group
                    ordered_set_of_prompts = []
                    for sp_group_id in range(num_sp_groups):
                        # Get the first process from each SP group (typically the group leader)
                        group_leader_rank = sp_group_id * sequence_parallel_degree

                        # Extract prompts from this SP group, accounting for num_generations duplicates
                        # We only need prompts from one rank in each SP group
                        group_prompts = all_prompts_text[
                            group_leader_rank
                            * len(prompts_text) : (group_leader_rank + 1)
                            * len(prompts_text) : self.num_generations
                        ]

                        ordered_set_of_prompts.extend(group_prompts)
                else:
                    # Since 'prompts' contains 'num_generations' duplicates, we first take unique prompts, and generate
                    # num_generations outputs for each one. This is faster than generating outputs for each duplicate
                    # prompt individually.
                    ordered_set_of_prompts = all_prompts_text[
                        :: self.num_generations * self.args.sequence_parallel_degree
                    ]

                with profiling_context(self, "vLLM.generate"):
                    completion_ids = self.vllm_client.generate(
                        prompts=ordered_set_of_prompts,
                        n=self.num_generations,
                        repetition_penalty=self.repetition_penalty,
                        temperature=self.temperature,
                        top_p=self.top_p,
                        top_k=-1 if self.top_k is None else self.top_k,
                        min_p=0.0 if self.min_p is None else self.min_p,
                        max_tokens=self.max_completion_length,
                        guided_decoding_regex=self.guided_decoding_regex,
                    )
            else:
                completion_ids = [None] * (
                    len(all_prompts_text) // self.args.sequence_parallel_degree
                )

            # Broadcast the completions from the main process to all processes
            completion_ids = broadcast_object_list(completion_ids, from_process=0)
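# Illustrative sketch (not part of the diff): the prompt de-duplication performed above
# before calling vLLM, in the non-SP case. Each rank holds num_generations copies of
# every prompt, so taking every (num_generations * sequence_parallel_degree)-th entry
# of the gathered list recovers one copy of each unique prompt.
num_generations = 2
sequence_parallel_degree = 1
# two ranks, each contributing prompts ["a", "a", "b", "b"] and ["c", "c", "d", "d"]
all_prompts_text = ["a", "a", "b", "b", "c", "c", "d", "d"]
ordered_set_of_prompts = all_prompts_text[:: num_generations * sequence_parallel_degree]
assert ordered_set_of_prompts == ["a", "b", "c", "d"]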
            # Determine the appropriate slice based on sequence parallelism
            if self.args.sequence_parallel_degree > 1:
                # Calculate SP group ID (which group of ranks this rank belongs to)
                sp_group_id = self.accelerator.process_index // self.local_world_size

                # Calculate the start index for this SP group
                sp_group_start = sp_group_id * len(prompts) * self.local_world_size

                # All ranks in the same SP group get the same data slice
                process_slice = slice(
                    sp_group_start,
                    sp_group_start + len(prompts),
                )
                completion_ids = completion_ids[process_slice]
            else:
                # Original behavior for non-sequence parallel case
                process_slice = slice(
                    self.accelerator.process_index * len(prompts),
                    (self.accelerator.process_index + 1) * len(prompts),
                )
                completion_ids = completion_ids[process_slice]

            # Pad the completions, and concatenate them with the prompts
            completion_ids = [
                torch.tensor(ids, device=device) for ids in completion_ids
            ]
            completion_ids = pad(
                completion_ids, padding_value=self.processing_class.pad_token_id
            )
            prompt_completion_ids = torch.cat([prompt_ids, completion_ids], dim=1)
        else:
            # Regular generation path
            with unwrap_model_for_generation(
                self.model_wrapped,
                self.accelerator,
                gather_deepspeed3_params=self.args.ds3_gather_for_generation,
            ) as unwrapped_model:
                prompt_completion_ids = unwrapped_model.generate(
                    prompt_ids,
                    attention_mask=prompt_mask,
                    generation_config=self.generation_config,
                )

        # Compute prompt length and extract completion ids
        prompt_length = prompt_ids.size(1)
        prompt_ids = prompt_completion_ids[:, :prompt_length]
        completion_ids = prompt_completion_ids[:, prompt_length:]

        # Mask everything after the first EOS token
        is_eos = completion_ids == self.processing_class.eos_token_id
        eos_idx = torch.full(
            (is_eos.size(0),), is_eos.size(1), dtype=torch.long, device=device
        )
        eos_idx[is_eos.any(dim=1)] = is_eos.int().argmax(dim=1)[is_eos.any(dim=1)]
        sequence_indices = torch.arange(is_eos.size(1), device=device).expand(
            is_eos.size(0), -1
        )
        completion_mask = (sequence_indices <= eos_idx.unsqueeze(1)).int()

        # If mask_truncated_completions is enabled, zero out truncated completions in completion_mask
        if self.args.mask_truncated_completions:
            truncated_completions = ~is_eos.any(dim=1)
            completion_mask = (
                completion_mask * (~truncated_completions).unsqueeze(1).int()
            )

        # Concatenate prompt_mask with completion_mask for logit computation
        attention_mask = torch.cat([prompt_mask, completion_mask], dim=1)  # (B, P+C)
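# Illustrative sketch (not part of the diff): the EOS masking above on a tiny tensor.
# Tokens up to and including the first EOS stay active; everything after it is masked,
# and sequences with no EOS stay fully active (until mask_truncated_completions zeroes them).
import torch

eos_token_id = 2
completion_ids = torch.tensor([[5, 2, 7, 7],    # EOS at position 1
                               [5, 6, 7, 8]])   # never terminates
is_eos = completion_ids == eos_token_id
eos_idx = torch.full((is_eos.size(0),), is_eos.size(1), dtype=torch.long)
eos_idx[is_eos.any(dim=1)] = is_eos.int().argmax(dim=1)[is_eos.any(dim=1)]
sequence_indices = torch.arange(is_eos.size(1)).expand(is_eos.size(0), -1)
completion_mask = (sequence_indices <= eos_idx.unsqueeze(1)).int()
assert completion_mask.tolist() == [[1, 1, 0, 0], [1, 1, 1, 1]]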
        logits_to_keep = completion_ids.size(
            1
        )  # we only need to compute the logits for the completion tokens
        batch_size = (
            self.args.per_device_train_batch_size
            if mode == "train"
            else self.args.per_device_eval_batch_size
        )

        with torch.no_grad():
            # When using num_iterations == 1, old_per_token_logps == per_token_logps, so we can skip it's
            # computation here, and use per_token_logps.detach() instead.
            if self.num_iterations > 1:
                old_per_token_logps = self._get_per_token_logps(
                    self.model,
                    prompt_completion_ids,
                    attention_mask,
                    logits_to_keep,
                    batch_size,
                )
            else:
                old_per_token_logps = None

            if self.beta == 0.0:
                ref_per_token_logps = None
            elif self.ref_model is not None:
                ref_per_token_logps = self._get_per_token_logps(
                    self.ref_model,
                    prompt_completion_ids,
                    attention_mask,
                    logits_to_keep,
                    batch_size,
                )
            else:
                with self.accelerator.unwrap_model(self.model).disable_adapter():
                    ref_per_token_logps = self._get_per_token_logps(
                        self.model,
                        prompt_completion_ids,
                        attention_mask,
                        logits_to_keep,
                        batch_size,
                    )

        # Decode the generated completions
        completions_text = self.processing_class.batch_decode(
            completion_ids, skip_special_tokens=True
        )
        if is_conversational(inputs[0]):
            completions = []
            for prompt, completion in zip(prompts, completions_text):
                bootstrap = (
                    prompt.pop()["content"] if prompt[-1]["role"] == "assistant" else ""
                )
                completions.append(
                    [{"role": "assistant", "content": bootstrap + completion}]
                )
        else:
            completions = completions_text

        rewards_per_func = torch.zeros(
            len(prompts), len(self.reward_funcs), device=device
        )
        for i, (reward_func, reward_processing_class, reward_func_name) in enumerate(
            zip(
                self.reward_funcs,
                self.reward_processing_classes,
                self.reward_func_names,
            )
        ):
            with profiling_context(self, reward_func_name):
                if isinstance(
                    reward_func, nn.Module
                ):  # Module instead of PretrainedModel for compat with compiled models
                    if is_conversational(inputs[0]):
                        messages = [
                            {"messages": p + c} for p, c in zip(prompts, completions)
                        ]
                        texts = [
                            apply_chat_template(x, reward_processing_class)["text"]
                            for x in messages
                        ]
                    else:
                        texts = [p + c for p, c in zip(prompts, completions)]
                    reward_inputs = reward_processing_class(
                        text=texts,
                        return_tensors="pt",
                        padding=True,
                        padding_side="right",
                        add_special_tokens=False,
                    )
                    reward_inputs = Trainer._prepare_inputs(self, reward_inputs)
                    with torch.inference_mode():
                        rewards_per_func[:, i] = reward_func(**reward_inputs).logits[
                            :, 0
                        ]  # Shape (B*G,)
                else:
                    # Repeat all input columns (but "prompt" and "completion") to match the number of generations
                    keys = [
                        key for key in inputs[0] if key not in ["prompt", "completion"]
                    ]
                    reward_kwargs = {
                        key: [example[key] for example in inputs] for key in keys
                    }
                    output_reward_func = reward_func(
                        prompts=prompts, completions=completions, **reward_kwargs
                    )
                    # Convert None values to NaN
                    output_reward_func = [
                        reward if reward is not None else torch.nan
                        for reward in output_reward_func
                    ]

                    rewards_per_func[:, i] = torch.tensor(
                        output_reward_func, dtype=torch.float32, device=device
                    )

        # If all reward functions return None for a given row, issue a detailed warning
        if torch.isnan(rewards_per_func).all(dim=1).any():
            nan_row_idx = (
                torch.isnan(rewards_per_func).all(dim=1).nonzero(as_tuple=True)[0][0]
            )
            row_reward_kwargs = {
                key: value[nan_row_idx] for key, value in reward_kwargs.items()
            }
            row_reward_kwargs["prompt"] = prompts[nan_row_idx]
            row_reward_kwargs["completion"] = completions[nan_row_idx]
            warnings.warn(
                f"All reward functions returned None for the following kwargs: {row_reward_kwargs}. "
                "Please ensure that at least one reward function returns a valid reward."
            )

        # Gather the reward per function: this part is crucial, because the rewards are normalized per group and the
        # completions may be distributed across processes
        rewards_per_func = gather(rewards_per_func)

        # Apply weights to each reward function's output and sum
        rewards = (
            rewards_per_func * self.reward_weights.to(device).unsqueeze(0)
        ).nansum(dim=1)
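# Illustrative sketch (not part of the diff): the calling convention used above for
# callable (non-model) reward functions. They receive `prompts`, `completions`, and any
# extra dataset columns as keyword lists, and may return None to abstain; None is
# converted to NaN before aggregation. The function below is a hypothetical example,
# not one shipped with the code.
def length_penalty_reward(prompts, completions, **kwargs):
    rewards = []
    for completion in completions:
        text = completion[0]["content"] if isinstance(completion, list) else completion
        rewards.append(-float(len(text)) / 100.0)
    return rewards


assert length_penalty_reward(prompts=["hi"], completions=["a short answer"]) == [-0.14]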
        # Compute grouped-wise rewards
        mean_grouped_rewards = rewards.view(-1, self.num_generations).mean(dim=1)
        std_grouped_rewards = rewards.view(-1, self.num_generations).std(dim=1)

        # Normalize the rewards to compute the advantages
        mean_grouped_rewards = mean_grouped_rewards.repeat_interleave(
            self.num_generations, dim=0
        )
        std_grouped_rewards = std_grouped_rewards.repeat_interleave(
            self.num_generations, dim=0
        )
        advantages = rewards - mean_grouped_rewards
        if self.args.scale_rewards:
            advantages = advantages / (std_grouped_rewards + 1e-4)

        # Slice to keep only the local part of the data
        if self.args.sequence_parallel_degree > 1:
            # Calculate SP group ID (which group of ranks this rank belongs to)
            sp_group_id = self.accelerator.process_index // self.local_world_size

            # Calculate the start index for this SP group
            sp_group_start = sp_group_id * len(prompts) * self.local_world_size

            # All ranks in the same SP group get the same data slice
            process_slice = slice(
                sp_group_start,
                sp_group_start + len(prompts),
            )
        else:
            # Original behavior for non-sequence parallel case
            process_slice = slice(
                self.accelerator.process_index * len(prompts),
                (self.accelerator.process_index + 1) * len(prompts),
            )
        advantages = advantages[process_slice]

        # Log the metrics
        if mode == "train":
            self._total_train_tokens += (
                self.accelerator.gather_for_metrics(attention_mask.sum()).sum().item()
            )
            self._metrics[mode]["num_tokens"] = [self._total_train_tokens]

        # log completion lengths, mean, min, max
        agg_completion_mask = self.accelerator.gather_for_metrics(
            completion_mask.sum(1)
        )
        self._metrics[mode]["completions/mean_length"].append(
            agg_completion_mask.float().mean().item()
        )
        self._metrics[mode]["completions/min_length"].append(
            agg_completion_mask.float().min().item()
        )
        self._metrics[mode]["completions/max_length"].append(
            agg_completion_mask.float().max().item()
        )

        # identify sequences that terminated with EOS and log their lengths
        agg_terminated_with_eos = self.accelerator.gather_for_metrics(is_eos.any(dim=1))
        term_completion_mask = agg_completion_mask[agg_terminated_with_eos]
        clipped_completions_ratio = 1 - len(term_completion_mask) / len(
            agg_completion_mask
        )
        self._metrics[mode]["completions/clipped_ratio"].append(
            clipped_completions_ratio
        )
        if len(term_completion_mask) == 0:
            # edge case where no completed sequences are found
            term_completion_mask = torch.zeros(1, device=device)
        self._metrics[mode]["completions/mean_terminated_length"].append(
            term_completion_mask.float().mean().item()
        )
        self._metrics[mode]["completions/min_terminated_length"].append(
            term_completion_mask.float().min().item()
        )
        self._metrics[mode]["completions/max_terminated_length"].append(
            term_completion_mask.float().max().item()
        )

        # Calculate mean reward per function, but only for samples where the function was applied (non-NaN values)
        for i, reward_func_name in enumerate(self.reward_func_names):
            mean_rewards = torch.nanmean(rewards_per_func[:, i]).item()
            self._metrics[mode][f"rewards/{reward_func_name}/mean"].append(mean_rewards)
            std_rewards = nanstd(rewards_per_func[:, i]).item()
            self._metrics[mode][f"rewards/{reward_func_name}/std"].append(std_rewards)
        self._metrics[mode]["reward"].append(mean_grouped_rewards.mean().item())
        self._metrics[mode]["reward_std"].append(std_grouped_rewards.mean().item())

        # Log prompt and completion texts
        self._textual_logs["prompt"].extend(gather_object(prompts_text))
        self._textual_logs["completion"].extend(gather_object(completions_text))
        for i, name in enumerate(self.reward_func_names):
            self._textual_logs["rewards"][name].extend(rewards_per_func[:, i].tolist())

        return {
            "prompt_ids": prompt_ids,
            "prompt_mask": prompt_mask,
            "completion_ids": completion_ids,
            "completion_mask": completion_mask,
            "advantages": advantages,
            "old_per_token_logps": old_per_token_logps,
            "ref_per_token_logps": ref_per_token_logps,
        }
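# Illustrative sketch (not part of the diff): the group-wise advantage computation
# above on a toy reward tensor with num_generations=2. Rewards are grouped per prompt,
# centred by the group mean, and optionally scaled by the group std.
import torch

num_generations = 2
rewards = torch.tensor([1.0, 3.0, 0.0, 4.0])  # two prompts x two generations
mean_grouped = rewards.view(-1, num_generations).mean(dim=1).repeat_interleave(num_generations)
std_grouped = rewards.view(-1, num_generations).std(dim=1).repeat_interleave(num_generations)
advantages = rewards - mean_grouped            # tensor([-1., 1., -2., 2.])
scaled = advantages / (std_grouped + 1e-4)     # roughly [-0.71, 0.71, -0.71, 0.71]
assert advantages.tolist() == [-1.0, 1.0, -2.0, 2.0]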
@@ -3,9 +3,10 @@
import logging

import torch
from torch.optim.lr_scheduler import LRScheduler, OneCycleLR
from transformers.trainer import Trainer

from axolotl.integrations.base import PluginManager
from axolotl.utils.schedulers import (
    RexLR,
    get_cosine_schedule_with_min_lr,

@@ -25,9 +26,9 @@ class SchedulerMixin(Trainer):

    def create_scheduler(
        self, num_training_steps: int, optimizer: torch.optim.Optimizer = None
    ) -> LRScheduler:
        """
        Set up the scheduler. The optimizer of the trainer must have been set up either before this method is called or
        passed as an argument.

        Args:

@@ -47,7 +48,16 @@ class SchedulerMixin(Trainer):
        # fmt: off
        if self.lr_scheduler is None:  # type: ignore # pylint: disable=access-member-before-definition
            # fmt: on
            plugin_manager = PluginManager.get_instance()
            lr_scheduler: LRScheduler | None = plugin_manager.create_lr_scheduler(
                trainer=self,
                optimizer=optimizer,
                num_training_steps=num_training_steps
            )
            if lr_scheduler is not None:
                LOG.info(f"Using plugin-created lr_scheduler: {lr_scheduler}")
                self.lr_scheduler = lr_scheduler
            elif self.args.alternate_lr_scheduler_type == "one_cycle":
                num_warmup_steps = self.args.get_warmup_steps(num_training_steps)
                pct_start = num_warmup_steps / num_training_steps
                extra_lr_kwargs = {}

@@ -110,4 +120,4 @@ class SchedulerMixin(Trainer):
            if use_cosine_min_lr:
                LOG.warning("axolotl's cosine scheduler with min lr not used (e.g., because of deepspeed).")

        return self.lr_scheduler  # type: ignore
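# Illustrative sketch (not part of the diff): what a plugin-supplied scheduler hook can
# look like, given the create_lr_scheduler hook shown above. A real plugin would
# subclass BasePlugin (axolotl.integrations.base) and be registered via the config;
# here the class is standalone and hypothetical so the snippet stays self-contained.
import torch
from torch.optim.lr_scheduler import LambdaLR, LRScheduler


class ConstantWarmupSchedulerPlugin:
    def create_lr_scheduler(self, cfg, trainer, optimizer, num_training_steps) -> LRScheduler | None:
        # Returning None falls back to axolotl's built-in schedulers.
        warmup_steps = 10  # assumed value; a real plugin would read this from cfg
        return LambdaLR(optimizer, lambda step: min(1.0, (step + 1) / warmup_steps))


optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
scheduler = ConstantWarmupSchedulerPlugin().create_lr_scheduler(None, None, optimizer, 100)
assert scheduler is not None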
@@ -1,14 +1,12 @@
"""Module for Axolotl trainer sequence parallelism mixin"""

import torch.distributed as dist
from datasets import Dataset
from torch.utils.data import DistributedSampler, Sampler

from axolotl.monkeypatch.attention.ring_attn import (
    get_ring_attn_group,
)


class SequenceParallelMixin:
@@ -1,6 +1,7 @@
"""Module for ReLoRA trainer"""

import torch
from torch.optim.lr_scheduler import LRScheduler

from axolotl.core.trainers.base import AxolotlTrainer
from axolotl.monkeypatch.relora import ReLoRAScheduler

@@ -19,9 +20,11 @@ class ReLoRATrainer(AxolotlTrainer):
        self,
        num_training_steps: int,
        optimizer: torch.optim.Optimizer | None = None,
    ) -> LRScheduler:
        optimizer = self.optimizer if optimizer is None else optimizer
        lr_scheduler: LRScheduler = super().create_scheduler(
            num_training_steps, optimizer
        )

        if self.args.relora_steps:
            warmup_steps = (

@@ -30,7 +33,7 @@ class ReLoRATrainer(AxolotlTrainer):
            anneal_steps = (
                self.args.relora_anneal_steps if self.args.relora_anneal_steps else 1
            )
            self.lr_scheduler = ReLoRAScheduler(  # type: ignore
                optimizer,
                lr_scheduler,
                self.args.relora_steps,

@@ -38,6 +41,6 @@ class ReLoRATrainer(AxolotlTrainer):
                warmup_steps,
            )
        else:
            self.lr_scheduler = lr_scheduler  # type: ignore

        return self.lr_scheduler  # type: ignore
@@ -9,7 +9,7 @@ from PIL.Image import Resampling
from transformers import TrainingArguments
from trl import CPOConfig, KTOConfig, ORPOConfig, PRMConfig, RewardConfig

from axolotl.utils.schemas.enums import RingAttnFunc


@dataclass

@@ -227,6 +227,19 @@ class AxolotlTrainingMixins:
        },
    )

    adam_beta3: Optional[float] = field(
        default=None,
        metadata={
            "help": "The beta3 hyperparameter used in some optimizers such as CAME"
        },
    )
    adam_epsilon2: Optional[float] = field(
        default=None,
        metadata={
            "help": "The epsilon2 hyperparameter used in some optimizers such as CAME"
        },
    )

    # multi-modal section

    image_size: int | tuple[int, int] | None = field(
@@ -11,20 +11,19 @@ from accelerate.logging import get_logger
from datasets import Dataset
from transformers.trainer import Trainer

from axolotl.train import (
    TrainDatasetMeta,
    setup_model_and_tokenizer,
)
from axolotl.utils.dict import DictDefault
from axolotl.utils.distributed import cleanup_distributed
from axolotl.utils.trainer import setup_trainer

project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
src_dir = os.path.join(project_root, "src")
sys.path.insert(0, src_dir)

LOG = get_logger(__name__)


def evaluate_dataset(

@@ -75,37 +74,22 @@ def evaluate(*, cfg: DictDefault, dataset_meta: TrainDatasetMeta) -> Dict[str, f
    Returns:
        Dictionary mapping metric names to their values.
    """
    # Load tokenizer, processor and model
    LOG.debug("loading model for evaluation...")
    model, tokenizer, _, processor = setup_model_and_tokenizer(cfg)

    # Get datasets
    # pylint: disable=duplicate-code
    train_dataset = dataset_meta.train_dataset
    eval_dataset = dataset_meta.eval_dataset
    total_num_steps = dataset_meta.total_num_steps

    # Set up trainer
    trainer = setup_trainer(
        cfg=cfg,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        model=model,
        tokenizer=tokenizer,
        processor=processor,
        total_num_steps=total_num_steps,
@@ -24,6 +24,9 @@ import logging
|
|||||||
from typing import OrderedDict
|
from typing import OrderedDict
|
||||||
|
|
||||||
import torch
|
import torch
|
||||||
|
from torch.optim.lr_scheduler import LRScheduler
|
||||||
|
|
||||||
|
from axolotl.utils.dict import DictDefault
|
||||||
|
|
||||||
|
|
||||||
class BasePlugin:
|
class BasePlugin:
|
||||||
@@ -35,12 +38,15 @@ class BasePlugin:
|
|||||||
|
|
||||||
Methods:
|
Methods:
|
||||||
register(cfg): Registers the plugin with the given configuration.
|
register(cfg): Registers the plugin with the given configuration.
|
||||||
|
load_datasets(cfg): Loads and preprocesses the dataset for training.
|
||||||
pre_model_load(cfg): Performs actions before the model is loaded.
|
pre_model_load(cfg): Performs actions before the model is loaded.
|
||||||
post_model_load(cfg, model): Performs actions after the model is loaded.
|
post_model_build(cfg, model): Performs actions after the model is loaded, but before LoRA adapters are applied.
|
||||||
pre_lora_load(cfg, model): Performs actions before LoRA weights are loaded.
|
pre_lora_load(cfg, model): Performs actions before LoRA weights are loaded.
|
||||||
post_lora_load(cfg, model): Performs actions after LoRA weights are loaded.
|
post_lora_load(cfg, model): Performs actions after LoRA weights are loaded.
|
||||||
|
post_model_load(cfg, model): Performs actions after the model is loaded, inclusive of any adapters.
|
||||||
|
post_trainer_create(cfg, trainer): Performs actions after the trainer is created.
|
||||||
create_optimizer(cfg, trainer): Creates and returns an optimizer for training.
|
create_optimizer(cfg, trainer): Creates and returns an optimizer for training.
|
||||||
create_lr_scheduler(cfg, trainer, optimizer): Creates and returns a learning rate scheduler.
|
create_lr_scheduler(cfg, trainer, optimizer, num_training_steps): Creates and returns a learning rate scheduler.
|
||||||
add_callbacks_pre_trainer(cfg, model): Adds callbacks to the trainer before training.
|
add_callbacks_pre_trainer(cfg, model): Adds callbacks to the trainer before training.
|
||||||
add_callbacks_post_trainer(cfg, trainer): Adds callbacks to the trainer after training.
|
add_callbacks_post_trainer(cfg, trainer): Adds callbacks to the trainer after training.
|
||||||
"""
|
"""
|
||||||
@@ -61,106 +67,139 @@ class BasePlugin:
|
|||||||
None
|
None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def get_input_args(self):
|
def get_input_args(self) -> str | None:
|
||||||
"""
|
"""
|
||||||
Returns a pydantic model for the plugin's input arguments.
|
Returns a pydantic model for the plugin's input arguments.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
def load_datasets(self, cfg: DictDefault, preprocess: bool = False):
|
||||||
|
"""
|
||||||
|
Loads and preprocesses the dataset for training.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
cfg: The configuration for the plugin.
|
||||||
|
preprocess: Whether this is the preprocess step of the datasets.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dataset_meta: The metadata for the training dataset.
|
||||||
|
"""
|
||||||
|
|
||||||
def pre_model_load(self, cfg): # pylint: disable=unused-argument
|
def pre_model_load(self, cfg): # pylint: disable=unused-argument
|
||||||
"""
|
"""
|
||||||
Performs actions before the model is loaded.
|
Performs actions before the model is loaded.
|
||||||
|
|
||||||
Parameters:
|
Args:
|
||||||
cfg (dict): The configuration for the plugin.
|
cfg (dict): The configuration for the plugin.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
None
|
None
|
||||||
|
"""
|
||||||
|
|
||||||
|
def post_model_build(self, cfg, model): # pylint: disable=unused-argument
|
||||||
|
"""
|
||||||
|
Performs actions after the model is built/loaded, but before any adapters are applied.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
cfg (dict): The configuration for the plugin.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def post_model_load(self, cfg, model): # pylint: disable=unused-argument
|
def post_model_load(self, cfg, model): # pylint: disable=unused-argument
|
||||||
"""
|
"""
|
||||||
Performs actions after the model is loaded.
|
Performs actions after the model is loaded.
|
||||||
|
|
||||||
Parameters:
|
Args:
|
||||||
cfg (dict): The configuration for the plugin.
|
cfg (dict): The configuration for the plugin.
|
||||||
model (object): The loaded model.
|
model (object): The loaded model.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
None
|
None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def pre_lora_load(self, cfg, model): # pylint: disable=unused-argument
|
def pre_lora_load(self, cfg, model): # pylint: disable=unused-argument
|
||||||
"""
|
"""
|
||||||
Performs actions before LoRA weights are loaded.
|
Performs actions before LoRA weights are loaded.
|
||||||
|
|
||||||
Parameters:
|
Args:
|
||||||
cfg (dict): The configuration for the plugin.
|
cfg (dict): The configuration for the plugin.
|
||||||
model (object): The loaded model.
|
model (object): The loaded model.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
None
|
None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def post_lora_load(self, cfg, model): # pylint: disable=unused-argument
|
def post_lora_load(self, cfg, model): # pylint: disable=unused-argument
|
||||||
"""
|
"""
|
||||||
Performs actions after LoRA weights are loaded.
|
Performs actions after LoRA weights are loaded.
|
||||||
|
|
||||||
Parameters:
|
Args:
|
||||||
cfg (dict): The configuration for the plugin.
|
cfg (dict): The configuration for the plugin.
|
||||||
model (object): The loaded model.
|
model (object): The loaded model.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
None
|
None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def get_trainer_cls(self, cfg): # pylint: disable=unused-argument
|
def get_trainer_cls(self, cfg): # pylint: disable=unused-argument
|
||||||
"""
|
"""
|
||||||
Returns a custom class for the trainer.
|
Returns a custom class for the trainer.
|
||||||
|
|
||||||
Parameters:
|
Args:
|
||||||
cfg (dict): The global axolotl configuration.
|
cfg (dict): The global axolotl configuration.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
class: The class for the trainer.
|
class: The class for the trainer.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def post_trainer_create(self, cfg, trainer): # pylint: disable=unused-argument
|
||||||
|
"""
|
||||||
|
Performs actions after the trainer is created.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
cfg (dict): The configuration for the plugin.
|
||||||
|
trainer (object): The trainer object for training.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def create_optimizer(self, cfg, trainer): # pylint: disable=unused-argument
|
def create_optimizer(self, cfg, trainer): # pylint: disable=unused-argument
|
||||||
"""
|
"""
|
||||||
Creates and returns an optimizer for training.
|
Creates and returns an optimizer for training.
|
||||||
|
|
||||||
Parameters:
|
Args:
|
||||||
cfg (dict): The configuration for the plugin.
|
cfg (dict): The configuration for the plugin.
|
||||||
trainer (object): The trainer object for training.
|
trainer (object): The trainer object for training.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
object: The created optimizer.
|
object: The created optimizer.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def create_lr_scheduler(
|
def create_lr_scheduler(
|
||||||
self, cfg, trainer, optimizer
|
self, cfg, trainer, optimizer, num_training_steps
|
||||||
): # pylint: disable=unused-argument
|
) -> LRScheduler | None: # pylint: disable=unused-argument
|
||||||
"""
|
"""
|
||||||
Creates and returns a learning rate scheduler.
|
Creates and returns a learning rate scheduler.
|
||||||
|
|
||||||
Parameters:
|
Args:
|
||||||
cfg (dict): The configuration for the plugin.
|
cfg (dict): The configuration for the plugin.
|
||||||
trainer (object): The trainer object for training.
|
trainer (object): The trainer object for training.
|
||||||
optimizer (object): The optimizer for training.
|
optimizer (object): The optimizer for training.
|
||||||
|
num_training_steps (int): Total number of training steps
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
object: The created learning rate scheduler.
|
object (LRScheduler): The created learning rate scheduler.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
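The `num_training_steps` argument added to `create_lr_scheduler` lets a plugin build schedules that need the total step count up front. A rough sketch of a plugin overriding both factory hooks; the AdamW/OneCycleLR choices and the `cfg.learning_rate` key are illustrative assumptions, not mandated by this diff.

```python
# Hypothetical plugin overriding the optimizer and LR-scheduler factory hooks.
import torch
from torch.optim.lr_scheduler import LRScheduler, OneCycleLR

from axolotl.integrations.base import BasePlugin


class CustomOptimPlugin(BasePlugin):
    def create_optimizer(self, cfg, trainer):
        # Returning a non-None optimizer short-circuits the default creation path.
        return torch.optim.AdamW(trainer.model.parameters(), lr=cfg.learning_rate)

    def create_lr_scheduler(
        self, cfg, trainer, optimizer, num_training_steps
    ) -> LRScheduler | None:
        # num_training_steps allows the schedule to span the whole run.
        return OneCycleLR(
            optimizer, max_lr=cfg.learning_rate, total_steps=num_training_steps
        )
```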
def add_callbacks_pre_trainer(self, cfg, model): # pylint: disable=unused-argument
|
def add_callbacks_pre_trainer(self, cfg, model): # pylint: disable=unused-argument
|
||||||
"""
|
"""
|
||||||
setup callbacks before creating the trainer.
|
setup callbacks before creating the trainer.
|
||||||
|
|
||||||
Parameters:
|
Args:
|
||||||
cfg (dict): The configuration for the plugin.
|
cfg (dict): The configuration for the plugin.
|
||||||
model (object): The loaded model.
|
model (object): The loaded model.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
List[callable]: A list of callback functions to be added to the TrainingArgs
|
List[callable]: A list of callback functions to be added to the TrainingArgs
|
||||||
"""
|
"""
|
||||||
return []
|
return []
|
||||||
|
|
||||||
@@ -171,12 +210,12 @@ class BasePlugin:
|
|||||||
Adds callbacks to the trainer after creating the trainer.
|
Adds callbacks to the trainer after creating the trainer.
|
||||||
This is useful for callbacks that require access to the model or trainer.
|
This is useful for callbacks that require access to the model or trainer.
|
||||||
|
|
||||||
Parameters:
|
Args:
|
||||||
cfg (dict): The configuration for the plugin.
|
cfg (dict): The configuration for the plugin.
|
||||||
trainer (object): The trainer object for training.
|
trainer (object): The trainer object for training.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
List[callable]: A list of callback functions to be added
|
List[callable]: A list of callback functions to be added
|
||||||
"""
|
"""
|
||||||
return []
|
return []
|
||||||
|
|
||||||
@@ -184,23 +223,23 @@ class BasePlugin:
|
|||||||
"""
|
"""
|
||||||
Performs actions after training is complete.
|
Performs actions after training is complete.
|
||||||
|
|
||||||
Parameters:
|
Args:
|
||||||
cfg (dict): The axolotl configuration
|
cfg (dict): The axolotl configuration
|
||||||
model (object): The loaded model.
|
model (object): The loaded model.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
None
|
None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def post_train_unload(self, cfg): # pylint: disable=unused-argument
|
def post_train_unload(self, cfg): # pylint: disable=unused-argument
|
||||||
"""
|
"""
|
||||||
Performs actions after training is complete and the model is unloaded.
|
Performs actions after training is complete and the model is unloaded.
|
||||||
|
|
||||||
Parameters:
|
Args:
|
||||||
cfg (dict): The configuration for the plugin.
|
cfg (dict): The configuration for the plugin.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
None
|
None
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
@@ -261,6 +300,7 @@ class PluginManager:
|
|||||||
plugins: OrderedDict[str, BasePlugin] = collections.OrderedDict()
|
plugins: OrderedDict[str, BasePlugin] = collections.OrderedDict()
|
||||||
|
|
||||||
_instance = None
|
_instance = None
|
||||||
|
_cfg = None
|
||||||
|
|
||||||
def __new__(cls):
|
def __new__(cls):
|
||||||
"""
|
"""
|
||||||
@@ -268,7 +308,9 @@ class PluginManager:
|
|||||||
"""
|
"""
|
||||||
if cls._instance is None:
|
if cls._instance is None:
|
||||||
cls._instance = super(PluginManager, cls).__new__(cls)
|
cls._instance = super(PluginManager, cls).__new__(cls)
|
||||||
cls._instance.plugins = collections.OrderedDict()
|
cls._instance.plugins: OrderedDict[str, BasePlugin] = (
|
||||||
|
collections.OrderedDict()
|
||||||
|
)
|
||||||
return cls._instance
|
return cls._instance
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@@ -281,6 +323,14 @@ class PluginManager:
|
|||||||
PluginManager()
|
PluginManager()
|
||||||
return PluginManager._instance # type: ignore
|
return PluginManager._instance # type: ignore
|
||||||
|
|
||||||
|
@property
|
||||||
|
def cfg(self):
|
||||||
|
return self._cfg
|
||||||
|
|
||||||
|
@cfg.setter
|
||||||
|
def cfg(self, cfg):
|
||||||
|
self._cfg = cfg
|
||||||
|
|
||||||
def register(self, plugin_name: str):
|
def register(self, plugin_name: str):
|
||||||
"""
|
"""
|
||||||
Registers a new plugin by its name.
|
Registers a new plugin by its name.
|
||||||
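For orientation, the expected lifecycle looks roughly like the sketch below; the registered plugin path is the one used by the cut_cross_entropy integration later in this diff, and the surrounding flow is a simplification of the training entrypoint rather than code from this changeset.

```python
# Simplified illustration of the PluginManager lifecycle; not the actual axolotl entrypoint.
from axolotl.integrations.base import PluginManager

plugin_manager = PluginManager.get_instance()  # singleton: same object on every call
plugin_manager.register("axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin")

# With the cfg property added in this diff, hooks such as create_optimizer read the
# stored config instead of taking it as an argument:
# plugin_manager.cfg = cfg
# optimizer = plugin_manager.create_optimizer(trainer)
```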
@@ -316,6 +366,27 @@ class PluginManager:
|
|||||||
input_args.append(input_args_from_plugin)
|
input_args.append(input_args_from_plugin)
|
||||||
return input_args
|
return input_args
|
||||||
|
|
||||||
|
def load_datasets(self, cfg, preprocess: bool = False):
|
||||||
|
"""
|
||||||
|
Calls the load_datasets method of each registered plugin.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
cfg: The configuration for the plugins.
|
||||||
|
preprocess: Whether this is the preprocess step of the datasets.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dataset_meta: The dataset metadata loaded from all registered plugins.
|
||||||
|
"""
|
||||||
|
return_ds_meta = None
|
||||||
|
for plugin in self.plugins.values():
|
||||||
|
dataset_meta = plugin.load_datasets(cfg, preprocess)
|
||||||
|
if dataset_meta is not None:
|
||||||
|
if return_ds_meta is None:
|
||||||
|
return_ds_meta = dataset_meta
|
||||||
|
else:
|
||||||
|
raise RuntimeError("Multiple plugins loaded datasets")
|
||||||
|
return return_ds_meta
|
||||||
|
|
||||||
def pre_model_load(self, cfg):
|
def pre_model_load(self, cfg):
|
||||||
"""
|
"""
|
||||||
Calls the pre_model_load method of all registered plugins.
|
Calls the pre_model_load method of all registered plugins.
|
||||||
@@ -329,9 +400,22 @@ class PluginManager:
|
|||||||
for plugin in self.plugins.values():
|
for plugin in self.plugins.values():
|
||||||
plugin.pre_model_load(cfg)
|
plugin.pre_model_load(cfg)
|
||||||
|
|
||||||
|
def post_model_build(self, cfg, model):
|
||||||
|
"""
|
||||||
|
Calls the post_model_build method of all registered plugins after the model has been built/loaded,
|
||||||
|
but before any adapters have been applied.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
cfg (dict): The configuration for the plugins.
|
||||||
|
model (object): The loaded model.
|
||||||
|
"""
|
||||||
|
for plugin in self.plugins.values():
|
||||||
|
plugin.post_model_build(cfg, model)
|
||||||
|
|
||||||
def post_model_load(self, cfg, model):
|
def post_model_load(self, cfg, model):
|
||||||
"""
|
"""
|
||||||
Calls the post_model_load method of all registered plugins.
|
Calls the post_model_load method of all registered plugins after the model has been loaded
|
||||||
|
inclusive of any adapters
|
||||||
|
|
||||||
Parameters:
|
Parameters:
|
||||||
cfg (dict): The configuration for the plugins.
|
cfg (dict): The configuration for the plugins.
|
||||||
@@ -387,29 +471,43 @@ class PluginManager:
|
|||||||
return trainer_cls
|
return trainer_cls
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def create_optimizer(self, cfg, trainer):
|
def post_trainer_create(self, cfg, trainer):
|
||||||
"""
|
"""
|
||||||
Calls the create_optimizer method of all registered plugins and returns the first non-None optimizer.
|
Calls the post_trainer_create method of all registered plugins.
|
||||||
|
|
||||||
Parameters:
|
Parameters:
|
||||||
cfg (dict): The configuration for the plugins.
|
cfg (dict): The configuration for the plugins.
|
||||||
trainer (object): The trainer object for training.
|
trainer (object): The trainer object for training.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
None
|
||||||
|
"""
|
||||||
|
for plugin in self.plugins.values():
|
||||||
|
plugin.post_trainer_create(cfg, trainer)
|
||||||
|
|
||||||
|
def create_optimizer(self, trainer):
|
||||||
|
"""
|
||||||
|
Calls the create_optimizer method of all registered plugins and returns the first non-None optimizer.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
trainer (object): The trainer object for training.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
object: The created optimizer, or None if none was found.
|
object: The created optimizer, or None if none was found.
|
||||||
"""
|
"""
|
||||||
for plugin in self.plugins.values():
|
for plugin in self.plugins.values():
|
||||||
optimizer = plugin.create_optimizer(cfg, trainer)
|
optimizer = plugin.create_optimizer(self.cfg, trainer)
|
||||||
if optimizer is not None:
|
if optimizer is not None:
|
||||||
return optimizer
|
return optimizer
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def create_lr_scheduler(self, cfg, trainer, optimizer):
|
def create_lr_scheduler(
|
||||||
|
self, trainer, optimizer, num_training_steps
|
||||||
|
) -> LRScheduler | None:
|
||||||
"""
|
"""
|
||||||
Calls the create_lr_scheduler method of all registered plugins and returns the first non-None scheduler.
|
Calls the create_lr_scheduler method of all registered plugins and returns the first non-None scheduler.
|
||||||
|
|
||||||
Parameters:
|
Parameters:
|
||||||
cfg (dict): The configuration for the plugins.
|
|
||||||
trainer (object): The trainer object for training.
|
trainer (object): The trainer object for training.
|
||||||
optimizer (object): The optimizer for training.
|
optimizer (object): The optimizer for training.
|
||||||
|
|
||||||
@@ -417,7 +515,12 @@ class PluginManager:
|
|||||||
object: The created learning rate scheduler, or None if none was found.
|
object: The created learning rate scheduler, or None if none was found.
|
||||||
"""
|
"""
|
||||||
for plugin in self.plugins.values():
|
for plugin in self.plugins.values():
|
||||||
scheduler = plugin.create_lr_scheduler(cfg, trainer, optimizer)
|
scheduler: LRScheduler | None = plugin.create_lr_scheduler(
|
||||||
|
self.cfg,
|
||||||
|
trainer=trainer,
|
||||||
|
optimizer=optimizer,
|
||||||
|
num_training_steps=num_training_steps,
|
||||||
|
)
|
||||||
if scheduler is not None:
|
if scheduler is not None:
|
||||||
return scheduler
|
return scheduler
|
||||||
return None
|
return None
|
||||||
@@ -458,6 +561,20 @@ class PluginManager:
|
|||||||
callbacks.extend(plugin_callbacks)
|
callbacks.extend(plugin_callbacks)
|
||||||
return callbacks
|
return callbacks
|
||||||
|
|
||||||
|
def post_train(self, cfg, model):
|
||||||
|
"""
|
||||||
|
Calls the post_train method of all registered plugins.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
cfg (dict): The configuration for the plugins.
|
||||||
|
model (object): The loaded model.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
None
|
||||||
|
"""
|
||||||
|
for plugin in self.plugins.values():
|
||||||
|
plugin.post_train(cfg, model)
|
||||||
|
|
||||||
def post_train_unload(self, cfg):
|
def post_train_unload(self, cfg):
|
||||||
"""
|
"""
|
||||||
Calls the post_train_unload method of all registered plugins.
|
Calls the post_train_unload method of all registered plugins.
|
||||||
|
|||||||
@@ -27,15 +27,13 @@ pip3 uninstall -y cut-cross-entropy && pip3 install "cut-cross-entropy[transform
 ```yaml
 plugins:
 - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
-
-cut_cross_entropy: true
 ```
 
 ## Supported Models
 
 - llama
-- llama4_text
 - llama4
+- llama4_text
 - mllama
 - phi3
 - gemma
@@ -45,6 +43,11 @@ cut_cross_entropy: true
 - mistral
 - mistral3
 - qwen2
+- qwen2_moe
+- qwen2_vl
+- qwen2_5_vl
+- qwen3
+- qwen3_moe
 - cohere
 - cohere2
 - glm
|
|||||||
@@ -25,7 +25,7 @@ import torch
|
|||||||
|
|
||||||
from axolotl.integrations.base import BasePlugin
|
from axolotl.integrations.base import BasePlugin
|
||||||
from axolotl.utils import get_pytorch_version
|
from axolotl.utils import get_pytorch_version
|
||||||
from axolotl.utils.distributed import zero_only
|
from axolotl.utils.distributed import is_main_process
|
||||||
|
|
||||||
from .args import CutCrossEntropyArgs # pylint: disable=unused-import. # noqa: F401
|
from .args import CutCrossEntropyArgs # pylint: disable=unused-import. # noqa: F401
|
||||||
|
|
||||||
@@ -76,7 +76,7 @@ class CutCrossEntropyPlugin(BasePlugin):
|
|||||||
cce_patch,
|
cce_patch,
|
||||||
)
|
)
|
||||||
|
|
||||||
with zero_only():
|
if is_main_process(use_environ=True):
|
||||||
LOG.info(
|
LOG.info(
|
||||||
f"Applying Cut Cross Entropy to model type: {cfg.model_config_type}"
|
f"Applying Cut Cross Entropy to model type: {cfg.model_config_type}"
|
||||||
)
|
)
|
||||||
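The change above replaces the `zero_only()` context manager with an `is_main_process(use_environ=True)` check so the log line is emitted once per job rather than once per rank. A toy stand-in for that guard, assuming the usual `RANK` environment-variable convention; the real axolotl helper may behave differently.

```python
import os


def is_main_process_env() -> bool:
    # Toy approximation: rank 0 (or an unset RANK) is treated as the main process.
    return os.environ.get("RANK", "0") == "0"


if is_main_process_env():
    print("Applying Cut Cross Entropy patch")  # emitted once per job, not once per rank
```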
|
|||||||
@@ -28,7 +28,7 @@ class CutCrossEntropyArgs(BaseModel):
     Input args for Cut Cross Entropy.
     """
 
-    cut_cross_entropy: Optional[bool] = None
+    cut_cross_entropy: Optional[bool] = True
 
     @model_validator(mode="before")
     @classmethod
|
|||||||
@@ -20,25 +20,15 @@ from cut_cross_entropy.transformers.utils import (
|
|||||||
from transformers.cache_utils import Cache
|
from transformers.cache_utils import Cache
|
||||||
from transformers.modeling_outputs import CausalLMOutputWithPast
|
from transformers.modeling_outputs import CausalLMOutputWithPast
|
||||||
from transformers.models.cohere.modeling_cohere import (
|
from transformers.models.cohere.modeling_cohere import (
|
||||||
_CONFIG_FOR_DOC,
|
|
||||||
COHERE_INPUTS_DOCSTRING,
|
|
||||||
KwargsForCausalLM,
|
KwargsForCausalLM,
|
||||||
)
|
)
|
||||||
from transformers.processing_utils import Unpack
|
from transformers.processing_utils import Unpack
|
||||||
from transformers.utils import (
|
|
||||||
add_start_docstrings_to_model_forward,
|
|
||||||
replace_return_docstrings,
|
|
||||||
)
|
|
||||||
from transformers.utils.deprecation import deprecate_kwarg
|
from transformers.utils.deprecation import deprecate_kwarg
|
||||||
|
|
||||||
_PATCH_OPTS: PatchOptions | None = None
|
_PATCH_OPTS: PatchOptions | None = None
|
||||||
|
|
||||||
|
|
||||||
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
||||||
@add_start_docstrings_to_model_forward(COHERE_INPUTS_DOCSTRING)
|
|
||||||
@replace_return_docstrings(
|
|
||||||
output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
|
|
||||||
)
|
|
||||||
def cce_forward(
|
def cce_forward(
|
||||||
self,
|
self,
|
||||||
input_ids: torch.LongTensor | None = None,
|
input_ids: torch.LongTensor | None = None,
|
||||||
|
|||||||
@@ -17,25 +17,15 @@ from cut_cross_entropy.transformers.utils import (
|
|||||||
from transformers.cache_utils import Cache
|
from transformers.cache_utils import Cache
|
||||||
from transformers.modeling_outputs import CausalLMOutputWithPast
|
from transformers.modeling_outputs import CausalLMOutputWithPast
|
||||||
from transformers.models.gemma.modeling_gemma import (
|
from transformers.models.gemma.modeling_gemma import (
|
||||||
_CONFIG_FOR_DOC,
|
|
||||||
GEMMA_INPUTS_DOCSTRING,
|
|
||||||
KwargsForCausalLM,
|
KwargsForCausalLM,
|
||||||
)
|
)
|
||||||
from transformers.processing_utils import Unpack
|
from transformers.processing_utils import Unpack
|
||||||
from transformers.utils import (
|
|
||||||
add_start_docstrings_to_model_forward,
|
|
||||||
replace_return_docstrings,
|
|
||||||
)
|
|
||||||
from transformers.utils.deprecation import deprecate_kwarg
|
from transformers.utils.deprecation import deprecate_kwarg
|
||||||
|
|
||||||
_PATCH_OPTS: PatchOptions | None = None
|
_PATCH_OPTS: PatchOptions | None = None
|
||||||
|
|
||||||
|
|
||||||
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
||||||
@add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING)
|
|
||||||
@replace_return_docstrings(
|
|
||||||
output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
|
|
||||||
)
|
|
||||||
def cce_forward(
|
def cce_forward(
|
||||||
self,
|
self,
|
||||||
input_ids: torch.LongTensor | None = None,
|
input_ids: torch.LongTensor | None = None,
|
||||||
|
|||||||
@@ -20,15 +20,11 @@ from torch import nn
|
|||||||
from transformers.cache_utils import Cache, HybridCache
|
from transformers.cache_utils import Cache, HybridCache
|
||||||
from transformers.modeling_outputs import CausalLMOutputWithPast
|
from transformers.modeling_outputs import CausalLMOutputWithPast
|
||||||
from transformers.models.gemma3.modeling_gemma3 import (
|
from transformers.models.gemma3.modeling_gemma3 import (
|
||||||
_CONFIG_FOR_DOC,
|
|
||||||
GEMMA3_INPUTS_DOCSTRING,
|
|
||||||
Gemma3CausalLMOutputWithPast,
|
Gemma3CausalLMOutputWithPast,
|
||||||
logger,
|
logger,
|
||||||
)
|
)
|
||||||
from transformers.utils import (
|
from transformers.utils import (
|
||||||
add_start_docstrings_to_model_forward,
|
|
||||||
is_torchdynamo_compiling,
|
is_torchdynamo_compiling,
|
||||||
replace_return_docstrings,
|
|
||||||
)
|
)
|
||||||
from transformers.utils.deprecation import deprecate_kwarg
|
from transformers.utils.deprecation import deprecate_kwarg
|
||||||
|
|
||||||
@@ -38,10 +34,6 @@ _PATCH_OPTS: PatchOptions | None = None
|
|||||||
|
|
||||||
|
|
||||||
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
||||||
@add_start_docstrings_to_model_forward(GEMMA3_INPUTS_DOCSTRING)
|
|
||||||
@replace_return_docstrings(
|
|
||||||
output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
|
|
||||||
)
|
|
||||||
def cce_forward(
|
def cce_forward(
|
||||||
self,
|
self,
|
||||||
input_ids: torch.LongTensor | None = None,
|
input_ids: torch.LongTensor | None = None,
|
||||||
@@ -170,10 +162,6 @@ def cce_forward(
|
|||||||
|
|
||||||
|
|
||||||
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
||||||
@add_start_docstrings_to_model_forward(GEMMA3_INPUTS_DOCSTRING)
|
|
||||||
@replace_return_docstrings(
|
|
||||||
output_type=Gemma3CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
|
|
||||||
)
|
|
||||||
def cce_forward_multimodal(
|
def cce_forward_multimodal(
|
||||||
self,
|
self,
|
||||||
input_ids: torch.LongTensor | None = None,
|
input_ids: torch.LongTensor | None = None,
|
||||||
|
|||||||
164 src/axolotl/integrations/cut_cross_entropy/monkeypatch/llama.py (Normal file)
@@ -0,0 +1,164 @@
|
|||||||
|
"""Llama CCE patch. Adapted from transformers v4.51.2"""
|
||||||
|
|
||||||
|
# pylint: disable=duplicate-code
|
||||||
|
|
||||||
|
|
||||||
|
from types import MethodType
|
||||||
|
from typing import Optional, Union
|
||||||
|
|
||||||
|
import torch
|
||||||
|
import transformers
|
||||||
|
from cut_cross_entropy.transformers.utils import (
|
||||||
|
PatchOptions,
|
||||||
|
TransformersModelT,
|
||||||
|
apply_lce,
|
||||||
|
)
|
||||||
|
from transformers.cache_utils import Cache
|
||||||
|
from transformers.modeling_outputs import (
|
||||||
|
BaseModelOutputWithPast,
|
||||||
|
CausalLMOutputWithPast,
|
||||||
|
)
|
||||||
|
from transformers.models.llama.modeling_llama import (
|
||||||
|
KwargsForCausalLM,
|
||||||
|
)
|
||||||
|
from transformers.processing_utils import Unpack
|
||||||
|
from transformers.utils.deprecation import deprecate_kwarg
|
||||||
|
from transformers.utils.generic import can_return_tuple
|
||||||
|
|
||||||
|
_PATCH_OPTS: PatchOptions | None = None
|
||||||
|
|
||||||
|
|
||||||
|
@can_return_tuple
|
||||||
|
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
||||||
|
def cce_forward(
|
||||||
|
self,
|
||||||
|
input_ids: Optional[torch.LongTensor] = None,
|
||||||
|
attention_mask: Optional[torch.Tensor] = None,
|
||||||
|
position_ids: Optional[torch.LongTensor] = None,
|
||||||
|
past_key_values: Optional[Cache] = None,
|
||||||
|
inputs_embeds: Optional[torch.FloatTensor] = None,
|
||||||
|
labels: Optional[torch.LongTensor] = None,
|
||||||
|
use_cache: Optional[bool] = None,
|
||||||
|
output_attentions: Optional[bool] = None,
|
||||||
|
output_hidden_states: Optional[bool] = None,
|
||||||
|
cache_position: Optional[torch.LongTensor] = None,
|
||||||
|
logits_to_keep: Union[int, torch.Tensor] = 0,
|
||||||
|
**kwargs: Unpack[KwargsForCausalLM],
|
||||||
|
) -> CausalLMOutputWithPast:
|
||||||
|
r"""
|
||||||
|
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
||||||
|
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
||||||
|
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
||||||
|
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
||||||
|
|
||||||
|
logits_to_keep (`int` or `torch.Tensor`, *optional*):
|
||||||
|
If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
|
||||||
|
`input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
|
||||||
|
token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
|
||||||
|
If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
|
||||||
|
This is useful when using packed tensor format (single dimension for batch and sequence length).
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
>>> from transformers import AutoTokenizer, LlamaForCausalLM
|
||||||
|
|
||||||
|
>>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
|
||||||
|
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
|
||||||
|
|
||||||
|
>>> prompt = "Hey, are you conscious? Can you talk to me?"
|
||||||
|
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
||||||
|
|
||||||
|
>>> # Generate
|
||||||
|
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
||||||
|
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
||||||
|
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
||||||
|
```"""
|
||||||
|
output_attentions = (
|
||||||
|
output_attentions
|
||||||
|
if output_attentions is not None
|
||||||
|
else self.config.output_attentions
|
||||||
|
)
|
||||||
|
output_hidden_states = (
|
||||||
|
output_hidden_states
|
||||||
|
if output_hidden_states is not None
|
||||||
|
else self.config.output_hidden_states
|
||||||
|
)
|
||||||
|
|
||||||
|
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
||||||
|
outputs: BaseModelOutputWithPast = self.model(
|
||||||
|
input_ids=input_ids,
|
||||||
|
attention_mask=attention_mask,
|
||||||
|
position_ids=position_ids,
|
||||||
|
past_key_values=past_key_values,
|
||||||
|
inputs_embeds=inputs_embeds,
|
||||||
|
use_cache=use_cache,
|
||||||
|
output_attentions=output_attentions,
|
||||||
|
output_hidden_states=output_hidden_states,
|
||||||
|
cache_position=cache_position,
|
||||||
|
**kwargs,
|
||||||
|
)
|
||||||
|
|
||||||
|
hidden_states = outputs.last_hidden_state
|
||||||
|
if hidden_states is None:
|
||||||
|
raise ValueError("hidden_states is None")
|
||||||
|
|
||||||
|
loss = None
|
||||||
|
logits = None
|
||||||
|
|
||||||
|
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
|
||||||
|
slice_indices = (
|
||||||
|
slice(-logits_to_keep, None)
|
||||||
|
if isinstance(logits_to_keep, int)
|
||||||
|
else logits_to_keep
|
||||||
|
)
|
||||||
|
if _PATCH_OPTS is not None and _PATCH_OPTS.use_lce(labels, self.training):
|
||||||
|
assert labels is not None
|
||||||
|
loss = apply_lce(
|
||||||
|
hidden_states[:, slice_indices, :],
|
||||||
|
self.lm_head.weight,
|
||||||
|
labels,
|
||||||
|
_PATCH_OPTS,
|
||||||
|
**kwargs,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logits = self.lm_head(hidden_states[:, slice_indices, :])
|
||||||
|
|
||||||
|
if labels is not None:
|
||||||
|
loss = self.loss_function(
|
||||||
|
logits=logits,
|
||||||
|
labels=labels,
|
||||||
|
vocab_size=self.config.vocab_size,
|
||||||
|
**kwargs,
|
||||||
|
)
|
||||||
|
|
||||||
|
return CausalLMOutputWithPast(
|
||||||
|
loss=loss,
|
||||||
|
logits=logits,
|
||||||
|
past_key_values=outputs.past_key_values,
|
||||||
|
hidden_states=outputs.hidden_states,
|
||||||
|
attentions=outputs.attentions,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def patch_llama(
|
||||||
|
maybe_model: TransformersModelT | str | transformers.PretrainedConfig,
|
||||||
|
patch_options: PatchOptions,
|
||||||
|
) -> TransformersModelT | None:
|
||||||
|
"""Patch Llama for CCE."""
|
||||||
|
global _PATCH_OPTS # pylint: disable=global-statement
|
||||||
|
from transformers.models.llama import modeling_llama
|
||||||
|
|
||||||
|
_PATCH_OPTS = patch_options
|
||||||
|
|
||||||
|
if isinstance(maybe_model, transformers.PreTrainedModel):
|
||||||
|
assert isinstance(
|
||||||
|
maybe_model, modeling_llama.LlamaForCausalLM
|
||||||
|
), f"Expected a LlamaForCausalLM model. Got {type(maybe_model)}."
|
||||||
|
maybe_model.forward = MethodType(cce_forward, maybe_model)
|
||||||
|
return maybe_model
|
||||||
|
|
||||||
|
modeling_llama.LlamaForCausalLM.forward = cce_forward
|
||||||
|
return None
|
||||||
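`patch_llama` above shows the two patching modes used throughout these monkeypatches: binding the replacement `forward` onto an already-built model instance via `MethodType`, or swapping it on the class so that later instances pick it up. A self-contained toy version of that distinction, unrelated to transformers and purely illustrative:

```python
# Toy illustration of instance-level vs class-level forward patching.
from types import MethodType


class Model:
    def forward(self, x):
        return x


def patched_forward(self, x):
    return x * 2


existing = Model()
existing.forward = MethodType(patched_forward, existing)  # patches only this instance

Model.forward = patched_forward  # patches every instance created from now on
fresh = Model()

assert existing.forward(3) == 6 and fresh.forward(3) == 6
```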
@@ -16,22 +16,12 @@ from torch import nn
|
|||||||
from transformers.cache_utils import Cache
|
from transformers.cache_utils import Cache
|
||||||
from transformers.modeling_outputs import CausalLMOutputWithPast
|
from transformers.modeling_outputs import CausalLMOutputWithPast
|
||||||
from transformers.models.llama4.modeling_llama4 import (
|
from transformers.models.llama4.modeling_llama4 import (
|
||||||
_CONFIG_FOR_DOC,
|
|
||||||
LLAMA4_INPUTS_DOCSTRING,
|
|
||||||
Llama4CausalLMOutputWithPast,
|
Llama4CausalLMOutputWithPast,
|
||||||
)
|
)
|
||||||
from transformers.utils import (
|
|
||||||
add_start_docstrings_to_model_forward,
|
|
||||||
replace_return_docstrings,
|
|
||||||
)
|
|
||||||
|
|
||||||
_PATCH_OPTS: PatchOptions | None = None
|
_PATCH_OPTS: PatchOptions | None = None
|
||||||
|
|
||||||
|
|
||||||
@add_start_docstrings_to_model_forward(LLAMA4_INPUTS_DOCSTRING)
|
|
||||||
@replace_return_docstrings(
|
|
||||||
output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
|
|
||||||
)
|
|
||||||
def cce_forward(
|
def cce_forward(
|
||||||
self,
|
self,
|
||||||
input_ids: torch.LongTensor | None = None,
|
input_ids: torch.LongTensor | None = None,
|
||||||
@@ -160,9 +150,6 @@ def cce_forward(
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@replace_return_docstrings(
|
|
||||||
output_type=Llama4CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
|
|
||||||
)
|
|
||||||
def cce_forward_multimodal(
|
def cce_forward_multimodal(
|
||||||
self,
|
self,
|
||||||
input_ids: torch.LongTensor | None = None, # type: ignore
|
input_ids: torch.LongTensor | None = None, # type: ignore
|
||||||
|
|||||||
@@ -19,15 +19,11 @@ from transformers.models.mistral3.modeling_mistral3 import (
|
|||||||
Mistral3CausalLMOutputWithPast,
|
Mistral3CausalLMOutputWithPast,
|
||||||
)
|
)
|
||||||
from transformers.models.mistral.modeling_mistral import (
|
from transformers.models.mistral.modeling_mistral import (
|
||||||
_CONFIG_FOR_DOC,
|
|
||||||
MISTRAL_INPUTS_DOCSTRING,
|
|
||||||
KwargsForCausalLM,
|
KwargsForCausalLM,
|
||||||
)
|
)
|
||||||
from transformers.processing_utils import Unpack
|
from transformers.processing_utils import Unpack
|
||||||
from transformers.utils import (
|
from transformers.utils import (
|
||||||
add_start_docstrings_to_model_forward,
|
|
||||||
is_torchdynamo_compiling,
|
is_torchdynamo_compiling,
|
||||||
replace_return_docstrings,
|
|
||||||
)
|
)
|
||||||
from transformers.utils.deprecation import deprecate_kwarg
|
from transformers.utils.deprecation import deprecate_kwarg
|
||||||
|
|
||||||
@@ -35,10 +31,6 @@ _PATCH_OPTS: PatchOptions | None = None
|
|||||||
|
|
||||||
|
|
||||||
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
||||||
@add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
|
|
||||||
@replace_return_docstrings(
|
|
||||||
output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
|
|
||||||
)
|
|
||||||
def cce_forward(
|
def cce_forward(
|
||||||
self,
|
self,
|
||||||
input_ids: torch.LongTensor | None = None,
|
input_ids: torch.LongTensor | None = None,
|
||||||
|
|||||||
@@ -5,9 +5,7 @@
|
|||||||
import transformers
|
import transformers
|
||||||
from cut_cross_entropy.cce_utils import LinearCrossEntropyImpl
|
from cut_cross_entropy.cce_utils import LinearCrossEntropyImpl
|
||||||
from cut_cross_entropy.linear_cross_entropy import LCE_IMPL_DEFAULT
|
from cut_cross_entropy.linear_cross_entropy import LCE_IMPL_DEFAULT
|
||||||
from cut_cross_entropy.transformers.llama import patch_llama
|
|
||||||
from cut_cross_entropy.transformers.phi3 import patch_phi3
|
from cut_cross_entropy.transformers.phi3 import patch_phi3
|
||||||
from cut_cross_entropy.transformers.qwen2 import patch_qwen2
|
|
||||||
from cut_cross_entropy.transformers.utils import PatchOptions, TransformersModelT
|
from cut_cross_entropy.transformers.utils import PatchOptions, TransformersModelT
|
||||||
|
|
||||||
from axolotl.integrations.cut_cross_entropy.monkeypatch.cohere import (
|
from axolotl.integrations.cut_cross_entropy.monkeypatch.cohere import (
|
||||||
@@ -24,6 +22,9 @@ from axolotl.integrations.cut_cross_entropy.monkeypatch.glm4 import (
|
|||||||
patch_glm,
|
patch_glm,
|
||||||
patch_glm4,
|
patch_glm4,
|
||||||
)
|
)
|
||||||
|
from axolotl.integrations.cut_cross_entropy.monkeypatch.llama import (
|
||||||
|
patch_llama,
|
||||||
|
)
|
||||||
from axolotl.integrations.cut_cross_entropy.monkeypatch.llama4 import (
|
from axolotl.integrations.cut_cross_entropy.monkeypatch.llama4 import (
|
||||||
patch_llama4,
|
patch_llama4,
|
||||||
patch_llama4_text,
|
patch_llama4_text,
|
||||||
@@ -33,6 +34,22 @@ from axolotl.integrations.cut_cross_entropy.monkeypatch.mistral3 import (
|
|||||||
patch_mistral3,
|
patch_mistral3,
|
||||||
)
|
)
|
||||||
from axolotl.integrations.cut_cross_entropy.monkeypatch.mllama import patch_mllama
|
from axolotl.integrations.cut_cross_entropy.monkeypatch.mllama import patch_mllama
|
||||||
|
from axolotl.integrations.cut_cross_entropy.monkeypatch.qwen2 import (
|
||||||
|
patch_qwen2,
|
||||||
|
)
|
||||||
|
from axolotl.integrations.cut_cross_entropy.monkeypatch.qwen2_5_vl import (
|
||||||
|
patch_qwen2_5_vl,
|
||||||
|
)
|
||||||
|
from axolotl.integrations.cut_cross_entropy.monkeypatch.qwen2_moe import (
|
||||||
|
patch_qwen2_moe,
|
||||||
|
)
|
||||||
|
from axolotl.integrations.cut_cross_entropy.monkeypatch.qwen2_vl import (
|
||||||
|
patch_qwen2_vl,
|
||||||
|
)
|
||||||
|
from axolotl.integrations.cut_cross_entropy.monkeypatch.qwen3 import patch_qwen3
|
||||||
|
from axolotl.integrations.cut_cross_entropy.monkeypatch.qwen3_moe import (
|
||||||
|
patch_qwen3_moe,
|
||||||
|
)
|
||||||
|
|
||||||
 CUT_CROSS_ENTROPY_MODEL_MAPPING = {
     "llama": patch_llama,
@@ -47,6 +64,11 @@ CUT_CROSS_ENTROPY_MODEL_MAPPING = {
     "mistral": patch_mistral,
     "mistral3": patch_mistral3,
     "qwen2": patch_qwen2,
+    "qwen2_moe": patch_qwen2_moe,
+    "qwen2_vl": patch_qwen2_vl,
+    "qwen2_5_vl": patch_qwen2_5_vl,
+    "qwen3": patch_qwen3,
+    "qwen3_moe": patch_qwen3_moe,
     "cohere": patch_cohere,
     "cohere2": patch_cohere2,
     "glm": patch_glm,
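With the new entries above, CCE support is selected by looking the model type up in this mapping and calling the matching `patch_*` function. A rough dispatch sketch; the `PatchOptions` construction is deliberately omitted because its fields are not shown in this diff, and the helper name here is hypothetical.

```python
# Hypothetical dispatch sketch over CUT_CROSS_ENTROPY_MODEL_MAPPING.
def apply_cce_patch(cfg, model, patch_options):
    patch_fn = CUT_CROSS_ENTROPY_MODEL_MAPPING.get(cfg.model_config_type)
    if patch_fn is None:
        raise ValueError(f"Cut Cross Entropy does not support {cfg.model_config_type}")
    # Each patch_* function either patches the passed-in instance or patches the class.
    return patch_fn(model, patch_options)
```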
|
|||||||
@@ -0,0 +1,37 @@
|
|||||||
|
"""Qwen2 CCE patch. The model inherits Llama's modeling code and uses the same forward method."""
|
||||||
|
|
||||||
|
# pylint: disable=duplicate-code
|
||||||
|
|
||||||
|
from types import MethodType
|
||||||
|
|
||||||
|
import transformers
|
||||||
|
from cut_cross_entropy.transformers.utils import (
|
||||||
|
PatchOptions,
|
||||||
|
TransformersModelT,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def patch_qwen2(
|
||||||
|
maybe_model: TransformersModelT | str | transformers.PretrainedConfig,
|
||||||
|
patch_options: PatchOptions,
|
||||||
|
) -> TransformersModelT | None:
|
||||||
|
from transformers.models.qwen2 import modeling_qwen2
|
||||||
|
|
||||||
|
# Set the _PATCH_OPTS in the llama patch file
|
||||||
|
import axolotl.integrations.cut_cross_entropy.monkeypatch.llama as llama_patch
|
||||||
|
|
||||||
|
llama_patch._PATCH_OPTS = patch_options # pylint: disable=protected-access
|
||||||
|
|
||||||
|
from axolotl.integrations.cut_cross_entropy.monkeypatch.llama import (
|
||||||
|
cce_forward,
|
||||||
|
)
|
||||||
|
|
||||||
|
if isinstance(maybe_model, transformers.PreTrainedModel):
|
||||||
|
assert isinstance(
|
||||||
|
maybe_model, modeling_qwen2.Qwen2ForCausalLM
|
||||||
|
), f"Expected a Qwen2ForCausalLM model. Got {type(maybe_model)}."
|
||||||
|
maybe_model.forward = MethodType(cce_forward, maybe_model)
|
||||||
|
return maybe_model
|
||||||
|
|
||||||
|
modeling_qwen2.Qwen2ForCausalLM.forward = cce_forward
|
||||||
|
return None
|
||||||
@@ -0,0 +1,246 @@
|
|||||||
|
"""Qwen2.5 VL CCE patch. Adapted from transformers v4.51.2"""
|
||||||
|
|
||||||
|
# pylint: disable=duplicate-code
|
||||||
|
|
||||||
|
|
||||||
|
from types import MethodType
|
||||||
|
from typing import Optional, Tuple, Union
|
||||||
|
|
||||||
|
import torch
|
||||||
|
import transformers
|
||||||
|
from cut_cross_entropy.transformers.utils import (
|
||||||
|
PatchOptions,
|
||||||
|
TransformersModelT,
|
||||||
|
apply_lce,
|
||||||
|
)
|
||||||
|
from torch.nn import CrossEntropyLoss
|
||||||
|
from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import (
|
||||||
|
Qwen2_5_VLCausalLMOutputWithPast,
|
||||||
|
)
|
||||||
|
|
||||||
|
_PATCH_OPTS: PatchOptions | None = None
|
||||||
|
|
||||||
|
|
||||||
|
def cce_forward_multimodal(
|
||||||
|
self,
|
||||||
|
input_ids: Optional[torch.LongTensor] = None,
|
||||||
|
attention_mask: Optional[torch.Tensor] = None,
|
||||||
|
position_ids: Optional[torch.LongTensor] = None,
|
||||||
|
past_key_values: Optional[list[torch.FloatTensor]] = None,
|
||||||
|
inputs_embeds: Optional[torch.FloatTensor] = None,
|
||||||
|
labels: Optional[torch.LongTensor] = None,
|
||||||
|
use_cache: Optional[bool] = None,
|
||||||
|
output_attentions: Optional[bool] = None,
|
||||||
|
output_hidden_states: Optional[bool] = None,
|
||||||
|
return_dict: Optional[bool] = None,
|
||||||
|
pixel_values: Optional[torch.Tensor] = None,
|
||||||
|
pixel_values_videos: Optional[torch.FloatTensor] = None,
|
||||||
|
image_grid_thw: Optional[torch.LongTensor] = None,
|
||||||
|
video_grid_thw: Optional[torch.LongTensor] = None,
|
||||||
|
rope_deltas: Optional[torch.LongTensor] = None,
|
||||||
|
cache_position: Optional[torch.LongTensor] = None,
|
||||||
|
second_per_grid_ts: Optional[torch.Tensor] = None,
|
||||||
|
) -> Union[Tuple, Qwen2_5_VLCausalLMOutputWithPast]:
|
||||||
|
r"""
|
||||||
|
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
||||||
|
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
||||||
|
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
||||||
|
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
>>> from PIL import Image
|
||||||
|
>>> import requests
|
||||||
|
>>> from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration
|
||||||
|
|
||||||
|
>>> model = Qwen2_5_VLForConditionalGeneration.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
|
||||||
|
>>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
|
||||||
|
|
||||||
|
>>> messages = [
|
||||||
|
{
|
||||||
|
"role": "user",
|
||||||
|
"content": [
|
||||||
|
{"type": "image"},
|
||||||
|
{"type": "text", "text": "What is shown in this image?"},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
]
|
||||||
|
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
|
||||||
|
>>> image = Image.open(requests.get(url, stream=True).raw)
|
||||||
|
|
||||||
|
>>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
||||||
|
>>> inputs = processor(text=[text], images=[image], vision_infos=[vision_infos])
|
||||||
|
|
||||||
|
>>> # Generate
|
||||||
|
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
||||||
|
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
||||||
|
"The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..."
|
||||||
|
```"""
|
||||||
|
|
||||||
|
output_attentions = (
|
||||||
|
output_attentions
|
||||||
|
if output_attentions is not None
|
||||||
|
else self.config.output_attentions
|
||||||
|
)
|
||||||
|
output_hidden_states = (
|
||||||
|
output_hidden_states
|
||||||
|
if output_hidden_states is not None
|
||||||
|
else self.config.output_hidden_states
|
||||||
|
)
|
||||||
|
return_dict = (
|
||||||
|
return_dict if return_dict is not None else self.config.use_return_dict
|
||||||
|
)
|
||||||
|
|
||||||
|
if inputs_embeds is None:
|
||||||
|
inputs_embeds = self.model.embed_tokens(input_ids)
|
||||||
|
if pixel_values is not None:
|
||||||
|
pixel_values = pixel_values.type(self.visual.dtype)
|
||||||
|
image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
|
||||||
|
n_image_tokens = (input_ids == self.config.image_token_id).sum().item()
|
||||||
|
n_image_features = image_embeds.shape[0]
|
||||||
|
if n_image_tokens != n_image_features:
|
||||||
|
raise ValueError(
|
||||||
|
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
|
||||||
|
)
|
||||||
|
|
||||||
|
mask = input_ids == self.config.image_token_id
|
||||||
|
mask_unsqueezed = mask.unsqueeze(-1)
|
||||||
|
mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
|
||||||
|
image_mask = mask_expanded.to(inputs_embeds.device)
|
||||||
|
|
||||||
|
image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
|
||||||
|
inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) # type: ignore
|
||||||
|
|
||||||
|
if pixel_values_videos is not None:
|
||||||
|
pixel_values_videos = pixel_values_videos.type(self.visual.dtype)
|
||||||
|
video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw)
|
||||||
|
n_video_tokens = (input_ids == self.config.video_token_id).sum().item()
|
||||||
|
n_video_features = video_embeds.shape[0]
|
||||||
|
if n_video_tokens != n_video_features:
|
||||||
|
raise ValueError(
|
||||||
|
f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}"
|
||||||
|
)
|
||||||
|
|
||||||
|
mask = input_ids == self.config.video_token_id
|
||||||
|
mask_unsqueezed = mask.unsqueeze(-1)
|
||||||
|
mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
|
||||||
|
video_mask = mask_expanded.to(inputs_embeds.device)
|
||||||
|
|
||||||
|
video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
|
||||||
|
inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) # type: ignore
|
||||||
|
|
||||||
|
if attention_mask is not None:
|
||||||
|
attention_mask = attention_mask.to(inputs_embeds.device)
|
||||||
|
|
||||||
|
# if we get 4D attention mask we cannot calculate rope deltas anymore. TODO @raushan fixme
|
||||||
|
if position_ids is None and (attention_mask is None or attention_mask.ndim == 2):
|
||||||
|
# calculate RoPE index once per generation in the pre-fill stage only
|
||||||
|
if (
|
||||||
|
(cache_position is not None and cache_position[0] == 0)
|
||||||
|
or self.rope_deltas is None
|
||||||
|
or (past_key_values is None or past_key_values.get_seq_length() == 0) # type: ignore
|
||||||
|
):
|
||||||
|
position_ids, rope_deltas = self.get_rope_index(
|
||||||
|
input_ids,
|
||||||
|
image_grid_thw,
|
||||||
|
video_grid_thw,
|
||||||
|
second_per_grid_ts,
|
||||||
|
attention_mask,
|
||||||
|
)
|
||||||
|
self.rope_deltas = rope_deltas
|
||||||
|
# then use the prev pre-calculated rope-deltas to get the correct position ids
|
||||||
|
else:
|
||||||
|
batch_size, seq_length, _ = inputs_embeds.shape
|
||||||
|
delta = (
|
||||||
|
(cache_position[0] + self.rope_deltas).to(inputs_embeds.device)
|
||||||
|
if cache_position is not None
|
||||||
|
else 0
|
||||||
|
)
|
||||||
|
position_ids = torch.arange(seq_length, device=inputs_embeds.device) # type: ignore
|
||||||
|
position_ids = position_ids.view(1, -1).expand(batch_size, -1) # type: ignore
|
||||||
|
if cache_position is not None: # otherwise `deltas` is an int `0`
|
||||||
|
delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0) # type: ignore
|
||||||
|
position_ids = position_ids.add(delta) # type: ignore
|
||||||
|
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) # type: ignore
|
||||||
|
|
||||||
|
outputs = self.model(
|
||||||
|
input_ids=None,
|
||||||
|
position_ids=position_ids,
|
||||||
|
attention_mask=attention_mask,
|
||||||
|
past_key_values=past_key_values,
|
||||||
|
inputs_embeds=inputs_embeds,
|
||||||
|
use_cache=use_cache,
|
||||||
|
output_attentions=output_attentions,
|
||||||
|
output_hidden_states=output_hidden_states,
|
||||||
|
return_dict=return_dict,
|
||||||
|
cache_position=cache_position,
|
||||||
|
)
|
||||||
|
|
||||||
|
hidden_states = outputs[0]
|
||||||
|
logits = None
|
||||||
|
loss = None
|
||||||
|
|
||||||
|
if _PATCH_OPTS is not None and _PATCH_OPTS.use_lce(labels, self.training):
|
||||||
|
assert labels is not None
|
||||||
|
loss = apply_lce(
|
||||||
|
hidden_states,
|
||||||
|
self.lm_head.weight,
|
||||||
|
labels,
|
||||||
|
_PATCH_OPTS,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logits = self.lm_head(hidden_states)
|
||||||
|
|
||||||
|
if labels is not None:
|
||||||
|
# Upcast to float if we need to compute the loss to avoid potential precision issues
|
||||||
|
logits = logits.float()
|
||||||
|
# Shift so that tokens < n predict n
|
||||||
|
shift_logits = logits[..., :-1, :].contiguous()
|
||||||
|
shift_labels = labels[..., 1:].contiguous()
|
||||||
|
# Flatten the tokens
|
||||||
|
loss_fct = CrossEntropyLoss()
|
||||||
|
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
||||||
|
shift_labels = shift_labels.view(-1)
|
||||||
|
# Enable model parallelism
|
||||||
|
shift_labels = shift_labels.to(shift_logits.device)
|
||||||
|
loss = loss_fct(shift_logits, shift_labels)
|
||||||
|
|
||||||
|
if not return_dict:
|
||||||
|
output = (logits,) + outputs[1:]
|
||||||
|
return (loss,) + output if loss is not None else output
|
||||||
|
|
||||||
|
return Qwen2_5_VLCausalLMOutputWithPast(
|
||||||
|
loss=loss,
|
||||||
|
logits=logits,
|
||||||
|
past_key_values=outputs.past_key_values,
|
||||||
|
hidden_states=outputs.hidden_states,
|
||||||
|
attentions=outputs.attentions,
|
||||||
|
rope_deltas=self.rope_deltas,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def patch_qwen2_5_vl(
|
||||||
|
maybe_model: TransformersModelT | str | transformers.PretrainedConfig,
|
||||||
|
patch_options: PatchOptions,
|
||||||
|
) -> TransformersModelT | None:
|
||||||
|
global _PATCH_OPTS # pylint: disable=global-statement
|
||||||
|
|
||||||
|
from transformers.models.qwen2_5_vl import modeling_qwen2_5_vl
|
||||||
|
|
||||||
|
_PATCH_OPTS = patch_options
|
||||||
|
|
||||||
|
if isinstance(maybe_model, transformers.PreTrainedModel):
|
||||||
|
assert isinstance(
|
||||||
|
maybe_model, modeling_qwen2_5_vl.Qwen2_5_VLForConditionalGeneration
|
||||||
|
), f"Expected a Qwen2_5_VLForConditionalGeneration model. Got {type(maybe_model)}."
|
||||||
|
maybe_model.forward = MethodType(cce_forward_multimodal, maybe_model)
|
||||||
|
|
||||||
|
return maybe_model
|
||||||
|
|
||||||
|
modeling_qwen2_5_vl.Qwen2_5_VLForConditionalGeneration.forward = (
|
||||||
|
cce_forward_multimodal
|
||||||
|
)
|
||||||
|
return None
|
||||||
@@ -0,0 +1,178 @@
|
|||||||
|
"""Qwen2 MoE CCE patch. Adapted from transformers v4.51.2"""
|
||||||
|
|
||||||
|
# pylint: disable=duplicate-code
|
||||||
|
|
||||||
|
from types import MethodType
|
||||||
|
from typing import Optional, Union
|
||||||
|
|
||||||
|
import torch
|
||||||
|
import transformers
|
||||||
|
from cut_cross_entropy.transformers.utils import (
|
||||||
|
PatchOptions,
|
||||||
|
TransformersModelT,
|
||||||
|
apply_lce,
|
||||||
|
)
|
||||||
|
from transformers.models.qwen2_moe.modeling_qwen2_moe import (
|
||||||
|
MoeCausalLMOutputWithPast,
|
||||||
|
MoeModelOutputWithPast,
|
||||||
|
load_balancing_loss_func,
|
||||||
|
)
|
||||||
|
from transformers.utils.deprecation import deprecate_kwarg
|
||||||
|
from transformers.utils.generic import can_return_tuple
|
||||||
|
|
||||||
|
_PATCH_OPTS: PatchOptions | None = None
|
||||||
|
|
||||||
|
|
||||||
|
@can_return_tuple
|
||||||
|
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
|
||||||
|
def forward(
|
||||||
|
self,
|
||||||
|
input_ids: Optional[torch.LongTensor] = None,
|
||||||
|
attention_mask: Optional[torch.Tensor] = None,
|
||||||
|
position_ids: Optional[torch.LongTensor] = None,
|
||||||
|
past_key_values: Optional[list[torch.FloatTensor]] = None,
|
||||||
|
inputs_embeds: Optional[torch.FloatTensor] = None,
|
||||||
|
labels: Optional[torch.LongTensor] = None,
|
||||||
|
use_cache: Optional[bool] = None,
|
||||||
|
output_attentions: Optional[bool] = None,
|
||||||
|
output_hidden_states: Optional[bool] = None,
|
||||||
|
output_router_logits: Optional[bool] = None,
|
||||||
|
cache_position: Optional[torch.LongTensor] = None,
|
||||||
|
logits_to_keep: Union[int, torch.Tensor] = 0,
|
||||||
|
**loss_kwargs,
|
||||||
|
) -> MoeCausalLMOutputWithPast:
|
||||||
|
r"""
|
||||||
|
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
||||||
|
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
||||||
|
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
||||||
|
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
||||||
|
|
||||||
|
logits_to_keep (`int` or `torch.Tensor`, *optional*):
|
||||||
|
If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
|
||||||
|
`input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
|
||||||
|
token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
|
||||||
|
If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
|
||||||
|
This is useful when using packed tensor format (single dimension for batch and sequence length).
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
>>> from transformers import AutoTokenizer, Qwen2MoeForCausalLM
|
||||||
|
|
||||||
|
>>> model = Qwen2MoeForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
|
||||||
|
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
|
||||||
|
|
||||||
|
>>> prompt = "Hey, are you conscious? Can you talk to me?"
|
||||||
|
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
||||||
|
|
||||||
|
>>> # Generate
|
||||||
|
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
||||||
|
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
||||||
|
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
||||||
|
```"""
|
||||||
|
|
||||||
|
output_attentions = (
|
||||||
|
output_attentions
|
||||||
|
if output_attentions is not None
|
||||||
|
else self.config.output_attentions
|
||||||
|
)
|
||||||
|
output_router_logits = (
|
||||||
|
output_router_logits
|
||||||
|
if output_router_logits is not None
|
||||||
|
else self.config.output_router_logits
|
||||||
|
)
|
||||||
|
output_hidden_states = (
|
||||||
|
output_hidden_states
|
||||||
|
if output_hidden_states is not None
|
||||||
|
else self.config.output_hidden_states
|
||||||
|
)
|
||||||
|
|
||||||
|
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
||||||
|
outputs: MoeModelOutputWithPast = self.model(
|
||||||
|
input_ids=input_ids,
|
||||||
|
attention_mask=attention_mask,
|
||||||
|
position_ids=position_ids,
|
||||||
|
past_key_values=past_key_values,
|
||||||
|
inputs_embeds=inputs_embeds,
|
||||||
|
use_cache=use_cache,
|
||||||
|
output_attentions=output_attentions,
|
||||||
|
output_hidden_states=output_hidden_states,
|
||||||
|
output_router_logits=output_router_logits,
|
||||||
|
cache_position=cache_position,
|
||||||
|
)
|
||||||
|
|
||||||
|
hidden_states = outputs.last_hidden_state
|
||||||
|
loss = None
|
||||||
|
logits = None
|
||||||
|
|
||||||
|
if hidden_states is None:
|
||||||
|
raise ValueError("hidden_states is None")
|
||||||
|
|
||||||
|
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
|
||||||
|
slice_indices = (
|
||||||
|
slice(-logits_to_keep, None)
|
||||||
|
if isinstance(logits_to_keep, int)
|
||||||
|
else logits_to_keep
|
||||||
|
)
|
||||||
|
|
||||||
|
if _PATCH_OPTS is not None and _PATCH_OPTS.use_lce(labels, self.training):
|
||||||
|
assert labels is not None
|
||||||
|
loss = apply_lce(
|
||||||
|
hidden_states[:, slice_indices, :],
|
||||||
|
self.lm_head.weight,
|
||||||
|
labels,
|
||||||
|
_PATCH_OPTS,
|
||||||
|
**loss_kwargs,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logits = self.lm_head(hidden_states[:, slice_indices, :])
|
||||||
|
|
||||||
|
if labels is not None:
|
||||||
|
loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)
|
||||||
|
|
||||||
|
aux_loss = None
|
||||||
|
if output_router_logits:
|
||||||
|
aux_loss = load_balancing_loss_func(
|
||||||
|
outputs.router_logits,
|
||||||
|
self.num_experts,
|
||||||
|
self.num_experts_per_tok,
|
||||||
|
attention_mask,
|
||||||
|
)
|
||||||
|
if labels is not None:
|
||||||
|
loss += self.router_aux_loss_coef * aux_loss.to( # type: ignore
|
||||||
|
loss.device # type: ignore
|
||||||
|
) # make sure to reside in the same device
|
||||||
|
|
||||||
|
return MoeCausalLMOutputWithPast(
|
||||||
|
loss=loss,
|
||||||
|
aux_loss=aux_loss, # type: ignore
|
||||||
|
logits=logits,
|
||||||
|
past_key_values=outputs.past_key_values,
|
||||||
|
hidden_states=outputs.hidden_states,
|
||||||
|
attentions=outputs.attentions,
|
||||||
|
router_logits=outputs.router_logits,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def patch_qwen2_moe(
|
||||||
|
maybe_model: TransformersModelT | str | transformers.PretrainedConfig,
|
||||||
|
patch_options: PatchOptions,
|
||||||
|
) -> TransformersModelT | None:
|
||||||
|
global _PATCH_OPTS # pylint: disable=global-statement
|
||||||
|
|
||||||
|
from transformers.models.qwen2_moe import modeling_qwen2_moe
|
||||||
|
|
||||||
|
_PATCH_OPTS = patch_options
|
||||||
|
|
||||||
|
if isinstance(maybe_model, transformers.PreTrainedModel):
|
||||||
|
assert isinstance(
|
||||||
|
maybe_model, modeling_qwen2_moe.Qwen2MoeForCausalLM
|
||||||
|
), f"Expected a Qwen3MoeForCausalLM model. Got {type(maybe_model)}."
|
||||||
|
maybe_model.forward = MethodType(forward, maybe_model)
|
||||||
|
|
||||||
|
return maybe_model
|
||||||
|
|
||||||
|
modeling_qwen2_moe.Qwen2MoeForCausalLM.forward = forward
|
||||||
|
return None
|
||||||
@@ -0,0 +1,239 @@
"""Qwen2 VL CCE patch. Adapted from transformers v4.51.2"""

# pylint: disable=duplicate-code

from types import MethodType
from typing import Optional, Tuple, Union

import torch
import transformers
from cut_cross_entropy.transformers.utils import (
    PatchOptions,
    TransformersModelT,
    apply_lce,
)
from torch.nn import CrossEntropyLoss
from transformers.models.qwen2_vl.modeling_qwen2_vl import (
    Qwen2VLCausalLMOutputWithPast,
)

_PATCH_OPTS: PatchOptions | None = None


def cce_forward_multimodal(
    self,
    input_ids: Optional[torch.LongTensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[list[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    pixel_values: Optional[torch.Tensor] = None,
    pixel_values_videos: Optional[torch.FloatTensor] = None,
    image_grid_thw: Optional[torch.LongTensor] = None,
    video_grid_thw: Optional[torch.LongTensor] = None,
    rope_deltas: Optional[torch.LongTensor] = None,
    cache_position: Optional[torch.LongTensor] = None,
) -> Union[Tuple, Qwen2VLCausalLMOutputWithPast]:
    r"""
    labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
        Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
        config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
        (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

    Returns:

    Example:

    ```python
    >>> from PIL import Image
    >>> import requests
    >>> from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

    >>> model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
    >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")

    >>> messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": "What is shown in this image?"},
                ],
            },
        ]
    >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    >>> inputs = processor(text=[text], images=[image], vision_infos=[vision_infos])

    >>> # Generate
    >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
    >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
    "The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..."
    ```"""

    output_attentions = (
        output_attentions
        if output_attentions is not None
        else self.config.output_attentions
    )
    output_hidden_states = (
        output_hidden_states
        if output_hidden_states is not None
        else self.config.output_hidden_states
    )
    return_dict = (
        return_dict if return_dict is not None else self.config.use_return_dict
    )

    if inputs_embeds is None:
        inputs_embeds = self.model.embed_tokens(input_ids)
        if pixel_values is not None:
            pixel_values = pixel_values.type(self.visual.get_dtype())
            image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
            n_image_tokens = (input_ids == self.config.image_token_id).sum().item()
            n_image_features = image_embeds.shape[0]
            if n_image_tokens != n_image_features:
                raise ValueError(
                    f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
                )
            image_mask = (
                (input_ids == self.config.image_token_id)
                .unsqueeze(-1)
                .expand_as(inputs_embeds)
                .to(inputs_embeds.device)
            )
            image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
            inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)  # type: ignore

        if pixel_values_videos is not None:
            pixel_values_videos = pixel_values_videos.type(self.visual.get_dtype())
            video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw)
            n_video_tokens = (input_ids == self.config.video_token_id).sum().item()
            n_video_features = video_embeds.shape[0]
            if n_video_tokens != n_video_features:
                raise ValueError(
                    f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}"
                )
            video_mask = (
                (input_ids == self.config.video_token_id)
                .unsqueeze(-1)
                .expand_as(inputs_embeds)
                .to(inputs_embeds.device)
            )
            video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
            inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)  # type: ignore

        if attention_mask is not None:
            attention_mask = attention_mask.to(inputs_embeds.device)

    # if we get 4D attention mask we cannot calculate rope deltas anymore. TODO @raushan fixme
    if position_ids is None and (attention_mask is None or attention_mask.ndim == 2):
        # calculate RoPE index once per generation in the pre-fill stage only
        if (
            (cache_position is not None and cache_position[0] == 0)
            or self.rope_deltas is None
            or (past_key_values is None or past_key_values.get_seq_length() == 0)  # type: ignore
        ):
            position_ids, rope_deltas = self.get_rope_index(
                input_ids, image_grid_thw, video_grid_thw, attention_mask
            )
            self.rope_deltas = rope_deltas
        # then use the prev pre-calculated rope-deltas to get the correct position ids
        else:
            batch_size, seq_length, _ = inputs_embeds.shape
            delta = (
                cache_position[0] + self.rope_deltas
                if cache_position is not None
                else 0
            )
            position_ids = torch.arange(seq_length, device=inputs_embeds.device)  # type: ignore
            position_ids = position_ids.view(1, -1).expand(batch_size, -1)  # type: ignore
            if cache_position is not None:  # otherwise `deltas` is an int `0`
                delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0)  # type: ignore
                delta = delta.to(position_ids.device)  # type: ignore
            position_ids = position_ids.add(delta)  # type: ignore
            position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)  # type: ignore

    outputs = self.model(
        input_ids=None,
        position_ids=position_ids,
        attention_mask=attention_mask,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        cache_position=cache_position,
    )

    hidden_states = outputs[0]
    logits = None
    loss = None

    if _PATCH_OPTS is not None and _PATCH_OPTS.use_lce(labels, self.training):
        assert labels is not None
        loss = apply_lce(
            hidden_states,
            self.lm_head.weight,
            labels,
            _PATCH_OPTS,
        )
    else:
        logits = self.lm_head(hidden_states)

        if labels is not None:
            # Upcast to float if we need to compute the loss to avoid potential precision issues
            logits = logits.float()
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

    if not return_dict:
        output = (logits,) + outputs[1:]
        return (loss,) + output if loss is not None else output

    return Qwen2VLCausalLMOutputWithPast(
        loss=loss,
        logits=logits,
        past_key_values=outputs.past_key_values,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
        rope_deltas=self.rope_deltas,
    )


def patch_qwen2_vl(
    maybe_model: TransformersModelT | str | transformers.PretrainedConfig,
    patch_options: PatchOptions,
) -> TransformersModelT | None:
    global _PATCH_OPTS  # pylint: disable=global-statement

    from transformers.models.qwen2_vl import modeling_qwen2_vl

    _PATCH_OPTS = patch_options

    if isinstance(maybe_model, transformers.PreTrainedModel):
        assert isinstance(
            maybe_model, modeling_qwen2_vl.Qwen2VLForConditionalGeneration
        ), f"Expected a Qwen2VLForConditionalGeneration model. Got {type(maybe_model)}."
        maybe_model.forward = MethodType(cce_forward_multimodal, maybe_model)

        return maybe_model

    modeling_qwen2_vl.Qwen2VLForConditionalGeneration.forward = cce_forward_multimodal
    return None
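The image/video merging in `cce_forward_multimodal` above relies on `torch.Tensor.masked_scatter`: the mask marks every embedding slot that belongs to an image (or video) placeholder token, and the visual embeddings are copied into those slots in order. Below is a toy, self-contained illustration of that behaviour; the token id `99` and the tensor shapes are made up for the example.

```python
import torch

hidden = 4
inputs_embeds = torch.zeros(1, 5, hidden)      # 5 token slots of width 4
input_ids = torch.tensor([[1, 99, 99, 2, 3]])  # pretend 99 is the image token id
image_embeds = torch.ones(2, hidden)           # 2 image "patches" -> 2 placeholder tokens

image_mask = (input_ids == 99).unsqueeze(-1).expand_as(inputs_embeds)
merged = inputs_embeds.masked_scatter(image_mask, image_embeds)

print(merged[0, :, 0])  # tensor([0., 1., 1., 0., 0.]) -- only the placeholder slots were filled
```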
@@ -0,0 +1,35 @@
"""Qwen3 CCE patch. The model inherits Llama's modeling code and uses the same forward method."""

# pylint: disable=duplicate-code

from types import MethodType

import transformers
from cut_cross_entropy.transformers.utils import (
    PatchOptions,
    TransformersModelT,
)


def patch_qwen3(
    maybe_model: TransformersModelT | str | transformers.PretrainedConfig,
    patch_options: PatchOptions,
) -> TransformersModelT | None:
    from transformers.models.qwen3 import modeling_qwen3

    # Set the _PATCH_OPTS in the llama patch file
    import axolotl.integrations.cut_cross_entropy.monkeypatch.llama as llama_patch

    llama_patch._PATCH_OPTS = patch_options  # pylint: disable=protected-access

    from axolotl.integrations.cut_cross_entropy.monkeypatch.llama import cce_forward

    if isinstance(maybe_model, transformers.PreTrainedModel):
        assert isinstance(
            maybe_model, modeling_qwen3.Qwen3ForCausalLM
        ), f"Expected a Qwen3ForCausalLM model. Got {type(maybe_model)}."
        maybe_model.forward = MethodType(cce_forward, maybe_model)
        return maybe_model

    modeling_qwen3.Qwen3ForCausalLM.forward = cce_forward
    return None
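The Qwen3 patch above does not define its own forward; it rebinds `_PATCH_OPTS` on the llama patch module and reuses that module's `cce_forward`, which reads the global at call time. A minimal sketch of why rebinding the module attribute is sufficient (the stand-in module below is hypothetical, built with `exec` purely for illustration):

```python
import types

# Hypothetical stand-in for the llama patch module.
llama_patch = types.ModuleType("llama_patch")
exec(
    "_PATCH_OPTS = None\n"
    "def cce_forward():\n"
    "    return _PATCH_OPTS\n",
    llama_patch.__dict__,
)

# Rebinding the module attribute is what patch_qwen3 does; the borrowed
# forward sees the new value because it looks the global up at call time.
llama_patch._PATCH_OPTS = "patch options"
print(llama_patch.cce_forward())  # patch options
```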
@@ -0,0 +1,183 @@
"""Qwen3 MoE CCE patch. Adapted from transformers v4.51.2"""

# pylint: disable=duplicate-code

from types import MethodType
from typing import Optional, Union

import torch
import transformers
from cut_cross_entropy.transformers.utils import (
    PatchOptions,
    TransformersModelT,
    apply_lce,
)
from transformers.models.qwen3_moe.modeling_qwen3_moe import (
    KwargsForCausalLM,
    MoeCausalLMOutputWithPast,
    MoeModelOutputWithPast,
    load_balancing_loss_func,
)
from transformers.processing_utils import Unpack
from transformers.utils.deprecation import deprecate_kwarg
from transformers.utils.generic import can_return_tuple

_PATCH_OPTS: PatchOptions | None = None


@can_return_tuple
@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
def forward(
    self,
    input_ids: Optional[torch.LongTensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[list[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    output_router_logits: Optional[bool] = None,
    cache_position: Optional[torch.LongTensor] = None,
    logits_to_keep: Union[int, torch.Tensor] = 0,
    **kwargs: Unpack[KwargsForCausalLM],
) -> MoeCausalLMOutputWithPast:
    r"""
    labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
        Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
        config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
        (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

    logits_to_keep (`int` or `torch.Tensor`, *optional*):
        If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
        `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
        token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
        If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
        This is useful when using packed tensor format (single dimension for batch and sequence length).

    Returns:

    Example:

    ```python
    >>> from transformers import AutoTokenizer, Qwen3MoeForCausalLM

    >>> model = Qwen3MoeForCausalLM.from_pretrained("Qwen/Qwen3-MoE-15B-A2B")
    >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-MoE-15B-A2B")

    >>> prompt = "Hey, are you conscious? Can you talk to me?"
    >>> inputs = tokenizer(prompt, return_tensors="pt")

    >>> # Generate
    >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
    >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
    "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
    ```"""

    output_attentions = (
        output_attentions
        if output_attentions is not None
        else self.config.output_attentions
    )
    output_router_logits = (
        output_router_logits
        if output_router_logits is not None
        else self.config.output_router_logits
    )

    output_hidden_states = (
        output_hidden_states
        if output_hidden_states is not None
        else self.config.output_hidden_states
    )

    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
    outputs: MoeModelOutputWithPast = self.model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        output_router_logits=output_router_logits,
        cache_position=cache_position,
        **kwargs,
    )

    hidden_states = outputs.last_hidden_state

    if hidden_states is None:
        raise ValueError("hidden_states is None")

    loss = None
    logits = None

    # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
    slice_indices = (
        slice(-logits_to_keep, None)
        if isinstance(logits_to_keep, int)
        else logits_to_keep
    )

    if _PATCH_OPTS is not None and _PATCH_OPTS.use_lce(labels, self.training):
        assert labels is not None
        loss = apply_lce(
            hidden_states[:, slice_indices, :],
            self.lm_head.weight,
            labels,
            _PATCH_OPTS,
            **kwargs,
        )
    else:
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        if labels is not None:
            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)

    aux_loss = None
    if output_router_logits:
        aux_loss = load_balancing_loss_func(
            outputs.router_logits,
            self.num_experts,
            self.num_experts_per_tok,
            attention_mask,
        )
        if labels is not None:
            loss += self.router_aux_loss_coef * aux_loss.to(  # type: ignore
                loss.device  # type: ignore
            )  # make sure to reside in the same device

    return MoeCausalLMOutputWithPast(
        loss=loss,
        aux_loss=aux_loss,  # type: ignore
        logits=logits,
        past_key_values=outputs.past_key_values,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
        router_logits=outputs.router_logits,
    )


def patch_qwen3_moe(
    maybe_model: TransformersModelT | str | transformers.PretrainedConfig,
    patch_options: PatchOptions,
) -> TransformersModelT | None:
    global _PATCH_OPTS  # pylint: disable=global-statement

    from transformers.models.qwen3_moe import modeling_qwen3_moe

    _PATCH_OPTS = patch_options

    if isinstance(maybe_model, transformers.PreTrainedModel):
        assert isinstance(
            maybe_model, modeling_qwen3_moe.Qwen3MoeForCausalLM
        ), f"Expected a Qwen3MoeForCausalLM model. Got {type(maybe_model)}."
        maybe_model.forward = MethodType(forward, maybe_model)

        return maybe_model

    modeling_qwen3_moe.Qwen3MoeForCausalLM.forward = forward
    return None
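A side note on the `logits_to_keep` handling in the forward above: an int keeps the last N positions (`0` degenerates to `slice(0, None)`, i.e. keep everything), while a 1D index tensor keeps explicit positions, which suits packed sequences. A small illustration of the `slice_indices` logic with made-up shapes:

```python
import torch

hidden_states = torch.randn(1, 6, 8)  # (batch, seq, hidden)

def to_slice(logits_to_keep):
    # Same expression used in the patched forward.
    return (
        slice(-logits_to_keep, None)
        if isinstance(logits_to_keep, int)
        else logits_to_keep
    )

print(hidden_states[:, to_slice(2), :].shape)  # torch.Size([1, 2, 8]) -- last two positions
print(hidden_states[:, to_slice(0), :].shape)  # torch.Size([1, 6, 8]) -- 0 keeps everything
print(hidden_states[:, to_slice(torch.tensor([0, 3, 5])), :].shape)  # torch.Size([1, 3, 8])
```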
@@ -35,6 +35,9 @@ class ChatTemplateStrategyWithKD(ChatTemplateStrategy):
         sequence_len,
         roles_to_train=None,
         train_on_eos=None,
+        train_on_eot=None,
+        eot_tokens=None,
+        split_thinking: bool | None = False,
         logprobs_field="logprobs",
         gen_temperature=1.0,
         kd_temperature=1.0,
@@ -50,6 +53,9 @@ class ChatTemplateStrategyWithKD(ChatTemplateStrategy):
             sequence_len,
             roles_to_train=roles_to_train,
             train_on_eos=train_on_eos,
+            train_on_eot=train_on_eot,
+            eot_tokens=eot_tokens,
+            split_thinking=split_thinking,
         )

     @property
@@ -23,8 +23,8 @@ import logging
 import sys

 from axolotl.integrations.base import BasePlugin
+from axolotl.utils.distributed import is_main_process

-from ...utils.distributed import zero_only
 from .args import LigerArgs  # pylint: disable=unused-import. # noqa: F401
 from .utils import patch_with_compile_disable

@@ -85,7 +85,7 @@ class LigerPlugin(BasePlugin):
                 kwargs["geglu"] = cfg.liger_glu_activation
             elif "swiglu" in liger_fn_sig.parameters:
                 kwargs["swiglu"] = cfg.liger_glu_activation
-            with zero_only():
+            if is_main_process(use_environ=True):
                 LOG.info(
                     f"Applying LIGER to {cfg.model_config_type} with kwargs: {kwargs}"
                 )
@@ -151,6 +151,30 @@ class LigerPlugin(BasePlugin):
                 rms_norm=cfg.liger_rms_norm,
                 layer_norm=cfg.liger_layer_norm,
             )
+        elif cfg.model_config_type == "qwen3":
+            from axolotl.integrations.liger.models.qwen3 import (
+                apply_liger_kernel_to_qwen3,
+            )
+
+            apply_liger_kernel_to_qwen3(
+                cross_entropy=cfg.liger_cross_entropy,
+                fused_linear_cross_entropy=cfg.liger_fused_linear_cross_entropy,
+                glu_activation=cfg.liger_glu_activation,
+                rms_norm=cfg.liger_rms_norm,
+                layer_norm=cfg.liger_layer_norm,
+            )
+        elif cfg.model_config_type == "qwen3_moe":
+            from axolotl.integrations.liger.models.qwen3_moe import (
+                apply_liger_kernel_to_qwen3_moe,
+            )
+
+            apply_liger_kernel_to_qwen3_moe(
+                cross_entropy=cfg.liger_cross_entropy,
+                fused_linear_cross_entropy=cfg.liger_fused_linear_cross_entropy,
+                glu_activation=cfg.liger_glu_activation,
+                rms_norm=cfg.liger_rms_norm,
+                layer_norm=cfg.liger_layer_norm,
+            )
         else:
             logging.warning(
                 f"Unsupported model config type: {cfg.model_config_type}. Liger not applied."
0 src/axolotl/integrations/liger/models/__init__.py Normal file
@@ -14,10 +14,6 @@ from torch.nn import CrossEntropyLoss
 from transformers.modeling_outputs import CausalLMOutputWithPast


-# @add_start_docstrings_to_model_forward(DeepseekV2_INPUTS_DOCSTRING)
-# @replace_return_docstrings(
-#     output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
-# )
 def lce_forward(
     self,
     input_ids: torch.LongTensor = None,
@@ -13,21 +13,11 @@ from liger_kernel.transformers.fused_linear_cross_entropy import (
 from torch.nn import CrossEntropyLoss
 from transformers.modeling_outputs import MoeCausalLMOutputWithPast
 from transformers.models.jamba.modeling_jamba import (
-    _CONFIG_FOR_DOC,
-    JAMBA_INPUTS_DOCSTRING,
     HybridMambaAttentionDynamicCache,
     load_balancing_loss_func,
 )
-from transformers.utils import (
-    add_start_docstrings_to_model_forward,
-    replace_return_docstrings,
-)


-@add_start_docstrings_to_model_forward(JAMBA_INPUTS_DOCSTRING)
-@replace_return_docstrings(
-    output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
-)
 def lce_forward(
     self,
     input_ids: torch.LongTensor = None,
Some files were not shown because too many files have changed in this diff