Compare commits: uv-fixup...scattermoe (54 commits)
Commits in this comparison (abbreviated SHA1s; the author and date columns were empty):

42922f8f8b, 7041592ca7, fec0c3a99e, 31d8d068bb, 66fea258c7, 07ff389be8, 2dcca15f65, c5db90aa3f, 163bd4dd5a,
f291ac029c, 5ef3f28340, 999b3fec2e, 8f3fb517b3, 830e9f7eaf, d230cbbde3, a098df527b, 7da5f94379, 4a5876df7a,
defee62d99, f56efdb4ab, d8a646c80d, a806704e94, d8a05744d7, ff77fa2488, e1ff756245, 083c5a0421, 79908b3c6e,
819b157c7b, fccc712dae, 23ad40bdd5, cf4d550c88, 43b1c80aa6, a36aaa70ce, 80f7088ad1, 46b9f40f2a, 8f19169eb0,
876941ffd0, d65e1b960c, 0a23ae08f7, fc2d63ee5f, c119382337, 6c8c73e5a4, a260d330ed, da17c7c0d9, cada93cee5,
56162f71db, 6c44afaea1, 234931d512, 6a8baf8fa7, 1eaf4d7418, 4b8bc52424, 28cc085283, 8e2a102cca, 753906cfc7
.github/CONTRIBUTING.md (9 changes)

@@ -68,7 +68,12 @@ You can skip certain CI checks by including specific keywords in your commit mes

### Code Style

axolotl uses [{codestyle}]({URLofCodestyle}) as its code style guide. Please ensure that your code follows these guidelines.
axolotl uses [Ruff](https://docs.astral.sh/ruff/) as its code style guide. Please ensure that your code follows these guidelines.

Use the pre-commit linter to ensure that your code is formatted consistently.

```bash
pre-commit run --all-files
```

### Commit Messages

@@ -78,6 +83,6 @@ Write clear and concise commit messages that briefly describe the changes made i

- [GitHub Help](https://help.github.com/)
- [GitHub Pull Request Documentation](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests)
- [{codestyle}]({URLofCodestyle})
- [Ruff](https://docs.astral.sh/ruff/)

Thank you once again for your interest in contributing to axolotl. We look forward to collaborating with you and creating an even better project together!
.github/workflows/base.yml (19 changes)

@@ -15,6 +15,9 @@ on:
      - '.github/workflows/base.yml'
  workflow_dispatch:

permissions:
  contents: read

jobs:
  build-base:
    if: ${{ github.repository_owner == 'axolotl-ai-cloud' && (github.event_name != 'pull_request' || !github.event.pull_request.draft) }}

@@ -124,7 +127,7 @@ jobs:
          images: |
            axolotlai/axolotl-base
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        uses: docker/login-action@v3
        if: ${{ github.event_name != 'pull_request' && env.HAS_DOCKERHUB_CREDS == 'true' }}
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}

@@ -132,7 +135,7 @@ jobs:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build
        uses: docker/build-push-action@v4
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./docker/${{ matrix.dockerfile }}

@@ -173,6 +176,14 @@ jobs:
            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
            dockerfile: "Dockerfile-uv-base"
            platforms: "linux/amd64,linux/arm64"
          - cuda: "128"
            cuda_version: 12.8.1
            cudnn_version: ""
            python_version: "3.12"
            pytorch: 2.9.1
            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
            dockerfile: "Dockerfile-uv-base"
            platforms: "linux/amd64,linux/arm64"
          - cuda: "128"
            cuda_version: 12.8.1
            cudnn_version: ""

@@ -239,7 +250,7 @@ jobs:
          images: |
            axolotlai/axolotl-base-uv
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        uses: docker/login-action@v3
        if: ${{ github.event_name != 'pull_request' && env.HAS_DOCKERHUB_CREDS == 'true' }}
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}

@@ -247,7 +258,7 @@ jobs:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build
        uses: docker/build-push-action@v4
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./docker/${{ matrix.dockerfile }}
.github/workflows/lint.yml (3 changes)

@@ -13,6 +13,9 @@ on:
      - ".pre-commit-config.yaml"
  workflow_dispatch:

permissions:
  contents: read

jobs:
  pre-commit:
    name: pre-commit
.github/workflows/main.yml (18 changes)

@@ -8,6 +8,9 @@ on:
      - "v*"
  workflow_dispatch:

permissions:
  contents: read

jobs:
  build-axolotl:
    if: ${{ ! contains(github.event.commits[0].message, '[skip docker]') && github.repository_owner == 'axolotl-ai-cloud' }}

@@ -110,6 +113,12 @@ jobs:
            pytorch: 2.9.1
            axolotl_extras:
            platforms: "linux/amd64,linux/arm64"
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.12"
            pytorch: 2.9.1
            axolotl_extras:
            platforms: "linux/amd64,linux/arm64"
            is_latest: true
          - cuda: 128
            cuda_version: 12.8.1

@@ -174,6 +183,7 @@ jobs:
    if: ${{ ! contains(github.event.commits[0].message, '[skip docker]') && github.repository_owner == 'axolotl-ai-cloud' }}
    # this job needs to be run on self-hosted GPU runners...
    strategy:
      fail-fast: false
      matrix:
        include:
          - cuda: 128

@@ -259,6 +269,7 @@ jobs:
    if: ${{ ! contains(github.event.commits[0].message, '[skip docker]') && github.repository_owner == 'axolotl-ai-cloud' }}
    # this job needs to be run on self-hosted GPU runners...
    strategy:
      fail-fast: false
      matrix:
        include:
          - cuda: 128

@@ -266,6 +277,12 @@ jobs:
            python_version: "3.11"
            pytorch: 2.9.1
            axolotl_extras:
            platforms: "linux/amd64,linux/arm64"
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.12"
            pytorch: 2.9.1
            axolotl_extras:
            is_latest: true
            platforms: "linux/amd64,linux/arm64"
          - cuda: 128

@@ -326,6 +343,7 @@ jobs:
    if: ${{ ! contains(github.event.commits[0].message, '[skip docker]') && github.repository_owner == 'axolotl-ai-cloud' }}
    # this job needs to be run on self-hosted GPU runners...
    strategy:
      fail-fast: false
      matrix:
        include:
          - cuda: 128
.github/workflows/multi-gpu-e2e.yml (34 changes)

@@ -8,6 +8,7 @@ on:
      - 'setup.py'
      - 'pyproject.toml'
      - '.github/workflows/multi-gpu-e2e.yml'
      - 'scripts/cutcrossentropy_install.py'
      - 'src/axolotl/core/trainers/mixins/sequence_parallel.py'
      - 'src/axolotl/utils/distributed.py'
  workflow_dispatch:

@@ -19,6 +20,9 @@ concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

permissions:
  contents: read

env:
  MODAL_IMAGE_BUILDER_VERSION: "2025.06"

@@ -35,19 +39,13 @@ jobs:
            pytorch: 2.8.0
            axolotl_extras: fbgemm-gpu
            num_gpus: 2
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.11"
            pytorch: 2.9.1
            axolotl_extras: "fbgemm-gpu"
            num_gpus: 2
          - cuda: 129
            cuda_version: 12.9.1
            python_version: "3.12"
            pytorch: 2.9.1
            axolotl_extras: "fbgemm-gpu"
            num_gpus: 2
            dockerfile: "Dockerfile-uv.jinja"
          # - cuda: 129
          #   cuda_version: 12.9.1
          #   python_version: "3.12"
          #   pytorch: 2.9.1
          #   axolotl_extras: "fbgemm-gpu"
          #   num_gpus: 2
          #   dockerfile: "Dockerfile-uv.jinja"
          - cuda: 130
            cuda_version: 13.0.0
            python_version: "3.11"

@@ -55,6 +53,13 @@ jobs:
            axolotl_extras:
            # axolotl_extras: fbgemm-gpu
            num_gpus: 2
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.11"
            pytorch: 2.10.0
            axolotl_extras: "fbgemm-gpu"
            num_gpus: 2
            dockerfile: "Dockerfile-uv.jinja"
    runs-on: [self-hosted, modal]
    timeout-minutes: 120
    steps:

@@ -76,8 +81,9 @@ jobs:
          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
          echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile.jinja'}}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        run: |
          modal run -m cicd.multigpu
.github/workflows/nightlies.yml (3 changes)

@@ -5,6 +5,9 @@ on:
  schedule:
    - cron: '0 0 * * *' # Runs at 00:00 UTC every day

permissions:
  contents: read

jobs:
  build-axolotl:
    if: ${{ ! contains(github.event.commits[0].message, '[skip docker]') && github.repository_owner == 'axolotl-ai-cloud' }}
.github/workflows/precommit-autoupdate.yml (2 changes)

@@ -5,6 +5,8 @@ on:
    - cron: '0 0 1 * *' # Run monthly
  workflow_dispatch: # Manual kickoff

permissions: {}

jobs:
  auto-update:
    runs-on: ubuntu-latest
.github/workflows/preview-docs.yml (8 changes)

@@ -14,14 +14,8 @@ on:
      - .github/workflows/preview-docs.yml

permissions:
  checks: write
  contents: write
  deployments: write
  issues: write
  discussions: write
  pages: write
  contents: read
  pull-requests: write
  statuses: write

jobs:
  preview:
.github/workflows/pypi.yml (9 changes)

@@ -3,9 +3,11 @@ name: publish pypi
on:
  push:
    tags:
      - 'v*'
      - "v*"
  workflow_dispatch:

permissions: {}

jobs:
  setup_release:
    name: Create Release

@@ -28,7 +30,8 @@ jobs:
      name: pypi
      url: https://pypi.org/p/axolotl
    permissions:
      id-token: write # IMPORTANT: this permission is mandatory for trusted publishing
      contents: read
      id-token: write # IMPORTANT: this permission is mandatory for trusted publishing
    steps:
      - name: Check out repository code
        uses: actions/checkout@v4

@@ -46,7 +49,7 @@ jobs:
      - name: Extract tag name
        id: tag
        run: echo ::set-output name=TAG_NAME::$(echo $GITHUB_REF | cut -d / -f 3)
        run: echo "TAG_NAME=$(echo $GITHUB_REF | cut -d / -f 3)" >> "$GITHUB_OUTPUT"

      - name: Update version in VERSION file
        run: |
.github/workflows/tests-nightly.yml (42 changes)

@@ -3,6 +3,13 @@ on:
  workflow_dispatch:
  schedule:
    - cron: '0 0 * * *' # Runs at 00:00 UTC every day
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    paths:
      - '.github/workflows/tests-nightly.yml'

permissions:
  contents: read

jobs:
  pre-commit:

@@ -18,15 +25,26 @@ jobs:
    env:
      SKIP: no-commit-to-branch

  prime-cdn-s3-cache:
    name: Prefetch S3 once to prime the CDN cache
    runs-on: ubuntu-latest
    if: ${{ !github.event.pull_request.draft }}
    timeout-minutes: 10
    steps:
      - name: Restore Cache from S3
        id: hf-cache-restore-s3
        run: |
          curl -v -H "Range: bytes=0-1023" -L https://axolotl-ci.b-cdn.net/hf-cache.tar.zst > /dev/null

  pytest:
    name: PyTest
    runs-on: ubuntu-latest
    needs: [prime-cdn-s3-cache]
    strategy:
      fail-fast: false
      max-parallel: 2
      matrix:
        python_version: ["3.11"]
        pytorch_version: ["2.8.0", "2.9.0", "2.9.1"]
        python_version: ["3.12"] # TODO include py3.14 once https://github.com/mistralai/mistral-common/pull/194 is merged
        pytorch_version: ["2.8.0", "2.9.1", "2.10.0"]
    timeout-minutes: 20

    steps:

@@ -48,7 +66,7 @@ jobs:
      - name: upgrade pip
        run: |
          pip3 install --upgrade pip
          pip3 install --upgrade packaging==26.0 setuptools==75.8.0 wheel
          pip3 install --upgrade packaging==26.0 setuptools==78.1.1 wheel

      - name: Install PyTorch
        run: |

@@ -102,16 +120,23 @@ jobs:
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.11"
            pytorch: 2.8.0
            pytorch: 2.9.1
            num_gpus: 1
            axolotl_extras:
            nightly_build: "true"
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.11"
            pytorch: 2.10.0
            num_gpus: 1
            axolotl_extras:
          - cuda: 130
            cuda_version: 13.0.0
            python_version: "3.12"
            pytorch: 2.9.1
            num_gpus: 1
            axolotl_extras:
            dockerfile: "Dockerfile-uv.jinja"
            nightly_build: "true"
    steps:
      - name: Checkout

@@ -132,9 +157,11 @@ jobs:
          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
          echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile.jinja'}}" >> $GITHUB_ENV
          echo "NIGHTLY_BUILD=${{ matrix.nightly_build }}" >> $GITHUB_ENV
          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        run: |
          modal run cicd.e2e_tests

  docker-e2e-multigpu-tests:

@@ -175,7 +202,8 @@ jobs:
          echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
          echo "NIGHTLY_BUILD=${{ matrix.nightly_build }}" >> $GITHUB_ENV
          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        run: |
          modal run cicd.multigpu
.github/workflows/tests.yml (30 changes)

@@ -28,6 +28,9 @@ concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

permissions:
  contents: read

env:
  TRANSFORMERS_IS_CI: "yes"

@@ -46,11 +49,22 @@ jobs:
    env:
      SKIP: no-commit-to-branch

  prime-cdn-s3-cache:
    name: Prefetch S3 once to prime the CDN cache
    runs-on: ubuntu-latest
    if: ${{ !github.event.pull_request.draft }}
    timeout-minutes: 10
    steps:
      - name: Restore Cache from S3
        id: hf-cache-restore-s3
        run: |
          curl -v -H "Range: bytes=0-1023" -L https://axolotl-ci.b-cdn.net/hf-cache.tar.zst > /dev/null

  pytest:
    name: PyTest
    runs-on: ubuntu-latest
    if: ${{ !github.event.pull_request.draft }}
    # needs: [preload-cache]
    needs: [prime-cdn-s3-cache]
    strategy:
      fail-fast: false
      matrix:

@@ -146,6 +160,7 @@ jobs:
    name: PyTest from Source Dist
    runs-on: ubuntu-latest
    if: ${{ !github.event.pull_request.draft }}
    needs: [prime-cdn-s3-cache]
    strategy:
      fail-fast: false
      matrix:

@@ -156,7 +171,7 @@ jobs:
        # pytorch_version: "2.8.0"
        # - python_version: "3.14"
        #   pytorch_version: "2.9.1"
    timeout-minutes: 20
    timeout-minutes: 30

    steps:
      - name: cleanup node

@@ -291,9 +306,10 @@ jobs:
          echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
          echo "MODAL_IMAGE_BUILDER_VERSION=2024.10" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile.jinja'}}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        run: |
          modal run cicd.e2e_tests

@@ -359,9 +375,10 @@ jobs:
          echo "MODAL_IMAGE_BUILDER_VERSION=2024.10" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
          echo "GPU_TYPE=${{ matrix.gpu_type || 'L40S'}}" >> $GITHUB_ENV
          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile.jinja'}}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        run: |
          modal run cicd.e2e_tests

@@ -375,8 +392,8 @@ jobs:
      fail-fast: false
      matrix:
        include:
          - cuda: 129
            cuda_version: 12.9.1
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.11"
            pytorch: 2.9.1
            num_gpus: 1

@@ -401,7 +418,6 @@ jobs:
          echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
          echo "MODAL_IMAGE_BUILDER_VERSION=2024.10" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        run: |
          modal run cicd.cleanup
@@ -11,7 +11,7 @@ repos:
      - id: no-commit-to-branch
        args: ['--branch', 'main']
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.14.10
    rev: v0.15.4
    hooks:
      - id: ruff
        args: [--fix]

@@ -26,7 +26,7 @@ repos:
          'pydantic>=2.5.3',
        ]
  - repo: https://github.com/PyCQA/bandit
    rev: 1.9.2
    rev: 1.9.4
    hooks:
      - id: bandit
        args: [
README.md (32 changes)

@@ -29,8 +29,23 @@

## 🎉 Latest Updates

- 2025/12: Axolotl now includes support for [Kimi-Linear](https://docs.axolotl.ai/docs/models/kimi-linear.html), [Plano-Orchestrator](https://docs.axolotl.ai/docs/models/plano.html), [MiMo](https://docs.axolotl.ai/docs/models/mimo.html), [InternVL 3.5](https://docs.axolotl.ai/docs/models/internvl3_5.html), [Olmo3](https://docs.axolotl.ai/docs/models/olmo3.html), [Trinity](https://docs.axolotl.ai/docs/models/trinity.html), and [Ministral3](https://docs.axolotl.ai/docs/models/ministral3.html).
- 2026/03:
  - New model support has been added in Axolotl for [Mistral Small 4](https://github.com/axolotl-ai-cloud/axolotl/tree/main/examples/mistral4), [Qwen3.5, Qwen3.5 MoE](https://github.com/axolotl-ai-cloud/axolotl/tree/main/examples/qwen3.5), [GLM-4.7-Flash](https://github.com/axolotl-ai-cloud/axolotl/tree/main/examples/glm47-flash), [GLM-4.6V](https://github.com/axolotl-ai-cloud/axolotl/tree/main/examples/glm46v), and [GLM-4.5-Air](https://github.com/axolotl-ai-cloud/axolotl/tree/main/examples/glm45).
  - [MoE expert quantization](https://docs.axolotl.ai/docs/expert_quantization.html) support (via `quantize_moe_experts: true`) greatly reduces VRAM when training MoE models (FSDP2 compatible).
- 2026/02:
  - [ScatterMoE LoRA](https://github.com/axolotl-ai-cloud/axolotl/pull/3410) support: LoRA fine-tuning directly on MoE expert weights using custom Triton kernels.
  - Axolotl now has support for [SageAttention](https://github.com/axolotl-ai-cloud/axolotl/pull/2823) and [GDPO](https://github.com/axolotl-ai-cloud/axolotl/pull/3353) (Generalized DPO).
- 2026/01:
  - New integrations for [EAFT](https://github.com/axolotl-ai-cloud/axolotl/pull/3366) (Entropy-Aware Focal Training), which weights the loss by the entropy of the top-k logit distribution, and [Scalable Softmax](https://github.com/axolotl-ai-cloud/axolotl/pull/3338), which improves long-context attention.
- 2025/12:
  - Axolotl now includes support for [Kimi-Linear](https://docs.axolotl.ai/docs/models/kimi-linear.html), [Plano-Orchestrator](https://docs.axolotl.ai/docs/models/plano.html), [MiMo](https://docs.axolotl.ai/docs/models/mimo.html), [InternVL 3.5](https://docs.axolotl.ai/docs/models/internvl3_5.html), [Olmo3](https://docs.axolotl.ai/docs/models/olmo3.html), [Trinity](https://docs.axolotl.ai/docs/models/trinity.html), and [Ministral3](https://docs.axolotl.ai/docs/models/ministral3.html).
  - [Distributed Muon Optimizer](https://github.com/axolotl-ai-cloud/axolotl/pull/3264) support has been added for FSDP2 pretraining.
- 2025/10: New model support has been added in Axolotl for: [Qwen3 Next](https://docs.axolotl.ai/docs/models/qwen3-next.html), [Qwen2.5-vl, Qwen3-vl](https://github.com/axolotl-ai-cloud/axolotl/tree/main/examples/qwen2_5-vl), [Qwen3, Qwen3MoE](https://docs.axolotl.ai/docs/models/qwen3.html), [Granite 4](https://docs.axolotl.ai/docs/models/granite4.html), [HunYuan](https://docs.axolotl.ai/docs/models/hunyuan.html), [Magistral 2509](https://docs.axolotl.ai/docs/models/magistral/vision.html), [Apertus](https://docs.axolotl.ai/docs/models/apertus.html), and [Seed-OSS](https://docs.axolotl.ai/docs/models/seed-oss.html).

<details>

<summary>Expand older updates</summary>

- 2025/09: Axolotl now has text diffusion training. Read more [here](https://github.com/axolotl-ai-cloud/axolotl/tree/main/src/axolotl/integrations/diffusion).
- 2025/08: QAT has been updated to include NVFP4 support. See [PR](https://github.com/axolotl-ai-cloud/axolotl/pull/3107).
- 2025/07:

@@ -39,15 +54,10 @@

- FP8 finetuning with fp8 gather op is now possible in Axolotl via `torchao`. Get started [here](https://docs.axolotl.ai/docs/mixed_precision.html#sec-fp8)!
- [Voxtral](https://docs.axolotl.ai/docs/models/voxtral.html), [Magistral 1.1](https://docs.axolotl.ai/docs/models/magistral.html), and [Devstral](https://docs.axolotl.ai/docs/models/devstral.html) with mistral-common tokenizer support have been integrated in Axolotl!
- TiledMLP support for single-GPU to multi-GPU training with DDP, DeepSpeed and FSDP has been added to support Arctic Long Sequence Training (ALST). See [examples](https://github.com/axolotl-ai-cloud/axolotl/tree/main/examples/alst) for using ALST with Axolotl!
- 2025/05: Quantization Aware Training (QAT) support has been added to Axolotl. Explore the [docs](https://docs.axolotl.ai/docs/qat.html) to learn more!

<details>

<summary>Expand older updates</summary>

- 2025/03: Axolotl has implemented Sequence Parallelism (SP) support. Read the [blog](https://huggingface.co/blog/axolotl-ai-co/long-context-with-sequence-parallelism-in-axolotl) and [docs](https://docs.axolotl.ai/docs/sequence_parallelism.html) to learn how to scale your context length when fine-tuning.
- 2025/06: Magistral with mistral-common tokenizer support has been added to Axolotl. See [docs](https://docs.axolotl.ai/docs/models/magistral.html) to start training your own Magistral models with Axolotl!
- 2025/05: Quantization Aware Training (QAT) support has been added to Axolotl. Explore the [docs](https://docs.axolotl.ai/docs/qat.html) to learn more!
- 2025/04: Llama 4 support has been added in Axolotl. See [docs](https://docs.axolotl.ai/docs/models/llama-4.html) to start training your own Llama 4 models with Axolotl's linearized version!
- 2025/03: Axolotl has implemented Sequence Parallelism (SP) support. Read the [blog](https://huggingface.co/blog/axolotl-ai-co/long-context-with-sequence-parallelism-in-axolotl) and [docs](https://docs.axolotl.ai/docs/sequence_parallelism.html) to learn how to scale your context length when fine-tuning.
- 2025/03: (Beta) Fine-tuning Multimodal models is now supported in Axolotl. Check out the [docs](https://docs.axolotl.ai/docs/multimodal.html) to fine-tune your own!
- 2025/02: Axolotl has added LoRA optimizations to reduce memory usage and improve training speed for LoRA and QLoRA in single GPU and multi-GPU training (DDP and DeepSpeed). Jump into the [docs](https://docs.axolotl.ai/docs/lora_optims.html) to give it a try.
- 2025/02: Axolotl has added GRPO support. Dive into our [blog](https://huggingface.co/blog/axolotl-ai-co/training-llms-w-interpreter-feedback-wasm) and [GRPO example](https://github.com/axolotl-ai-cloud/grpo_code) and have some fun!

@@ -62,10 +72,10 @@ Axolotl is a free and open-source tool designed to streamline post-training and

Features:

- **Multiple Model Support**: Train various models like GPT-OSS, LLaMA, Mistral, Mixtral, Pythia, and many more models available on the Hugging Face Hub.
- **Multimodal Training**: Fine-tune vision-language models (VLMs) including LLaMA-Vision, Qwen2-VL, Pixtral, LLaVA, SmolVLM2, and audio models like Voxtral with image, video, and audio support.
- **Training Methods**: Full fine-tuning, LoRA, QLoRA, GPTQ, QAT, Preference Tuning (DPO, IPO, KTO, ORPO), RL (GRPO), and Reward Modelling (RM) / Process Reward Modelling (PRM).
- **Multimodal Training**: Fine-tune vision-language models (VLMs) including LLaMA-Vision, Qwen2-VL, Pixtral, LLaVA, SmolVLM2, GLM-4.6V, InternVL 3.5, Gemma 3n, and audio models like Voxtral with image, video, and audio support.
- **Training Methods**: Full fine-tuning, LoRA, QLoRA, GPTQ, QAT, Preference Tuning (DPO, IPO, KTO, ORPO), RL (GRPO, GDPO), and Reward Modelling (RM) / Process Reward Modelling (PRM).
- **Easy Configuration**: Re-use a single YAML configuration file across the full fine-tuning pipeline: dataset preprocessing, training, evaluation, quantization, and inference.
- **Performance Optimizations**: [Multipacking](https://docs.axolotl.ai/docs/multipack.html), [Flash Attention](https://github.com/Dao-AILab/flash-attention), [Xformers](https://github.com/facebookresearch/xformers), [Flex Attention](https://pytorch.org/blog/flexattention/), [Liger Kernel](https://github.com/linkedin/Liger-Kernel), [Cut Cross Entropy](https://github.com/apple/ml-cross-entropy/tree/main), [Sequence Parallelism (SP)](https://docs.axolotl.ai/docs/sequence_parallelism.html), [LoRA optimizations](https://docs.axolotl.ai/docs/lora_optims.html), [Multi-GPU training (FSDP1, FSDP2, DeepSpeed)](https://docs.axolotl.ai/docs/multi-gpu.html), [Multi-node training (Torchrun, Ray)](https://docs.axolotl.ai/docs/multi-node.html), and many more!
- **Performance Optimizations**: [Multipacking](https://docs.axolotl.ai/docs/multipack.html), [Flash Attention 2/3/4](https://docs.axolotl.ai/docs/attention.html#flash-attention), [Xformers](https://docs.axolotl.ai/docs/attention.html#xformers), [Flex Attention](https://docs.axolotl.ai/docs/attention.html#flex-attention), [SageAttention](https://docs.axolotl.ai/docs/attention.html#sageattention), [Liger Kernel](https://docs.axolotl.ai/docs/custom_integrations.html#liger-kernels), [Cut Cross Entropy](https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy), [ScatterMoE](https://docs.axolotl.ai/docs/custom_integrations.html#kernels-integration), [Sequence Parallelism (SP)](https://docs.axolotl.ai/docs/sequence_parallelism.html), [LoRA optimizations](https://docs.axolotl.ai/docs/lora_optims.html), [Multi-GPU training (FSDP1, FSDP2, DeepSpeed)](https://docs.axolotl.ai/docs/multi-gpu.html), [Multi-node training (Torchrun, Ray)](https://docs.axolotl.ai/docs/multi-node.html), and many more!
- **Flexible Dataset Handling**: Load from local, HuggingFace, and cloud (S3, Azure, GCP, OCI) datasets.
- **Cloud Ready**: We ship [Docker images](https://hub.docker.com/u/axolotlai) and also [PyPI packages](https://pypi.org/project/axolotl/) for use on cloud platforms and local hardware.
@@ -331,6 +331,7 @@ website:
        - docs/sequence_parallelism.qmd
        - docs/gradient_checkpointing.qmd
        - docs/nd_parallelism.qmd
        - docs/expert_quantization.qmd

      - section: "Troubleshooting"
        contents:
benchmarks/bench_entropy.py (new file, 208 lines)

"""Benchmark for entropy_from_logits Triton kernel vs original chunked implementation.

Usage: CUDA_VISIBLE_DEVICES=0 python benchmarks/bench_entropy.py
"""

import gc
import statistics

import torch
import torch.nn.functional as F

from axolotl.monkeypatch.trainer.utils import entropy_from_logits

V = 151936  # Qwen vocab
WARMUP = 5
BENCH_ITERS = 20
MEM_ITERS = 10


def entropy_from_logits_original(logits: torch.Tensor, chunk_size: int = 128):
    """Original chunked implementation (reference)."""
    original_shape = logits.shape[:-1]
    num_classes = logits.shape[-1]
    flat_logits = logits.reshape(-1, num_classes)
    entropies = []
    for chunk in flat_logits.split(chunk_size, dim=0):
        logps = F.log_softmax(chunk, dim=-1)
        chunk_entropy = -(torch.exp(logps) * logps).sum(-1)
        entropies.append(chunk_entropy)
    return torch.cat(entropies, dim=0).reshape(original_shape)


def _clean_gpu():
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    torch.cuda.reset_accumulated_memory_stats()
    torch.cuda.synchronize()


def profile_time(fn, logits, n_iters=BENCH_ITERS):
    for _ in range(WARMUP):
        out = fn(logits, chunk_size=128)
        del out
    torch.cuda.synchronize()

    times = []
    for _ in range(n_iters):
        s = torch.cuda.Event(enable_timing=True)
        e = torch.cuda.Event(enable_timing=True)
        s.record()
        out = fn(logits, chunk_size=128)
        e.record()
        torch.cuda.synchronize()
        times.append(s.elapsed_time(e))
        del out
    return times


def profile_memory(fn, logits, n_iters=MEM_ITERS):
    for _ in range(WARMUP):
        out = fn(logits, chunk_size=128)
        del out
    torch.cuda.synchronize()

    peaks = []
    for _ in range(n_iters):
        _clean_gpu()
        base = torch.cuda.max_memory_allocated()
        out = fn(logits, chunk_size=128)
        torch.cuda.synchronize()
        peaks.append(torch.cuda.max_memory_allocated() - base)
        del out
    return [p / 1e6 for p in peaks]


def fmt(values, unit=""):
    mean = statistics.mean(values)
    std = statistics.stdev(values) if len(values) > 1 else 0.0
    return f"{mean:8.2f} ± {std:5.2f} {unit} [min={min(values):.2f}, max={max(values):.2f}]"


def benchmark_contiguous():
    print("=" * 60)
    print(
        f"CONTIGUOUS BENCHMARK (warmup={WARMUP}, time={BENCH_ITERS}, mem={MEM_ITERS})"
    )
    print("=" * 60)

    configs = [
        (1, 2048),
        (1, 8192),
        (1, 16384),
        (4, 4096),
        (8, 2048),
        (16, 2048),
        (16, 4096),
    ]

    for B, L in configs:
        mem_gb = B * L * V * 2 / 1e9
        if mem_gb > 28:
            print(f"\n skip B={B}, L={L} ({mem_gb:.1f} GB)")
            continue

        N = B * L
        print(f"\n{'─' * 60}")
        print(f"B={B:2d}, L={L:5d} ({N:6d} rows, logits {mem_gb:.2f} GB)")
        print(f"{'─' * 60}")

        torch.manual_seed(42)
        logits = torch.randn(B, L, V, device="cuda", dtype=torch.bfloat16)

        t_orig = profile_time(entropy_from_logits_original, logits)
        t_triton = profile_time(entropy_from_logits, logits)
        orig_mean = statistics.mean(t_orig)
        triton_mean = statistics.mean(t_triton)

        print(" TIME (ms):")
        print(f" original: {fmt(t_orig, 'ms')}")
        print(f" triton: {fmt(t_triton, 'ms')}")
        print(f" speedup: {orig_mean / triton_mean:.2f}x")

        m_orig = profile_memory(entropy_from_logits_original, logits)
        m_triton = profile_memory(entropy_from_logits, logits)
        orig_peak = statistics.mean(m_orig)
        triton_peak = statistics.mean(m_triton)

        print(" MEMORY (peak overhead):")
        print(f" original: {fmt(m_orig, 'MB')}")
        print(f" triton: {fmt(m_triton, 'MB')}")
        print(f" saved: {orig_peak - triton_peak:.1f} MB")

        del logits
        _clean_gpu()


def benchmark_noncontiguous():
    print("\n" + "=" * 60)
    print(
        f"NON-CONTIGUOUS BENCHMARK (warmup={WARMUP}, time={BENCH_ITERS}, mem={MEM_ITERS})"
    )
    print("=" * 60)

    configs = [
        (4, 2048, "transpose"),
        (4, 8192, "transpose"),
        (8, 2048, "transpose"),
        (4, 4096, "slice_batch"),
    ]

    for B, L, method in configs:
        torch.manual_seed(42)

        if method == "transpose":
            raw = torch.randn(L, B, V, device="cuda", dtype=torch.bfloat16)
            logits_nc = raw.transpose(0, 1)
            raw_gb = L * B * V * 2 / 1e9
        elif method == "slice_batch":
            raw = torch.randn(B * 2, L, V, device="cuda", dtype=torch.bfloat16)
            logits_nc = raw[::2]
            raw_gb = B * 2 * L * V * 2 / 1e9
        else:
            continue

        if raw_gb > 28:
            print(f"\n skip B={B}, L={L}, {method} ({raw_gb:.1f} GB)")
            del raw, logits_nc
            torch.cuda.empty_cache()
            continue

        N = B * L
        print(f"\n{'─' * 60}")
        print(f"B={B}, L={L} {method} ({N} rows, raw {raw_gb:.2f} GB)")
        print(f"{'─' * 60}")

        def original_with_copy(logits, chunk_size=128):
            return entropy_from_logits_original(
                logits.contiguous(), chunk_size=chunk_size
            )

        t_orig = profile_time(original_with_copy, logits_nc)
        t_triton = profile_time(entropy_from_logits, logits_nc)
        orig_mean = statistics.mean(t_orig)
        triton_mean = statistics.mean(t_triton)

        print(" TIME (ms):")
        print(f" orig+copy: {fmt(t_orig, 'ms')}")
        print(f" triton-strided:{fmt(t_triton, 'ms')}")
        print(f" speedup: {orig_mean / triton_mean:.2f}x")

        m_orig = profile_memory(original_with_copy, logits_nc)
        m_triton = profile_memory(entropy_from_logits, logits_nc)
        orig_peak = statistics.mean(m_orig)
        triton_peak = statistics.mean(m_triton)

        print(" MEMORY (peak overhead):")
        print(f" orig+copy: {fmt(m_orig, 'MB')}")
        print(f" triton-strided:{fmt(m_triton, 'MB')}")
        print(f" saved: {orig_peak - triton_peak:.1f} MB")

        del raw, logits_nc
        _clean_gpu()


if __name__ == "__main__":
    benchmark_contiguous()
    benchmark_noncontiguous()
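For reference, both implementations in the benchmark above compute the same quantity, the per-token entropy of the softmax distribution over the vocabulary,

$$
H_i = -\sum_{v=1}^{V} p_{i,v}\,\log p_{i,v}, \qquad p_{i,\cdot} = \mathrm{softmax}(\mathrm{logits}_{i,\cdot}),
$$

with the chunked PyTorch reference and the Triton kernel compared on wall-clock time and peak memory overhead.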
benchmarks/bench_scattermoe_lora.py (new file, 284 lines)

"""Benchmark for ScatterMoE LoRA Triton kernels.

Measures forward, backward dX, and backward dA/dB kernels at common MoE
model shapes. Reports per-kernel timings, LoRA overhead vs base scatter2scatter,
and full fwd+bwd autograd throughput.

Usage:
    CUDA_VISIBLE_DEVICES=0 python benchmarks/bench_scattermoe_lora.py
    CUDA_VISIBLE_DEVICES=0 python benchmarks/bench_scattermoe_lora.py --ranks 16 64
    CUDA_VISIBLE_DEVICES=0 python benchmarks/bench_scattermoe_lora.py --models Qwen/Qwen3.5-35B-A3B
"""

import argparse
import gc
import time
from functools import partial

import torch

from axolotl.integrations.kernels.libs.scattermoe_lora.kernels import (
    lora_ops,
    ops as base_ops,
)
from axolotl.integrations.kernels.libs.scattermoe_lora.parallel_experts import (
    flatten_sort_count,
)
from axolotl.integrations.kernels.libs.scattermoe_lora.parallel_linear_lora import (
    ScatterMoELoRA,
)

DEVICE = "cuda"
DTYPE = torch.bfloat16
WARMUP = 5
ITERS = 20

# ─── Model configs ──────────────────────────────────────────────────────────

BUILTIN_CONFIGS = {
    "Qwen3.5-35B-A3B": (256, 2048, 512, 8),  # E, H, I, k
    "Qwen3-30B-A3B": (128, 2048, 768, 8),
    "OLMoE-1B-7B": (64, 2048, 1024, 8),
    "Mixtral-8x7B": (8, 4096, 14336, 2),
}


def _resolve_config(spec):
    """Resolve a model spec to (E, H, I, k). Accepts builtin names or HF IDs."""
    key = spec.lower().replace("/", "-")
    for name, cfg in BUILTIN_CONFIGS.items():
        if key in name.lower() or name.lower() in key:
            return name, cfg

    from transformers import AutoConfig

    hf_cfg = AutoConfig.from_pretrained(spec, trust_remote_code=True)
    if callable(getattr(hf_cfg, "get_text_config", None)):
        tc = hf_cfg.get_text_config()
        if hasattr(tc, "model_type") and tc.model_type != hf_cfg.model_type:
            hf_cfg = tc
    hidden = hf_cfg.hidden_size
    inter = getattr(hf_cfg, "moe_intermediate_size", None) or hf_cfg.intermediate_size
    experts = (
        getattr(hf_cfg, "num_experts", None)
        or getattr(hf_cfg, "num_local_experts", None)
        or getattr(hf_cfg, "n_routed_experts", None)
    )
    top_k = (
        getattr(hf_cfg, "num_experts_per_tok", None)
        or getattr(hf_cfg, "num_experts_per_token", None)
        or 2
    )
    name = spec.split("/")[-1]
    return name, (experts, hidden, inter, top_k)


# ─── Benchmark helpers ──────────────────────────────────────────────────────


def _clean():
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.synchronize()


def _bench(fn, warmup=WARMUP, iters=ITERS):
    for _ in range(warmup):
        fn()
    torch.cuda.synchronize()
    times = []
    for _ in range(iters):
        torch.cuda.synchronize()
        t0 = time.perf_counter()
        fn()
        torch.cuda.synchronize()
        times.append((time.perf_counter() - t0) * 1000)
    times.sort()
    return times[len(times) // 2]


def _setup(num_experts, K, N, T, top_k, R):
    torch.manual_seed(42)
    x = torch.randn(T, K, device=DEVICE, dtype=DTYPE)
    W = torch.randn(num_experts, K, N, device=DEVICE, dtype=DTYPE) * 0.02
    lora_A = torch.randn(R * num_experts, K, device=DEVICE, dtype=DTYPE) * 0.01
    lora_B = torch.randn(N, R * num_experts, device=DEVICE, dtype=DTYPE) * 0.01
    logits = torch.randn(T, num_experts, device=DEVICE)
    _, top_idx = torch.topk(torch.softmax(logits, dim=-1), top_k, dim=-1)
    sei, ssi, eo = flatten_sort_count(top_idx, num_experts)
    gx = base_ops.group(x, ssi, fan_out=top_k)
    dy = torch.randn(gx.size(0), N, device=DEVICE, dtype=DTYPE)
    return x, W, lora_A, lora_B, sei, ssi, eo, gx, dy


# ─── Kernel wrappers (avoid B023 loop-variable capture) ──────────────────────


def _call_fwd(x, W, sei, ssi, top_k, lA, lB):
    return lora_ops.scatter2scatter_lora(
        X=x,
        W=W,
        sorted_expert_idxs=sei,
        sorted_scattered_idxs=ssi,
        k=top_k,
        lora_A=lA,
        lora_B=lB,
        scaling=2.0,
    )


def _call_base(x, W, sei, ssi, top_k):
    return base_ops.scatter2scatter(
        X=x,
        W=W,
        sorted_expert_idxs=sei,
        sorted_scattered_idxs=ssi,
        k=top_k,
    )


def _call_dx(dy, W, sei, ssi, lA, lB):
    return lora_ops.scatter2scatter_lora_dX(
        DY=dy,
        W=W,
        sorted_expert_idxs=sei,
        sorted_scattered_idxs=ssi,
        k=1,
        lora_A=lA,
        lora_B=lB,
        scaling=2.0,
        dy_grouped=True,
        dx_grouped=False,
    )


def _call_bwd(dy, gx, lA, lB, eo, num_experts):
    return lora_ops.group_bwd_lora(
        DY=dy,
        X=gx,
        lora_A=lA,
        lora_B=lB,
        expert_offsets=eo,
        E=num_experts,
        scaling=2.0,
    )


# ─── Main ────────────────────────────────────────────────────────────────────


def main():
    parser = argparse.ArgumentParser(description="ScatterMoE LoRA kernel benchmark")
    parser.add_argument(
        "--models",
        "-m",
        nargs="+",
        help="Model names or HF IDs (default: all builtins)",
    )
    parser.add_argument("--ranks", "-r", nargs="+", type=int, default=[16, 32, 64])
    parser.add_argument("--seq-len", "-T", type=int, default=2048)
    args = parser.parse_args()

    T = args.seq_len
    print(f"GPU: {torch.cuda.get_device_name()}")
    print(f"T={T}, ranks={args.ranks}\n")

    if args.models:
        configs = [_resolve_config(m) for m in args.models]
    else:
        configs = list(BUILTIN_CONFIGS.items())

    for model_name, (num_experts, hidden, inter, top_k) in configs:
        print(f"{'=' * 70}")
        print(f" {model_name}: E={num_experts}, H={hidden}, I={inter}, k={top_k}")
        print(f"{'=' * 70}")

        for R in args.ranks:
            for proj, K, N in [("gate_up", hidden, 2 * inter), ("down", inter, hidden)]:
                _clean()
                x, W, lA, lB, sei, ssi, eo, gx, dy = _setup(
                    num_experts, K, N, T, top_k, R
                )

                # Forward with LoRA (auto-dispatched: fused or split)
                dispatch = (
                    "split"
                    if (
                        num_experts <= lora_ops._SPLIT_LORA_FWD_MAX_EXPERTS
                        and K * N >= lora_ops._SPLIT_LORA_FWD_THRESHOLD
                    )
                    else "fused"
                )
                t_fwd = _bench(partial(_call_fwd, x, W, sei, ssi, top_k, lA, lB))
                t_base = _bench(partial(_call_base, x, W, sei, ssi, top_k))
                t_dx = _bench(partial(_call_dx, dy, W, sei, ssi, lA, lB))
                t_bwd = _bench(partial(_call_bwd, dy, gx, lA, lB, eo, num_experts))

                total = t_fwd + t_dx + t_bwd
                overhead = t_fwd / t_base - 1 if t_base > 0 else 0

                print(
                    f" R={R:>2} {proj:<8} "
                    f"fwd={t_fwd:>6.2f}ms [{dispatch}] "
                    f"base={t_base:>6.2f}ms "
                    f"(+{overhead * 100:.0f}%) "
                    f"dx={t_dx:>6.2f}ms bwd={t_bwd:>6.2f}ms "
                    f"total={total:>6.2f}ms"
                )

                # Full autograd fwd+bwd with memory measurement
                x_ag = x.clone().requires_grad_(True)
                lA_ag = lA.clone().requires_grad_(True)
                lB_ag = lB.clone().requires_grad_(True)

                def _run_autograd(
                    _x=x_ag,
                    _W=W,
                    _k=top_k,
                    _sei=sei,
                    _ssi=ssi,
                    _eo=eo,
                    _lA=lA_ag,
                    _lB=lB_ag,
                ):
                    out = ScatterMoELoRA.apply(
                        _x,
                        _W,
                        _k,
                        _sei,
                        _ssi,
                        _eo,
                        _lA,
                        _lB,
                        2.0,
                        None,
                        None,
                        False,
                        False,
                        True,
                        False,
                    )
                    out.sum().backward()
                    _x.grad = None
                    _lA.grad = None
                    _lB.grad = None

                t_full = _bench(_run_autograd)

                _clean()
                torch.cuda.reset_peak_memory_stats()
                mem_before = torch.cuda.memory_allocated()
                _run_autograd()
                torch.cuda.synchronize()
                mem_peak = torch.cuda.max_memory_allocated() - mem_before

                print(
                    f" full_fwd_bwd={t_full:>6.2f}ms "
                    f"peak_delta={mem_peak / 1e6:>6.1f}MB"
                )

        print()


if __name__ == "__main__":
    main()
benchmarks/bench_selective_logsoftmax.py (new file, 191 lines)

"""Benchmark for selective_log_softmax Triton kernel vs original implementation.

Usage: CUDA_VISIBLE_DEVICES=0 python benchmarks/bench_selective_logsoftmax.py
"""

import gc
import statistics

import torch

from axolotl.monkeypatch.trainer.utils import (
    selective_log_softmax,
    selective_log_softmax_original,
)

V = 151936  # Qwen vocab
WARMUP = 5
BENCH_ITERS = 20
MEM_ITERS = 10


def _clean_gpu():
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    torch.cuda.reset_accumulated_memory_stats()
    torch.cuda.synchronize()


def profile_time(fn, args, n_iters=BENCH_ITERS):
    for _ in range(WARMUP):
        fn(*args)
    torch.cuda.synchronize()

    times = []
    for _ in range(n_iters):
        s = torch.cuda.Event(enable_timing=True)
        e = torch.cuda.Event(enable_timing=True)
        s.record()
        fn(*args)
        e.record()
        torch.cuda.synchronize()
        times.append(s.elapsed_time(e))
    return times


def profile_memory(fn, args, n_iters=MEM_ITERS):
    for _ in range(WARMUP):
        out = fn(*args)
        del out
    torch.cuda.synchronize()

    peaks = []
    for _ in range(n_iters):
        _clean_gpu()
        base = torch.cuda.max_memory_allocated()
        out = fn(*args)
        torch.cuda.synchronize()
        peaks.append(torch.cuda.max_memory_allocated() - base)
        del out
    return [p / 1e6 for p in peaks]


def fmt(values, unit=""):
    mean = statistics.mean(values)
    std = statistics.stdev(values) if len(values) > 1 else 0.0
    return f"{mean:8.2f} ± {std:5.2f} {unit} [min={min(values):.2f}, max={max(values):.2f}]"


def benchmark_forward():
    print("=" * 60)
    print(f"FORWARD BENCHMARK (warmup={WARMUP}, time={BENCH_ITERS}, mem={MEM_ITERS})")
    print("=" * 60)

    configs = [
        (1, 2048),
        (1, 8192),
        (4, 4096),
        (8, 2048),
        (16, 2048),
        (16, 4096),
    ]

    for B, L in configs:
        mem_gb = B * L * V * 2 / 1e9
        if mem_gb > 28:
            print(f"\n skip B={B}, L={L} ({mem_gb:.1f} GB)")
            continue

        N = B * L
        print(f"\n{'─' * 60}")
        print(f"B={B:2d}, L={L:5d} ({N:6d} rows, logits {mem_gb:.2f} GB)")
        print(f"{'─' * 60}")

        torch.manual_seed(42)
        logits = torch.randn(B, L, V, device="cuda", dtype=torch.bfloat16)
        index = torch.randint(0, V, (B, L), device="cuda")

        t_orig = profile_time(selective_log_softmax_original, (logits, index))
        t_triton = profile_time(selective_log_softmax, (logits, index))
        orig_mean = statistics.mean(t_orig)
        triton_mean = statistics.mean(t_triton)

        print(" TIME (ms):")
        print(f" original: {fmt(t_orig, 'ms')}")
        print(f" triton: {fmt(t_triton, 'ms')}")
        print(f" speedup: {orig_mean / triton_mean:.2f}x")

        m_orig = profile_memory(selective_log_softmax_original, (logits, index))
        m_triton = profile_memory(selective_log_softmax, (logits, index))
        orig_peak = statistics.mean(m_orig)
        triton_peak = statistics.mean(m_triton)

        print(" MEMORY (peak overhead):")
        print(f" original: {fmt(m_orig, 'MB')}")
        print(f" triton: {fmt(m_triton, 'MB')}")
        print(f" saved: {orig_peak - triton_peak:.1f} MB")

        del logits, index
        _clean_gpu()


def benchmark_backward():
    print("\n" + "=" * 60)
    print(f"FWD+BWD BENCHMARK (warmup={WARMUP}, time={BENCH_ITERS}, mem={MEM_ITERS})")
    print("=" * 60)

    configs = [
        (1, 2048),
        (1, 8192),
        (4, 4096),
        (8, 2048),
        (16, 2048),
        (16, 4096),
    ]

    def fwd_bwd_original(logits, index):
        logits.grad = None
        out = selective_log_softmax_original(logits, index)
        out.sum().backward()

    def fwd_bwd_triton(logits, index):
        logits.grad = None
        out = selective_log_softmax(logits, index)
        out.sum().backward()

    for B, L in configs:
        mem_gb = B * L * V * 2 / 1e9
        if mem_gb > 20:
            print(f"\n skip B={B}, L={L} ({mem_gb:.1f} GB, need room for grads)")
            continue

        N = B * L
        print(f"\n{'─' * 60}")
        print(f"B={B:2d}, L={L:5d} ({N:6d} rows, logits {mem_gb:.2f} GB)")
        print(f"{'─' * 60}")

        torch.manual_seed(42)
        logits_orig = torch.randn(
            B, L, V, device="cuda", dtype=torch.bfloat16, requires_grad=True
        )
        logits_tri = logits_orig.detach().clone().requires_grad_(True)
        index = torch.randint(0, V, (B, L), device="cuda")

        t_orig = profile_time(fwd_bwd_original, (logits_orig, index))
        t_triton = profile_time(fwd_bwd_triton, (logits_tri, index))
        orig_mean = statistics.mean(t_orig)
        triton_mean = statistics.mean(t_triton)

        print(" FWD+BWD TIME (ms):")
        print(f" original: {fmt(t_orig, 'ms')}")
        print(f" triton: {fmt(t_triton, 'ms')}")
        print(f" speedup: {orig_mean / triton_mean:.2f}x")

        m_orig = profile_memory(fwd_bwd_original, (logits_orig, index))
        m_triton = profile_memory(fwd_bwd_triton, (logits_tri, index))
        orig_peak = statistics.mean(m_orig)
        triton_peak = statistics.mean(m_triton)

        print(" FWD+BWD MEMORY (peak overhead):")
        print(f" original: {fmt(m_orig, 'MB')}")
        print(f" triton: {fmt(m_triton, 'MB')}")
        print(f" saved: {orig_peak - triton_peak:.1f} MB")

        del logits_orig, logits_tri, index
        _clean_gpu()


if __name__ == "__main__":
    benchmark_forward()
    benchmark_backward()
@@ -31,8 +31,9 @@ RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
    sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt; \
    fi

RUN uv pip install packaging==26.0 setuptools==75.8.0
RUN uv pip install packaging==26.0 setuptools==78.1.1
RUN uv pip install torchvision
RUN uv pip uninstall causal_conv1d
RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
    uv pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
    else \

@@ -32,7 +32,8 @@ RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
    sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt; \
    fi

RUN pip install packaging==26.0 setuptools==75.8.0 psutil
RUN pip install packaging==26.0 setuptools==78.1.1 psutil
RUN pip uninstall -y causal_conv1d
RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
    pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
    else \

@@ -3,6 +3,12 @@ set -e

python -c "import torch; assert '$PYTORCH_VERSION' in torch.__version__"

# curl -L https://axolotl-ci.b-cdn.net/hf-cache.tar.zst | tar -xpf - -C "${HF_HOME}/hub/" --use-compress-program unzstd --strip-components=1
hf download "NousResearch/Meta-Llama-3-8B"
hf download "NousResearch/Meta-Llama-3-8B-Instruct"
hf download "microsoft/Phi-4-reasoning"
hf download "microsoft/Phi-3.5-mini-instruct"

# Run unit tests with initial coverage report
pytest -v --durations=10 -n8 \
    --ignore=tests/e2e/ \

@@ -22,6 +22,7 @@ RUN git clone --depth=1 https://github.com/axolotl-ai-cloud/axolotl.git
WORKDIR /workspace/axolotl

# If AXOLOTL_EXTRAS is set, append it in brackets; don't install deepspeed with arm64
RUN pip uninstall -y causal_conv1d
RUN if [ "$TARGETARCH" = "arm64" ]; then \
    BASE_EXTRAS="flash-attn,ring-flash-attn,optimizers,ray"; \
    else \

@@ -22,6 +22,7 @@ RUN git clone --depth=1 https://github.com/axolotl-ai-cloud/axolotl.git
WORKDIR /workspace/axolotl

# If AXOLOTL_EXTRAS is set, append it in brackets; don't install deepspeed with arm64
RUN uv pip uninstall causal_conv1d
RUN if [ "$TARGETARCH" = "arm64" ]; then \
    BASE_EXTRAS="flash-attn,ring-flash-attn,optimizers,ray"; \
    else \
@@ -13,9 +13,10 @@ sdp_attention: true

For more details: [PyTorch docs](https://docs.pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)

## Flash Attention 2
## Flash Attention

Uses efficient kernels to compute attention.
Axolotl supports Flash Attention 2, 3, and 4. The best available version is used automatically
based on your installed packages and GPU.

```yaml
flash_attention: true

@@ -23,11 +24,9 @@ flash_attention: true

For more details: [Flash Attention](https://github.com/Dao-AILab/flash-attention/)

### Nvidia
### Flash Attention 2

Requirements: Ampere, Ada, or Hopper GPUs

Note: For Turing GPUs or lower, please use other attention methods.
Requirements: Ampere, Ada, or Hopper GPUs (Turing or lower not supported)

```bash
pip install flash-attn --no-build-isolation

@@ -35,11 +34,12 @@ pip install flash-attn --no-build-isolation

::: {.callout-tip}

If you get `undefined symbol` while training, ensure you installed PyTorch prior to Axolotl. Alternatively, try reinstall or downgrade a version.
If you get `undefined symbol` while training, ensure you installed PyTorch prior to Axolotl.
Alternatively, try reinstall or downgrade a version.

:::

#### Flash Attention 3
### Flash Attention 3

Requirements: Hopper only and CUDA 12.8 (recommended)

@@ -50,6 +50,44 @@ cd flash-attention/hopper
python setup.py install
```

### Flash Attention 4

Requirements: Hopper or Blackwell GPUs

```bash
pip install flash-attn-4
```

Or from source:

```bash
git clone https://github.com/Dao-AILab/flash-attention.git
cd flash-attention/flash_attn/cute

pip install -e .

# FA2's flash_attn package includes a cute/ stub that shadows FA4.
# Remove it so Python can find the real FA4 module:
rm -r $(python -c "import flash_attn; print(flash_attn.__path__[0])")/cute
```

::: {.callout-note}

**Hopper (SM90) users**: The backward kernel is not yet included in the pip package. To use FA4
for training on Hopper, install from source using the instructions above.

:::

::: {.callout-warning}

FA4 only supports head dimensions up to 128 (`d ≤ 128`). The DeepSeek shape `(192, 128)` is
also supported but only on Blackwell. Axolotl automatically detects incompatible head dimensions
and falls back to FA2/3.

:::

For more details: [flash-attention/flash_attn/cute](https://github.com/Dao-AILab/flash-attention/tree/main/flash_attn/cute)

### AMD

Requirements: ROCm 6.0 and above.
67
docs/expert_quantization.qmd
Normal file
67
docs/expert_quantization.qmd
Normal file
@@ -0,0 +1,67 @@
|
||||
---
|
||||
title: "MoE Expert Quantization"
|
||||
description: "Reduce VRAM usage when training MoE model adapters by quantizing expert weights on load"
|
||||
---
|
||||
|
||||
Transformers v5 changed MoE expert layers from `nn.Linear` to fused `nn.Parameter` (3D+ tensors).
|
||||
This means `bitsandbytes` can no longer quantize them during model loading, resulting in all expert
|
||||
weights being loaded in full bf16 precision and causing massive VRAM usage.
|
||||
|
||||
`quantize_moe_experts` solves this by quantizing expert weights during model loading.
|
||||
It intercepts the weight loading process, quantizes each expert tensor on the fly, and
|
||||
immediately frees the original bf16 tensor from VRAM. This dramatically reduces peak memory.
|
||||
For example, GLM-4.7-Flash QLoRA drops from ~127GiB to ~23GiB reserved memory.
|
||||
|
||||
## Usage
|
||||
|
||||
Enable expert quantization in your Axolotl config:
|
||||
|
||||
```yaml
|
||||
quantize_moe_experts: true
|
||||
```
|
||||
|
||||
This works with both 4-bit (QLoRA) and 8-bit (LoRA) quantization.
|
|
||||
|
||||
You can optionally apply LoRA adapters directly to expert weights using `lora_target_parameters`:
|
||||
|
||||
```yaml
|
||||
lora_target_parameters:
|
||||
- mlp.experts.gate_up_proj
|
||||
- mlp.experts.down_proj
|
||||
# - mlp.gate.weight # router
|
||||
```
|
||||
|
||||
::: {.callout-note}
|
||||
`lora_dropout` must be `0` when using `lora_target_parameters`.
|
||||
:::
|
||||
|
||||
## Requirements
|
||||
|
||||
- Requires (`adapter: lora` and `load_in_8bit: true`) or (`adapter: qlora` and `load_in_4bit: true`)
|
||||
- CUDA GPUs only (not tested with ROCm or other backends)
|
||||
- FSDP2 compatible for distributed training
|
||||
|
||||
## Limitations
|
||||
|
||||
- `lora_target_linear` is not compatible with `quantize_moe_experts`. See [Expert LoRA targeting](#expert-lora-targeting) instead.
|
||||
- `cpu_ram_efficient_loading` hangs or takes a long time with FSDP2 + QLoRA.
|
||||
- Total model parameter count may display incorrectly (trainable param count is correct).
|
||||
- FSDP LoRA (8-bit) may have a large initial VRAM spike at the first 1-2 steps, which then drops. QLoRA does not exhibit this.
|
||||
- FSDP2 may use more VRAM per GPU than single-GPU training because not all layers are properly sharded across ranks.
|
||||
- Model loading takes longer due to on-demand quantization, even on consecutive runs.
|
||||
- DeepSpeed has not been tested.
|
||||
|
||||
## Implementation details
|
||||
|
||||
The quantization is applied by patching transformers to intercept weight loading.
|
||||
When a 3D+ CUDA tensor with "expert" in its name is detected:
|
||||
|
||||
- **4-bit mode:** Uses bitsandbytes NF4 parametrization (configurable via `bnb_4bit_quant_type`).
|
||||
- **8-bit mode:** Uses a custom row-wise int8 parametrization with bitsandbytes dequantization.
|
||||
|
||||
The original bf16 tensor is freed immediately after quantization. Multiple sub-patches are applied to
|
||||
transformers, PEFT and accelerate FSDP2 to support these parametrized expert modules.
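As a rough sketch of the idea (illustrative only, not the actual Axolotl patch, which hooks into transformers' weight-loading path and adds the PEFT and accelerate sub-patches mentioned above):

```python
# Illustrative sketch, not Axolotl's implementation: quantize fused expert
# tensors to NF4 as they are loaded and drop the original bf16 copies.
import torch
import bitsandbytes.functional as bnb_f


def quantize_expert_weights(state_dict: dict[str, torch.Tensor]) -> dict:
    """Replace 3D+ CUDA "expert" tensors with NF4-quantized storage."""
    quant_states = {}
    for name in list(state_dict):
        tensor = state_dict[name]
        if tensor.dim() >= 3 and tensor.is_cuda and "expert" in name:
            quantized, quant_states[name] = bnb_f.quantize_4bit(
                tensor, quant_type="nf4"
            )
            # Overwriting the entry drops the last reference to the bf16
            # tensor, so its VRAM can be reclaimed right away.
            state_dict[name] = quantized
    torch.cuda.empty_cache()
    return quant_states
```
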
For full implementation details, see [PR #3439](https://github.com/axolotl-ai-cloud/axolotl/pull/3439).
|
||||
@@ -13,6 +13,7 @@ format:
|
||||
- [Pixtral](#sec-pixtral)
|
||||
- [Llava-1.5](#sec-llava-15)
|
||||
- [Mistral-Small-3.1](#sec-mistral-small-31)
|
||||
- [Mistral-Small-4](#sec-mistral-small-4)
|
||||
- [Magistral-Small-2509](#sec-magistral-small-2509)
|
||||
- [Voxtral](#sec-voxtral)
|
||||
- [Gemma-3](#sec-gemma-3)
|
||||
@@ -108,6 +109,12 @@ Please make sure to install vision lib via `pip install 'mistral-common[opencv]=
|
||||
base_model: mistralai/Mistral-Small-3.1-24B-Instruct-2503
|
||||
```
|
||||
|
||||
### Mistral-Small-4 {#sec-mistral-small-4}
|
||||
|
||||
```yaml
|
||||
base_model: mistralai/Mistral-Small-4-119B-2603
|
||||
```
|
||||
|
||||
### Magistral-Small-2509 {#sec-magistral-small-2509}
|
||||
|
||||
::: {.callout-tip}
|
||||
|
||||
@@ -66,6 +66,15 @@ Provides efficient Triton kernels to improve training speed and reduce memory us
|
||||
|
||||
- **Learn more:** [Custom Integrations - Liger Kernels](custom_integrations.qmd#liger-kernels)
|
||||
|
||||
### Expert Kernels
|
||||
|
||||
Optimized kernel implementations for Mixture of Experts (MoE) model training.
|
||||
|
||||
- **ScatterMoE**: Triton-based MoE kernels with fused LoRA support.
|
||||
- **SonicMoE**: CUTLASS-based MoE kernels for NVIDIA Hopper and Blackwell GPUs (see the example config below).
|
||||
|
||||
- **Learn more:** [Custom Integrations - Kernels Integration](custom_integrations.qmd#kernels-integration)
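As a concrete example, the Mistral Small 4 configs in this repo enable the SonicMoE path through the kernels plugin:

```yaml
plugins:
  - axolotl.integrations.kernels.KernelsPlugin
use_kernels: true
use_sonicmoe: true
```
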
## Long Context Models
|
||||
|
||||
Techniques to train models on sequences longer than their original context window.
|
||||
@@ -131,3 +140,10 @@ Simulates quantization effects during training, helping the model adapt and pote
|
||||
Allows you to finetune LoRA adapters on top of a model that has already been quantized using the GPTQ method.
|
||||
|
||||
- **Example:** [GPTQ LoRA Example](https://github.com/axolotl-ai-cloud/axolotl/blob/main/examples/llama-2/gptq-lora.yml)
|
||||
|
||||
### MoE Expert Quantization
|
||||
|
||||
Quantizes MoE expert weights on load to reduce VRAM when training MoE models with adapters. Required for Transformers v5+ MoE models where experts use fused `nn.Parameter` tensors.
|
||||
|
||||
- **Config:** `quantize_moe_experts: true`
|
||||
- **Learn more:** [MoE Expert Quantization](expert_quantization.qmd)
|
||||
|
||||
207
docs/rlhf.qmd
207
docs/rlhf.qmd
@@ -721,6 +721,213 @@ trl:
|
||||
|
||||
For more information, see [GRPO docs](https://huggingface.co/docs/trl/v0.17.0/en/grpo_trainer#loss-types).
|
||||
|
||||
#### Async GRPO
|
||||
|
||||
Async GRPO overlaps vLLM generation with training by producing rollouts in a background thread. While the model trains on the current batch, the next batch is already being generated. This can significantly reduce wall-clock time per step.
|
||||
|
||||
```yaml
|
||||
trl:
|
||||
use_data_producer: true # Enable data producer protocol
|
||||
use_vllm: true
|
||||
async_prefetch: true # Generate rollouts in background thread
|
||||
prefetch_depth: 1 # Number of rollouts to prefetch
|
||||
vllm_sync_interval: 2 # Sync weights to vLLM every N steps
|
||||
```
|
||||
|
||||
::: {.callout-note}
|
||||
Because the background thread generates completions with slightly stale model weights, async GRPO uses importance sampling correction to account for the distribution shift. This is controlled by `vllm_importance_sampling_correction: true` (default when async is enabled).
|
||||
:::
|
||||
|
||||
##### vLLM LoRA Sync
|
||||
|
||||
By default, weight sync to vLLM merges the LoRA adapter into the base model and broadcasts all parameters via NCCL. LoRA sync is a faster alternative that saves only the adapter weights to the filesystem and has vLLM load them natively using Punica kernels.
|
||||
|
||||
```yaml
|
||||
adapter: lora
|
||||
lora_r: 32
|
||||
lora_alpha: 64
|
||||
lora_target_linear: true
|
||||
|
||||
trl:
|
||||
vllm_lora_sync: true # Enable native LoRA sync
|
||||
```
|
||||
|
||||
When `vllm_lora_sync: true` is set, axolotl automatically selects the LoRA-aware vLLM serve module. Start vLLM as usual:
|
||||
|
||||
```bash
|
||||
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve config.yaml
|
||||
```
|
||||
|
||||
Then start training on a separate GPU:
|
||||
|
||||
```bash
|
||||
CUDA_VISIBLE_DEVICES=1 axolotl train config.yaml
|
||||
```
|
||||
|
||||
::: {.callout-tip}
|
||||
LoRA sync is especially beneficial with multi-GPU training (FSDP/DeepSpeed), where NCCL merge-sync can cause GPU contention with vLLM generation.
|
||||
:::
|
||||
|
||||
##### Streaming Partial Batch
|
||||
|
||||
Instead of scoring the entire batch at once, streaming mode scores one prompt group at a time. This enables finer-grained zero-advantage skipping and reduces peak memory usage during scoring.
|
||||
|
||||
```yaml
|
||||
trl:
|
||||
streaming_partial_batch: true
|
||||
```
|
||||
|
||||
##### Importance Sampling Correction
|
||||
|
||||
When using async prefetch, completions are generated from a slightly older version of the model. Importance sampling (IS) correction adjusts the policy gradient to account for this distribution shift.
|
||||
|
||||
```yaml
|
||||
trl:
|
||||
vllm_importance_sampling_correction: true # Enable IS correction
|
||||
importance_sampling_level: token # 'token' or 'sequence'
|
||||
off_policy_mask_threshold: 0.5 # Mask sequences with IS ratio below this
|
||||
```
|
||||
|
||||
- `importance_sampling_level: token` applies per-token IS ratios (recommended with Liger kernel)
|
||||
- `importance_sampling_level: sequence` applies per-sequence IS ratios
|
||||
- `off_policy_mask_threshold` masks out sequences where the IS ratio indicates they are too far off-policy
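For intuition, the usual per-token correction (the exact form TRL applies may differ) weights each token's loss by the ratio between the current policy and the slightly stale policy that generated the rollout:

$$
r_t = \frac{\pi_\theta(y_t \mid y_{<t}, x)}{\pi_{\theta_\text{old}}(y_t \mid y_{<t}, x)}
$$

Sequence-level correction aggregates these ratios over the whole completion before applying the off-policy mask.
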
##### Replay Buffer
|
||||
|
||||
The replay buffer caches rollout groups that had learning signal (non-zero reward variance) and uses them to replace zero-signal groups in later batches.
|
||||
|
||||
```yaml
|
||||
trl:
|
||||
replay_buffer_size: 100 # Max cached groups (0 = disabled)
|
||||
replay_recompute_logps: true # Recompute log-probs for replayed data (recommended)
|
||||
```
|
||||
|
||||
::: {.callout-note}
|
||||
When `replay_recompute_logps: true` (default), old log-probabilities are recomputed using the current model weights. This fixes the IS mismatch that would otherwise occur when replaying stale data.
|
||||
:::
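The snippet below is a toy sketch of this mechanism (simplified data structures, not the trainer's actual code): groups whose rewards have non-zero variance are cached, and zero-signal groups in later batches are swapped for cached ones.

```python
# Toy sketch of a replay buffer keyed on reward variance.
from collections import deque


class GroupReplayBuffer:
    def __init__(self, max_size: int = 100):
        self.buffer: deque = deque(maxlen=max_size)

    @staticmethod
    def has_signal(rewards: list[float]) -> bool:
        # All-equal rewards mean every advantage in the group is zero.
        return len(set(rewards)) > 1

    def maybe_cache(self, group: dict) -> None:
        if self.has_signal(group["rewards"]):
            self.buffer.append(group)

    def maybe_replace(self, group: dict) -> dict:
        # Swap a zero-signal group for a cached one when available.
        if not self.has_signal(group["rewards"]) and self.buffer:
            return self.buffer.popleft()
        return group
```
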
##### Deferred Re-rolling
|
||||
|
||||
Failed prompts (where the model produces zero reward for all generations) are buffered and re-injected into later batches when the model may be better equipped to solve them.
|
||||
|
||||
```yaml
|
||||
trl:
|
||||
reroll_start_fraction: 0.5 # Start re-rolling after 50% of training
|
||||
reroll_max_groups: 1 # Max groups to replace per batch
|
||||
```
|
||||
|
||||
##### Zero-Advantage Batch Skipping
|
||||
|
||||
When all advantages in a micro-batch are zero (no learning signal), the forward/backward pass is skipped entirely. This is enabled by default and logged as `skipped_zero_adv_batches=1`.
|
||||
|
||||
```yaml
|
||||
trl:
|
||||
skip_zero_advantage_batches: true # default
|
||||
```
|
||||
|
||||
##### Parallel Reward Workers
|
||||
|
||||
Reward functions that use `signal.alarm()` (e.g., `math_verify`) must run in the main thread. Parallel reward workers use subprocesses to work around this limitation while enabling concurrent reward computation.
|
||||
|
||||
```yaml
|
||||
trl:
|
||||
reward_num_workers: 4 # Number of subprocess workers (1 = no parallelism)
|
||||
```
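A minimal sketch of why subprocesses (rather than threads) solve this; the helper below is hypothetical and not Axolotl's implementation. Each worker process executes the reward function in that process's main thread, so `signal.alarm()`-based timeouts still work:

```python
# Sketch: fan reward computation out to worker subprocesses.
from concurrent.futures import ProcessPoolExecutor


def score_completions(reward_fn, completions, num_workers: int = 4):
    # reward_fn must be picklable (a module-level function) to cross processes.
    if num_workers <= 1:
        return [reward_fn(c) for c in completions]
    with ProcessPoolExecutor(max_workers=num_workers) as pool:
        return list(pool.map(reward_fn, completions))
```
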
##### Full Async GRPO Example
|
||||
|
||||
```yaml
|
||||
base_model: Qwen/Qwen2.5-1.5B-Instruct
|
||||
|
||||
vllm:
|
||||
host: 0.0.0.0
|
||||
port: 8000
|
||||
gpu_memory_utilization: 0.35
|
||||
dtype: auto
|
||||
|
||||
adapter: lora
|
||||
lora_r: 32
|
||||
lora_alpha: 64
|
||||
lora_target_linear: true
|
||||
|
||||
rl: grpo
|
||||
trl:
|
||||
use_data_producer: true
|
||||
use_vllm: true
|
||||
async_prefetch: true
|
||||
prefetch_depth: 1
|
||||
vllm_sync_interval: 2
|
||||
vllm_lora_sync: true
|
||||
streaming_partial_batch: true
|
||||
vllm_importance_sampling_correction: true
|
||||
off_policy_mask_threshold: 0.5
|
||||
importance_sampling_level: token
|
||||
num_generations: 8
|
||||
max_completion_length: 512
|
||||
reward_funcs:
|
||||
- rewards.accuracy_reward
|
||||
reroll_start_fraction: 0.5
|
||||
replay_buffer_size: 100
|
||||
reward_num_workers: 4
|
||||
skip_zero_advantage_batches: true
|
||||
|
||||
datasets:
|
||||
- path: AI-MO/NuminaMath-TIR
|
||||
type: rewards.prompt_transform
|
||||
split: train
|
||||
|
||||
gradient_accumulation_steps: 4
|
||||
micro_batch_size: 2
|
||||
max_steps: 500
|
||||
learning_rate: 1e-5
|
||||
bf16: true
|
||||
gradient_checkpointing: true
|
||||
```
|
||||
|
||||
```bash
|
||||
# Terminal 1: Start vLLM on GPU 0
|
||||
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve config.yaml
|
||||
|
||||
# Terminal 2: Train on GPU 1
|
||||
CUDA_VISIBLE_DEVICES=1 axolotl train config.yaml
|
||||
```
|
||||
|
||||
##### Multi-GPU Async GRPO
|
||||
|
||||
Async GRPO supports FSDP and DeepSpeed ZeRO-3 for multi-GPU training. vLLM runs on one GPU while training is distributed across the remaining GPUs.
|
||||
|
||||
**FSDP:**
|
||||
|
||||
```yaml
|
||||
fsdp:
|
||||
- full_shard
|
||||
- auto_wrap
|
||||
fsdp_config:
|
||||
fsdp_transformer_layer_cls_to_wrap: Qwen2DecoderLayer
|
||||
gradient_checkpointing_kwargs:
|
||||
use_reentrant: false
|
||||
```
|
||||
|
||||
**DeepSpeed ZeRO-3:**
|
||||
|
||||
```yaml
|
||||
deepspeed: deepspeed_configs/zero3_bf16.json
|
||||
gradient_checkpointing_kwargs:
|
||||
use_reentrant: true # Required for ZeRO-3
|
||||
```
|
||||
|
||||
```bash
|
||||
# Terminal 1: Start vLLM on GPU 0
|
||||
CUDA_VISIBLE_DEVICES=0 axolotl vllm-serve config.yaml
|
||||
|
||||
# Terminal 2: Train on GPUs 0,1
|
||||
CUDA_VISIBLE_DEVICES=0,1 accelerate launch --num_processes 2 -m axolotl.cli.train config.yaml
|
||||
```
|
||||
|
||||
::: {.callout-important}
|
||||
With multi-GPU async prefetch, only rank 0 generates completions in the background thread. Results are broadcast to all ranks on the main thread. This avoids FSDP/DeepSpeed collective deadlocks from unsynchronized background threads.
|
||||
:::
|
||||
|
||||
### GDPO
|
||||
|
||||
GDPO (Group Reward-Decoupled Policy Optimization) extends GRPO for multi-reward training. It addresses the **reward advantage collapse** problem by normalizing each reward function independently before combining them.
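Sketching that normalization (the exact aggregation GDPO uses is an assumption here, shown as a plain sum): for each reward function $k$, advantages are computed within the group $G$ of completions for the same prompt before being combined:

$$
\hat{A}_{i,k} = \frac{r_{i,k} - \operatorname{mean}_{j \in G} r_{j,k}}{\operatorname{std}_{j \in G} r_{j,k}}, \qquad \hat{A}_i = \sum_k \hat{A}_{i,k}
$$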
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
"%%capture\n",
|
||||
"# This step can take ~5-10 minutes to install dependencies\n",
|
||||
"!pip install --no-build-isolation axolotl[flash-attn]>=0.9.1\n",
|
||||
"!pip install \"cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@a668583\""
|
||||
"!pip install \"cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@fa9a7fe\""
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
72
examples/glm45/README.md
Normal file
72
examples/glm45/README.md
Normal file
@@ -0,0 +1,72 @@
|
||||
# Finetune Z.ai's GLM-4.5-Air with Axolotl
|
||||
|
||||
[GLM-4.5-Air](https://huggingface.co/zai-org/GLM-4.5-Air) is a MoE model by Z.ai.
|
||||
|
||||
This guide shows how to fine-tune it with Axolotl.
|
||||
|
||||
## Getting started
|
||||
|
||||
1. Install Axolotl following the [installation guide](https://docs.axolotl.ai/docs/installation.html).
|
||||
|
||||
2. Install [Cut Cross Entropy](https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy) to reduce training VRAM usage.
|
||||
|
||||
3. Run the finetuning example:
|
||||
|
||||
```bash
|
||||
# QLoRA (1x80GB @ ~63.4GiB/GPU)
|
||||
axolotl train examples/glm45/glm-45-air-qlora.yaml
|
||||
```
|
||||
|
||||
### Dataset
|
||||
|
||||
In addition to the standard OpenAI Messages format, GLM-4.5 supports an extra parameter for thinking in the assistant section.
|
||||
|
||||
```json
|
||||
{
|
||||
"role": "assistant",
|
||||
"reasoning_content": "...", // or have </think>...</think> in `content`
|
||||
"content": "..."
|
||||
}
|
||||
```
|
||||
|
||||
Make sure to set the extra attributes below if needed:
|
||||
|
||||
```yaml
|
||||
datasets:
|
||||
- path: ...
|
||||
type: chat_template
|
||||
message_property_mappings:
|
||||
role: role
|
||||
content: content
|
||||
|
||||
# tool_calls: tool_calls # uncomment if using tools
|
||||
# reasoning_content: reasoning_content # uncomment if you have reasoning content
|
||||
|
||||
# Uncomment if training on tool role (you would rarely if ever need this)
|
||||
# eot_tokens:
|
||||
# - <|observation|>
|
||||
```
|
||||
|
||||
### Tips
|
||||
|
||||
- The role name for tools in this template is `tool`.
|
||||
- You will see the following Axolotl warning; this is expected, as the template does not use EOS:
|
||||
```
|
||||
EOS token '<|endoftext|>' not found in chat_template. Please check if your template/EOS token is correct.
|
||||
```
|
||||
- You can run a full finetuning by removing `adapter: qlora`, `load_in_4bit: true`, and `quantize_moe_experts: true` from the config.
|
||||
- **LoRA kernels**: Incompatible with this model. Must be explicitly disabled (`lora_*_kernel: false`).
|
||||
- Read more on how to load your own dataset at [docs](https://docs.axolotl.ai/docs/dataset_loading.html).
|
||||
|
||||
## Optimization Guides
|
||||
|
||||
Please check the [Optimizations doc](https://docs.axolotl.ai/docs/optimizations.html).
|
||||
|
||||
## Related Resources
|
||||
|
||||
- [GLM-4.5-Air on HuggingFace](https://huggingface.co/zai-org/GLM-4.5-Air)
|
||||
- [GLM-4.5 Blog](https://z.ai/blog/glm-4.5)
|
||||
- [Axolotl Docs](https://docs.axolotl.ai)
|
||||
- [Axolotl Website](https://axolotl.ai)
|
||||
- [Axolotl GitHub](https://github.com/axolotl-ai-cloud/axolotl)
|
||||
- [Axolotl Discord](https://discord.gg/7m9sfhzaf3)
|
||||
64
examples/glm45/glm-45-air-qlora.yaml
Normal file
64
examples/glm45/glm-45-air-qlora.yaml
Normal file
@@ -0,0 +1,64 @@
|
||||
base_model: zai-org/GLM-4.5-Air
|
||||
|
||||
# Automatically upload checkpoint and final model to HF
|
||||
# hub_model_id: username/custom_model_name
|
||||
|
||||
plugins:
|
||||
- axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
|
||||
|
||||
load_in_8bit: false
|
||||
load_in_4bit: true
|
||||
|
||||
quantize_moe_experts: true # important
|
||||
|
||||
datasets:
|
||||
- path: fozziethebeat/alpaca_messages_2k_test
|
||||
type: chat_template
|
||||
|
||||
dataset_prepared_path: last_run_prepared
|
||||
val_set_size: 0.1
|
||||
output_dir: ./outputs/lora-out
|
||||
|
||||
adapter: qlora
|
||||
lora_model_dir:
|
||||
|
||||
sequence_len: 2048
|
||||
sample_packing: true
|
||||
|
||||
lora_r: 16
|
||||
lora_alpha: 8
|
||||
lora_dropout: 0
|
||||
lora_target_modules:
|
||||
- q_proj
|
||||
- v_proj
|
||||
- k_proj
|
||||
- o_proj
|
||||
|
||||
# lora_target_parameters:
|
||||
# - mlp.experts.gate_up_proj
|
||||
# - mlp.experts.down_proj
|
||||
|
||||
lora_mlp_kernel: false
|
||||
lora_qkv_kernel: false
|
||||
lora_o_kernel: false
|
||||
|
||||
gradient_accumulation_steps: 2
|
||||
micro_batch_size: 2
|
||||
num_epochs: 1
|
||||
optimizer: adamw_bnb_8bit
|
||||
lr_scheduler: cosine
|
||||
learning_rate: 0.0002
|
||||
|
||||
bf16: auto
|
||||
tf32: false
|
||||
|
||||
gradient_checkpointing: true
|
||||
resume_from_checkpoint:
|
||||
logging_steps: 1
|
||||
flash_attention: true
|
||||
|
||||
warmup_ratio: 0.1
|
||||
evals_per_epoch: 1
|
||||
saves_per_epoch: 1
|
||||
|
||||
# save_first_step: true # uncomment this to validate checkpoint saving works with your config
|
||||
@@ -16,40 +16,28 @@ This guide shows how to fine-tune it with Axolotl.
|
||||
# QLoRA
|
||||
# - no target experts (1x48GB @ ~24GiB/GPU)
|
||||
# - target experts (1x48GB @ ~34GiB/GPU)
|
||||
axolotl train examples/glm4.7-flash/qlora.yaml
|
||||
axolotl train examples/glm47-flash/qlora.yaml
|
||||
|
||||
# QLoRA FSDP2 no target experts (2x48GB @ ~29GiB/GPU)
|
||||
axolotl train examples/glm4.7-flash/qlora_fsdp.yaml
|
||||
axolotl train examples/glm47-flash/qlora_fsdp.yaml
|
||||
```
|
||||
|
||||
```bash
|
||||
# LoRA
|
||||
# - no target experts (1x48GB @ ~35GiB/GPU)
|
||||
# - target experts (1x48GB @ OOM. Projected ~45-50GiB/GPU)
|
||||
axolotl train examples/glm4.7-flash/lora.yaml
|
||||
axolotl train examples/glm47-flash/lora.yaml
|
||||
|
||||
# LoRA FSDP2 no target experts (2x48GB @ ~43GiB/GPU)
|
||||
axolotl train examples/glm4.7-flash/lora_fsdp.yaml
|
||||
axolotl train examples/glm47-flash/lora_fsdp.yaml
|
||||
```
|
||||
|
||||
### Expert LoRA
|
||||
### MoE Expert Quantization & Expert LoRA
|
||||
|
||||
To also apply LoRA adapters to expert weights, add `lora_target_parameters` to your config.
|
||||
|
||||
Note: `lora_dropout` must be `0` when using `lora_target_parameters`.
|
||||
|
||||
```yaml
|
||||
lora_target_parameters:
|
||||
- mlp.experts.gate_up_proj
|
||||
- mlp.experts.down_proj
|
||||
# - mlp.gate.weight # router, untested but should work, not normally targeted
|
||||
```
|
||||
The example configs for this model quantize expert weights on load. To learn about expert quantization, expert LoRA targeting, and related limitations, see the [MoE Expert Quantization](https://docs.axolotl.ai/docs/expert_quantization.html) docs.
|
||||
|
||||
## Limitations
|
||||
|
||||
- **FSDP VRAM**: FSDP2 may use more VRAM per GPU than single GPU training. We suspect not all layers are properly sharded across ranks.
|
||||
- **FSDP initial spike**: FSDP LoRA (8-bit) may have a large initial VRAM spike at the first 1-2 steps that then drops. FSDP QLoRA (4-bit) does not exhibit this.
|
||||
- **cpu_ram_efficient_loading**: Must be set to `false` with FSDP2 — causes hang otherwise.
|
||||
- **lora_target_linear**: Incompatible with this model.
|
||||
- **LoRA kernels**: Incompatible with this model due to non-standard attention projections (DSA). Must be explicitly disabled (`lora_*_kernel: false`).
|
||||
|
||||
65
examples/llama-3/3b-qat-mxfp4.yaml
Normal file
65
examples/llama-3/3b-qat-mxfp4.yaml
Normal file
@@ -0,0 +1,65 @@
|
||||
base_model: meta-llama/Llama-3.2-3B
|
||||
# Automatically upload checkpoint and final model to HF
|
||||
# hub_model_id: username/custom_model_name
|
||||
|
||||
load_in_8bit: false
|
||||
load_in_4bit: false
|
||||
strict: false
|
||||
|
||||
plugins:
|
||||
- axolotl.integrations.liger.LigerPlugin
|
||||
|
||||
liger_rope: true
|
||||
liger_rms_norm: true
|
||||
liger_glu_activation: true
|
||||
liger_layer_norm: true
|
||||
liger_fused_linear_cross_entropy: true
|
||||
|
||||
datasets:
|
||||
- path: yahma/alpaca-cleaned
|
||||
type: alpaca
|
||||
split: train[:95%]
|
||||
|
||||
output_dir: ./outputs/qat_out/
|
||||
dataset_prepared_path: ./outputs/dataset_prepared
|
||||
|
||||
sequence_len: 2048
|
||||
flash_attention: true
|
||||
|
||||
qat:
|
||||
activation_dtype: mxfp4
|
||||
weight_dtype: mxfp4
|
||||
group_size: 32
|
||||
|
||||
wandb_project:
|
||||
wandb_entity:
|
||||
wandb_watch:
|
||||
wandb_name:
|
||||
wandb_log_model:
|
||||
|
||||
gradient_checkpointing: true
|
||||
activation_offloading: true
|
||||
gradient_accumulation_steps: 4
|
||||
micro_batch_size: 1
|
||||
num_epochs: 1
|
||||
optimizer: adamw_torch_8bit
|
||||
|
||||
cosine_constant_lr_ratio: 0
|
||||
cosine_min_lr_ratio: 1.0
|
||||
learning_rate: 2e-5
|
||||
save_only_model: true
|
||||
bf16: true
|
||||
|
||||
resume_from_checkpoint:
|
||||
logging_steps: 1
|
||||
|
||||
evals_per_epoch: 1
|
||||
saves_per_epoch: 1
|
||||
|
||||
warmup_ratio: 0.1
|
||||
weight_decay: 0.0
|
||||
|
||||
special_tokens:
|
||||
pad_token: <|finetune_right_pad_id|>
|
||||
|
||||
# save_first_step: true # uncomment this to validate checkpoint saving works with your config
|
||||
85
examples/mistral4/README.md
Normal file
85
examples/mistral4/README.md
Normal file
@@ -0,0 +1,85 @@
|
||||
# Finetune Mistral Small 4 with Axolotl
|
||||
|
||||
Mistral Small 4 is a 119B parameter (6.5B active) multimodal MoE model from MistralAI that unifies instruct, reasoning, and coding capabilities into a single model. It is available on HuggingFace at [Mistral-Small-4-119B-2603](https://huggingface.co/mistralai/Mistral-Small-4-119B-2603).
|
||||
|
||||
Thanks to the team at MistralAI for giving us early access to prepare for this release.
|
||||
|
||||
## Getting started
|
||||
|
||||
Note: Training this model requires weights in BF16, which we will link to later.
|
||||
Users interested in training can convert / descale the existing FP8 weights.
|
||||
|
||||
1. Install Axolotl following the [installation guide](https://docs.axolotl.ai/docs/installation.html).
|
||||
|
||||
2. Install [Cut Cross Entropy](https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy) to reduce training VRAM usage.
|
||||
|
||||
3. Install transformers from main
|
||||
|
||||
```bash
|
||||
pip install git+https://github.com/huggingface/transformers.git
|
||||
```
|
||||
|
||||
4. Run one of the example configs:
|
||||
|
||||
```bash
|
||||
# text-only
|
||||
axolotl train examples/mistral4/qlora-text.yml # no experts ~69 GiB, experts ~93 GiB
|
||||
axolotl train examples/mistral4/fft-text.yml
|
||||
|
||||
# text + vision
|
||||
# run: wget https://huggingface.co/datasets/Nanobit/text-vision-2k-test/resolve/main/African_elephant.jpg
|
||||
axolotl train examples/mistral4/qlora-vision.yml # no experts ~68 GiB
|
||||
axolotl train examples/mistral4/fft-vision.yml
|
||||
```
|
||||
|
||||
Note: FFT configs are provided as a reference. Please adjust hyperparameters as needed.
|
||||
|
||||
## Reasoning Effort
|
||||
|
||||
The chat template supports a `reasoning_effort` variable to control the model's reasoning depth:
|
||||
|
||||
- `"none"` — instruct mode (default)
|
||||
- `"high"` — reasoning mode with explicit thinking steps
|
||||
|
||||
Pass it via `chat_template_kwargs` under your dataset config:
|
||||
|
||||
```yaml
|
||||
datasets:
|
||||
- path: your/dataset
|
||||
type: chat_template
|
||||
chat_template_kwargs:
|
||||
reasoning_effort: high
|
||||
```
|
||||
|
||||
## Thinking Support
|
||||
|
||||
The chat template supports a `thinking` content type in assistant messages for training on reasoning traces (rendered as `[THINK]...[/THINK]` blocks).
|
||||
|
||||
To use thinking datasets, add the `thinking` mapping via `message_property_mappings`:
|
||||
|
||||
```yaml
|
||||
datasets:
|
||||
- path: your/thinking-dataset
|
||||
type: chat_template
|
||||
message_property_mappings:
|
||||
role: role
|
||||
content: content
|
||||
thinking: thinking
|
||||
chat_template_kwargs:
|
||||
reasoning_effort: high
|
||||
```
|
||||
|
||||
See the [Magistral thinking guide](../magistral/think/README.md) for dataset format details.
|
||||
|
||||
## Tips
|
||||
|
||||
- Read more on how to load your own dataset at [docs](https://docs.axolotl.ai/docs/dataset_loading.html).
|
||||
- The text dataset format follows the OpenAI Messages format as seen [here](https://docs.axolotl.ai/docs/dataset-formats/conversation.html#chat_template).
|
||||
- The vision model requires multi-modal dataset format as documented [here](https://docs.axolotl.ai/docs/multimodal.html#dataset-format).
|
||||
|
||||
## Related Resources
|
||||
|
||||
- [MistralAI Mistral Small 4 Blog](https://mistral.ai/news/mistral-small-4)
|
||||
- [Axolotl Docs](https://docs.axolotl.ai)
|
||||
- [Axolotl GitHub](https://github.com/axolotl-ai-cloud/axolotl)
|
||||
- [Axolotl Discord](https://discord.gg/7m9sfhzaf3)
|
||||
58
examples/mistral4/fft-text.yml
Normal file
58
examples/mistral4/fft-text.yml
Normal file
@@ -0,0 +1,58 @@
|
||||
base_model: mistralai/Mistral-Small-4-119B-2603
|
||||
|
||||
plugins:
|
||||
- axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
|
||||
- axolotl.integrations.kernels.KernelsPlugin
|
||||
use_kernels: true
|
||||
use_sonicmoe: true
|
||||
|
||||
# only train language model layers, freeze vision tower
|
||||
unfrozen_parameters:
|
||||
- model.language_model.*
|
||||
- lm_head
|
||||
- embed_tokens
|
||||
|
||||
datasets:
|
||||
- path: fozziethebeat/alpaca_messages_2k_test
|
||||
type: chat_template
|
||||
|
||||
dataset_prepared_path: last_run_prepared
|
||||
val_set_size: 0.01
|
||||
output_dir: ./outputs/out
|
||||
|
||||
sequence_len: 2048
|
||||
sample_packing: true
|
||||
|
||||
wandb_project:
|
||||
wandb_entity:
|
||||
wandb_watch:
|
||||
wandb_name:
|
||||
wandb_log_model:
|
||||
|
||||
gradient_accumulation_steps: 1
|
||||
micro_batch_size: 1
|
||||
num_epochs: 1
|
||||
optimizer: adamw_bnb_8bit
|
||||
lr_scheduler: cosine
|
||||
learning_rate: 2e-5
|
||||
|
||||
bf16: true
|
||||
tf32: true
|
||||
|
||||
logging_steps: 1
|
||||
flash_attention: true
|
||||
|
||||
warmup_ratio: 0.1
|
||||
evals_per_epoch: 1
|
||||
saves_per_epoch: 1
|
||||
weight_decay: 0.0
|
||||
|
||||
fsdp_version: 2
|
||||
fsdp_config:
|
||||
offload_params: false
|
||||
cpu_ram_efficient_loading: false
|
||||
state_dict_type: FULL_STATE_DICT
|
||||
auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||
transformer_layer_cls_to_wrap: Mistral4DecoderLayer
|
||||
reshard_after_forward: true
|
||||
activation_checkpointing: true
|
||||
57
examples/mistral4/fft-vision.yml
Normal file
57
examples/mistral4/fft-vision.yml
Normal file
@@ -0,0 +1,57 @@
|
||||
base_model: mistralai/Mistral-Small-4-119B-2603
|
||||
processor_type: AutoProcessor
|
||||
|
||||
plugins:
|
||||
- axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
|
||||
- axolotl.integrations.kernels.KernelsPlugin
|
||||
use_kernels: true
|
||||
use_sonicmoe: true
|
||||
|
||||
# vision requirements
|
||||
skip_prepare_dataset: true
|
||||
remove_unused_columns: false
|
||||
sample_packing: false
|
||||
|
||||
datasets:
|
||||
- path: Nanobit/text-vision-2k-test
|
||||
type: chat_template
|
||||
|
||||
dataset_prepared_path: last_run_prepared
|
||||
val_set_size: 0.01
|
||||
output_dir: ./outputs/out
|
||||
|
||||
sequence_len: 2048
|
||||
|
||||
wandb_project:
|
||||
wandb_entity:
|
||||
wandb_watch:
|
||||
wandb_name:
|
||||
wandb_log_model:
|
||||
|
||||
gradient_accumulation_steps: 1
|
||||
micro_batch_size: 1
|
||||
num_epochs: 1
|
||||
optimizer: adamw_bnb_8bit
|
||||
lr_scheduler: cosine
|
||||
learning_rate: 2e-5
|
||||
|
||||
bf16: true
|
||||
tf32: true
|
||||
|
||||
logging_steps: 1
|
||||
flash_attention: true
|
||||
|
||||
warmup_ratio: 0.1
|
||||
evals_per_epoch: 1
|
||||
saves_per_epoch: 1
|
||||
weight_decay: 0.0
|
||||
|
||||
fsdp_version: 2
|
||||
fsdp_config:
|
||||
offload_params: false
|
||||
cpu_ram_efficient_loading: false
|
||||
state_dict_type: FULL_STATE_DICT
|
||||
auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||
transformer_layer_cls_to_wrap: Mistral4DecoderLayer
|
||||
reshard_after_forward: true
|
||||
activation_checkpointing: true
|
||||
58
examples/mistral4/qlora-text.yml
Normal file
58
examples/mistral4/qlora-text.yml
Normal file
@@ -0,0 +1,58 @@
|
||||
base_model: mistralai/Mistral-Small-4-119B-2603
|
||||
|
||||
plugins:
|
||||
- axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
|
||||
|
||||
load_in_4bit: true
|
||||
quantize_moe_experts: true
|
||||
|
||||
datasets:
|
||||
- path: fozziethebeat/alpaca_messages_2k_test
|
||||
type: chat_template
|
||||
|
||||
dataset_prepared_path: last_run_prepared
|
||||
val_set_size: 0.01
|
||||
output_dir: ./outputs/out
|
||||
|
||||
adapter: qlora
|
||||
|
||||
sequence_len: 2048
|
||||
sample_packing: true
|
||||
|
||||
lora_r: 32
|
||||
lora_alpha: 16
|
||||
lora_dropout: 0.05
|
||||
lora_target_modules: 'model.language_model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
|
||||
|
||||
# uncomment to train on expert layers
|
||||
# lora_target_parameters:
|
||||
# - mlp.experts.gate_up_proj
|
||||
# - mlp.experts.down_proj
|
||||
# lora_mlp_kernel: false
|
||||
# lora_qkv_kernel: false
|
||||
# lora_o_kernel: false
|
||||
|
||||
wandb_project:
|
||||
wandb_entity:
|
||||
wandb_watch:
|
||||
wandb_name:
|
||||
wandb_log_model:
|
||||
|
||||
gradient_accumulation_steps: 1
|
||||
micro_batch_size: 1
|
||||
num_epochs: 1
|
||||
optimizer: adamw_bnb_8bit
|
||||
lr_scheduler: cosine
|
||||
learning_rate: 0.0002
|
||||
|
||||
bf16: true
|
||||
tf32: true
|
||||
|
||||
gradient_checkpointing: true
|
||||
logging_steps: 1
|
||||
flash_attention: true
|
||||
|
||||
warmup_ratio: 0.1
|
||||
evals_per_epoch: 1
|
||||
saves_per_epoch: 1
|
||||
weight_decay: 0.0
|
||||
63
examples/mistral4/qlora-vision.yml
Normal file
63
examples/mistral4/qlora-vision.yml
Normal file
@@ -0,0 +1,63 @@
|
||||
base_model: mistralai/Mistral-Small-4-119B-2603
|
||||
processor_type: AutoProcessor
|
||||
|
||||
plugins:
|
||||
- axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
|
||||
|
||||
load_in_4bit: true
|
||||
quantize_moe_experts: true
|
||||
|
||||
# vision chat template requirements
|
||||
skip_prepare_dataset: true
|
||||
remove_unused_columns: false
|
||||
sample_packing: false
|
||||
|
||||
datasets:
|
||||
- path: Nanobit/text-vision-2k-test
|
||||
type: chat_template
|
||||
|
||||
dataset_prepared_path: last_run_prepared
|
||||
val_set_size: 0.01
|
||||
output_dir: ./outputs/out
|
||||
|
||||
adapter: qlora
|
||||
|
||||
sequence_len: 2048
|
||||
|
||||
lora_r: 32
|
||||
lora_alpha: 16
|
||||
lora_dropout: 0.05
|
||||
lora_target_modules: 'model.language_model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
|
||||
|
||||
# uncomment to train on expert layers
|
||||
# lora_target_parameters:
|
||||
# - mlp.experts.gate_up_proj
|
||||
# - mlp.experts.down_proj
|
||||
# lora_mlp_kernel: false
|
||||
# lora_qkv_kernel: false
|
||||
# lora_o_kernel: false
|
||||
|
||||
wandb_project:
|
||||
wandb_entity:
|
||||
wandb_watch:
|
||||
wandb_name:
|
||||
wandb_log_model:
|
||||
|
||||
gradient_accumulation_steps: 1
|
||||
micro_batch_size: 1
|
||||
num_epochs: 1
|
||||
optimizer: adamw_bnb_8bit
|
||||
lr_scheduler: cosine
|
||||
learning_rate: 0.0002
|
||||
|
||||
bf16: true
|
||||
tf32: true
|
||||
|
||||
gradient_checkpointing: true
|
||||
logging_steps: 1
|
||||
flash_attention: true
|
||||
|
||||
warmup_ratio: 0.1
|
||||
evals_per_epoch: 1
|
||||
saves_per_epoch: 1
|
||||
weight_decay: 0.0
|
||||
71
examples/qwen3.5/122b-a10b-moe-qlora.yaml
Normal file
71
examples/qwen3.5/122b-a10b-moe-qlora.yaml
Normal file
@@ -0,0 +1,71 @@
|
||||
base_model: Qwen/Qwen3.5-122B-A10B
|
||||
|
||||
plugins:
|
||||
- axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
|
||||
strict: false
|
||||
|
||||
chat_template: qwen3_5
|
||||
datasets:
|
||||
- path: mlabonne/FineTome-100k
|
||||
type: chat_template
|
||||
split: train[:20%]
|
||||
field_messages: conversations
|
||||
message_property_mappings:
|
||||
role: from
|
||||
content: value
|
||||
val_set_size: 0.0
|
||||
output_dir: ./outputs/out
|
||||
dataset_prepared_path: last_run_prepared
|
||||
|
||||
sequence_len: 2048
|
||||
sample_packing: true
|
||||
|
||||
load_in_4bit: true
|
||||
quantize_moe_experts: true
|
||||
adapter: qlora
|
||||
lora_r: 16
|
||||
lora_alpha: 32
|
||||
lora_dropout: 0
|
||||
lora_target_modules:
|
||||
- q_proj
|
||||
- k_proj
|
||||
- v_proj
|
||||
- o_proj
|
||||
|
||||
#lora_target_parameters:
|
||||
# - mlp.experts.gate_up_proj
|
||||
# - mlp.experts.down_proj
|
||||
|
||||
wandb_project:
|
||||
wandb_entity:
|
||||
wandb_watch:
|
||||
wandb_name:
|
||||
wandb_log_model:
|
||||
|
||||
gradient_accumulation_steps: 2
|
||||
micro_batch_size: 1
|
||||
num_epochs: 1
|
||||
optimizer: adamw_torch_4bit
|
||||
lr_scheduler: cosine
|
||||
learning_rate: 0.0002
|
||||
|
||||
bf16: auto
|
||||
tf32: true
|
||||
|
||||
|
||||
lora_mlp_kernel: false
|
||||
lora_qkv_kernel: false
|
||||
lora_o_kernel: false
|
||||
|
||||
gradient_checkpointing: true
|
||||
gradient_checkpointing_kwargs:
|
||||
use_reentrant: false
|
||||
resume_from_checkpoint:
|
||||
logging_steps: 1
|
||||
flash_attention: true
|
||||
|
||||
warmup_ratio: 0.1
|
||||
evals_per_epoch: 4
|
||||
saves_per_epoch: 1
|
||||
weight_decay: 0.0
|
||||
special_tokens:
|
||||
72
examples/qwen3.5/27b-qlora.yaml
Normal file
72
examples/qwen3.5/27b-qlora.yaml
Normal file
@@ -0,0 +1,72 @@
|
||||
base_model: Qwen/Qwen3.5-27B
|
||||
# Automatically upload checkpoint and final model to HF
|
||||
# hub_model_id: username/custom_model_name
|
||||
# Note: Qwen3.5 is an early-fusion VLM (image+text). This config fine-tunes
|
||||
# the text-only path. For multimodal (image+text) fine-tuning, add image
|
||||
# columns to your dataset following axolotl's multimodal dataset format.
|
||||
|
||||
plugins:
|
||||
- axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
|
||||
strict: false
|
||||
|
||||
chat_template: qwen3_5
|
||||
datasets:
|
||||
- path: mlabonne/FineTome-100k
|
||||
type: chat_template
|
||||
split: train[:20%]
|
||||
field_messages: conversations
|
||||
message_property_mappings:
|
||||
role: from
|
||||
content: value
|
||||
val_set_size: 0.0
|
||||
output_dir: ./outputs/out
|
||||
dataset_prepared_path: last_run_prepared
|
||||
|
||||
sequence_len: 2048
|
||||
sample_packing: true
|
||||
|
||||
load_in_4bit: true
|
||||
adapter: qlora
|
||||
lora_r: 16
|
||||
lora_alpha: 32
|
||||
lora_target_modules:
|
||||
- q_proj
|
||||
- k_proj
|
||||
- v_proj
|
||||
- o_proj
|
||||
- down_proj
|
||||
- up_proj
|
||||
# Uncomment below to also target the linear attention projections.
|
||||
# These use separate in_proj_qkv / in_proj_z / out_proj (Qwen3.5-specific).
|
||||
# - linear_attn.in_proj_qkv
|
||||
# - linear_attn.in_proj_z
|
||||
# - linear_attn.out_proj
|
||||
|
||||
wandb_project:
|
||||
wandb_entity:
|
||||
wandb_watch:
|
||||
wandb_name:
|
||||
wandb_log_model:
|
||||
|
||||
gradient_accumulation_steps: 2
|
||||
micro_batch_size: 1
|
||||
num_epochs: 1
|
||||
optimizer: adamw_torch_4bit
|
||||
lr_scheduler: cosine
|
||||
learning_rate: 0.0002
|
||||
|
||||
bf16: auto
|
||||
tf32: true
|
||||
|
||||
gradient_checkpointing: true
|
||||
gradient_checkpointing_kwargs:
|
||||
use_reentrant: false
|
||||
resume_from_checkpoint:
|
||||
logging_steps: 1
|
||||
flash_attention: true
|
||||
|
||||
warmup_ratio: 0.1
|
||||
evals_per_epoch: 4
|
||||
saves_per_epoch: 1
|
||||
weight_decay: 0.0
|
||||
special_tokens:
|
||||
70
examples/qwen3.5/35b-a3b-moe-qlora.yaml
Normal file
70
examples/qwen3.5/35b-a3b-moe-qlora.yaml
Normal file
@@ -0,0 +1,70 @@
|
||||
base_model: Qwen/Qwen3.5-35B-A3B
|
||||
|
||||
plugins:
|
||||
- axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
|
||||
strict: false
|
||||
|
||||
chat_template: qwen3_5
|
||||
datasets:
|
||||
- path: mlabonne/FineTome-100k
|
||||
type: chat_template
|
||||
split: train[:20%]
|
||||
field_messages: conversations
|
||||
message_property_mappings:
|
||||
role: from
|
||||
content: value
|
||||
val_set_size: 0.0
|
||||
output_dir: ./outputs/out
|
||||
dataset_prepared_path: last_run_prepared
|
||||
|
||||
sequence_len: 2048
|
||||
sample_packing: true
|
||||
|
||||
load_in_4bit: true
|
||||
quantize_moe_experts: true
|
||||
adapter: qlora
|
||||
lora_r: 16
|
||||
lora_alpha: 32
|
||||
lora_dropout: 0
|
||||
lora_target_modules:
|
||||
- q_proj
|
||||
- k_proj
|
||||
- v_proj
|
||||
- o_proj
|
||||
|
||||
#lora_target_parameters:
|
||||
# - mlp.experts.gate_up_proj
|
||||
# - mlp.experts.down_proj
|
||||
|
||||
wandb_project:
|
||||
wandb_entity:
|
||||
wandb_watch:
|
||||
wandb_name:
|
||||
wandb_log_model:
|
||||
|
||||
gradient_accumulation_steps: 2
|
||||
micro_batch_size: 1
|
||||
num_epochs: 1
|
||||
optimizer: adamw_torch_4bit
|
||||
lr_scheduler: cosine
|
||||
learning_rate: 0.0002
|
||||
|
||||
bf16: auto
|
||||
tf32: true
|
||||
|
||||
lora_mlp_kernel: false
|
||||
lora_qkv_kernel: false
|
||||
lora_o_kernel: false
|
||||
|
||||
gradient_checkpointing: true
|
||||
gradient_checkpointing_kwargs:
|
||||
use_reentrant: false
|
||||
resume_from_checkpoint:
|
||||
logging_steps: 1
|
||||
flash_attention: true
|
||||
|
||||
warmup_ratio: 0.1
|
||||
evals_per_epoch: 4
|
||||
saves_per_epoch: 1
|
||||
weight_decay: 0.0
|
||||
special_tokens:
|
||||
72
examples/qwen3.5/7b-lora-vision.yaml
Normal file
72
examples/qwen3.5/7b-lora-vision.yaml
Normal file
@@ -0,0 +1,72 @@
|
||||
base_model: Qwen/Qwen3.5-7B
|
||||
processor_type: AutoProcessor
|
||||
|
||||
# Qwen3.5-7B and above are early-fusion VLMs (Qwen3_5ForConditionalGeneration).
|
||||
# Vision and text tokens are processed together by the same transformer layers.
|
||||
# Note: Qwen3.5-2B is a text-only model — the smallest VLM is Qwen3.5-7B.
|
||||
|
||||
# These 3 lines are required for vision/multimodal training
|
||||
skip_prepare_dataset: true
|
||||
remove_unused_columns: false
|
||||
sample_packing: false
|
||||
|
||||
chat_template: qwen3_5
|
||||
datasets:
|
||||
- path: HuggingFaceH4/llava-instruct-mix-vsft
|
||||
type: chat_template
|
||||
split: train[:1%]
|
||||
|
||||
dataset_prepared_path: last_run_prepared
|
||||
val_set_size: 0.0
|
||||
output_dir: ./outputs/out
|
||||
|
||||
adapter: lora
|
||||
lora_model_dir:
|
||||
|
||||
sequence_len: 8192
|
||||
pad_to_sequence_len: false
|
||||
|
||||
lora_r: 32
|
||||
lora_alpha: 16
|
||||
lora_dropout: 0.05
|
||||
# Targets the language model attention and MLP layers.
|
||||
# Qwen3.5 is early-fusion: all layers (including those seeing vision tokens) share
|
||||
# the same transformer stack, so standard attention targets work for both modalities.
|
||||
lora_target_modules:
|
||||
- q_proj
|
||||
- k_proj
|
||||
- v_proj
|
||||
- o_proj
|
||||
- down_proj
|
||||
- up_proj
|
||||
# Uncomment to also target the linear attention (GatedDeltaNet) projections:
|
||||
# - linear_attn.in_proj_qkv
|
||||
# - linear_attn.in_proj_z
|
||||
# - linear_attn.out_proj
|
||||
|
||||
wandb_project:
|
||||
wandb_entity:
|
||||
wandb_watch:
|
||||
wandb_name:
|
||||
wandb_log_model:
|
||||
|
||||
gradient_accumulation_steps: 4
|
||||
micro_batch_size: 1
|
||||
num_epochs: 1
|
||||
optimizer: adamw_bnb_8bit
|
||||
lr_scheduler: cosine
|
||||
learning_rate: 0.0002
|
||||
|
||||
bf16: true
|
||||
tf32: true
|
||||
|
||||
gradient_checkpointing: true
|
||||
gradient_checkpointing_kwargs:
|
||||
use_reentrant: false
|
||||
logging_steps: 1
|
||||
flash_attention: true
|
||||
|
||||
warmup_ratio: 0.1
|
||||
evals_per_epoch: 1
|
||||
saves_per_epoch: 1
|
||||
weight_decay: 0.0
|
||||
61
examples/qwen3.5/README.md
Normal file
61
examples/qwen3.5/README.md
Normal file
@@ -0,0 +1,61 @@
|
||||
# Finetune Qwen3.5 with Axolotl
|
||||
|
||||
[Qwen3.5](https://huggingface.co/collections/Qwen/qwen35-68452f3bc6e4b7cfb4e1c803) is a hybrid architecture model series combining Gated DeltaNet linear attention with standard Transformer attention. Models from 7B onwards are early-fusion vision-language models (`Qwen3_5ForConditionalGeneration`), meaning vision and text tokens are processed through the same transformer stack. The 2B variant is text-only.
|
||||
|
||||
Available configs:
|
||||
|
||||
| Config | Model | Type |
|
||||
|---|---|---|
|
||||
| `27b-qlora.yaml` | Qwen3.5-27B | Dense VLM, text-only path |
|
||||
| `35b-a3b-moe-qlora.yaml` | Qwen3.5-35B-A3B | MoE, text-only path |
|
||||
| `122b-a10b-moe-qlora.yaml` | Qwen3.5-122B-A10B | MoE, text-only path |
|
||||
| `7b-lora-vision.yaml` | Qwen3.5-7B | Vision+text (multimodal) |
|
||||
|
||||
## Getting started
|
||||
|
||||
1. Install Axolotl following the [installation guide](https://docs.axolotl.ai/docs/installation.html).
|
||||
|
||||
2. Install [Cut Cross Entropy](https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy) to reduce training VRAM usage.
|
||||
|
||||
3. Install FLA for sample packing support with the Gated DeltaNet linear attention layers:
|
||||
```bash
|
||||
pip3 uninstall -y causal-conv1d && pip3 install flash-linear-attention==0.4.1
|
||||
```
|
||||
> FLA is required when `sample_packing: true`. Without it, training raises a `RuntimeError` on packed sequences. Vision configs use `sample_packing: false` so FLA is optional there.
|
||||
|
||||
4. Run a finetuning example:
|
||||
|
||||
```bash
|
||||
# Dense 27B text-only (QLoRA, ~47 GiB VRAM with sample packing)
|
||||
axolotl train examples/qwen3.5/27b-qlora.yaml
|
||||
|
||||
# MoE 35B-A3B text-only (QLoRA)
|
||||
axolotl train examples/qwen3.5/35b-a3b-moe-qlora.yaml
|
||||
|
||||
# MoE 122B-A10B text-only (QLoRA)
|
||||
axolotl train examples/qwen3.5/122b-a10b-moe-qlora.yaml
|
||||
|
||||
# 7B vision+text (LoRA, multimodal dataset)
|
||||
axolotl train examples/qwen3.5/7b-lora-vision.yaml
|
||||
```
|
||||
|
||||
### Tips
|
||||
|
||||
- For inference, you can experiment with `temperature: 0.7`, `top_p: 0.8`, `top_k: 20`, and `min_p: 0`.
|
||||
- You can run a full finetuning by removing `adapter: qlora` and `load_in_4bit: true`. See [Multi-GPU](#optimization-guides) below.
|
||||
- Read more on loading your own dataset at [docs](https://docs.axolotl.ai/docs/dataset_loading.html).
|
||||
- The dataset format follows the OpenAI Messages format as seen [here](https://docs.axolotl.ai/docs/dataset-formats/conversation.html#chat_template).
|
||||
- For **multimodal** finetuning, set `processor_type: AutoProcessor`, `skip_prepare_dataset: true`, and `remove_unused_columns: false` as shown in `7b-lora-vision.yaml`.
|
||||
- The Gated DeltaNet linear attention layers (`linear_attn.*`) can optionally be added to `lora_target_modules` — they are commented out by default.
|
||||
|
||||
## Optimization Guides
|
||||
|
||||
- [Optimizations Guide](https://docs.axolotl.ai/docs/optimizations.html)
|
||||
|
||||
## Related Resources
|
||||
|
||||
- [Qwen3.5 Blog](https://qwenlm.github.io/blog/qwen3.5/)
|
||||
- [Axolotl Docs](https://docs.axolotl.ai)
|
||||
- [Axolotl Website](https://axolotl.ai)
|
||||
- [Axolotl GitHub](https://github.com/axolotl-ai-cloud/axolotl)
|
||||
- [Axolotl Discord](https://discord.gg/7m9sfhzaf3)
|
||||
@@ -12,13 +12,16 @@ packaging==26.0
|
||||
huggingface_hub>=1.1.7
|
||||
peft>=0.18.1
|
||||
tokenizers>=0.22.1
|
||||
transformers==5.2.0
|
||||
accelerate==1.12.0
|
||||
transformers==5.3.0
|
||||
accelerate==1.13.0
|
||||
datasets==4.5.0
|
||||
deepspeed>=0.18.3
|
||||
trl==0.28.0
|
||||
hf_xet==1.2.0
|
||||
kernels==0.12.1
|
||||
deepspeed>=0.18.6,<0.19.0
|
||||
trl==0.29.0
|
||||
hf_xet==1.3.2
|
||||
kernels==0.12.2
|
||||
|
||||
fla-core==0.4.1
|
||||
flash-linear-attention==0.4.1
|
||||
|
||||
trackio>=0.16.1
|
||||
typing-extensions>=4.15.0
|
||||
@@ -72,4 +75,4 @@ axolotl-contribs-mit==0.0.6
|
||||
# telemetry
|
||||
posthog==6.7.11
|
||||
|
||||
mistral-common==1.8.8
|
||||
mistral-common==1.10.0
|
||||
|
||||
@@ -29,5 +29,5 @@ UV_PREFIX = "uv " if USE_UV else ""
|
||||
|
||||
print(
|
||||
UNINSTALL_PREFIX
|
||||
+ f'{UV_PREFIX}pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@a668583"'
|
||||
+ f'{UV_PREFIX}pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@fa9a7fe"'
|
||||
)
|
||||
|
||||
11
setup.py
11
setup.py
@@ -27,9 +27,16 @@ def parse_requirements(extras_require_map):
|
||||
xformers_version = [req for req in _install_requires if "xformers" in req][0]
|
||||
install_xformers = platform.machine() != "aarch64"
|
||||
if platform.machine() == "aarch64":
|
||||
# skip torchao on ARM64
|
||||
# skip on ARM64
|
||||
skip_packages = [
|
||||
"torchao",
|
||||
"fla-core",
|
||||
"flash-linear-attention",
|
||||
]
|
||||
_install_requires = [
|
||||
req for req in _install_requires if "torchao" not in req
|
||||
req
|
||||
for req in _install_requires
|
||||
if re.split(r"[>=<]", req)[0].strip() not in skip_packages
|
||||
]
|
||||
if "Darwin" in platform.system():
|
||||
# skip packages not compatible with OSX
|
||||
|
||||
@@ -6,5 +6,6 @@ from axolotl.logging_config import configure_logging
|
||||
|
||||
os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
|
||||
os.environ.setdefault("HF_XET_HIGH_PERFORMANCE", "1")
|
||||
os.environ.setdefault("TRL_EXPERIMENTAL_SILENCE", "1")
|
||||
|
||||
configure_logging()
|
||||
|
||||
@@ -90,9 +90,8 @@ class ModalCloud(Cloud):
|
||||
# grab the sha256 hash from docker hub for this image+tag
|
||||
# this ensures that we always get the latest image for this tag, even if it's already cached
|
||||
try:
|
||||
manifest = subprocess.check_output( # nosec B602
|
||||
f"docker manifest inspect {docker_image}",
|
||||
shell=True,
|
||||
manifest = subprocess.check_output( # nosec
|
||||
["docker", "manifest", "inspect", docker_image],
|
||||
).decode("utf-8")
|
||||
sha256_hash = json.loads(manifest)["manifests"][0]["digest"]
|
||||
except subprocess.CalledProcessError:
|
||||
|
||||
@@ -11,7 +11,7 @@ from urllib.parse import urlparse
|
||||
import requests
|
||||
import torch
|
||||
import yaml
|
||||
from transformers.utils import is_torch_bf16_gpu_available
|
||||
from transformers.utils import is_torch_bf16_gpu_available, is_torch_tf32_available
|
||||
|
||||
from axolotl.integrations.base import PluginManager
|
||||
from axolotl.telemetry.errors import send_errors
|
||||
@@ -300,7 +300,7 @@ def load_cfg(
|
||||
try:
|
||||
device_props = torch.cuda.get_device_properties("cuda")
|
||||
gpu_version = "sm_" + str(device_props.major) + str(device_props.minor)
|
||||
except:
|
||||
except (RuntimeError, AssertionError):
|
||||
gpu_version = None
|
||||
|
||||
prepare_plugins(cfg)
|
||||
@@ -310,6 +310,7 @@ def load_cfg(
|
||||
capabilities={
|
||||
"bf16": is_torch_bf16_gpu_available(),
|
||||
"fp8": compute_supports_fp8(),
|
||||
"tf32": is_torch_tf32_available(),
|
||||
"n_gpu": int(os.environ.get("WORLD_SIZE", 1)),
|
||||
"compute_capability": gpu_version,
|
||||
},
|
||||
|
||||
@@ -71,6 +71,7 @@ def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs) -> None:
|
||||
merge_lora=True,
|
||||
load_in_8bit=False,
|
||||
load_in_4bit=False,
|
||||
quantize_moe_experts=False,
|
||||
flash_attention=False,
|
||||
context_parallel_size=None,
|
||||
deepspeed=None,
|
||||
|
||||
@@ -196,12 +196,10 @@ def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs):
|
||||
state.wait_for_everyone()
|
||||
LOG.info(
|
||||
f"FSDP SHARDED_STATE_DICT weights successfully merged to: {output_path}",
|
||||
main_process_only=True,
|
||||
)
|
||||
LOG.info(
|
||||
"Merged weights are only the safetensors and doesn't include the model configuration "
|
||||
f"or tokenizer which may be found in {parsed_cfg.output_dir}.",
|
||||
main_process_only=True,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -38,7 +38,18 @@ def do_vllm_serve(
|
||||
cfg = load_cfg(config)
|
||||
model = cfg.base_model
|
||||
|
||||
serve_module = cli_args.get("serve_module", "trl.scripts.vllm_serve")
|
||||
# Determine serve module: explicit CLI/config > auto-select from vllm_lora_sync > default
|
||||
serve_module = cli_args.get("serve_module") or getattr(
|
||||
cfg.vllm, "serve_module", None
|
||||
)
|
||||
if (
|
||||
serve_module is None
|
||||
and getattr(cfg, "trl", None)
|
||||
and getattr(cfg.trl, "vllm_lora_sync", False)
|
||||
):
|
||||
serve_module = "axolotl.scripts.vllm_serve_lora"
|
||||
if serve_module is None:
|
||||
serve_module = "trl.scripts.vllm_serve"
|
||||
vllm_serve_main = __import__(serve_module, fromlist=["main"]).main
|
||||
tensor_parallel_size = 1
|
||||
data_parallel_size = 1
|
||||
@@ -68,7 +79,7 @@ def do_vllm_serve(
|
||||
cli_args.get("enable_reasoning") or cfg.vllm.enable_reasoning or False
|
||||
)
|
||||
|
||||
vllm_script_args = AxolotlScriptArguments(
|
||||
base_kwargs = dict(
|
||||
model=model,
|
||||
tensor_parallel_size=tensor_parallel_size,
|
||||
data_parallel_size=data_parallel_size,
|
||||
@@ -78,7 +89,21 @@ def do_vllm_serve(
|
||||
dtype=dtype,
|
||||
max_model_len=max_model_len,
|
||||
enable_prefix_caching=enable_prefix_caching,
|
||||
reasoning_parser=reasoning_parser,
|
||||
enable_reasoning=enable_reasoning,
|
||||
)
|
||||
|
||||
# Use LoRAScriptArguments when serving with native LoRA support
|
||||
if serve_module == "axolotl.scripts.vllm_serve_lora":
|
||||
from axolotl.scripts.vllm_serve_lora import LoRAScriptArguments
|
||||
|
||||
lora_kwargs = {}
|
||||
if hasattr(cfg, "lora_r") and cfg.lora_r:
|
||||
lora_kwargs["max_lora_rank"] = cfg.lora_r
|
||||
vllm_script_args = LoRAScriptArguments(**base_kwargs, **lora_kwargs)
|
||||
else:
|
||||
vllm_script_args = AxolotlScriptArguments(
|
||||
**base_kwargs,
|
||||
reasoning_parser=reasoning_parser,
|
||||
enable_reasoning=enable_reasoning,
|
||||
)
|
||||
|
||||
vllm_serve_main(vllm_script_args)
|
||||
|
||||
@@ -12,9 +12,11 @@ MOE_ARCH_BLOCK = {
|
||||
"mixtral": "MixtralSparseMoeBlock",
|
||||
"qwen2_moe": "Qwen2MoeSparseMoeBlock",
|
||||
"qwen3_moe": "Qwen3MoeSparseMoeBlock",
|
||||
"qwen3_5_moe": "Qwen3_5MoeSparseMoeBlock",
|
||||
"qwen3_vl_moe": "Qwen3VLMoeTextSparseMoeBlock",
|
||||
"deepseek_v2": "DeepseekV2MoE",
|
||||
"deepseek_v3": "DeepseekV3MoE",
|
||||
"mistral4": "Mistral4MoE",
|
||||
"gpt_oss": "GptOssDecoderLayer",
|
||||
"lfm2_moe": "Lfm2MoeSparseMoeBlock",
|
||||
"afmoe": "AfmoeMoE",
|
||||
|
||||
@@ -67,7 +67,7 @@ class JsonToJsonlConverter:
|
||||
self.json_parser = json_parser
|
||||
self.jsonl_serializer = jsonl_serializer
|
||||
|
||||
def convert(self, input_file_path, output_file_path):
|
||||
def convert(self, input_file_path):
|
||||
content = self.file_reader.read(input_file_path)
|
||||
data = self.json_parser.parse(content)
|
||||
# data = [r for r in data if r["conversations"]] # vicuna cleaned has rows with empty conversations
|
||||
|
||||
@@ -250,7 +250,7 @@ class TrainerBuilderBase(abc.ABC):
|
||||
|
||||
def _configure_precision_settings(self, training_args_kwargs: dict):
|
||||
training_args_kwargs["fp16"] = (self.cfg.fp16 and not self.cfg.bf16) or False
|
||||
training_args_kwargs["tf32"] = self.cfg.tf32
|
||||
training_args_kwargs["tf32"] = True if self.cfg.tf32 is True else False
|
||||
if self.cfg.bf16 == "full":
|
||||
training_args_kwargs["bf16_full_eval"] = True
|
||||
else:
|
||||
|
||||
@@ -54,8 +54,16 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
|
||||
if self.cfg.rl in {RLType.GRPO, RLType.GDPO}:
|
||||
from axolotl.core.trainers.grpo import GRPOStrategy
|
||||
|
||||
async_grpo = bool(
|
||||
self.cfg.trl
|
||||
and (
|
||||
getattr(self.cfg.trl, "async_prefetch", False)
|
||||
or getattr(self.cfg.trl, "use_data_producer", False)
|
||||
)
|
||||
)
|
||||
trainer_cls = GRPOStrategy.get_trainer_class(
|
||||
sequence_parallel=self.cfg.context_parallel_size > 1
|
||||
sequence_parallel=self.cfg.context_parallel_size > 1,
|
||||
async_grpo=async_grpo,
|
||||
)
|
||||
trainer_cls_args.extend(GRPOStrategy.set_trainer_args(self.cfg))
|
||||
trainer_kwargs.update(GRPOStrategy.set_trainer_kwargs(self.cfg))
|
||||
@@ -120,11 +128,6 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
|
||||
if self.cfg.use_wandb:
|
||||
training_args_kwargs["run_name"] = self.cfg.wandb_name
|
||||
|
||||
if self.cfg.max_prompt_len:
|
||||
training_args_kwargs["max_prompt_length"] = self.cfg.max_prompt_len
|
||||
else:
|
||||
training_args_kwargs["max_prompt_length"] = self.cfg.sequence_len
|
||||
|
||||
training_args_cls = None
|
||||
blocklist_args_kwargs = []
|
||||
if self.cfg.rl is RLType.SIMPO:
|
||||
@@ -156,7 +159,16 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
|
||||
elif self.cfg.rl in {RLType.GRPO, RLType.GDPO}:
|
||||
from axolotl.core.trainers.grpo import GRPOStrategy
|
||||
|
||||
training_args_cls = GRPOStrategy.get_training_args_class()
|
||||
async_grpo = bool(
|
||||
self.cfg.trl
|
||||
and (
|
||||
getattr(self.cfg.trl, "async_prefetch", False)
|
||||
or getattr(self.cfg.trl, "use_data_producer", False)
|
||||
)
|
||||
)
|
||||
training_args_cls = GRPOStrategy.get_training_args_class(
|
||||
async_grpo=async_grpo
|
||||
)
|
||||
training_args_kwargs.update(GRPOStrategy.set_training_args_kwargs(self.cfg))
|
||||
blocklist_args_kwargs = GRPOStrategy.get_blocklist_args_kwargs()
|
||||
if self.cfg.rl is RLType.GDPO:
|
||||
@@ -222,13 +234,36 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
|
||||
trainer_kwargs, trainer_cls
|
||||
)
|
||||
|
||||
trainer = trainer_cls(
|
||||
*trainer_cls_args,
|
||||
args=training_args,
|
||||
train_dataset=self.train_dataset,
|
||||
callbacks=self.get_callbacks(),
|
||||
**trainer_kwargs,
|
||||
)
|
||||
# Allow FP8-quantized models to be fine-tuned with LoRA adapters.
|
||||
# transformers' validate_quantization_for_training blocks FP8 because
|
||||
# hf_quantizer.is_trainable is False, but LoRA only trains the adapters
|
||||
# (base weights stay frozen in FP8).
|
||||
_orig_validate_quant = None
|
||||
if (
|
||||
self.cfg.adapter
|
||||
and hasattr(self.model, "is_quantized")
|
||||
and self.model.is_quantized
|
||||
):
|
||||
import transformers.trainer as _trainer_module
|
||||
|
||||
_orig_validate_quant = _trainer_module.validate_quantization_for_training
|
||||
_trainer_module.validate_quantization_for_training = lambda model: None
|
||||
|
||||
try:
|
||||
trainer = trainer_cls(
|
||||
*trainer_cls_args,
|
||||
args=training_args,
|
||||
train_dataset=self.train_dataset,
|
||||
callbacks=self.get_callbacks(),
|
||||
**trainer_kwargs,
|
||||
)
|
||||
finally:
|
||||
if _orig_validate_quant is not None:
|
||||
import transformers.trainer as _trainer_module
|
||||
|
||||
_trainer_module.validate_quantization_for_training = (
|
||||
_orig_validate_quant
|
||||
)
|
||||
if self.cfg.fsdp_config or self.cfg.fsdp:
|
||||
ensure_dtype(trainer.model, dtype=self.cfg.torch_dtype)
|
||||
if self.cfg.rl in [RLType.DPO, RLType.IPO] and trainer.ref_model:
|
||||
|
||||
@@ -26,7 +26,7 @@ from transformers import PreTrainedModel, Trainer
from transformers.trainer import TRAINING_ARGS_NAME
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, has_length, seed_worker
from transformers.utils import SAFE_WEIGHTS_NAME, is_peft_available
from trl.trainer.utils import pad_to_length
from trl.experimental.utils import pad_to_length
from typing_extensions import override

from axolotl.core.trainers.mixins import (

@@ -25,17 +25,13 @@ class DPOStrategy:
# Label smoothing is not compatible with IPO
if cfg.rl is RLType.DPO and cfg.dpo_label_smoothing:
    training_args_kwargs["label_smoothing"] = cfg.dpo_label_smoothing
training_args_kwargs["max_completion_length"] = None
training_args_kwargs["max_length"] = cfg.sequence_len
training_args_kwargs["generate_during_eval"] = cfg.dpo_generate_during_eval
if cfg.dpo_use_weighting is not None:
    training_args_kwargs["use_weighting"] = cfg.dpo_use_weighting
if cfg.dpo_padding_free is not None:
    training_args_kwargs["padding_free"] = cfg.dpo_padding_free
if cfg.dpo_norm_loss is not None:
    training_args_kwargs["dpo_norm_loss"] = cfg.dpo_norm_loss
if cfg.dpo_use_logits_to_keep is not None:
    training_args_kwargs["use_logits_to_keep"] = cfg.dpo_use_logits_to_keep
if cfg.dpo_use_liger_kernel is not None:
    training_args_kwargs["use_liger_kernel"] = cfg.dpo_use_liger_kernel
return training_args_kwargs
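For reference, a hedged sketch of config keys that feed the kwargs above. The option names come directly from the `cfg.*` reads in this hunk; the example values are invented.

```yaml
# Illustrative values only.
rl: dpo
sequence_len: 2048
dpo_label_smoothing: 0.1
dpo_generate_during_eval: false
dpo_use_weighting: true
dpo_padding_free: true
dpo_norm_loss: true
dpo_use_logits_to_keep: true
dpo_use_liger_kernel: false
```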
@@ -103,10 +103,10 @@ class AxolotlDPOTrainer(
) -> dict[str, torch.Tensor]:
    if self.args.dpo_norm_loss:
        # fmt: off
        loss_type: str = self.loss_type  # type: ignore[has-type]
        loss_type: list[str] = self.loss_type  # type: ignore[has-type]
        # fmt: on
        # concatenated_forward handles avg token logprob for ipo case already
        self.loss_type = "ipo"
        self.loss_type = ["ipo"]
        res = super().concatenated_forward(model, batch, is_ref_model=is_ref_model)
        self.loss_type = loss_type
        return res
@@ -9,8 +9,9 @@ from huggingface_hub import snapshot_download
from requests import HTTPError
from trl.trainer.grpo_trainer import RewardFunc

from axolotl.core.trainers.grpo.args import AxolotlGRPOConfig
from axolotl.core.trainers.grpo.args import AxolotlAsyncGRPOConfig, AxolotlGRPOConfig
from axolotl.core.trainers.grpo.trainer import (
    AxolotlAsyncGRPOTrainer,
    AxolotlGRPOSequenceParallelTrainer,
    AxolotlGRPOTrainer,
)
@@ -27,14 +28,31 @@ class GRPOStrategy:

@classmethod
def get_trainer_class(
    cls, sequence_parallel: bool
) -> type[AxolotlGRPOTrainer] | type[AxolotlGRPOSequenceParallelTrainer]:
    cls,
    sequence_parallel: bool,
    async_grpo: bool = False,
) -> (
    type[AxolotlGRPOTrainer]
    | type[AxolotlGRPOSequenceParallelTrainer]
    | type[AxolotlAsyncGRPOTrainer]
):
    if sequence_parallel and async_grpo:
        raise ValueError(
            "sequence_parallel and async_grpo cannot both be enabled. "
            "Disable one of context_parallel_size > 1 or async_prefetch/use_data_producer."
        )
    if sequence_parallel:
        return AxolotlGRPOSequenceParallelTrainer
    if async_grpo:
        return AxolotlAsyncGRPOTrainer
    return AxolotlGRPOTrainer

@classmethod
def get_training_args_class(cls) -> type[AxolotlGRPOConfig]:
def get_training_args_class(
    cls, async_grpo: bool = False
) -> type[AxolotlGRPOConfig] | type[AxolotlAsyncGRPOConfig]:
    if async_grpo:
        return AxolotlAsyncGRPOConfig
    return AxolotlGRPOConfig

@classmethod
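A quick sketch (not part of the diff) of how the new selection flags behave, based purely on the logic shown above.

```python
# Illustrative: the async flag routes to the async trainer/config classes,
# and combining it with sequence parallelism raises a ValueError.
from axolotl.core.trainers.grpo import GRPOStrategy

trainer_cls = GRPOStrategy.get_trainer_class(sequence_parallel=False, async_grpo=True)
args_cls = GRPOStrategy.get_training_args_class(async_grpo=True)
# trainer_cls is AxolotlAsyncGRPOTrainer, args_cls is AxolotlAsyncGRPOConfig

GRPOStrategy.get_trainer_class(sequence_parallel=True, async_grpo=True)  # raises ValueError
```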
@@ -124,13 +142,63 @@ class GRPOStrategy:
grpo_args_kwargs["epsilon_high"] = trl.epsilon_high

if trl.use_liger_loss is not None:
    grpo_args_kwargs["use_liger_loss"] = trl.use_liger_loss
    grpo_args_kwargs["use_liger_kernel"] = trl.use_liger_loss

if trl.multi_objective_aggregation is not None:
    grpo_args_kwargs["multi_objective_aggregation"] = (
        trl.multi_objective_aggregation
    )

# Async GRPO fields
if getattr(trl, "use_data_producer", None) is not None:
    grpo_args_kwargs["use_data_producer"] = trl.use_data_producer
if getattr(trl, "async_prefetch", None) is not None:
    grpo_args_kwargs["async_prefetch"] = trl.async_prefetch
if getattr(trl, "prefetch_depth", None) is not None:
    grpo_args_kwargs["prefetch_depth"] = trl.prefetch_depth
if getattr(trl, "vllm_sync_interval", None) is not None:
    grpo_args_kwargs["vllm_sync_interval"] = trl.vllm_sync_interval
if getattr(trl, "streaming_partial_batch", None) is not None:
    grpo_args_kwargs["streaming_partial_batch"] = trl.streaming_partial_batch
if getattr(trl, "streaming_min_groups", None) is not None:
    grpo_args_kwargs["streaming_min_groups"] = trl.streaming_min_groups
if getattr(trl, "vllm_importance_sampling_correction", None) is not None:
    grpo_args_kwargs["vllm_importance_sampling_correction"] = (
        trl.vllm_importance_sampling_correction
    )
if getattr(trl, "vllm_importance_sampling_mode", None) is not None:
    grpo_args_kwargs["vllm_importance_sampling_mode"] = (
        trl.vllm_importance_sampling_mode
    )
if getattr(trl, "vllm_importance_sampling_cap", None) is not None:
    grpo_args_kwargs["vllm_importance_sampling_cap"] = (
        trl.vllm_importance_sampling_cap
    )
if getattr(trl, "off_policy_mask_threshold", None) is not None:
    grpo_args_kwargs["off_policy_mask_threshold"] = (
        trl.off_policy_mask_threshold
    )
if getattr(trl, "use_bias_correction_kl", None) is not None:
    grpo_args_kwargs["use_bias_correction_kl"] = trl.use_bias_correction_kl

# Fast Async GRPO fields
if getattr(trl, "reward_num_workers", None) is not None:
    grpo_args_kwargs["reward_num_workers"] = trl.reward_num_workers
if getattr(trl, "replay_buffer_size", None) is not None:
    grpo_args_kwargs["replay_buffer_size"] = trl.replay_buffer_size
if getattr(trl, "replay_recompute_logps", None) is not None:
    grpo_args_kwargs["replay_recompute_logps"] = trl.replay_recompute_logps
if getattr(trl, "reroll_start_fraction", None) is not None:
    grpo_args_kwargs["reroll_start_fraction"] = trl.reroll_start_fraction
if getattr(trl, "reroll_max_groups", None) is not None:
    grpo_args_kwargs["reroll_max_groups"] = trl.reroll_max_groups
if getattr(trl, "skip_zero_advantage_batches", None) is not None:
    grpo_args_kwargs["skip_zero_advantage_batches"] = (
        trl.skip_zero_advantage_batches
    )
if getattr(trl, "vllm_lora_sync", None) is not None:
    grpo_args_kwargs["vllm_lora_sync"] = trl.vllm_lora_sync

return grpo_args_kwargs

@classmethod
@@ -6,6 +6,7 @@ from dataclasses import dataclass

from trl import GRPOConfig

from axolotl.core.trainers.grpo.fast_async_trainer import FastAsyncGRPOConfig
from axolotl.core.training_args import AxolotlTrainingMixins


@@ -14,3 +15,10 @@ class AxolotlGRPOConfig(AxolotlTrainingMixins, GRPOConfig):
    """Axolotl GRPO Config for GRPO training"""

    context_parallel_size: int | None = None


@dataclass
class AxolotlAsyncGRPOConfig(AxolotlTrainingMixins, FastAsyncGRPOConfig):
    """Axolotl Async GRPO Config — adds async prefetch, streaming scoring, and IS correction."""

    context_parallel_size: int | None = None
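A hedged sketch of the `trl` options that flow into `AxolotlAsyncGRPOConfig` via `set_training_args_kwargs` above. The key names come from this diff; the nesting and values are illustrative and untested.

```yaml
rl: grpo
trl:
  use_data_producer: true
  async_prefetch: true
  prefetch_depth: 2
  vllm_sync_interval: 4
  # Fast async extras
  reward_num_workers: 4
  replay_buffer_size: 64
  replay_recompute_logps: true
  reroll_start_fraction: 0.5
  reroll_max_groups: 1
  skip_zero_advantage_batches: true
  vllm_lora_sync: false
```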
src/axolotl/core/trainers/grpo/async_trainer.py (new file, 2657 lines; diff suppressed because it is too large)

src/axolotl/core/trainers/grpo/fast_async_trainer.py (new file, 768 lines)
@@ -0,0 +1,768 @@
# Copyright 2020-2026 Axolotl AI. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Experimental GRPO extensions: parallel reward workers, replay buffer,
|
||||
deferred re-roll, and zero-advantage skipping.
|
||||
|
||||
These features are built as subclasses of GRPOTrainer and GRPODataProducer,
|
||||
using the hook system (_compute_rewards_for_batch, _post_advantage_hook,
|
||||
_pre_produce_hook) defined in the base classes.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import threading
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
from trl import GRPOTrainer
|
||||
|
||||
from axolotl.core.trainers.grpo.async_trainer import (
|
||||
AsyncGRPOConfig,
|
||||
AsyncGRPOTrainer,
|
||||
GRPODataProducer,
|
||||
)
|
||||
from axolotl.core.trainers.grpo.replay_buffer import ReplayBuffer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Extended config
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
|
||||
class FastAsyncGRPOConfig(AsyncGRPOConfig):
|
||||
"""GRPOConfig with additional experimental parameters."""
|
||||
|
||||
reward_num_workers: int = field(
|
||||
default=1,
|
||||
metadata={
|
||||
"help": "Number of persistent subprocess workers for parallel reward computation. Each worker has its "
|
||||
"own main thread so signal.alarm() (used by math_verify) works correctly. Work is sharded across "
|
||||
"workers by prompt groups. Only used with use_data_producer=True and non-nn.Module reward functions."
|
||||
},
|
||||
)
|
||||
replay_buffer_size: int = field(
|
||||
default=0,
|
||||
metadata={
|
||||
"help": "[Experimental, disabled by default] Size of the replay buffer for storing high-signal rollout "
|
||||
"groups. When > 0, groups with reward variance are cached and used to replace zero-signal groups "
|
||||
"(where all rewards are identical). Set to 0 to disable. Only used with use_data_producer=True."
|
||||
},
|
||||
)
|
||||
replay_recompute_logps: bool = field(
|
||||
default=True,
|
||||
metadata={
|
||||
"help": "When True (default), recompute old_per_token_logps for replayed groups using the current "
|
||||
"training model. This fixes the importance sampling mismatch that occurs when replaying stale data. "
|
||||
"Only relevant when replay_buffer_size > 0."
|
||||
},
|
||||
)
|
||||
reroll_start_fraction: float = field(
|
||||
default=0.5,
|
||||
metadata={
|
||||
"help": "Fraction of total training steps after which deferred re-rolling begins. Zero-signal prompts "
|
||||
"(where all rewards in a group are identical) are buffered and re-injected into later batches when the "
|
||||
"model is more likely to solve them. Set to 1.0 to disable. Only used with use_data_producer=True."
|
||||
},
|
||||
)
|
||||
reroll_max_groups: int = field(
|
||||
default=1,
|
||||
metadata={
|
||||
"help": "Maximum number of prompt groups to replace with re-roll candidates per batch. Higher values "
|
||||
"increase data utilization but reduce prompt diversity. Only used with use_data_producer=True."
|
||||
},
|
||||
)
|
||||
skip_zero_advantage_batches: bool = field(
|
||||
default=True,
|
||||
metadata={
|
||||
"help": "When True, skip gradient computation for micro-batches where all advantages are zero (no learning "
|
||||
"signal). This avoids the forward/backward pass entirely when no learning signal is present. The step is "
|
||||
"logged with skipped_zero_adv_batches=1 for monitoring."
|
||||
},
|
||||
)
|
||||
vllm_lora_sync: bool = field(
|
||||
default=False,
|
||||
metadata={
|
||||
"help": "When True, sync LoRA adapter weights to vLLM via filesystem instead of merging into base model "
|
||||
"and NCCL-broadcasting all parameters. vLLM loads the adapter natively using Punica kernels. "
|
||||
"Requires vllm_serve_lora serve module (auto-selected when this is True). "
|
||||
"Syncs only LoRA adapter weights (much smaller) vs full merged model. Legacy merge behavior is used when False."
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Extended data producer with re-roll injection
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class RerollDataProducer(GRPODataProducer):
|
||||
"""GRPODataProducer that injects re-roll candidates into prompt batches.
|
||||
|
||||
Reads from the trainer's ``_reroll_buffer`` (populated by
|
||||
``GRPOExperimentalTrainer._post_advantage_hook``) and replaces the
|
||||
last N prompt groups with previously-failed prompts.
|
||||
"""
|
||||
|
||||
def _pre_produce_hook(self, inputs: list, global_step: int) -> list:
|
||||
trainer = self._trainer
|
||||
reroll_buf = getattr(trainer, "_reroll_buffer", None)
|
||||
reroll_lock = getattr(trainer, "_reroll_lock", None)
|
||||
if reroll_buf is None or reroll_lock is None:
|
||||
return inputs
|
||||
|
||||
max_steps = getattr(trainer.args, "max_steps", -1)
|
||||
start_frac = getattr(trainer.args, "reroll_start_fraction", 1.0)
|
||||
max_groups = getattr(trainer.args, "reroll_max_groups", 1)
|
||||
reroll_start_step = (
|
||||
max(1, int(max_steps * start_frac)) if max_steps > 0 else float("inf")
|
||||
)
|
||||
|
||||
if global_step < reroll_start_step:
|
||||
return inputs
|
||||
|
||||
with reroll_lock:
|
||||
n_to_take = min(max_groups, len(reroll_buf))
|
||||
reroll_prompts = [reroll_buf.pop(0) for _ in range(n_to_take)]
|
||||
|
||||
if reroll_prompts:
|
||||
num_gen = self._num_generations
|
||||
n_groups = len(inputs) // num_gen
|
||||
for i, reroll_prompt in enumerate(reroll_prompts):
|
||||
group_idx = n_groups - 1 - i
|
||||
if group_idx < 0:
|
||||
break
|
||||
start = group_idx * num_gen
|
||||
for j in range(num_gen):
|
||||
inputs[start + j] = reroll_prompt
|
||||
logger.info(
|
||||
f"[REROLL] Step {global_step}: replaced {len(reroll_prompts)}/{n_groups} prompt groups "
|
||||
f"with deferred re-roll candidates ({len(reroll_buf)} remaining)"
|
||||
)
|
||||
|
||||
return inputs
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Persistent reward subprocess pool
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _persistent_reward_worker(conn):
|
||||
"""Long-lived reward worker. Receives work items, returns results."""
|
||||
while True:
|
||||
try:
|
||||
msg = conn.recv()
|
||||
except EOFError:
|
||||
break
|
||||
if msg is None: # Shutdown signal
|
||||
break
|
||||
(
|
||||
reward_funcs,
|
||||
prompts,
|
||||
completions,
|
||||
completion_ids_list,
|
||||
inputs,
|
||||
reward_func_names,
|
||||
) = msg
|
||||
try:
|
||||
keys = [
|
||||
key
|
||||
for key in inputs[0]
|
||||
if key not in ["prompt", "completion", "completion_ids"]
|
||||
]
|
||||
reward_kwargs = {key: [example[key] for example in inputs] for key in keys}
|
||||
results = []
|
||||
for reward_func, _reward_func_name in zip(
|
||||
reward_funcs, reward_func_names, strict=True
|
||||
):
|
||||
output = reward_func(
|
||||
prompts=prompts,
|
||||
completions=completions,
|
||||
completion_ids=completion_ids_list,
|
||||
**reward_kwargs,
|
||||
)
|
||||
results.append(
|
||||
[float(r) if r is not None else float("nan") for r in output]
|
||||
)
|
||||
conn.send(results)
|
||||
except Exception:
|
||||
conn.send(None)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Extended trainer
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class FastAsyncGRPOTrainer(AsyncGRPOTrainer):
|
||||
"""GRPOTrainer with experimental extensions.
|
||||
|
||||
Adds:
|
||||
- Parallel reward subprocess workers (``reward_num_workers``)
|
||||
- Replay buffer for high-signal group reuse (``replay_buffer_size``)
|
||||
- Deferred re-roll of failed prompts (``reroll_start_fraction``)
|
||||
- Zero-advantage micro-batch skipping
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
# These must be initialized before super().__init__() because
|
||||
# _create_data_producer (called during super().__init__) needs them.
|
||||
self._reroll_buffer: list = []
|
||||
self._reroll_lock = threading.Lock()
|
||||
|
||||
# Temporarily suppress the base class's Liger + OPSM validation check,
|
||||
# since this subclass supports it via a custom compute_liger_loss override.
|
||||
grpo_args = kwargs.get("args")
|
||||
if grpo_args is None:
|
||||
for a in args:
|
||||
if hasattr(a, "off_policy_mask_threshold"):
|
||||
grpo_args = a
|
||||
break
|
||||
saved_threshold = None
|
||||
if grpo_args is not None and getattr(grpo_args, "use_liger_kernel", False):
|
||||
saved_threshold = grpo_args.off_policy_mask_threshold
|
||||
grpo_args.off_policy_mask_threshold = None
|
||||
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
if saved_threshold is not None:
|
||||
grpo_args.off_policy_mask_threshold = saved_threshold
|
||||
self.off_policy_mask_threshold = saved_threshold
|
||||
|
||||
# Replay buffer
|
||||
if getattr(self.args, "replay_buffer_size", 0) > 0:
|
||||
self._replay_buffer = ReplayBuffer(max_size=self.args.replay_buffer_size)
|
||||
else:
|
||||
self._replay_buffer = None
|
||||
self._replay_recompute_logps = getattr(
|
||||
self.args, "replay_recompute_logps", True
|
||||
)
|
||||
|
||||
# Reward worker pool (lazy-initialized)
|
||||
self._reward_workers = None
|
||||
|
||||
# -- Factory override: use RerollDataProducer ----------------------------
|
||||
|
||||
def _create_data_producer(self, args, train_dataset):
|
||||
"""Override to use RerollDataProducer for re-roll prompt injection."""
|
||||
from axolotl.core.trainers.grpo.async_trainer import (
|
||||
AsyncDataProducer,
|
||||
ProducerConfig,
|
||||
)
|
||||
|
||||
producer_config = ProducerConfig(
|
||||
mini_epochs=args.num_iterations,
|
||||
max_rollouts=None,
|
||||
eval_during_produce=False,
|
||||
empty_cache_before_produce=True,
|
||||
empty_cache_after_produce=True,
|
||||
async_prefetch=args.async_prefetch,
|
||||
prefetch_depth=args.prefetch_depth,
|
||||
)
|
||||
data_producer = RerollDataProducer(
|
||||
config=producer_config,
|
||||
prompt_dataset=train_dataset,
|
||||
num_generations=self.num_generations,
|
||||
generation_batch_size=args.generation_batch_size,
|
||||
train_batch_size=args.per_device_train_batch_size,
|
||||
steps_per_generation=args.steps_per_generation,
|
||||
shuffle_dataset=self.shuffle_dataset,
|
||||
seed=args.seed,
|
||||
)
|
||||
data_producer.set_trainer(self)
|
||||
if args.async_prefetch:
|
||||
data_producer = AsyncDataProducer(
|
||||
data_producer,
|
||||
background_produce_kwargs={"skip_policy_logps": True},
|
||||
)
|
||||
return data_producer
|
||||
|
||||
# -- Reward worker pool --------------------------------------------------
|
||||
|
||||
def _get_reward_workers(self):
|
||||
"""Return a list of persistent reward worker subprocesses (lazy-initialized)."""
|
||||
import multiprocessing as _mp
|
||||
|
||||
num_workers = getattr(self.args, "reward_num_workers", 1)
|
||||
if num_workers < 1:
|
||||
num_workers = 1
|
||||
|
||||
if self._reward_workers is not None:
|
||||
alive = all(proc.is_alive() for conn, proc in self._reward_workers)
|
||||
if alive and len(self._reward_workers) == num_workers:
|
||||
return self._reward_workers
|
||||
self._shutdown_reward_workers()
|
||||
|
||||
workers = []
|
||||
for _ in range(num_workers):
|
||||
parent_conn, child_conn = _mp.Pipe()
|
||||
proc = _mp.Process(
|
||||
target=_persistent_reward_worker, args=(child_conn,), daemon=True
|
||||
)
|
||||
proc.start()
|
||||
child_conn.close()
|
||||
workers.append((parent_conn, proc))
|
||||
|
||||
self._reward_workers = workers
|
||||
return workers
|
||||
|
||||
def _shutdown_reward_workers(self):
|
||||
"""Shut down all persistent reward workers."""
|
||||
if self._reward_workers is None:
|
||||
return
|
||||
for conn, proc in self._reward_workers:
|
||||
try:
|
||||
conn.send(None)
|
||||
proc.join(timeout=5)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
conn.close()
|
||||
except Exception:
|
||||
pass
|
||||
self._reward_workers = None
|
||||
|
||||
# -- Hook overrides ------------------------------------------------------
|
||||
|
||||
def _compute_rewards_for_batch(
|
||||
self, inputs, prompts, completions, completion_ids_list
|
||||
):
|
||||
"""Dispatch rewards to parallel subprocess workers (synchronous wrapper)."""
|
||||
self._launch_reward_workers(inputs, prompts, completions, completion_ids_list)
|
||||
return self._collect_reward_workers(
|
||||
inputs, prompts, completions, completion_ids_list
|
||||
)
|
||||
|
||||
def _launch_reward_workers(self, inputs, prompts, completions, completion_ids_list):
|
||||
"""Send reward work to subprocess workers (non-blocking).
|
||||
|
||||
Results are collected later by _collect_reward_workers, allowing GPU
|
||||
logprob computation to overlap with CPU reward computation.
|
||||
"""
|
||||
reward_can_bg = all(
|
||||
callable(rf)
|
||||
and not isinstance(rf, nn.Module)
|
||||
and not asyncio.iscoroutinefunction(rf)
|
||||
for rf in self.reward_funcs
|
||||
)
|
||||
num_workers = getattr(self.args, "reward_num_workers", 1)
|
||||
|
||||
if not reward_can_bg or num_workers <= 1:
|
||||
# Can't parallelize — store args for sync fallback in collect
|
||||
self._reward_workers_used = None
|
||||
self._pending_reward_args = (
|
||||
inputs,
|
||||
prompts,
|
||||
completions,
|
||||
completion_ids_list,
|
||||
)
|
||||
return
|
||||
|
||||
workers = self._get_reward_workers()
|
||||
num_generations = self.num_generations
|
||||
num_prompts = len(prompts)
|
||||
num_groups = num_prompts // num_generations
|
||||
|
||||
# Shard by prompt groups across workers
|
||||
groups_per_worker = max(1, (num_groups + len(workers) - 1) // len(workers))
|
||||
workers_used = []
|
||||
for w_idx, (conn, _proc) in enumerate(workers):
|
||||
g_start = w_idx * groups_per_worker
|
||||
g_end = min((w_idx + 1) * groups_per_worker, num_groups)
|
||||
if g_start >= num_groups:
|
||||
break
|
||||
s_start = g_start * num_generations
|
||||
s_end = g_end * num_generations
|
||||
conn.send(
|
||||
(
|
||||
self.reward_funcs,
|
||||
prompts[s_start:s_end],
|
||||
completions[s_start:s_end],
|
||||
completion_ids_list[s_start:s_end],
|
||||
inputs[s_start:s_end],
|
||||
self.reward_func_names,
|
||||
)
|
||||
)
|
||||
workers_used.append(conn)
|
||||
|
||||
self._reward_workers_used = workers_used
|
||||
self._pending_reward_args = (inputs, prompts, completions, completion_ids_list)
|
||||
|
||||
def _collect_reward_workers(
|
||||
self, inputs, prompts, completions, completion_ids_list
|
||||
):
|
||||
"""Collect reward results from subprocess workers (blocks until done)."""
|
||||
from accelerate.utils import gather
|
||||
|
||||
workers_used = getattr(self, "_reward_workers_used", None)
|
||||
args = getattr(self, "_pending_reward_args", None)
|
||||
self._reward_workers_used = None
|
||||
self._pending_reward_args = None
|
||||
|
||||
if workers_used is None:
|
||||
# Sync fallback — compute on main thread
|
||||
if args is not None:
|
||||
return self._calculate_rewards(*args)
|
||||
return self._calculate_rewards(
|
||||
inputs, prompts, completions, completion_ids_list
|
||||
)
|
||||
|
||||
device = self.accelerator.device
|
||||
num_prompts = len(args[1]) if args else len(prompts)
|
||||
|
||||
# Collect results from workers
|
||||
all_worker_results = []
|
||||
any_failed = False
|
||||
for conn in workers_used:
|
||||
result = conn.recv()
|
||||
if result is None:
|
||||
any_failed = True
|
||||
# Drain remaining workers to prevent stale results in pipes
|
||||
for remaining_conn in workers_used:
|
||||
if remaining_conn is not conn:
|
||||
try:
|
||||
remaining_conn.recv()
|
||||
except Exception:
|
||||
pass
|
||||
break
|
||||
all_worker_results.append(result)
|
||||
|
||||
if not any_failed:
|
||||
rewards_per_func = torch.zeros(
|
||||
num_prompts, len(self.reward_funcs), device=device
|
||||
)
|
||||
offset = 0
|
||||
for worker_result in all_worker_results:
|
||||
chunk_size = len(worker_result[0])
|
||||
for i, result in enumerate(worker_result):
|
||||
rewards_per_func[offset : offset + chunk_size, i] = torch.tensor(
|
||||
result, dtype=torch.float32, device=device
|
||||
)
|
||||
offset += chunk_size
|
||||
return gather(rewards_per_func)
|
||||
|
||||
# Fallback to main thread on failure
|
||||
if args is not None:
|
||||
return self._calculate_rewards(*args)
|
||||
return self._calculate_rewards(
|
||||
inputs, prompts, completions, completion_ids_list
|
||||
)
|
||||
|
||||
def _post_advantage_hook(
|
||||
self,
|
||||
data: dict,
|
||||
rewards_per_func,
|
||||
advantages,
|
||||
inputs: list,
|
||||
num_generations: int,
|
||||
mode: str,
|
||||
s_start: int | None = None,
|
||||
s_end: int | None = None,
|
||||
is_last_chunk: bool = True,
|
||||
) -> None:
|
||||
"""Replay buffer store/replace + re-roll buffering."""
|
||||
from trl.models.utils import disable_gradient_checkpointing
|
||||
|
||||
# -- Replay buffer: store high-signal groups --
|
||||
if self._replay_buffer is not None:
|
||||
local_grouped = rewards_per_func.view(
|
||||
-1, num_generations, len(self.reward_funcs)
|
||||
)
|
||||
per_group_std = local_grouped.std(dim=1)
|
||||
has_signal = (per_group_std > 0).any(dim=1)
|
||||
offset = s_start or 0
|
||||
|
||||
if has_signal.any():
|
||||
grouped_adv = advantages.view(-1, num_generations)
|
||||
replay_scores = grouped_adv.abs().sum(dim=1) * per_group_std.sum(dim=1)
|
||||
for group_idx in has_signal.nonzero(as_tuple=True)[0]:
|
||||
gi = group_idx.item()
|
||||
start = offset + gi * num_generations
|
||||
end = start + num_generations
|
||||
group_data = {}
|
||||
for key in data:
|
||||
val = data[key]
|
||||
if (
|
||||
isinstance(val, torch.Tensor)
|
||||
and val.dim() > 0
|
||||
and val.size(0) >= end
|
||||
):
|
||||
group_data[key] = val[start:end].clone()
|
||||
self._replay_buffer.add(replay_scores[gi].item(), group_data)
|
||||
|
||||
# Replace zero-signal groups with high-signal replay buffer entries
|
||||
# Only in non-streaming path (s_start is None) — streaming scores
|
||||
# groups incrementally, so replacement + logprob recompute would be
|
||||
# too expensive per chunk.
|
||||
n_replaced = 0
|
||||
if s_start is None:
|
||||
no_signal = ~has_signal
|
||||
replaced_ranges = []
|
||||
if no_signal.any() and len(self._replay_buffer) > 0:
|
||||
for group_idx in no_signal.nonzero(as_tuple=True)[0]:
|
||||
sampled = self._replay_buffer.sample(1)
|
||||
if sampled is None:
|
||||
break
|
||||
sampled_group = sampled[0]
|
||||
gi = group_idx.item()
|
||||
start = offset + gi * num_generations
|
||||
end = start + num_generations
|
||||
for key, val in sampled_group.items():
|
||||
if key in data and isinstance(data[key], torch.Tensor):
|
||||
src = val.to(data[key].device)
|
||||
tgt_seq_len = (
|
||||
data[key].size(1) if data[key].dim() > 1 else None
|
||||
)
|
||||
if start >= data[key].size(0) or end > data[key].size(
|
||||
0
|
||||
):
|
||||
continue
|
||||
if tgt_seq_len is not None:
|
||||
if src.size(1) <= tgt_seq_len:
|
||||
data[key][start:end] = 0
|
||||
data[key][start:end, : src.size(1)] = src
|
||||
else:
|
||||
data[key][start:end] = src[:, :tgt_seq_len]
|
||||
else:
|
||||
data[key][start:end] = src
|
||||
replaced_ranges.append((start, end))
|
||||
n_replaced += 1
|
||||
|
||||
# Recompute old_per_token_logps for replayed groups
|
||||
if (
|
||||
n_replaced > 0
|
||||
and self._replay_recompute_logps
|
||||
and "old_per_token_logps" in data
|
||||
):
|
||||
with (
|
||||
torch.no_grad(),
|
||||
disable_gradient_checkpointing(
|
||||
self.model, self.args.gradient_checkpointing_kwargs
|
||||
),
|
||||
):
|
||||
for r_start, r_end in replaced_ranges:
|
||||
r_ids = torch.cat(
|
||||
[
|
||||
data["prompt_ids"][r_start:r_end],
|
||||
data["completion_ids"][r_start:r_end],
|
||||
],
|
||||
dim=1,
|
||||
)
|
||||
r_mask = torch.cat(
|
||||
[
|
||||
data["prompt_mask"][r_start:r_end],
|
||||
data["completion_mask"][r_start:r_end],
|
||||
],
|
||||
dim=1,
|
||||
)
|
||||
r_logits_to_keep = data["completion_ids"].size(1)
|
||||
r_fwd_kwargs = {}
|
||||
for fk in (
|
||||
"pixel_values",
|
||||
"image_grid_thw",
|
||||
"pixel_attention_mask",
|
||||
"image_sizes",
|
||||
"token_type_ids",
|
||||
"mm_token_type_ids",
|
||||
):
|
||||
if fk in data:
|
||||
r_fwd_kwargs[fk] = data[fk]
|
||||
r_logps, _ = self._get_per_token_logps_and_entropies(
|
||||
self.model,
|
||||
r_ids,
|
||||
r_mask,
|
||||
r_logits_to_keep,
|
||||
r_end - r_start,
|
||||
**r_fwd_kwargs,
|
||||
)
|
||||
data["old_per_token_logps"][r_start:r_end] = r_logps
|
||||
|
||||
if n_replaced > 0:
|
||||
self._metrics[mode]["replay_buffer_replacements"].append(
|
||||
float(n_replaced)
|
||||
)
|
||||
|
||||
if is_last_chunk:
|
||||
self._metrics[mode]["replay_buffer_size"].append(
|
||||
float(len(self._replay_buffer))
|
||||
)
|
||||
|
||||
# -- Re-roll buffer: store failed prompts --
|
||||
if getattr(self.args, "reroll_start_fraction", 1.0) < 1.0:
|
||||
grouped_rewards = rewards_per_func.view(
|
||||
-1, num_generations, len(self.reward_funcs)
|
||||
)
|
||||
per_group_std = grouped_rewards.std(dim=1)
|
||||
per_group_mean = grouped_rewards.mean(dim=1)
|
||||
zero_signal = (per_group_std == 0).all(dim=1)
|
||||
all_failed = (per_group_mean.abs() < 1e-6).all(dim=1)
|
||||
should_reroll = zero_signal & all_failed
|
||||
_n_buffered = 0
|
||||
with self._reroll_lock:
|
||||
for group_idx in should_reroll.nonzero(as_tuple=True)[0]:
|
||||
idx = group_idx.item() * num_generations
|
||||
if idx >= len(inputs):
|
||||
continue
|
||||
prompt_input = inputs[idx]
|
||||
self._reroll_buffer.append(prompt_input)
|
||||
_n_buffered += 1
|
||||
if _n_buffered > 0:
|
||||
self._metrics[mode]["reroll_buffered"].append(float(_n_buffered))
|
||||
if is_last_chunk:
|
||||
self._metrics[mode]["reroll_buffer_size"].append(
|
||||
float(len(self._reroll_buffer))
|
||||
)
|
||||
|
||||
# -- Zero-advantage skipping + Liger OPSM ---------------------------------
|
||||
|
||||
def compute_liger_loss(self, unwrapped_model, inputs):
|
||||
"""Liger loss with zero-adv skipping and off-policy sequence masking (OPSM).
|
||||
|
||||
The base class Liger path doesn't support OPSM because the fused kernel
|
||||
doesn't expose per-token logprobs needed for the KL computation. This
|
||||
override computes them via chunked lm_head matmul (no grad, low memory)
|
||||
and applies the OPSM to the loss mask before calling the kernel.
|
||||
"""
|
||||
if self.args.skip_zero_advantage_batches and torch.all(
|
||||
inputs["advantages"] == 0
|
||||
):
|
||||
mode = "train" if self.model.training else "eval"
|
||||
self._metrics[mode]["skipped_zero_adv_batches"].append(1.0)
|
||||
return torch.tensor(
|
||||
0.0, device=inputs["advantages"].device, requires_grad=True
|
||||
)
|
||||
|
||||
if self.off_policy_mask_threshold is None:
|
||||
return super().compute_liger_loss(unwrapped_model, inputs)
|
||||
|
||||
# OPSM path: need per_token_logps for KL, which Liger kernel doesn't provide
|
||||
prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"]
|
||||
completion_ids, completion_mask = (
|
||||
inputs["completion_ids"],
|
||||
inputs["completion_mask"],
|
||||
)
|
||||
input_ids = torch.cat([prompt_ids, completion_ids], dim=1)
|
||||
attention_mask = torch.cat([prompt_mask, completion_mask], dim=1)
|
||||
logits_to_keep = completion_ids.size(1)
|
||||
|
||||
last_hidden_state = self._get_last_hidden_state(
|
||||
unwrapped_model,
|
||||
input_ids,
|
||||
attention_mask,
|
||||
logits_to_keep,
|
||||
inputs.get("pixel_values"),
|
||||
inputs.get("image_grid_thw"),
|
||||
inputs.get("pixel_attention_mask"),
|
||||
inputs.get("image_sizes"),
|
||||
)
|
||||
|
||||
loss_mask = (
|
||||
completion_mask
|
||||
if "tool_mask" not in inputs
|
||||
else completion_mask * inputs["tool_mask"]
|
||||
)
|
||||
|
||||
# Compute per_token_logps via chunked lm_head matmul (no grad, low memory)
|
||||
lm_weight = unwrapped_model.lm_head.weight
|
||||
lm_bias = unwrapped_model.lm_head.bias
|
||||
with torch.no_grad():
|
||||
per_token_logps_chunks = []
|
||||
for i in range(last_hidden_state.size(0)):
|
||||
chunk_logits = torch.matmul(last_hidden_state[i : i + 1], lm_weight.t())
|
||||
if lm_bias is not None:
|
||||
chunk_logits = chunk_logits + lm_bias
|
||||
chunk_lps = (
|
||||
chunk_logits.float()
|
||||
.log_softmax(-1)
|
||||
.gather(-1, completion_ids[i : i + 1].unsqueeze(-1))
|
||||
.squeeze(-1)
|
||||
)
|
||||
per_token_logps_chunks.append(chunk_lps)
|
||||
del chunk_logits
|
||||
per_token_logps = torch.cat(per_token_logps_chunks, dim=0)
|
||||
|
||||
advantages = inputs["advantages"]
|
||||
if advantages.dim() == 1:
|
||||
advantages_2d = advantages.unsqueeze(1)
|
||||
else:
|
||||
advantages_2d = advantages
|
||||
|
||||
sampling_per_token_logps = inputs.get("sampling_per_token_logps")
|
||||
if sampling_per_token_logps is None:
|
||||
sampling_per_token_logps = inputs.get("old_per_token_logps")
|
||||
if sampling_per_token_logps is None:
|
||||
sampling_per_token_logps = per_token_logps
|
||||
|
||||
off_policy_mask = GRPOTrainer.get_off_policy_mask(
|
||||
advantages=advantages_2d,
|
||||
per_token_logps=per_token_logps,
|
||||
sampling_per_token_logps=sampling_per_token_logps,
|
||||
mask=loss_mask,
|
||||
off_policy_threshold=self.off_policy_mask_threshold,
|
||||
)
|
||||
loss_mask = loss_mask * off_policy_mask
|
||||
|
||||
# Call the Liger fused kernel with OPSM-modified mask
|
||||
loss, metrics = self.liger_grpo_loss(
|
||||
_input=last_hidden_state,
|
||||
lin_weight=unwrapped_model.lm_head.weight,
|
||||
selected_token_ids=completion_ids,
|
||||
attention_mask=loss_mask,
|
||||
advantages=inputs["advantages"],
|
||||
bias=unwrapped_model.lm_head.bias,
|
||||
old_per_token_logps=inputs.get("old_per_token_logps"),
|
||||
ref_per_token_logps=inputs.get("ref_per_token_logps"),
|
||||
vllm_is_ratio=inputs.get("importance_sampling_ratio"),
|
||||
)
|
||||
|
||||
mean_kl = metrics[0] if self.beta != 0.0 else None
|
||||
clip_ratio = metrics[-1]
|
||||
|
||||
mode = "train" if self.model.training else "eval"
|
||||
if self.beta != 0.0:
|
||||
self._metrics[mode]["kl"].append(
|
||||
self.accelerator.gather(mean_kl).mean().item()
|
||||
)
|
||||
self._metrics[mode]["clip_ratio"].append(
|
||||
self.accelerator.gather(clip_ratio).mean().item()
|
||||
)
|
||||
normalizer = (
|
||||
self.current_gradient_accumulation_steps if mode == "train" else 1.0
|
||||
)
|
||||
return loss / normalizer
|
||||
|
||||
def _compute_loss(self, model, inputs):
|
||||
if self.args.skip_zero_advantage_batches and torch.all(
|
||||
inputs["advantages"] == 0
|
||||
):
|
||||
mode = "train" if self.model.training else "eval"
|
||||
self._metrics[mode]["skipped_zero_adv_batches"].append(1.0)
|
||||
# Create zero loss with grad_fn. DeepSpeed requires grad_fn != None.
|
||||
# With ZeRO-3, parameters are partitioned (shape=[0], requires_grad=False)
|
||||
# so we can't just do `(p * 0).sum()`. Instead, do a tiny forward pass
|
||||
# with a single token to create a proper computation graph.
|
||||
prompt_ids = inputs["prompt_ids"][:1, :1] # (1, 1)
|
||||
attn = torch.ones_like(prompt_ids)
|
||||
with torch.amp.autocast(device_type="cuda", dtype=torch.bfloat16):
|
||||
out = model(input_ids=prompt_ids, attention_mask=attn)
|
||||
return out.logits.sum() * 0
|
||||
return super()._compute_loss(model, inputs)
|
||||
src/axolotl/core/trainers/grpo/replay_buffer.py (new file, 44 lines)
@@ -0,0 +1,44 @@
"""Simple replay buffer for storing and sampling high-signal rollout groups."""
|
||||
|
||||
import heapq
|
||||
|
||||
import torch
|
||||
|
||||
|
||||
class ReplayBuffer:
|
||||
"""Min-heap replay buffer that keeps the highest-scoring rollout groups.
|
||||
Groups are scored by signal quality (advantage magnitude * reward variance).
|
||||
When sampling, groups are drawn proportional to their scores.
|
||||
"""
|
||||
|
||||
def __init__(self, max_size: int):
|
||||
self.max_size = max_size
|
||||
self._heap: list[tuple[float, int, dict]] = [] # min-heap of (score, id, data)
|
||||
self._counter = 0 # unique tiebreaker for heap
|
||||
|
||||
def __len__(self):
|
||||
return len(self._heap)
|
||||
|
||||
def add(self, score: float, data: dict):
|
||||
"""Add a group to the buffer. If full, replaces lowest-scoring entry."""
|
||||
if self.max_size <= 0:
|
||||
return
|
||||
self._counter += 1
|
||||
if len(self._heap) < self.max_size:
|
||||
heapq.heappush(self._heap, (score, self._counter, data))
|
||||
elif score > self._heap[0][0]:
|
||||
heapq.heapreplace(self._heap, (score, self._counter, data))
|
||||
|
||||
def sample(self, num_samples: int) -> list[dict] | None:
|
||||
"""Sample groups weighted by their scores. Returns None if buffer is empty."""
|
||||
if self.max_size <= 0 or not self._heap:
|
||||
return None
|
||||
|
||||
scores = torch.tensor([item[0] for item in self._heap], dtype=torch.float32)
|
||||
scores = scores.clamp(min=1e-8) # avoid zero probabilities
|
||||
probs = scores / scores.sum()
|
||||
replacement = num_samples > len(self._heap)
|
||||
indices = torch.multinomial(
|
||||
probs, num_samples, replacement=replacement
|
||||
).tolist()
|
||||
return [self._heap[i][2] for i in indices]
|
||||
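A small usage sketch for the `ReplayBuffer` above (scores and payloads are made up); it just demonstrates the min-heap eviction and score-weighted sampling described in the docstring.

```python
buffer = ReplayBuffer(max_size=2)
buffer.add(0.9, {"group": "a"})   # high-signal group
buffer.add(0.1, {"group": "b"})   # low-signal group
buffer.add(0.5, {"group": "c"})   # evicts the 0.1-score entry at the heap root
groups = buffer.sample(1)         # drawn proportionally to score, so usually {"group": "a"}
```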
@@ -40,6 +40,7 @@ from trl.trainer.grpo_config import GRPOConfig
from trl.trainer.grpo_trainer import RewardFunc, nanstd
from trl.trainer.utils import pad

from axolotl.core.trainers.grpo.fast_async_trainer import FastAsyncGRPOTrainer
from axolotl.core.trainers.grpo.sampler import SequenceParallelRepeatRandomSampler
from axolotl.core.trainers.mixins import (
    DistributedParallelMixin,
@@ -66,6 +67,19 @@ class AxolotlGRPOTrainer(
    _tag_names = ["trl", "grpo", "axolotl"]


class AxolotlAsyncGRPOTrainer(
    RngLoaderMixin,
    SchedulerMixin,
    OptimizerMixin,
    OptimizerInitMixin,
    DistributedParallelMixin,
    FastAsyncGRPOTrainer,
):
    """Extend AsyncGRPOTrainer with axolotl helpers"""

    _tag_names = ["trl", "grpo", "async", "axolotl"]


class AxolotlGRPOSequenceParallelTrainer(AxolotlGRPOTrainer):
    """Extend the base GRPOTrainer for sequence parallelism handling"""
@@ -19,5 +19,4 @@ class CheckpointSaveMixin(Trainer):
    f"Trainer does not support saving optimizer and scheduler: {exc}\n"
    "Optimizer and scheduler states were not saved - resuming from checkpoints "
    "for this training run will not be possible.",
    main_process_only=True,
)

@@ -104,7 +104,7 @@ class OptimizerMixin(Trainer):

    return optimizer_grouped_parameters

def create_optimizer(self):
def create_optimizer(self, model=None):
    if (
        self.args.loraplus_lr_ratio is None
        and self.args.embedding_lr_scale is None
@@ -112,9 +112,9 @@ class OptimizerMixin(Trainer):
        and self.args.lr_groups is None
        and self.optimizer_cls_and_kwargs is None
    ):
        return super().create_optimizer()
        return super().create_optimizer(model=model)

    opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
    opt_model = self.model if model is None else model

    if (
        not self.optimizer
@@ -19,7 +19,7 @@ python scripts/cutcrossentropy_install.py | sh

- If you are installing from pip
```bash
pip3 uninstall -y cut-cross-entropy && pip3 install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@a668583"
pip3 uninstall -y cut-cross-entropy && pip3 install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@fa9a7fe"
```

## Usage
@@ -73,8 +73,10 @@ plugins:
  - ministral3
  - mistral
  - mistral3
  - mistral4
  - mixtral
  - mllama
  - nemotron_h
  - olmo
  - olmo2
  - olmo3
@@ -35,7 +35,7 @@ LOG = get_logger(__name__)

_CCE_INSTALL_MESSAGE = (
    "Please install Axolotl's fork of cut_cross_entropy with transformers support using "
    '`pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@a668583"`'
    '`pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@fa9a7fe"`'
)
@@ -10,7 +10,7 @@ class ExpertsInterface(GeneralInterface):
}
```

In our custom integration, we add support for **ScatterMoE**, which is even more efficient and faster than `grouped_mm`.
In our custom integration, we add support for **ScatterMoE** and **SonicMoE**, which are more efficient and faster than `grouped_mm`.

## Usage

@@ -21,23 +21,55 @@ plugins:
  - axolotl.integrations.kernels.KernelsPlugin

use_kernels: true

# Choose one (mutually exclusive):
use_scattermoe: true
# OR
use_sonicmoe: true
```

**Important:** Setting `experts_implementation` is incompatible with `use_scattermoe`.
**Important:** Setting `experts_implementation` is incompatible with the custom kernel options.

### SonicMoE installation

**Prerequisites:**
- NVIDIA Hopper (H100, H200) or Blackwell (B200, GB200) GPU
- CUDA 12.9+ (13.0+ for B300)
- PyTorch 2.7+ (2.9.1 recommended)
- For B300: Triton 3.6.0

```bash
pip install --ignore-requires-python --no-deps "sonic-moe @ git+https://github.com/Dao-AILab/sonic-moe.git@116e2df0a41874f77fa0ad269ce7df3f0cfcb956" && pip install nvidia-cutlass-dsl==4.4.0 quack-kernels==0.2.5
```

See the [SonicMoE installation guide](https://github.com/Dao-AILab/sonic-moe?tab=readme-ov-file#-installation) for the latest prerequisite details.

**Note:** Blackwell support is in upstream beta. On Blackwell GPUs, Axolotl automatically sets `USE_QUACK_GEMM=1` to enable the Blackwell kernels.

## How It Works

The `KernelsPlugin` runs before model loading and:

1. Registers the ScatterMoE kernel from the [`axolotl-ai-co/scattermoe`](https://huggingface.co/axolotl-ai-co/scattermoe) Hub repo.

### ScatterMoE
1. Registers the ScatterMoE kernel from the local `libs/scattermoe_lora` package (includes fused LoRA support via Triton kernels).
2. Patches the model's `SparseMoeBlock` forward method with the optimized ScatterMoE implementation.

This works for any MoE model in transformers that uses a `SparseMoeBlock` class (Mixtral, Qwen2-MoE, OLMoE, etc.).

### SonicMoE
1. Resolves the model's MoE block class(es) from `constants.py`.
2. Patches the forward method with SonicMoE's optimized kernels and registers a weight converter for the interleaved gate/up projection format.
3. Supports both softmax->topk and sigmoid->topk routing strategies.

Both paths use the shared `resolve_moe_block_classes` utility in `constants.py` for model-type-to-class resolution.

#### Supported Models

See `constants.py` for the full list of supported model types (Qwen2-MoE, Qwen3-MoE, OLMoE, Mixtral, DeepSeek-V3, GLM-MoE, MiniMax, etc.).

## Limitations

ScatterMoE uses a softmax -> topk routing, so results may be different for some model arch as baseline (GPT-OSS, GLM_MOE_DSA).
ScatterMoE uses softmax -> topk routing, so results may differ from the baseline for some model architectures (GPT-OSS, etc.). It is incompatible with `GLM_MOE_DSA` (GLM 5) and `GLM4_MOE_LITE` (GLM 4.7 Flash) at the moment.

SonicMoE supports both softmax->topk and sigmoid->topk routing, covering a wider range of architectures.

ScatterMoE does not work for GLM 4.7 Flash (glm4_moe_lite) at the moment.
@@ -6,7 +6,18 @@ LOG = get_logger(__name__)


class KernelsArgs(BaseModel):
    use_scattermoe: bool | None = True
    use_scattermoe: bool | None = None
    use_sonicmoe: bool | None = None

    @model_validator(mode="before")
    @classmethod
    def check_mutually_exclusive(cls, data):
        if data.get("use_scattermoe") and data.get("use_sonicmoe"):
            raise ValueError(
                "Cannot use both ScatterMoE and SonicMoE simultaneously. "
                "Please set only one of `use_scattermoe` or `use_sonicmoe` to true."
            )
        return data

    @model_validator(mode="before")
    @classmethod
@@ -36,11 +47,11 @@ class KernelsArgs(BaseModel):

    @model_validator(mode="before")
    @classmethod
    def disable_mlp_kernel_scattermoe(cls, data):
        if data.get("use_scattermoe") is True:
    def disable_mlp_kernel(cls, data):
        if data.get("use_scattermoe") is True or data.get("use_sonicmoe") is True:
            if data.get("lora_mlp_kernel") is True:
                LOG.warning(
                    "Disabling lora_mlp_kernel when using scattermoe due to compatibility issues."
                    "Disabling lora_mlp_kernel when using custom MoE kernels due to compatibility issues."
                )
                data["lora_mlp_kernel"] = False
            data["mlp_kernel"] = False
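A short sketch of the mutual-exclusion validator added above, assuming `KernelsArgs` is importable from the kernels integration args module (the module path is not shown in this hunk) and that no other fields are required.

```python
# Illustrative: the mode="before" validator rejects enabling both kernels at once.
KernelsArgs(use_scattermoe=True)                     # ok
KernelsArgs(use_sonicmoe=True)                       # ok
KernelsArgs(use_scattermoe=True, use_sonicmoe=True)  # fails validation with the error message above
```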
src/axolotl/integrations/kernels/autotune_callback.py (new file, 120 lines)
@@ -0,0 +1,120 @@
"""Trainer callback for reporting Triton autotune results from scattermoe-lora kernels."""
|
||||
|
||||
import logging
|
||||
|
||||
import torch
|
||||
from transformers import (
|
||||
TrainerCallback,
|
||||
TrainerControl,
|
||||
TrainerState,
|
||||
TrainingArguments,
|
||||
)
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Give up looking for autotune data after this many training steps.
|
||||
_MAX_POLL_STEP = 5
|
||||
|
||||
|
||||
def _get_gpu_info() -> dict:
|
||||
"""Return basic GPU identification for the current device."""
|
||||
if not torch.cuda.is_available():
|
||||
return {}
|
||||
try:
|
||||
idx = torch.cuda.current_device()
|
||||
props = torch.cuda.get_device_properties(idx)
|
||||
return {
|
||||
"gpu_name": props.name,
|
||||
"gpu_compute_capability": f"{props.major}.{props.minor}",
|
||||
"gpu_memory_bytes": props.total_memory,
|
||||
}
|
||||
except Exception: # pylint: disable=broad-exception-caught
|
||||
return {}
|
||||
|
||||
|
||||
def _get_smem_capacity() -> dict:
|
||||
"""Return shared memory capacity from the runtime lora_ops module."""
|
||||
try:
|
||||
from axolotl.integrations.kernels.autotune_collector import (
|
||||
_find_lora_ops_module,
|
||||
)
|
||||
|
||||
lora_ops = _find_lora_ops_module()
|
||||
if lora_ops is None:
|
||||
return {}
|
||||
fn = getattr(lora_ops, "_get_smem_capacity", None)
|
||||
if fn is None:
|
||||
return {}
|
||||
return {"smem_capacity_bytes": fn()}
|
||||
except Exception: # pylint: disable=broad-exception-caught
|
||||
return {}
|
||||
|
||||
|
||||
class AutotuneReportCallback(TrainerCallback):
|
||||
"""Reports Triton kernel autotune selections via telemetry.
|
||||
|
||||
Fires **once** after the first training step completes (step 1), at
|
||||
which point the forward and backward passes have both run and the
|
||||
autotuned kernels have populated their caches. If for some reason
|
||||
the caches are still empty (e.g. the kernel was never invoked), the
|
||||
callback retries on subsequent steps up to ``_MAX_POLL_STEP`` and
|
||||
then stops polling.
|
||||
|
||||
After reporting (or giving up) every subsequent ``on_step_end``
|
||||
call short-circuits on the ``_reported`` flag — zero hot-path cost.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._reported = False
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def on_step_end(
|
||||
self,
|
||||
args: TrainingArguments,
|
||||
state: TrainerState,
|
||||
control: TrainerControl,
|
||||
**kwargs,
|
||||
):
|
||||
if self._reported:
|
||||
return
|
||||
|
||||
# Lazy import — Triton / scattermoe kernels may not be installed.
|
||||
from axolotl.integrations.kernels.autotune_collector import (
|
||||
collect_autotune_configs,
|
||||
)
|
||||
|
||||
configs = collect_autotune_configs()
|
||||
|
||||
if not configs:
|
||||
if state.global_step >= _MAX_POLL_STEP:
|
||||
LOG.debug(
|
||||
"No autotune data found after %d steps; giving up.",
|
||||
state.global_step,
|
||||
)
|
||||
self._reported = True
|
||||
return
|
||||
|
||||
self._reported = True
|
||||
|
||||
from axolotl.telemetry.manager import TelemetryManager
|
||||
|
||||
telemetry_manager = TelemetryManager.get_instance()
|
||||
if not telemetry_manager.enabled:
|
||||
return
|
||||
|
||||
properties = {
|
||||
"kernel_count": len(configs),
|
||||
"kernels": configs,
|
||||
}
|
||||
properties.update(_get_gpu_info())
|
||||
properties.update(_get_smem_capacity())
|
||||
|
||||
telemetry_manager.send_event(
|
||||
event_type="scattermoe-autotune",
|
||||
properties=properties,
|
||||
)
|
||||
|
||||
LOG.info(
|
||||
"Reported %d scattermoe kernel autotune config(s) to telemetry.",
|
||||
len(configs),
|
||||
)
|
||||
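Since `AutotuneReportCallback` is a standard `transformers` `TrainerCallback`, here is a hedged sketch of wiring it up manually; the plugin presumably registers it automatically, and that registration is not shown in this diff.

```python
from transformers import Trainer

from axolotl.integrations.kernels.autotune_callback import AutotuneReportCallback


def add_autotune_reporting(trainer: Trainer) -> None:
    # Attach the callback so autotune configs are reported shortly after step 1.
    trainer.add_callback(AutotuneReportCallback())
```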
src/axolotl/integrations/kernels/autotune_collector.py (new file, 114 lines)
@@ -0,0 +1,114 @@
"""Collect Triton autotune results from scattermoe-lora kernels.
|
||||
|
||||
This module reads the ``.cache`` attribute from Triton ``@triton.autotune``
|
||||
decorated kernel objects and returns structured dicts describing the selected
|
||||
configurations. It has **no** telemetry dependency — callers decide what to
|
||||
do with the data.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import sys
|
||||
from types import ModuleType
|
||||
from typing import Any
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# (human-readable name, attribute on the lora_ops module)
|
||||
_KERNEL_REGISTRY: list[tuple[str, str]] = [
|
||||
("scatter2scatter_lora_fwd", "_scatter2scatter_lora"),
|
||||
("scatter2scatter_lora_dX", "_scatter2scatter_lora_dX"),
|
||||
("group_bwd_lora", "_group_bwd_lora"),
|
||||
("group_bwd_lora_fused", "_group_bwd_lora_fused"),
|
||||
]
|
||||
|
||||
# The autotune key declared on every kernel: key=["M", "N", "K"]
|
||||
_KEY_NAMES: list[str] = ["M", "N", "K"]
|
||||
|
||||
|
||||
def _parse_key_tuple(key_tuple: tuple) -> dict[str, Any]:
|
||||
"""Turn the autotune cache key tuple into a labelled dict.
|
||||
|
||||
Triton builds the cache key from the values of the declared ``key``
|
||||
args (``M``, ``N``, ``K``) followed by dtype signature elements.
|
||||
We label the first three and store the rest under ``_extra``.
|
||||
"""
|
||||
result: dict[str, Any] = {}
|
||||
for i, name in enumerate(_KEY_NAMES):
|
||||
if i < len(key_tuple):
|
||||
result[name] = key_tuple[i]
|
||||
if len(key_tuple) > len(_KEY_NAMES):
|
||||
result["_extra"] = [str(v) for v in key_tuple[len(_KEY_NAMES) :]]
|
||||
return result
|
||||
|
||||
|
||||
def _find_lora_ops_module() -> ModuleType | None:
|
||||
"""Locate the *runtime* ``lora_ops`` module in ``sys.modules``.
|
||||
|
||||
The HF ``kernels`` package loads ``scattermoe_lora`` via
|
||||
``import_from_path`` which registers it in ``sys.modules`` under a
|
||||
hash-suffixed name (e.g. ``scattermoe_lora_a1b2c3d4``). A normal
|
||||
import (``from axolotl.integrations.kernels...``) would create a
|
||||
*separate* module instance whose kernel objects have empty
|
||||
``.cache`` dicts because autotuning ran on the runtime copy.
|
||||
|
||||
We search ``sys.modules`` for any module whose name contains
|
||||
``lora_ops`` and that has the ``_scatter2scatter_lora`` kernel
|
||||
attribute — that is the runtime copy with populated caches.
|
||||
"""
|
||||
for name, module in list(sys.modules.items()):
|
||||
if (
|
||||
module is not None
|
||||
and "lora_ops" in name
|
||||
and hasattr(module, "_scatter2scatter_lora")
|
||||
):
|
||||
return module
|
||||
return None
|
||||
|
||||
|
||||
def collect_autotune_configs() -> list[dict[str, Any]]:
|
||||
"""Read autotune caches from the four scattermoe-lora kernels.
|
||||
|
||||
Returns a (possibly empty) list of dicts, each containing:
|
||||
|
||||
* ``kernel`` – human-readable kernel name
|
||||
* ``key`` – dict with the ``M``/``N``/``K`` problem dimensions
|
||||
* ``config`` – dict with the selected tile sizes, ``num_warps``,
|
||||
and ``num_stages``
|
||||
|
||||
Returns ``[]`` if the kernel module cannot be found or if no
|
||||
autotune cache entries exist yet.
|
||||
"""
|
||||
lora_ops = _find_lora_ops_module()
|
||||
if lora_ops is None:
|
||||
LOG.debug(
|
||||
"lora_ops module not found in sys.modules; skipping autotune collection"
|
||||
)
|
||||
return []
|
||||
|
||||
results: list[dict[str, Any]] = []
|
||||
|
||||
for friendly_name, attr_name in _KERNEL_REGISTRY:
|
||||
kernel_fn = getattr(lora_ops, attr_name, None)
|
||||
if kernel_fn is None:
|
||||
continue
|
||||
|
||||
cache = getattr(kernel_fn, "cache", None)
|
||||
if not cache:
|
||||
continue
|
||||
|
||||
for key_tuple, config in cache.items():
|
||||
config_dict = dict(config.kwargs)
|
||||
config_dict["num_warps"] = config.num_warps
|
||||
config_dict["num_stages"] = config.num_stages
|
||||
if getattr(config, "num_ctas", None) is not None:
|
||||
config_dict["num_ctas"] = config.num_ctas
|
||||
|
||||
results.append(
|
||||
{
|
||||
"kernel": friendly_name,
|
||||
"key": _parse_key_tuple(key_tuple),
|
||||
"config": config_dict,
|
||||
}
|
||||
)
|
||||
|
||||
return results
|
||||
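A sketch of what a returned entry might look like once the runtime `lora_ops` module has populated its autotune caches; the dict keys follow the docstring above, while the numbers are invented.

```python
from axolotl.integrations.kernels.autotune_collector import collect_autotune_configs

configs = collect_autotune_configs()
# Example shape of one entry (values are illustrative):
# {
#     "kernel": "scatter2scatter_lora_fwd",
#     "key": {"M": 4096, "N": 14336, "K": 4096},
#     "config": {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "num_warps": 8, "num_stages": 4},
# }
```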
src/axolotl/integrations/kernels/constants.py (new file, 70 lines)
@@ -0,0 +1,70 @@
"""
|
||||
Supported MoE block mappings for kernel integrations.
|
||||
|
||||
Maps model_type to the SparseMoeBlock class name(s) in transformers.
|
||||
Used by both ScatterMoE and SonicMoE kernel paths.
|
||||
|
||||
Values can be a single class name (str) or a list of class names for models
|
||||
with multiple MoE block types (e.g. qwen3_omni_moe has Thinker + Talker).
|
||||
"""
|
||||
|
||||
import importlib
|
||||
|
||||
SPARSE_MOE_BLOCK = {
|
||||
# softmax -> topk routing
|
||||
"qwen2_moe": "Qwen2MoeSparseMoeBlock",
|
||||
"qwen3_moe": "Qwen3MoeSparseMoeBlock",
|
||||
"qwen3_5_moe": "Qwen3_5MoeSparseMoeBlock",
|
||||
"qwen3_next": "Qwen3NextSparseMoeBlock",
|
||||
"qwen3_vl_moe": "Qwen3VLMoeTextSparseMoeBlock",
|
||||
# qwen3_omni_moe: Thinker (standard) + Talker (shared experts + shared_expert_gate)
|
||||
"qwen3_omni_moe": [
|
||||
"Qwen3OmniMoeThinkerTextSparseMoeBlock",
|
||||
"Qwen3OmniMoeTalkerTextSparseMoeBlock",
|
||||
],
|
||||
"olmoe": "OlmoeSparseMoeBlock",
|
||||
"mixtral": "MixtralSparseMoeBlock",
|
||||
"minimax": "MiniMaxSparseMoeBlock",
|
||||
# softmax -> topk routing (with group-based expert selection)
|
||||
"mistral4": "Mistral4MoE",
|
||||
# sigmoid -> topk routing (with group-based expert selection)
|
||||
"glm_moe_dsa": "GlmMoeDsaMoE",
|
||||
"deepseek_v3": "DeepseekV3MoE",
|
||||
"glm4_moe": "Glm4MoeMoE",
|
||||
"glm4_moe_lite": "Glm4MoeLiteMoE",
|
||||
"glm4v_moe": "Glm4vMoeTextMoE",
|
||||
# sigmoid -> topk routing (no group selection)
|
||||
"minimax_m2": "MiniMaxM2SparseMoeBlock",
|
||||
# Models below need custom routing (not yet implemented):
|
||||
# "ernie4_5_moe": "Ernie4_5_MoeSparseMoeBlock", # softmax->topk, e_score_correction_bias between softmax and topk
|
||||
# "deepseek_v2": "DeepseekV2Moe", # softmax->topk, group_limited_greedy, different attr names (num_group)
|
||||
# "hunyuan_v1_moe": "HunYuanMoEV1Moe", # softmax->topk, gate.wg (not gate.weight), scatter routing
|
||||
# "gpt_oss": "GptOssMLP", # topk->softmax, transposed layout [E,H,2*I], custom GLU, expert biases
|
||||
}
|
||||
|
||||
|
||||
def resolve_moe_block_classes(model_type: str):
|
||||
"""Resolve all MoE block classes from transformers for the given model type.
|
||||
|
||||
Returns a list of classes (one for most models, multiple for models with
|
||||
distinct MoE block types like qwen3_omni_moe).
|
||||
"""
|
||||
entry = SPARSE_MOE_BLOCK.get(model_type)
|
||||
if entry is None:
|
||||
raise ValueError(
|
||||
f"Unsupported MoE model type '{model_type}'. "
|
||||
f"Supported types: {list(SPARSE_MOE_BLOCK.keys())}"
|
||||
)
|
||||
|
||||
cls_names = entry if isinstance(entry, list) else [entry]
|
||||
module_path = f"transformers.models.{model_type}.modeling_{model_type}"
|
||||
module = importlib.import_module(module_path)
|
||||
|
||||
classes = []
|
||||
for cls_name in cls_names:
|
||||
moe_cls = getattr(module, cls_name, None)
|
||||
if moe_cls is None:
|
||||
raise ValueError(f"Could not find class '{cls_name}' in '{module_path}'")
|
||||
classes.append(moe_cls)
|
||||
|
||||
return classes
|
||||
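A minimal usage sketch of `resolve_moe_block_classes`; the `model_type` strings come straight from `SPARSE_MOE_BLOCK`, and the example assumes the installed transformers version actually ships the corresponding modeling classes.

```python
from axolotl.integrations.kernels.constants import resolve_moe_block_classes

# Single-block model: a one-element list
(qwen3_block_cls,) = resolve_moe_block_classes("qwen3_moe")

# Multi-block model: Thinker and Talker MoE blocks
omni_blocks = resolve_moe_block_classes("qwen3_omni_moe")
assert len(omni_blocks) == 2

# Unknown model types raise ValueError listing the supported keys
try:
    resolve_moe_block_classes("not_a_moe")
except ValueError as err:
    print(err)
```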
@@ -195,6 +195,30 @@ def _estimate_smem_usage(
_SMEM_SLACK = 10_000


def _estimate_register_pressure(
    num_warps: int,
    *tile_sizes: tuple[int, int],
) -> float:
    """Estimate per-thread register count from live tile sizes.

    Each tile of shape (rows, cols) requires rows*cols elements distributed
    across 32 threads per warp, but each thread in the warp holds a fragment.
    For Triton GEMM-style kernels, the register footprint per thread is
    approximately sum(rows * cols) / 32 for each live tile, plus ~40 for
    scalar overhead (loop counters, pointers, masks, etc.).

    Returns estimated registers per thread.
    """
    # Each thread in a warp holds 1/32 of the tile elements
    tile_regs = sum(r * c for r, c in tile_sizes) / 32
    scalar_overhead = 40
    return tile_regs + scalar_overhead


# Maximum registers per thread on NVIDIA GPUs
_MAX_REGS_PER_THREAD = 255


# =============================================================================
# Forward Kernel: scatter2scatter with fused LoRA
# =============================================================================
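To make the estimator above concrete, a small worked example under the stated approximation (sum of live-tile elements divided by 32, plus ~40 scalar registers); the tile shapes mirror the live-tile lists used by the pruning helpers below and are otherwise illustrative.

```python
def estimate_regs(*tiles):
    # Same formula as _estimate_register_pressure above (num_warps left out here)
    return sum(r * c for r, c in tiles) / 32 + 40

# Small forward config (BLOCK_M=32, BLOCK_N=32, BLOCK_K=32, BLOCK_R=16):
small = estimate_regs((32, 32), (32, 16), (32, 32), (32, 32), (16, 32), (32, 16))
# (1024 + 512 + 1024 + 1024 + 512 + 512) / 32 + 40 = 184  -> kept (<= 255)

# Large forward config (BLOCK_M=128, BLOCK_N=256, BLOCK_K=128, BLOCK_R=16):
large = estimate_regs((128, 256), (128, 16), (128, 128), (128, 256), (16, 128), (256, 16))
# (32768 + 2048 + 16384 + 32768 + 2048 + 4096) / 32 + 40 = 2856 -> pruned (> 255)
```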
@@ -313,12 +337,11 @@ def _compute_expert_block_lora(
        B_blk_ptrs, mask=N_mask[:, None] & R_mask[None, :], other=0.0
    )  # [BLOCK_N, BLOCK_R]

    # Cast xa_acc and b to same dtype for tl.dot (required when input is bf16/fp16)
    # Both operands must match; cast to float32 (accumulator type) for precision.
    b_f32 = b.to(tl.float32)
    # tl.dot requires non-float32 inputs (tensor cores); cast back to input dtype
    b_inp = b.to(INPUT_DTYPE)

    # (X @ A^T) @ B^T: [M, R] @ [R, N] -> [M, N]
    lora_out = tl.dot(xa_acc, tl.trans(b_f32), allow_tf32=allow_tf32)
    lora_out = tl.dot(xa_acc.to(INPUT_DTYPE), tl.trans(b_inp), allow_tf32=allow_tf32)

    acc += scaling * lora_out
    return acc
@@ -327,20 +350,21 @@ def _compute_expert_block_lora(
def _scatter2scatter_lora_configs():
    """Generate forward kernel autotune configs.

    Search space includes smaller tile sizes and fewer pipeline stages to
    support GPUs with limited shared memory (e.g. ~99KB on some GPUs).
    Search space includes BLOCK_M to allow trading token-tile size for
    larger BLOCK_K/BLOCK_N tiles. On GPUs with ~99KB SMEM, BLOCK_M=128
    forces BLOCK_K=32 and BLOCK_N=32; BLOCK_M=64 allows BLOCK_K=128
    (4× fewer inner-loop iterations).

    Search space:
        BLOCK_M: {32, 64, 128}
        BLOCK_N: {32, 64, 128, 256}
        BLOCK_K: {32, 64, 128}
        num_warps: {4, 8}
        num_stages: {3, 4, 5}

    BLOCK_M is fixed at 128 (module-level constant, not autotuned in the
    scatter2scatter pattern).
    """
    configs = []
    for block_n, block_k, warps, stages in product(
    for block_m, block_n, block_k, warps, stages in product(
        [32, 64, 128],  # BLOCK_M
        [32, 64, 128, 256],  # BLOCK_N
        [32, 64, 128],  # BLOCK_K
        [4, 8],  # num_warps
@@ -348,7 +372,7 @@ def _scatter2scatter_lora_configs():
    ):
        configs.append(
            triton.Config(
                {"BLOCK_N": block_n, "BLOCK_K": block_k},
                {"BLOCK_M": block_m, "BLOCK_N": block_n, "BLOCK_K": block_k},
                num_stages=stages,
                num_warps=warps,
            )
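As a quick sanity check on the size of the forward search space, assuming the generator enumerates exactly the values listed in the docstring above:

```python
from itertools import product

n_configs = len(list(product(
    [32, 64, 128],        # BLOCK_M
    [32, 64, 128, 256],   # BLOCK_N
    [32, 64, 128],        # BLOCK_K
    [4, 8],               # num_warps
    [3, 4, 5],            # num_stages
)))
# 3 * 4 * 3 * 2 * 3 = 216 candidate configs before pruning
assert n_configs == 216
```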
@@ -357,7 +381,7 @@ def _scatter2scatter_lora_configs():


def _prune_fwd_configs(configs, named_args, **kwargs):
    """Prune forward configs based on SMEM capacity.
    """Prune forward configs based on SMEM capacity and register pressure.

    The forward kernel inner loop loads three tiles per pipeline stage:
    X[BLOCK_M, BLOCK_K], W[BLOCK_K, BLOCK_N], A[BLOCK_R, BLOCK_K].
@@ -373,23 +397,49 @@ def _prune_fwd_configs(configs, named_args, **kwargs):

    scored = []
    for config in configs:
        block_m = config.kwargs["BLOCK_M"]
        block_n = config.kwargs["BLOCK_N"]
        block_k = config.kwargs["BLOCK_K"]
        # Base: stages * BLOCK_K * (BLOCK_M + BLOCK_N) + BLOCK_M * BLOCK_N
        smem_base = _estimate_smem_usage(config.num_stages, BLOCK_M, block_n, block_k)
        smem_base = _estimate_smem_usage(config.num_stages, block_m, block_n, block_k)
        # A tile [BLOCK_R, BLOCK_K] loaded per stage in the inner loop
        smem_lora_loop = config.num_stages * block_r * block_k * 2
        # B tile [BLOCK_N, BLOCK_R] loaded once in epilogue
        smem_lora_epilogue = block_n * block_r * 2
        smem = smem_base + smem_lora_loop + smem_lora_epilogue

        # Register pressure: live tiles are acc[M,N], xa_acc[M,R],
        # x[M,K], w[K,N], a[R,K], plus epilogue b[N,R]
        est_regs = _estimate_register_pressure(
            config.num_warps,
            (block_m, block_n),  # acc
            (block_m, block_r),  # xa_acc
            (block_m, block_k),  # x tile
            (block_k, block_n),  # w tile
            (block_r, block_k),  # a tile
            (block_n, block_r),  # b tile (epilogue)
        )
        if est_regs > _MAX_REGS_PER_THREAD:
            continue

        scored.append((smem, config))

    pruned = [c for s, c in scored if s <= smem_cap - _SMEM_SLACK]
    if pruned:
        return pruned
    # All configs exceed SMEM — return the one with smallest estimated usage
    scored.sort(key=lambda x: x[0])
    return [scored[0][1]]
    if scored:
        # All surviving configs exceed SMEM — return the one with smallest usage
        scored.sort(key=lambda x: x[0])
        return [scored[0][1]]
    # All configs pruned by register pressure — fall back to smallest tiles
    return [
        min(
            configs,
            key=lambda c: (
                c.kwargs["BLOCK_M"] * c.kwargs["BLOCK_N"] * c.kwargs["BLOCK_K"]
            ),
        )
    ]
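A back-of-the-envelope SMEM check for one candidate, assuming 2-byte elements and the base formula quoted in the comment above; the numbers are illustrative, not measured.

```python
# BLOCK_M=64, BLOCK_N=64, BLOCK_K=128, num_stages=4, BLOCK_R=16 (illustrative)
stages, m, n, k, r = 4, 64, 64, 128, 16

smem_base = (stages * k * (m + n) + m * n) * 2   # 139,264 bytes
smem_lora_loop = stages * r * k * 2              # 16,384 bytes
smem_lora_epilogue = n * r * 2                   # 2,048 bytes
smem = smem_base + smem_lora_loop + smem_lora_epilogue   # 157,696 bytes (~154 KiB)

# With ~99 KB of usable SMEM this config is dropped (157,696 > 99_000 - 10_000);
# on larger-SMEM parts (e.g. A100/H100 class) it would survive the prune.
```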
@triton.autotune(
@@ -531,6 +581,89 @@ def _scatter2scatter_lora(
|
||||
tl.store(Y_blk_ptrs, acc, mask=M_boundary_mask[:, None] & N_mask[None, :])
|
||||
|
||||
|
||||
def _scatter2scatter_lora_split(
|
||||
X: torch.Tensor,
|
||||
W: torch.Tensor,
|
||||
sorted_expert_idxs: torch.Tensor,
|
||||
sorted_scattered_idxs: torch.Tensor,
|
||||
k: int,
|
||||
lora_A: torch.Tensor,
|
||||
lora_B: torch.Tensor,
|
||||
scaling: float,
|
||||
b: Optional[torch.Tensor] = None,
|
||||
x_grouped: bool = False,
|
||||
y_grouped: bool = False,
|
||||
out: Optional[torch.Tensor] = None,
|
||||
) -> torch.Tensor:
|
||||
"""Split base+LoRA forward: 3 scatter2scatter calls, no fused LoRA kernel.
|
||||
|
||||
Faster for models with few large experts (e.g. Mixtral E=8, I=14336)
|
||||
because the base kernel runs at full speed without LoRA SMEM overhead,
|
||||
and the LoRA matmuls (R=16) are tiny separate passes.
|
||||
|
||||
Y = scatter(X, W) + scaling * scatter(scatter(X, A^T), B^T)
|
||||
"""
|
||||
from axolotl.integrations.kernels.libs.scattermoe_lora.kernels.ops import (
|
||||
scatter2scatter,
|
||||
)
|
||||
|
||||
E = W.size(0)
|
||||
R = lora_A.size(0) // E
|
||||
K = W.size(1)
|
||||
N = W.size(2)
|
||||
|
||||
# 1. Base: Y_base = X @ W (uses base kernel with optimal tile sizes)
|
||||
output = scatter2scatter(
|
||||
X=X,
|
||||
W=W,
|
||||
b=b,
|
||||
sorted_expert_idxs=sorted_expert_idxs,
|
||||
sorted_scattered_idxs=sorted_scattered_idxs,
|
||||
k=k,
|
||||
x_grouped=x_grouped,
|
||||
y_grouped=y_grouped,
|
||||
out=out,
|
||||
)
|
||||
|
||||
# 2. XA = X @ A^T (tiny: output is [M*k, R])
|
||||
# Reshape A: [R*E, K] → [E, K, R] (expert weights for scatter2scatter)
|
||||
W_A = lora_A.reshape(E, R, K).permute(0, 2, 1).contiguous()
|
||||
XA = scatter2scatter(
|
||||
X=X,
|
||||
W=W_A,
|
||||
sorted_expert_idxs=sorted_expert_idxs,
|
||||
sorted_scattered_idxs=sorted_scattered_idxs,
|
||||
k=k,
|
||||
x_grouped=x_grouped,
|
||||
y_grouped=True,
|
||||
)
|
||||
|
||||
# 3. Y_lora = XA @ B^T (R is tiny, so this is very fast)
|
||||
# Reshape B: [N, R*E] → [E, R, N]
|
||||
W_B = lora_B.T.reshape(E, R, N).contiguous()
|
||||
Y_lora = scatter2scatter(
|
||||
X=XA,
|
||||
W=W_B,
|
||||
sorted_expert_idxs=sorted_expert_idxs,
|
||||
sorted_scattered_idxs=sorted_scattered_idxs,
|
||||
k=1,
|
||||
x_grouped=True,
|
||||
y_grouped=y_grouped,
|
||||
)
|
||||
|
||||
# 4. Y = Y_base + scaling * Y_lora
|
||||
output.add_(Y_lora, alpha=scaling)
|
||||
return output
|
||||
|
||||
|
||||
# Threshold for switching from fused to split LoRA forward.
# Split wins when per-expert matmul is large (bandwidth-bound LoRA tile
# loads dominate in the fused kernel's inner loop).
# Empirically: split wins for E<=32 with K*N > 20M (e.g. Mixtral, Phi-MoE).
_SPLIT_LORA_FWD_THRESHOLD = 20_000_000  # per-expert K*N
_SPLIT_LORA_FWD_MAX_EXPERTS = 32
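To make the fused-versus-split rule concrete, a hedged example using the commonly cited Mixtral-8x7B expert shapes (E=8, hidden 4096, intermediate 14336); the shapes are used purely for illustration.

```python
E, hidden, intermediate = 8, 4096, 14336   # Mixtral-style, illustrative

# down_proj expert weight: K = intermediate, N = hidden
K, N = intermediate, hidden
use_split = E <= _SPLIT_LORA_FWD_MAX_EXPERTS and K * N >= _SPLIT_LORA_FWD_THRESHOLD
# 8 <= 32 and 14336 * 4096 = 58,720,256 >= 20,000,000 -> split dispatch

# A many-small-expert model (say E=64, K=2048, N=768) fails the E <= 32 test,
# so it stays on the fused kernel regardless of K * N.
```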
def scatter2scatter_lora(
|
||||
X: torch.Tensor,
|
||||
W: torch.Tensor,
|
||||
@@ -546,7 +679,13 @@ def scatter2scatter_lora(
|
||||
out: Optional[torch.Tensor] = None,
|
||||
) -> torch.Tensor:
|
||||
"""
|
||||
Fused scatter2scatter with LoRA: Y[i] = X[i] @ W[e] + scaling * (X[i] @ A[e]^T) @ B[e]^T + b[e]
|
||||
Scatter2scatter with LoRA: Y[i] = X[i] @ W[e] + scaling * (X[i] @ A[e]^T) @ B[e]^T + b[e]
|
||||
|
||||
Automatically selects between:
|
||||
- Fused kernel: single Triton kernel with LoRA in the inner loop.
|
||||
Best for many small experts (E>=64, small K*N).
|
||||
- Split dispatch: 3 separate scatter2scatter calls (base + XA + lora).
|
||||
Best for few large experts (E<=32, large K*N like Mixtral).
|
||||
|
||||
Args:
|
||||
X: Input [M, K] or [M*k, K] if x_grouped
|
||||
@@ -565,12 +704,30 @@ def scatter2scatter_lora(
|
||||
Returns:
|
||||
Y: Output [M*k, N]
|
||||
"""
|
||||
assert sorted_scattered_idxs.size(0) == sorted_expert_idxs.size(0)
|
||||
assert sorted_scattered_idxs.size(0) == X.size(0) * k
|
||||
|
||||
E = W.size(0)
|
||||
K = W.size(1)
|
||||
N = W.size(2)
|
||||
|
||||
# Dispatch: split for few large experts, fused for many small experts
|
||||
if E <= _SPLIT_LORA_FWD_MAX_EXPERTS and K * N >= _SPLIT_LORA_FWD_THRESHOLD:
|
||||
return _scatter2scatter_lora_split(
|
||||
X,
|
||||
W,
|
||||
sorted_expert_idxs,
|
||||
sorted_scattered_idxs,
|
||||
k,
|
||||
lora_A,
|
||||
lora_B,
|
||||
scaling,
|
||||
b,
|
||||
x_grouped,
|
||||
y_grouped,
|
||||
out,
|
||||
)
|
||||
|
||||
assert sorted_scattered_idxs.size(0) == sorted_expert_idxs.size(0)
|
||||
assert sorted_scattered_idxs.size(0) == X.size(0) * k
|
||||
|
||||
R = lora_A.size(0) // E
|
||||
|
||||
# Pad R to power of 2 for Triton tile size
|
||||
@@ -610,11 +767,9 @@ def scatter2scatter_lora(
|
||||
b_ptr,
|
||||
stride_be,
|
||||
stride_bn,
|
||||
# A: [r*E, K] -> stride(0) is r*E dim stride, stride(1) is K dim stride
|
||||
lora_A,
|
||||
lora_A.stride(0),
|
||||
lora_A.stride(1),
|
||||
# B: [N, r*E] -> stride(0) is N dim stride, stride(1) is r*E dim stride
|
||||
lora_B,
|
||||
lora_B.stride(0),
|
||||
lora_B.stride(1),
|
||||
@@ -625,9 +780,8 @@ def scatter2scatter_lora(
|
||||
K=K,
|
||||
N=N,
|
||||
E=E,
|
||||
ACTUAL_R=R, # True LoRA rank for weight indexing
|
||||
BLOCK_M=BLOCK_M,
|
||||
BLOCK_R=BLOCK_R, # Padded tile size >= max(R, 16)
|
||||
ACTUAL_R=R,
|
||||
BLOCK_R=BLOCK_R,
|
||||
ACC_TYPE=tl.float32,
|
||||
scaling=scaling,
|
||||
allow_tf32=ALLOW_TF32,
|
||||
@@ -761,13 +915,13 @@ def _compute_expert_block_lora_dX(
|
||||
+ (A_expert_offset + R_block)[:, None] * stride_ar
|
||||
+ K_block[None, :] * stride_ak
|
||||
)
|
||||
a_e = tl.load(A_blk_ptrs, mask=R_mask[:, None] & K_mask[None, :], other=0.0)
|
||||
|
||||
# Cast to float32 for precision
|
||||
a_f32 = a_e.to(tl.float32)
|
||||
a_e = tl.load(A_blk_ptrs, mask=R_mask[:, None] & K_mask[None, :], other=0.0).to(
|
||||
INPUT_DTYPE
|
||||
)
|
||||
|
||||
# (DY @ B) @ A: [M, R] @ [R, K] -> [M, K]
|
||||
lora_dx = tl.dot(dy_b_acc, a_f32, allow_tf32=allow_tf32)
|
||||
# tl.dot requires non-float32 inputs (tensor cores); cast accumulator back to input dtype
|
||||
lora_dx = tl.dot(dy_b_acc.to(INPUT_DTYPE), a_e, allow_tf32=allow_tf32)
|
||||
|
||||
acc += scaling * lora_dx
|
||||
return acc
|
||||
@@ -779,17 +933,18 @@ def _scatter2scatter_lora_dX_configs():
|
||||
The inner loop is over N (not K as in forward). The output dimension is K.
|
||||
So BLOCK_K tiles the output and BLOCK_N tiles the reduction.
|
||||
|
||||
Search space includes smaller tile sizes and fewer pipeline stages to
|
||||
support GPUs with limited shared memory (e.g. ~99KB on some GPUs).
|
||||
BLOCK_M is now autotunable (was fixed at 128).
|
||||
|
||||
Search space:
|
||||
BLOCK_M: {32, 64, 128} (token tile)
|
||||
BLOCK_K: {32, 64, 128, 256} (output tile)
|
||||
BLOCK_N: {32, 64, 128, 256} (reduction tile)
|
||||
num_warps: {4, 8}
|
||||
num_stages: {3, 4, 5}
|
||||
"""
|
||||
configs = []
|
||||
for block_k, block_n, warps, stages in product(
|
||||
for block_m, block_k, block_n, warps, stages in product(
|
||||
[32, 64, 128], # BLOCK_M
|
||||
[32, 64, 128, 256], # BLOCK_K (output dimension)
|
||||
[32, 64, 128, 256], # BLOCK_N (reduction dimension)
|
||||
[4, 8], # num_warps
|
||||
@@ -797,7 +952,7 @@ def _scatter2scatter_lora_dX_configs():
|
||||
):
|
||||
configs.append(
|
||||
triton.Config(
|
||||
{"BLOCK_K": block_k, "BLOCK_N": block_n},
|
||||
{"BLOCK_M": block_m, "BLOCK_K": block_k, "BLOCK_N": block_n},
|
||||
num_stages=stages,
|
||||
num_warps=warps,
|
||||
)
|
||||
@@ -806,7 +961,7 @@ def _scatter2scatter_lora_dX_configs():
|
||||
|
||||
|
||||
def _prune_dX_configs(configs, named_args, **kwargs):
|
||||
"""Prune backward dX configs based on SMEM capacity.
|
||||
"""Prune backward dX configs based on SMEM capacity and register pressure.
|
||||
|
||||
The dX kernel inner loop loads three tiles per pipeline stage:
|
||||
DY[BLOCK_M, BLOCK_N], W^T[BLOCK_N, BLOCK_K], B[BLOCK_N, BLOCK_R].
|
||||
@@ -822,23 +977,49 @@ def _prune_dX_configs(configs, named_args, **kwargs):
|
||||
|
||||
scored = []
|
||||
for config in configs:
|
||||
block_m = config.kwargs["BLOCK_M"]
|
||||
block_k = config.kwargs["BLOCK_K"]
|
||||
block_n = config.kwargs["BLOCK_N"]
|
||||
# Base: stages * BLOCK_N * (BLOCK_M + BLOCK_K) + BLOCK_M * BLOCK_K
|
||||
smem_base = _estimate_smem_usage(config.num_stages, BLOCK_M, block_k, block_n)
|
||||
smem_base = _estimate_smem_usage(config.num_stages, block_m, block_k, block_n)
|
||||
# B tile [BLOCK_N, BLOCK_R] loaded per stage in the inner loop
|
||||
smem_lora_loop = config.num_stages * block_n * block_r * 2
|
||||
# A tile [BLOCK_R, BLOCK_K] loaded once in epilogue
|
||||
smem_lora_epilogue = block_r * block_k * 2
|
||||
smem = smem_base + smem_lora_loop + smem_lora_epilogue
|
||||
|
||||
# Register pressure: live tiles are acc[M,K], dy_b_acc[M,R],
|
||||
# dy[M,N], wt[N,K], b[N,R], plus epilogue a[R,K]
|
||||
est_regs = _estimate_register_pressure(
|
||||
config.num_warps,
|
||||
(block_m, block_k), # acc
|
||||
(block_m, block_r), # dy_b_acc
|
||||
(block_m, block_n), # dy tile
|
||||
(block_n, block_k), # wt tile
|
||||
(block_n, block_r), # b tile
|
||||
(block_r, block_k), # a tile (epilogue)
|
||||
)
|
||||
if est_regs > _MAX_REGS_PER_THREAD:
|
||||
continue
|
||||
|
||||
scored.append((smem, config))
|
||||
|
||||
pruned = [c for s, c in scored if s <= smem_cap - _SMEM_SLACK]
|
||||
if pruned:
|
||||
return pruned
|
||||
# All configs exceed SMEM — return the one with smallest estimated usage
|
||||
scored.sort(key=lambda x: x[0])
|
||||
return [scored[0][1]]
|
||||
if scored:
|
||||
# All surviving configs exceed SMEM — return the one with smallest usage
|
||||
scored.sort(key=lambda x: x[0])
|
||||
return [scored[0][1]]
|
||||
# All configs pruned by register pressure — fall back to smallest tiles
|
||||
return [
|
||||
min(
|
||||
configs,
|
||||
key=lambda c: (
|
||||
c.kwargs["BLOCK_M"] * c.kwargs["BLOCK_K"] * c.kwargs["BLOCK_N"]
|
||||
),
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
@triton.autotune(
|
||||
@@ -1067,7 +1248,7 @@ def scatter2scatter_lora_dX(
|
||||
N=N,
|
||||
E=E,
|
||||
ACTUAL_R=R,
|
||||
BLOCK_M=BLOCK_M,
|
||||
# BLOCK_M is autotuned (injected by triton.autotune from Config kwargs)
|
||||
BLOCK_R=BLOCK_R,
|
||||
ACC_TYPE=tl.float32,
|
||||
scaling=scaling,
|
||||
@@ -1119,7 +1300,7 @@ def _group_bwd_lora_configs():
|
||||
|
||||
|
||||
def _prune_bwd_lora_configs(configs, named_args, **kwargs):
|
||||
"""Prune backward configs based on SMEM capacity.
|
||||
"""Prune backward configs based on SMEM capacity and register pressure.
|
||||
|
||||
The backward kernel loads X[BLOCK_M, BLOCK_K] and DY[BLOCK_M, BLOCK_N]
|
||||
in the inner loop, plus holds A[BLOCK_R, BLOCK_K] and B[BLOCK_N, BLOCK_R]
|
||||
@@ -1138,14 +1319,40 @@ def _prune_bwd_lora_configs(configs, named_args, **kwargs):
|
||||
# A[BLOCK_R, BLOCK_K] and B[BLOCK_N, BLOCK_R] held for the full expert
|
||||
smem_lora = (block_r * block_k + block_n * block_r) * 2
|
||||
smem = smem_base + smem_lora
|
||||
|
||||
# Register pressure: dA_acc[R,K], dB_acc[N,R], x[M,K], dy[M,N],
|
||||
# a[R,K], b[N,R], xa[M,R], dy_b[M,R]
|
||||
est_regs = _estimate_register_pressure(
|
||||
config.num_warps,
|
||||
(block_r, block_k), # dA_acc
|
||||
(block_n, block_r), # dB_acc
|
||||
(block_m, block_k), # x tile
|
||||
(block_m, block_n), # dy tile
|
||||
(block_r, block_k), # a tile
|
||||
(block_n, block_r), # b tile
|
||||
(block_m, block_r), # xa intermediate
|
||||
)
|
||||
if est_regs > _MAX_REGS_PER_THREAD:
|
||||
continue
|
||||
|
||||
scored.append((smem, config))
|
||||
|
||||
pruned = [c for s, c in scored if s <= smem_cap - _SMEM_SLACK]
|
||||
if pruned:
|
||||
return pruned
|
||||
# All configs exceed SMEM — return the one with smallest estimated usage
|
||||
scored.sort(key=lambda x: x[0])
|
||||
return [scored[0][1]]
|
||||
if scored:
|
||||
# All surviving configs exceed SMEM — return the one with smallest usage
|
||||
scored.sort(key=lambda x: x[0])
|
||||
return [scored[0][1]]
|
||||
# All configs pruned by register pressure — fall back to smallest tiles
|
||||
return [
|
||||
min(
|
||||
configs,
|
||||
key=lambda c: (
|
||||
c.kwargs["BLOCK_M"] * c.kwargs["BLOCK_K"] * c.kwargs["BLOCK_N"]
|
||||
),
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
@triton.autotune(
|
||||
@@ -1330,6 +1537,279 @@ def _group_bwd_lora(
|
||||
)
|
||||
|
||||
|
||||
def _group_bwd_split_configs():
|
||||
"""Autotune configs for split dA/dB kernels."""
|
||||
configs = []
|
||||
for block_m, block_dim, warps, stages in product(
|
||||
[32, 64, 128], # BLOCK_M (token tile)
|
||||
[32, 64, 128, 256], # BLOCK_DIM (K for dA, N for dB — output tile)
|
||||
[4, 8], # num_warps
|
||||
[3, 4, 5], # num_stages
|
||||
):
|
||||
configs.append(
|
||||
triton.Config(
|
||||
{"BLOCK_M": block_m, "BLOCK_DIM": block_dim},
|
||||
num_stages=stages,
|
||||
num_warps=warps,
|
||||
)
|
||||
)
|
||||
return configs
|
||||
|
||||
|
||||
def _prune_split_configs(configs, named_args, **kwargs):
|
||||
"""Prune split kernel configs based on SMEM capacity and register pressure."""
|
||||
smem_cap = _get_smem_capacity()
|
||||
block_r = named_args.get("BLOCK_R", 64)
|
||||
|
||||
# Fixed inner tile for reduction dimension
|
||||
BLOCK_INNER = 64
|
||||
|
||||
pruned = []
|
||||
for config in configs:
|
||||
block_m = config.kwargs["BLOCK_M"]
|
||||
block_dim = config.kwargs["BLOCK_DIM"]
|
||||
# Inner loop loads: input[M, INNER] and other[M, INNER_or_DIM]
|
||||
smem = config.num_stages * BLOCK_INNER * (block_m + block_dim) * 2
|
||||
# LoRA weights held in registers: [INNER, R] or [R, DIM]
|
||||
smem += (block_r * max(block_dim, BLOCK_INNER)) * 2
|
||||
|
||||
# Register pressure check
|
||||
est_regs = _estimate_register_pressure(
|
||||
config.num_warps,
|
||||
(block_r, block_dim), # acc
|
||||
(block_m, BLOCK_INNER), # input tile
|
||||
(block_m, block_dim), # other tile
|
||||
(block_r, BLOCK_INNER), # lora weight
|
||||
)
|
||||
if est_regs > _MAX_REGS_PER_THREAD:
|
||||
continue
|
||||
|
||||
if smem <= smem_cap - _SMEM_SLACK:
|
||||
pruned.append(config)
|
||||
|
||||
if pruned:
|
||||
return pruned
|
||||
configs.sort(key=lambda c: c.kwargs["BLOCK_M"] * c.kwargs["BLOCK_DIM"])
|
||||
return [configs[0]]
|
||||
|
||||
|
||||
@triton.autotune(
|
||||
configs=_group_bwd_split_configs(),
|
||||
key=["M", "K", "N"],
|
||||
prune_configs_by={"early_config_prune": _prune_split_configs},
|
||||
)
|
||||
@triton.heuristics(
|
||||
{
|
||||
"NO_DIM_MASK": lambda args: (
|
||||
(args["K"] % args["BLOCK_DIM"]) == 0
|
||||
if args["COMPUTE_DA"]
|
||||
else (args["N"] % args["BLOCK_DIM"]) == 0
|
||||
),
|
||||
}
|
||||
)
|
||||
@triton.jit
|
||||
def _group_bwd_lora_split(
|
||||
# Data tensors (DY and X are always present)
|
||||
DY_ptr,
|
||||
stride_dym,
|
||||
stride_dyn,
|
||||
X_ptr,
|
||||
stride_xm,
|
||||
stride_xk,
|
||||
# LoRA weight for the inner reduction (B for dA, A for dB)
|
||||
LW_ptr,
|
||||
stride_lw0,
|
||||
stride_lw1,
|
||||
# Output gradient tensor (dA or dB)
|
||||
OUT_ptr,
|
||||
stride_out0,
|
||||
stride_out1,
|
||||
# Expert offsets
|
||||
expert_offsets_ptr,
|
||||
# Dimensions
|
||||
M,
|
||||
K: tl.constexpr,
|
||||
N: tl.constexpr,
|
||||
ACTUAL_R: tl.constexpr,
|
||||
BLOCK_R: tl.constexpr,
|
||||
INNER_DIM: tl.constexpr, # reduction dimension (N for dA, K for dB)
|
||||
scaling,
|
||||
# Mode flag
|
||||
COMPUTE_DA: tl.constexpr, # True = compute dA, False = compute dB
|
||||
# Tile sizes
|
||||
BLOCK_M: tl.constexpr,
|
||||
BLOCK_DIM: tl.constexpr,
|
||||
ACC_TYPE: tl.constexpr,
|
||||
allow_tf32: tl.constexpr,
|
||||
NO_DIM_MASK: tl.constexpr,
|
||||
):
|
||||
"""
|
||||
Unified split kernel for LoRA gradient computation.
|
||||
|
||||
When COMPUTE_DA=True:
|
||||
dA[e] = scaling * (dY @ B[e])^T @ X → [R, K]
|
||||
Grid: (E, cdiv(K, BLOCK_DIM))
|
||||
- outer_ptr/stride = X (read [M, K_block])
|
||||
- inner reduction over N using DY and B
|
||||
- output shape [BLOCK_R, BLOCK_DIM]
|
||||
|
||||
When COMPUTE_DA=False:
|
||||
dB[e] = scaling * dY^T @ (X @ A[e]^T) → [N, R]
|
||||
Grid: (E, cdiv(N, BLOCK_DIM))
|
||||
- outer_ptr/stride = DY (read [M, N_block])
|
||||
- inner reduction over K using X and A
|
||||
- output shape [BLOCK_DIM, BLOCK_R]
|
||||
|
||||
No atomic adds — each (E, dim_block) pair is written by exactly one block.
|
||||
"""
|
||||
E_idx = tl.program_id(0)
|
||||
dim_block_id = tl.program_id(1)
|
||||
|
||||
if E_idx == 0:
|
||||
start_idx = 0
|
||||
else:
|
||||
start_idx = tl.load(expert_offsets_ptr + E_idx - 1).to(tl.int32)
|
||||
end_idx = tl.load(expert_offsets_ptr + E_idx).to(tl.int32)
|
||||
num_tokens = end_idx - start_idx
|
||||
|
||||
# Output dimension tile (K for dA, N for dB)
|
||||
if COMPUTE_DA:
|
||||
OUT_DIM: tl.constexpr = K # type: ignore[no-redef]
|
||||
else:
|
||||
OUT_DIM: tl.constexpr = N # type: ignore[no-redef]
|
||||
dim_block = dim_block_id * BLOCK_DIM + tl.arange(0, BLOCK_DIM)
|
||||
dim_mask = dim_block < OUT_DIM
|
||||
R_block = tl.arange(0, BLOCK_R)
|
||||
R_mask = R_block < ACTUAL_R
|
||||
lora_offset = E_idx * ACTUAL_R
|
||||
|
||||
# Output pointers — layout differs: dA is [R, K], dB is [N, R]
|
||||
if COMPUTE_DA:
|
||||
out_blk_ptrs = (
|
||||
OUT_ptr
|
||||
+ (lora_offset + R_block)[:, None] * stride_out0
|
||||
+ dim_block[None, :] * stride_out1
|
||||
)
|
||||
out_mask = R_mask[:, None] & dim_mask[None, :]
|
||||
else:
|
||||
out_blk_ptrs = (
|
||||
OUT_ptr
|
||||
+ dim_block[:, None] * stride_out0
|
||||
+ (lora_offset + R_block)[None, :] * stride_out1
|
||||
)
|
||||
out_mask = dim_mask[:, None] & R_mask[None, :]
|
||||
|
||||
if num_tokens > 0:
|
||||
M_block = tl.arange(0, BLOCK_M)
|
||||
INPUT_DTYPE = X_ptr.dtype.element_ty
|
||||
BLOCK_INNER: tl.constexpr = 64
|
||||
inner_iters = tl.cdiv(INNER_DIM, BLOCK_INNER)
|
||||
|
||||
if COMPUTE_DA:
|
||||
acc = tl.zeros((BLOCK_R, BLOCK_DIM), dtype=ACC_TYPE)
|
||||
else:
|
||||
acc = tl.zeros((BLOCK_DIM, BLOCK_R), dtype=ACC_TYPE)
|
||||
|
||||
M_iters = tl.cdiv(num_tokens, BLOCK_M)
|
||||
for i in range(M_iters):
|
||||
M_idx = start_idx + i * BLOCK_M + M_block
|
||||
M_mask = M_idx < end_idx
|
||||
|
||||
if COMPUTE_DA:
|
||||
# Load X[M, K_block] (the "outer" tensor for dA)
|
||||
outer = tl.load(
|
||||
X_ptr + M_idx[:, None] * stride_xm + dim_block[None, :] * stride_xk,
|
||||
mask=M_mask[:, None] & dim_mask[None, :],
|
||||
other=0.0,
|
||||
).to(INPUT_DTYPE)
|
||||
|
||||
# Reduce DY[M, :] @ B[e][:, R] over N → [M, R]
|
||||
reduced = tl.zeros((BLOCK_M, BLOCK_R), dtype=ACC_TYPE)
|
||||
inner_range = tl.arange(0, BLOCK_INNER)
|
||||
for j in range(inner_iters):
|
||||
inn_off = j * BLOCK_INNER + inner_range
|
||||
inn_mask = inn_off < N
|
||||
|
||||
dy_tile = tl.load(
|
||||
DY_ptr
|
||||
+ M_idx[:, None] * stride_dym
|
||||
+ inn_off[None, :] * stride_dyn,
|
||||
mask=M_mask[:, None] & inn_mask[None, :],
|
||||
other=0.0,
|
||||
).to(INPUT_DTYPE)
|
||||
# B layout: [N, r*E] → stride_lw0=N stride, stride_lw1=r*E stride
|
||||
lw_tile = tl.load(
|
||||
LW_ptr
|
||||
+ inn_off[:, None] * stride_lw0
|
||||
+ (lora_offset + R_block)[None, :] * stride_lw1,
|
||||
mask=inn_mask[:, None] & R_mask[None, :],
|
||||
other=0.0,
|
||||
).to(INPUT_DTYPE)
|
||||
reduced += tl.dot(dy_tile, lw_tile, allow_tf32=allow_tf32)
|
||||
|
||||
# dA += (DY@B)^T @ X: [R, M] @ [M, K_block] → [R, K_block]
|
||||
acc += tl.dot(
|
||||
tl.trans(reduced.to(INPUT_DTYPE)), outer, allow_tf32=allow_tf32
|
||||
)
|
||||
else:
|
||||
# Load DY[M, N_block] (the "outer" tensor for dB)
|
||||
outer = tl.load(
|
||||
DY_ptr
|
||||
+ M_idx[:, None] * stride_dym
|
||||
+ dim_block[None, :] * stride_dyn,
|
||||
mask=M_mask[:, None] & dim_mask[None, :],
|
||||
other=0.0,
|
||||
).to(INPUT_DTYPE)
|
||||
|
||||
# Reduce X[M, :] @ A[e][:, :].T over K → [M, R]
|
||||
reduced = tl.zeros((BLOCK_M, BLOCK_R), dtype=ACC_TYPE)
|
||||
inner_range = tl.arange(0, BLOCK_INNER)
|
||||
for j in range(inner_iters):
|
||||
inn_off = j * BLOCK_INNER + inner_range
|
||||
inn_mask = inn_off < K
|
||||
|
||||
x_tile = tl.load(
|
||||
X_ptr
|
||||
+ M_idx[:, None] * stride_xm
|
||||
+ inn_off[None, :] * stride_xk,
|
||||
mask=M_mask[:, None] & inn_mask[None, :],
|
||||
other=0.0,
|
||||
).to(INPUT_DTYPE)
|
||||
# A layout: [r*E, K] → stride_lw0=r*E stride, stride_lw1=K stride
|
||||
# We want A[e]^T: [K, R], so load as [K_inner, R]
|
||||
lw_tile = tl.load(
|
||||
LW_ptr
|
||||
+ (lora_offset + R_block)[None, :] * stride_lw0
|
||||
+ inn_off[:, None] * stride_lw1,
|
||||
mask=inn_mask[:, None] & R_mask[None, :],
|
||||
other=0.0,
|
||||
).to(INPUT_DTYPE)
|
||||
reduced += tl.dot(x_tile, lw_tile, allow_tf32=allow_tf32)
|
||||
|
||||
# dB += DY^T @ (X@A^T): [N_block, M] @ [M, R] → [N_block, R]
|
||||
acc += tl.dot(
|
||||
tl.trans(outer), reduced.to(INPUT_DTYPE), allow_tf32=allow_tf32
|
||||
)
|
||||
|
||||
tl.store(
|
||||
out_blk_ptrs, (acc * scaling).to(OUT_ptr.dtype.element_ty), mask=out_mask
|
||||
)
|
||||
else:
|
||||
# Zero out this expert's slice — needed because output uses empty_like
|
||||
if COMPUTE_DA:
|
||||
tl.store(
|
||||
out_blk_ptrs,
|
||||
tl.zeros((BLOCK_R, BLOCK_DIM), dtype=OUT_ptr.dtype.element_ty),
|
||||
mask=out_mask,
|
||||
)
|
||||
else:
|
||||
tl.store(
|
||||
out_blk_ptrs,
|
||||
tl.zeros((BLOCK_DIM, BLOCK_R), dtype=OUT_ptr.dtype.element_ty),
|
||||
mask=out_mask,
|
||||
)
|
||||
|
||||
|
||||
def group_bwd_lora(
|
||||
DY: torch.Tensor,
|
||||
X: torch.Tensor,
|
||||
@@ -1344,6 +1824,9 @@ def group_bwd_lora(
|
||||
"""
|
||||
Compute LoRA gradients for A and B on expert-grouped data.
|
||||
|
||||
Uses split dA/dB kernels that eliminate atomic adds by giving each
|
||||
(expert, output_block) pair its own thread block.
|
||||
|
||||
Args:
|
||||
DY: Gradient w.r.t. output [M_total, N] (grouped by expert)
|
||||
X: Input [M_total, K] (grouped by expert)
|
||||
@@ -1361,19 +1844,46 @@ def group_bwd_lora(
|
||||
K = X.size(1)
|
||||
N = DY.size(1)
|
||||
|
||||
# Zero-init for atomic accumulation
|
||||
dA = torch.zeros_like(lora_A)
|
||||
dB = torch.zeros_like(lora_B)
|
||||
# No zero-init needed: the split kernels write zeros for experts with
|
||||
# zero routed tokens directly in the kernel (else branch).
|
||||
dA = torch.empty_like(lora_A)
|
||||
dB = torch.empty_like(lora_B)
|
||||
|
||||
BLOCK_R = _block_r_for_rank(R)
|
||||
|
||||
def grid(META):
|
||||
return (
|
||||
E * triton.cdiv(K, META["BLOCK_K"]),
|
||||
triton.cdiv(N, META["BLOCK_N"]),
|
||||
)
|
||||
def grid_dA(META):
|
||||
return (E, triton.cdiv(K, META["BLOCK_DIM"]))
|
||||
|
||||
_group_bwd_lora[grid](
|
||||
_group_bwd_lora_split[grid_dA](
|
||||
DY,
|
||||
DY.stride(0),
|
||||
DY.stride(1),
|
||||
X,
|
||||
X.stride(0),
|
||||
X.stride(1),
|
||||
lora_B,
|
||||
lora_B.stride(0),
|
||||
lora_B.stride(1),
|
||||
dA,
|
||||
dA.stride(0),
|
||||
dA.stride(1),
|
||||
expert_offsets,
|
||||
M=DY.size(0),
|
||||
K=K,
|
||||
N=N,
|
||||
ACTUAL_R=R,
|
||||
BLOCK_R=BLOCK_R,
|
||||
INNER_DIM=N,
|
||||
scaling=scaling,
|
||||
COMPUTE_DA=True,
|
||||
ACC_TYPE=tl.float32,
|
||||
allow_tf32=ALLOW_TF32,
|
||||
)
|
||||
|
||||
def grid_dB(META):
|
||||
return (E, triton.cdiv(N, META["BLOCK_DIM"]))
|
||||
|
||||
_group_bwd_lora_split[grid_dB](
|
||||
DY,
|
||||
DY.stride(0),
|
||||
DY.stride(1),
|
||||
@@ -1383,12 +1893,6 @@ def group_bwd_lora(
|
||||
lora_A,
|
||||
lora_A.stride(0),
|
||||
lora_A.stride(1),
|
||||
lora_B,
|
||||
lora_B.stride(0),
|
||||
lora_B.stride(1),
|
||||
dA,
|
||||
dA.stride(0),
|
||||
dA.stride(1),
|
||||
dB,
|
||||
dB.stride(0),
|
||||
dB.stride(1),
|
||||
@@ -1396,9 +1900,11 @@ def group_bwd_lora(
|
||||
M=DY.size(0),
|
||||
K=K,
|
||||
N=N,
|
||||
ACTUAL_R=R, # True LoRA rank
|
||||
BLOCK_R=BLOCK_R, # Padded tile size
|
||||
ACTUAL_R=R,
|
||||
BLOCK_R=BLOCK_R,
|
||||
INNER_DIM=K,
|
||||
scaling=scaling,
|
||||
COMPUTE_DA=False,
|
||||
ACC_TYPE=tl.float32,
|
||||
allow_tf32=ALLOW_TF32,
|
||||
)
|
||||
|
||||
@@ -220,6 +220,158 @@ def _unwrap_experts_lora(experts_module):
|
||||
return base_experts, gup_lora, down_lora
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Routing helpers
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def _softmax_topk_route(
|
||||
moe_block, base_gate, hidden_states, gate_weight, gate_lora_delta
|
||||
):
|
||||
"""Softmax→topk routing (Qwen, OLMoE, Mixtral, MiniMax).
|
||||
|
||||
Returns:
|
||||
(routing_weights [T, K], selected_experts [T, K], top_k, num_experts)
|
||||
"""
|
||||
router_logits = F.linear(hidden_states, gate_weight)
|
||||
if gate_lora_delta is not None:
|
||||
router_logits = router_logits + F.linear(hidden_states, gate_lora_delta)
|
||||
routing_weights = F.softmax(router_logits, dim=-1, dtype=torch.float32)
|
||||
|
||||
top_k = base_gate.top_k
|
||||
num_experts = base_gate.num_experts
|
||||
routing_weights, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
|
||||
|
||||
if getattr(base_gate, "norm_topk_prob", True):
|
||||
routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True)
|
||||
|
||||
return routing_weights, selected_experts, top_k, num_experts
|
||||
|
||||
|
||||
def _sigmoid_topk_route(
|
||||
moe_block, base_gate, hidden_states, gate_weight, gate_lora_delta
|
||||
):
|
||||
"""Sigmoid→topk routing (GLM, DeepSeek V3, MiniMax M2).
|
||||
|
||||
Supports:
|
||||
- ``e_score_correction_bias`` on gate or moe_block
|
||||
- Group-based expert selection when ``n_group > 1``
|
||||
- ``routed_scaling_factor`` applied to final weights
|
||||
- Final weights gathered from original sigmoid probs (not bias-corrected)
|
||||
|
||||
Returns:
|
||||
(routing_weights [T, K], selected_experts [T, K], top_k, num_experts)
|
||||
"""
|
||||
router_logits = F.linear(hidden_states.float(), gate_weight.float())
|
||||
if gate_lora_delta is not None:
|
||||
router_logits = router_logits + F.linear(
|
||||
hidden_states.float(), gate_lora_delta.float()
|
||||
)
|
||||
router_probs = router_logits.sigmoid() # [T, E]
|
||||
|
||||
top_k = getattr(moe_block, "top_k", getattr(base_gate, "top_k", None))
|
||||
num_experts = getattr(moe_block, "n_routed_experts", gate_weight.shape[0])
|
||||
|
||||
# Bias-corrected scores for expert selection (not used for final weights).
|
||||
# glm_moe_dsa/deepseek_v3 store the bias on gate; minimax_m2 on the block.
|
||||
e_score_correction_bias = getattr(base_gate, "e_score_correction_bias", None)
|
||||
if e_score_correction_bias is None:
|
||||
e_score_correction_bias = getattr(moe_block, "e_score_correction_bias", None)
|
||||
if e_score_correction_bias is not None:
|
||||
scores_for_choice = router_probs + e_score_correction_bias
|
||||
else:
|
||||
scores_for_choice = router_probs
|
||||
|
||||
# Group-based selection: pick top groups, mask the rest
|
||||
n_group = getattr(moe_block, "n_group", 1)
|
||||
if n_group > 1:
|
||||
group_scores = (
|
||||
scores_for_choice.view(-1, n_group, num_experts // n_group)
|
||||
.topk(2, dim=-1)[0]
|
||||
.sum(dim=-1)
|
||||
) # [T, n_group]
|
||||
topk_group = getattr(moe_block, "topk_group", n_group)
|
||||
group_idx = torch.topk(group_scores, k=topk_group, dim=-1, sorted=False)[1]
|
||||
group_mask = torch.zeros_like(group_scores)
|
||||
group_mask.scatter_(1, group_idx, 1)
|
||||
score_mask = (
|
||||
group_mask.unsqueeze(-1)
|
||||
.expand(-1, n_group, num_experts // n_group)
|
||||
.reshape(-1, num_experts)
|
||||
)
|
||||
scores_for_choice = scores_for_choice.masked_fill(~score_mask.bool(), 0.0)
|
||||
|
||||
# Final topk from (possibly masked) scores
|
||||
topk_indices = torch.topk(scores_for_choice, k=top_k, dim=-1, sorted=False)[1]
|
||||
|
||||
# Gather weights from original sigmoid scores (not bias-corrected)
|
||||
topk_weights = router_probs.gather(1, topk_indices)
|
||||
|
||||
# Optional renormalization + scaling
|
||||
if getattr(moe_block, "norm_topk_prob", True):
|
||||
topk_weights = topk_weights / (topk_weights.sum(dim=-1, keepdim=True) + 1e-20)
|
||||
routed_scaling_factor = getattr(moe_block, "routed_scaling_factor", 1.0)
|
||||
topk_weights = topk_weights * routed_scaling_factor
|
||||
|
||||
return topk_weights, topk_indices, top_k, num_experts
|
||||
|
||||
|
||||
def _route(moe_block, base_gate, hidden_states, gate_weight, gate_lora_delta):
    """Dispatch to the correct routing strategy based on block attributes.

    Detects sigmoid routing by the presence of ``e_score_correction_bias``
    on either the gate or the moe_block.
    """
    has_sigmoid = (
        getattr(base_gate, "e_score_correction_bias", None) is not None
        or getattr(moe_block, "e_score_correction_bias", None) is not None
    )
    if has_sigmoid:
        return _sigmoid_topk_route(
            moe_block, base_gate, hidden_states, gate_weight, gate_lora_delta
        )
    return _softmax_topk_route(
        moe_block, base_gate, hidden_states, gate_weight, gate_lora_delta
    )
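A small sketch of the softmax path through the dispatcher, assuming it runs inside this module (both routing helpers are module-private); the toy gate and tensor shapes are made up.

```python
import torch

class _ToyGate:
    top_k = 2
    num_experts = 4
    norm_topk_prob = True

hidden = torch.randn(5, 16)    # 5 tokens, hidden size 16
gate_w = torch.randn(4, 16)    # [num_experts, hidden]

# No e_score_correction_bias anywhere -> _route picks _softmax_topk_route
weights, experts, top_k, num_experts = _route(
    moe_block=None,
    base_gate=_ToyGate(),
    hidden_states=hidden,
    gate_weight=gate_w,
    gate_lora_delta=None,
)
assert weights.shape == (5, 2) and experts.shape == (5, 2)
assert torch.allclose(weights.sum(-1), torch.ones(5))   # renormalized top-k weights
```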
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Shared expert helpers
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def _compute_shared_expert(moe_block, hidden_states_flat):
|
||||
"""Compute shared expert output if the block has one.
|
||||
|
||||
Handles singular (qwen2_moe: ``shared_expert``), plural
|
||||
(glm_moe_dsa/deepseek_v3: ``shared_experts``), and MLP
|
||||
(hunyuan_v1_moe: ``shared_mlp``) attribute names.
|
||||
|
||||
peft wraps individual linear layers inside the shared expert with
|
||||
standard LoRA — calling forward() handles this transparently.
|
||||
"""
|
||||
shared_expert = (
|
||||
getattr(moe_block, "shared_expert", None)
|
||||
or getattr(moe_block, "shared_experts", None)
|
||||
or getattr(moe_block, "shared_mlp", None)
|
||||
)
|
||||
if shared_expert is None:
|
||||
return None
|
||||
|
||||
shared_expert_output = shared_expert(hidden_states_flat)
|
||||
|
||||
# Optional sigmoid gate (Qwen2MoE pattern).
|
||||
# shared_expert_gate may also be peft-wrapped (standard LoRA
|
||||
# on nn.Linear), its forward() applies LoRA automatically.
|
||||
shared_expert_gate = getattr(moe_block, "shared_expert_gate", None)
|
||||
if shared_expert_gate is not None:
|
||||
shared_expert_output = (
|
||||
F.sigmoid(shared_expert_gate(hidden_states_flat)) * shared_expert_output
|
||||
)
|
||||
|
||||
return shared_expert_output
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Layer classes
|
||||
# =============================================================================
|
||||
@@ -281,16 +433,18 @@ class ScatterMoEGatedMLP(nn.Module):
|
||||
|
||||
class HFScatterMoEGatedMLP(nn.Module):
|
||||
"""
|
||||
ScatterMoE-accelerated forward pass for HF MoEs (OLMoE / Qwen2MoE).
|
||||
ScatterMoE-accelerated forward pass for HF MoEs.
|
||||
|
||||
Used as a kernel layer via the HF ``kernels`` library. The ``forward``
|
||||
method replaces the original ``OlmoeSparseMoeBlock.forward``.
|
||||
method replaces the original SparseMoeBlock.forward.
|
||||
|
||||
Supports both full-parameter training and LoRA fine-tuning:
|
||||
Supports:
|
||||
|
||||
* **Full-param**: uses ``parallel_linear`` (base ScatterMoE kernel)
|
||||
* **LoRA**: detects peft ``ParamWrapper`` on ``self.experts``, extracts
|
||||
adapter weights, and uses ``parallel_linear_lora`` (fused kernel)
|
||||
* **Softmax→topk routing**: OLMoE, Qwen2/3MoE, Mixtral, MiniMax
|
||||
* **Sigmoid→topk routing**: GLM, DeepSeek V3, MiniMax M2
|
||||
* **Full-parameter training**: uses ``parallel_linear`` (base ScatterMoE)
|
||||
* **LoRA fine-tuning**: detects peft ``ParamWrapper`` on ``self.experts``,
|
||||
extracts adapter weights, and uses ``parallel_linear_lora`` (fused kernel)
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
@@ -302,7 +456,7 @@ class HFScatterMoEGatedMLP(nn.Module):
|
||||
self: The MoeSparseMoeBlock module containing:
|
||||
- self.gate: Router (or peft ParamWrapper wrapping it)
|
||||
- self.experts: Experts module (or peft ParamWrapper chain)
|
||||
- self.shared_expert: Optional shared expert (e.g. Qwen2MoE)
|
||||
- self.shared_expert(s): Optional shared expert
|
||||
- self.shared_expert_gate: Optional shared expert gate
|
||||
layer_input: Input tensor [batch_size, seq_len, hidden_size]
|
||||
|
||||
@@ -313,38 +467,17 @@ class HFScatterMoEGatedMLP(nn.Module):
|
||||
hidden_states_flat = layer_input.view(-1, hidden_dim)
|
||||
|
||||
# ====================================================================
|
||||
# Shared Expert (if present, e.g. Qwen2MoE)
|
||||
# Shared Expert (if present, e.g. Qwen2MoE, DeepSeek V3)
|
||||
# ====================================================================
|
||||
# peft wraps individual linear layers inside shared_expert with
|
||||
# standard LoRA — calling forward() handles this transparently.
|
||||
if hasattr(self, "shared_expert") and self.shared_expert is not None:
|
||||
shared_expert_output = self.shared_expert(hidden_states_flat)
|
||||
# shared_expert_gate may also be peft-wrapped (standard LoRA
|
||||
# on nn.Linear), its forward() applies LoRA automatically.
|
||||
shared_expert_gate_output = F.sigmoid(
|
||||
self.shared_expert_gate(hidden_states_flat)
|
||||
)
|
||||
shared_expert_output = shared_expert_output * shared_expert_gate_output
|
||||
else:
|
||||
shared_expert_output = None
|
||||
shared_expert_output = _compute_shared_expert(self, hidden_states_flat)
|
||||
|
||||
# ====================================================================
|
||||
# Router Computation (with optional gate LoRA)
|
||||
# ====================================================================
|
||||
base_gate, gate_weight, gate_lora_delta = _unwrap_gate_lora(self.gate)
|
||||
router_logits = F.linear(hidden_states_flat, gate_weight)
|
||||
if gate_lora_delta is not None:
|
||||
router_logits = router_logits + F.linear(
|
||||
hidden_states_flat, gate_lora_delta
|
||||
)
|
||||
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
|
||||
|
||||
top_k = base_gate.top_k
|
||||
num_experts = base_gate.num_experts
|
||||
routing_weights, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
|
||||
|
||||
if base_gate.norm_topk_prob:
|
||||
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
|
||||
routing_weights, selected_experts, top_k, num_experts = _route(
|
||||
self, base_gate, hidden_states_flat, gate_weight, gate_lora_delta
|
||||
)
|
||||
routing_weights = routing_weights.to(hidden_states_flat.dtype)
|
||||
|
||||
sorted_expert_idxs, sorted_scattered_idxs, expert_offsets = flatten_sort_count(
|
||||
@@ -356,20 +489,71 @@ class HFScatterMoEGatedMLP(nn.Module):
|
||||
# ====================================================================
|
||||
experts, gup_lora, down_lora = _unwrap_experts_lora(self.experts)
|
||||
|
||||
# ====================================================================
|
||||
# Selective expert weight dequantization
|
||||
# ====================================================================
|
||||
# When experts are BnB-quantized (quantize_moe_experts), dequantize
|
||||
# only the active experts instead of all E. This saves ~97% memory
|
||||
# for the transient dequant buffer when few experts are active.
|
||||
use_selective = (
|
||||
getattr(self, "_use_selective_dequant", False)
|
||||
and hasattr(experts, "parametrizations")
|
||||
and "gate_up_proj" in experts.parametrizations
|
||||
)
|
||||
|
||||
if use_selective:
|
||||
from axolotl.integrations.kernels.libs.scattermoe_lora.selective_dequant import (
|
||||
get_active_experts,
|
||||
remap_expert_indices,
|
||||
selective_expert_weights,
|
||||
selective_lora_weights,
|
||||
)
|
||||
|
||||
active_experts = get_active_experts(sorted_expert_idxs, num_experts)
|
||||
remapped_expert_idxs, compact_offsets = remap_expert_indices(
|
||||
sorted_expert_idxs,
|
||||
expert_offsets,
|
||||
active_experts,
|
||||
num_experts,
|
||||
)
|
||||
# Dequantize only active experts' weights
|
||||
gate_up_W = selective_expert_weights(
|
||||
experts,
|
||||
"gate_up_proj",
|
||||
active_experts,
|
||||
).transpose(2, 1) # [num_active, hidden, 2*inter]
|
||||
|
||||
# Remap LoRA weights to match compact expert indices
|
||||
if gup_lora is not None:
|
||||
gup_A, gup_B, gup_scaling = gup_lora
|
||||
gup_A, gup_B = selective_lora_weights(
|
||||
gup_A,
|
||||
gup_B,
|
||||
active_experts,
|
||||
num_experts,
|
||||
)
|
||||
gup_lora = (gup_A, gup_B, gup_scaling)
|
||||
|
||||
# Use remapped indices for ScatterMoE kernels
|
||||
sei_gup = remapped_expert_idxs
|
||||
eo_gup = compact_offsets
|
||||
else:
|
||||
gate_up_W = experts.gate_up_proj.transpose(2, 1) # [E, hidden, 2*inter]
|
||||
sei_gup = sorted_expert_idxs
|
||||
eo_gup = expert_offsets
|
||||
|
||||
# ====================================================================
|
||||
# Gate + Up projection
|
||||
# ====================================================================
|
||||
gate_up_W = experts.gate_up_proj.transpose(2, 1) # [E, hidden, 2*inter]
|
||||
|
||||
if gup_lora is not None:
|
||||
gup_A, gup_B, gup_scaling = gup_lora
|
||||
gup = parallel_linear_lora(
|
||||
hidden_states_flat,
|
||||
gate_up_W,
|
||||
top_k,
|
||||
sorted_expert_idxs,
|
||||
sei_gup,
|
||||
sorted_scattered_idxs,
|
||||
expert_offsets,
|
||||
eo_gup,
|
||||
lora_A=gup_A,
|
||||
lora_B=gup_B,
|
||||
scaling=gup_scaling,
|
||||
@@ -383,9 +567,9 @@ class HFScatterMoEGatedMLP(nn.Module):
|
||||
hidden_states_flat,
|
||||
gate_up_W,
|
||||
top_k,
|
||||
sorted_expert_idxs,
|
||||
sei_gup,
|
||||
sorted_scattered_idxs,
|
||||
expert_offsets,
|
||||
eo_gup,
|
||||
grouped_in=False,
|
||||
grouped_out=True,
|
||||
)
|
||||
@@ -396,7 +580,29 @@ class HFScatterMoEGatedMLP(nn.Module):
|
||||
# ====================================================================
|
||||
# Down projection
|
||||
# ====================================================================
|
||||
down_W = experts.down_proj.transpose(2, 1) # [E, inter, hidden]
|
||||
if use_selective:
|
||||
down_W = selective_expert_weights(
|
||||
experts,
|
||||
"down_proj",
|
||||
active_experts,
|
||||
).transpose(2, 1) # [num_active, inter, hidden]
|
||||
|
||||
if down_lora is not None:
|
||||
down_A, down_B, down_scaling = down_lora
|
||||
down_A, down_B = selective_lora_weights(
|
||||
down_A,
|
||||
down_B,
|
||||
active_experts,
|
||||
num_experts,
|
||||
)
|
||||
down_lora = (down_A, down_B, down_scaling)
|
||||
|
||||
sei_down = remapped_expert_idxs
|
||||
eo_down = compact_offsets
|
||||
else:
|
||||
down_W = experts.down_proj.transpose(2, 1) # [E, inter, hidden]
|
||||
sei_down = sorted_expert_idxs
|
||||
eo_down = expert_offsets
|
||||
|
||||
if down_lora is not None:
|
||||
down_A, down_B, down_scaling = down_lora
|
||||
@@ -404,9 +610,9 @@ class HFScatterMoEGatedMLP(nn.Module):
|
||||
h,
|
||||
down_W,
|
||||
1,
|
||||
sorted_expert_idxs,
|
||||
sei_down,
|
||||
sorted_scattered_idxs,
|
||||
expert_offsets,
|
||||
eo_down,
|
||||
lora_A=down_A,
|
||||
lora_B=down_B,
|
||||
scaling=down_scaling,
|
||||
@@ -421,9 +627,9 @@ class HFScatterMoEGatedMLP(nn.Module):
|
||||
h,
|
||||
down_W,
|
||||
1,
|
||||
sorted_expert_idxs,
|
||||
sei_down,
|
||||
sorted_scattered_idxs,
|
||||
expert_offsets,
|
||||
eo_down,
|
||||
grouped_in=True,
|
||||
grouped_out=False,
|
||||
gates=routing_weights,
|
||||
|
||||
@@ -0,0 +1,282 @@
"""
Selective Expert Dequantization
===============================

Instead of dequantizing all E expert weight matrices at once (which creates
a ~1 GB transient buffer for 256 experts), only dequantize the experts that
are actually routed to by the current batch's top-k selection.

For Qwen3.5-35B-A3B (E=256, top_k=8, hidden=2048, intermediate=512):
- Full dequant: [256, 2048, 1024] = 1,074 MB per projection
- Selective (8 active): [8, 2048, 1024] = 33.5 MB per projection
- Savings: ~97% memory reduction per layer

This module provides format-agnostic selective weight extraction:
- BnB 4-bit (nf4/fp4): slice quantized data + absmax per expert
- bf16/fp32: direct indexing (no dequant needed)
- FP8: slice + cast

The ScatterMoE kernel itself doesn't change — we remap expert indices
from global (0..E-1) to compact (0..num_active-1) and pass the smaller
weight tensor.
"""
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
|
||||
def get_active_experts(sorted_expert_idxs: torch.Tensor, E: int) -> torch.Tensor:
|
||||
"""Get sorted unique expert indices from the routing output.
|
||||
|
||||
Args:
|
||||
sorted_expert_idxs: Expert assignments sorted by expert id [T*k]
|
||||
E: Total number of experts
|
||||
|
||||
Returns:
|
||||
active: Sorted unique expert indices [num_active]
|
||||
"""
|
||||
return torch.unique(sorted_expert_idxs)
|
||||
|
||||
|
||||
def remap_expert_indices(
|
||||
sorted_expert_idxs: torch.Tensor,
|
||||
expert_offsets: torch.Tensor,
|
||||
active_experts: torch.Tensor,
|
||||
E: int,
|
||||
) -> tuple[torch.Tensor, torch.Tensor]:
|
||||
"""Remap global expert indices to compact indices.
|
||||
|
||||
Maps expert ids from [0..E-1] to [0..num_active-1], preserving the
|
||||
sort order. Also compacts expert_offsets to only active experts.
|
||||
|
||||
Args:
|
||||
sorted_expert_idxs: [T*k] expert ids in sorted order
|
||||
expert_offsets: [E] cumulative token counts (original)
|
||||
active_experts: [num_active] sorted unique expert ids
|
||||
E: Total number of experts
|
||||
|
||||
Returns:
|
||||
remapped_idxs: [T*k] expert ids in [0..num_active-1]
|
||||
compact_offsets: [num_active] cumulative token counts
|
||||
"""
|
||||
# Build remap table: global_id -> compact_id
|
||||
remap = torch.empty(E, dtype=torch.long, device=sorted_expert_idxs.device)
|
||||
remap[active_experts] = torch.arange(
|
||||
len(active_experts), device=sorted_expert_idxs.device
|
||||
)
|
||||
|
||||
remapped_idxs = remap[sorted_expert_idxs]
|
||||
|
||||
# Compact the expert_offsets: only keep active experts' cumulative counts
|
||||
compact_offsets = expert_offsets[active_experts]
|
||||
|
||||
return remapped_idxs, compact_offsets
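A hedged worked example of the remapping with E=8 and three active experts; the token counts are made up, but the index arithmetic follows the function above.

```python
import torch

sorted_expert_idxs = torch.tensor([2, 2, 5, 5, 5, 7])     # tokens sorted by expert id
expert_offsets = torch.tensor([0, 0, 2, 2, 2, 5, 5, 6])   # cumulative counts, len E=8
active_experts = torch.unique(sorted_expert_idxs)          # tensor([2, 5, 7])

remap = torch.empty(8, dtype=torch.long)
remap[active_experts] = torch.arange(len(active_experts))
remapped_idxs = remap[sorted_expert_idxs]         # tensor([0, 0, 1, 1, 1, 2])
compact_offsets = expert_offsets[active_experts]  # tensor([2, 5, 6])
```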
|
||||
|
||||
|
||||
def _selective_dequant_bnb4(
|
||||
raw_param: torch.Tensor,
|
||||
quant_state,
|
||||
active_experts: torch.Tensor,
|
||||
expert_shape: tuple[int, int],
|
||||
) -> torch.Tensor:
|
||||
"""Dequantize only selected experts from BnB 4-bit packed data.
|
||||
|
||||
The raw parameter is a flattened 4-bit packed tensor. Each expert's
|
||||
data is contiguous (stored in expert-major order), so we can gather
|
||||
the packed data and absmax blocks for active experts, then dequantize
|
||||
as one contiguous block.
|
||||
|
||||
Args:
|
||||
raw_param: Flattened uint8 tensor of packed 4-bit weights
|
||||
quant_state: BnB QuantState with absmax, blocksize, code, etc.
|
||||
active_experts: [num_active] expert indices to dequantize
|
||||
expert_shape: (dim1, dim2) shape per expert (e.g. (1024, 2048))
|
||||
|
||||
Returns:
|
||||
Dequantized weights [num_active, dim1, dim2] in original dtype
|
||||
"""
|
||||
import bitsandbytes.functional as F # noqa: N812
|
||||
from bitsandbytes.functional import QuantState
|
||||
|
||||
expert_numel = expert_shape[0] * expert_shape[1]
|
||||
packed_per_expert = expert_numel // 2 # 4-bit = 2 values per byte
|
||||
blocks_per_expert = expert_numel // quant_state.blocksize
|
||||
num_active = len(active_experts)
|
||||
|
||||
if blocks_per_expert == 0:
|
||||
# Expert is smaller than one quantization block — blocks span across
|
||||
# expert boundaries, so per-expert slicing isn't possible.
|
||||
# Fallback: full dequantize + index.
|
||||
full = F.dequantize_4bit(raw_param, quant_state)
|
||||
E_total = full.numel() // expert_numel
|
||||
return full.reshape(E_total, *expert_shape)[active_experts]
|
||||
|
||||
# Use fused Triton kernel for NF4 (handles selective gather + dequant in one pass)
|
||||
if quant_state.quant_type == "nf4" and raw_param.dtype == torch.uint8:
|
||||
from axolotl.integrations.kernels.libs.scattermoe_lora.selective_dequant_kernel import (
|
||||
selective_dequant_nf4_triton,
|
||||
)
|
||||
|
||||
# Handle nested (double) quantization: dequantize absmax first
|
||||
# BnB uses dequantize_blockwise (not _4bit) for nested absmax + offset
|
||||
if quant_state.nested:
|
||||
absmax = F.dequantize_blockwise(quant_state.absmax, quant_state.state2)
|
||||
absmax += quant_state.offset
|
||||
if absmax.dtype != torch.float32:
|
||||
absmax = absmax.float()
|
||||
else:
|
||||
absmax = quant_state.absmax
|
||||
|
||||
return selective_dequant_nf4_triton(
|
||||
packed_data=raw_param,
|
||||
absmax=absmax,
|
||||
active_experts=active_experts,
|
||||
expert_shape=expert_shape,
|
||||
blocksize=quant_state.blocksize,
|
||||
dtype=quant_state.dtype,
|
||||
codebook=quant_state.code,
|
||||
)
|
||||
|
||||
# Fallback: gather + BnB dequant (for fp4 or non-uint8 packed formats)
|
||||
raw_flat = raw_param.reshape(-1)
|
||||
|
||||
offsets_qt = (
|
||||
active_experts.long()[:, None] * packed_per_expert
|
||||
+ torch.arange(packed_per_expert, device=raw_param.device)[None, :]
|
||||
).reshape(-1)
|
||||
qt_gathered = raw_flat[offsets_qt]
|
||||
|
||||
offsets_abs = (
|
||||
active_experts.long()[:, None] * blocks_per_expert
|
||||
+ torch.arange(blocks_per_expert, device=raw_param.device)[None, :]
|
||||
).reshape(-1)
|
||||
|
||||
if quant_state.nested:
|
||||
full_absmax = F.dequantize_blockwise(quant_state.absmax, quant_state.state2)
|
||||
full_absmax += quant_state.offset
|
||||
if full_absmax.dtype != torch.float32:
|
||||
full_absmax = full_absmax.float()
|
||||
absmax_gathered = full_absmax[offsets_abs]
|
||||
else:
|
||||
absmax_gathered = quant_state.absmax[offsets_abs]
|
||||
|
||||
qt_gathered = qt_gathered.unsqueeze(1) if qt_gathered.dim() == 1 else qt_gathered
|
||||
|
||||
gathered_qs = QuantState(
|
||||
absmax=absmax_gathered,
|
||||
shape=torch.Size([num_active * expert_numel]),
|
||||
blocksize=quant_state.blocksize,
|
||||
quant_type=quant_state.quant_type,
|
||||
code=quant_state.code,
|
||||
dtype=quant_state.dtype,
|
||||
)
|
||||
|
||||
deq = F.dequantize_4bit(qt_gathered, gathered_qs)
|
||||
return deq.reshape(num_active, *expert_shape)
|
||||
|
||||
|
||||
def _selective_index_dense(
|
||||
param: torch.Tensor,
|
||||
active_experts: torch.Tensor,
|
||||
) -> torch.Tensor:
|
||||
"""Select experts from a dense (bf16/fp32) weight tensor.
|
||||
|
||||
Simple indexing — no dequantization needed.
|
||||
"""
|
||||
return param[active_experts]
|
||||
|
||||
|
||||
def selective_expert_weights(
|
||||
experts_module: nn.Module,
|
||||
param_name: str,
|
||||
active_experts: torch.Tensor,
|
||||
) -> torch.Tensor:
|
||||
"""Extract and dequantize only the active experts' weights.
|
||||
|
||||
Format-agnostic: dispatches based on whether the parameter is
|
||||
BnB 4-bit quantized (via parametrize), FP8, or dense bf16/fp32.
|
||||
|
||||
Args:
|
||||
experts_module: The base experts module (e.g. Qwen3_5MoeExperts)
|
||||
param_name: "gate_up_proj" or "down_proj"
|
||||
active_experts: [num_active] sorted unique expert indices
|
||||
|
||||
Returns:
|
||||
Compact weight tensor [num_active, dim1, dim2] ready for ScatterMoE
|
||||
"""
|
||||
# Check if the parameter is BnB-quantized via parametrize
|
||||
if (
|
||||
hasattr(experts_module, "parametrizations")
|
||||
and param_name in experts_module.parametrizations
|
||||
):
|
||||
param_list = experts_module.parametrizations[param_name]
|
||||
parametrization = param_list[0]
|
||||
|
||||
# BnB 4-bit parametrization
|
||||
if hasattr(parametrization, "quant_state"):
|
||||
# The raw quantized data is on the ParametrizationList, not the
|
||||
# individual Bnb4bitParametrization module
|
||||
raw_param = param_list.original
|
||||
qs = parametrization.quant_state
|
||||
# qs.shape is the original tensor shape before flattening.
|
||||
# For MoE experts it's [E, d1, d2] (3D) or [total_elements] (1D).
|
||||
orig_shape = qs.shape
|
||||
if isinstance(orig_shape, torch.Size) and len(orig_shape) == 3:
|
||||
expert_shape = (orig_shape[1], orig_shape[2])
|
||||
elif isinstance(orig_shape, torch.Size) and len(orig_shape) == 1:
|
||||
# Flattened — need to infer from module attributes
|
||||
E_total = getattr(experts_module, "num_experts", None)
|
||||
if E_total is None:
|
||||
E_total = int(active_experts.max().item()) + 1
|
||||
expert_numel = orig_shape[0] // E_total
|
||||
d2 = getattr(experts_module, "hidden_dim", None) or getattr(
|
||||
experts_module, "intermediate_dim", None
|
||||
)
|
||||
if d2 and expert_numel % d2 == 0:
|
||||
expert_shape = (expert_numel // d2, d2)
|
||||
else:
|
||||
full = getattr(experts_module, param_name)
|
||||
return full[active_experts]
|
||||
else:
|
||||
full = getattr(experts_module, param_name)
|
||||
return full[active_experts]
|
||||
|
||||
return _selective_dequant_bnb4(raw_param, qs, active_experts, expert_shape)
|
||||
|
||||
# Dense parameter (bf16/fp32) — direct indexing
|
||||
param = getattr(experts_module, param_name)
|
||||
if param.dim() == 3:
|
||||
return param[active_experts]
|
||||
|
||||
# Fallback: full access
|
||||
return param
|
||||
|
||||
|
||||
def selective_lora_weights(
|
||||
lora_A: torch.Tensor,
|
||||
lora_B: torch.Tensor,
|
||||
active_experts: torch.Tensor,
|
||||
E: int,
|
||||
) -> tuple[torch.Tensor, torch.Tensor]:
|
||||
"""Select LoRA A and B weights for only the active experts.
|
||||
|
||||
LoRA layout (scattermoe format):
|
||||
A: [r*E, K] — expert e occupies rows [e*r : (e+1)*r]
|
||||
B: [N, r*E] — expert e occupies cols [e*r : (e+1)*r]
|
||||
|
||||
Returns compact:
|
||||
A: [r*num_active, K]
|
||||
B: [N, r*num_active]
|
||||
"""
|
||||
R = lora_A.size(0) // E
|
||||
|
||||
# Vectorized gather: active_experts[:, None] * R + arange(R)[None, :]
|
||||
row_idx = (
|
||||
active_experts.long()[:, None] * R
|
||||
+ torch.arange(R, device=lora_A.device)[None, :]
|
||||
).reshape(-1)
|
||||
|
||||
compact_A = lora_A[row_idx] # [r*num_active, K]
|
||||
compact_B = lora_B[:, row_idx] # [N, r*num_active]
|
||||
|
||||
return compact_A, compact_B
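A small sketch of the row/column gather used above, with R=2 and two active experts out of E=4; the values are illustrative.

```python
import torch

E, R, K, N = 4, 2, 8, 8
lora_A = torch.randn(E * R, K)     # [r*E, K]
lora_B = torch.randn(N, E * R)     # [N, r*E]
active = torch.tensor([1, 3])

row_idx = (active[:, None] * R + torch.arange(R)[None, :]).reshape(-1)
# tensor([2, 3, 6, 7]): the two rank rows for expert 1, then expert 3
compact_A = lora_A[row_idx]        # [R * num_active, K] = [4, 8]
compact_B = lora_B[:, row_idx]     # [N, R * num_active] = [8, 4]
```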
|
||||
@@ -0,0 +1,179 @@
"""
Triton kernel for fused selective expert gather + NF4 dequantization.

Instead of:
1. Gather packed uint8 data for active experts (memory copy)
2. Gather absmax for active experts (memory copy)
3. Call BnB dequantize_4bit CUDA kernel

This kernel does all three in one pass:
- Reads packed NF4 bytes from expert-strided positions
- Looks up the NF4 codebook
- Multiplies by the per-block absmax
- Writes bf16 output directly

This eliminates the intermediate gather buffer entirely.
"""
||||
|
||||
import torch
|
||||
import triton
|
||||
import triton.language as tl
|
||||
|
||||
# NF4 codebook (16 values, precomputed by BnB)
|
||||
# These are the normalized float4 reconstruction values
|
||||
NF4_CODEBOOK = [
|
||||
-1.0,
|
||||
-0.6961928009986877,
|
||||
-0.5250730514526367,
|
||||
-0.39491748809814453,
|
||||
-0.28444138169288635,
|
||||
-0.18477343022823334,
|
||||
-0.09105003625154495,
|
||||
0.0,
|
||||
0.07958029955625534,
|
||||
0.16093020141124725,
|
||||
0.24611230194568634,
|
||||
0.33791524171829224,
|
||||
0.44070982933044434,
|
||||
0.5626170039176941,
|
||||
0.7229568362236023,
|
||||
1.0,
|
||||
]
|
||||
|
||||
|
||||
@triton.jit
|
||||
def _selective_dequant_nf4_kernel(
|
||||
# Input: packed NF4 data (flattened, expert-major order)
|
||||
packed_ptr,
|
||||
# Input: absmax values (flattened, expert-major order)
|
||||
absmax_ptr,
|
||||
# Input: active expert indices
|
||||
active_experts_ptr,
|
||||
# Input: NF4 codebook (16 float values)
|
||||
codebook_ptr,
|
||||
# Output: dequantized bf16 weights [num_active, expert_numel]
|
||||
out_ptr,
|
||||
stride_out_e, # stride for expert dim in output
|
||||
# Dimensions
|
||||
num_active,
|
||||
packed_per_expert, # expert_numel // 2
|
||||
blocks_per_expert, # expert_numel // blocksize
|
||||
blocksize: tl.constexpr,
|
||||
# Tile size
|
||||
BLOCK_SIZE: tl.constexpr, # elements per thread block (must be multiple of 2)
|
||||
):
|
||||
"""
|
||||
Each program processes BLOCK_SIZE elements from one expert.
|
||||
|
||||
Grid: (num_active, cdiv(expert_numel, BLOCK_SIZE))
|
||||
|
||||
For each output element:
|
||||
1. Compute which byte in packed data contains this element
|
||||
2. Extract the 4-bit nibble (high or low)
|
||||
3. Look up in NF4 codebook
|
||||
4. Scale by absmax for this block
|
||||
"""
|
||||
expert_local_idx = tl.program_id(0) # which active expert (0..num_active-1)
|
||||
block_id = tl.program_id(1) # which element block
|
||||
|
||||
# Load the global expert index
|
||||
expert_global = tl.load(active_experts_ptr + expert_local_idx).to(tl.int64)
|
||||
|
||||
expert_numel = packed_per_expert * 2 # 2 elements per packed byte
|
||||
elem_offset = block_id * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
|
||||
mask = elem_offset < expert_numel
|
||||
|
||||
# Each element is packed as: byte[i//2], low nibble for even i, high for odd i
|
||||
byte_idx = elem_offset // 2
|
||||
is_high = (elem_offset % 2) == 1
|
||||
|
||||
# Read packed bytes from the global expert's region
|
||||
packed_global_offset = expert_global * packed_per_expert + byte_idx
|
||||
packed_bytes = tl.load(packed_ptr + packed_global_offset, mask=mask, other=0).to(
|
||||
tl.int32
|
||||
)
|
||||
|
||||
# Extract 4-bit nibble
|
||||
# BnB packing: high nibble = even element, low nibble = odd element
|
||||
nibble = tl.where(is_high, packed_bytes & 0xF, (packed_bytes >> 4) & 0xF)
|
||||
|
||||
# NF4 codebook lookup
|
||||
# Load all 16 codebook values (small, fits in registers)
|
||||
# Use gather from codebook pointer
|
||||
code_val = tl.load(codebook_ptr + nibble, mask=mask, other=0.0)
|
||||
|
||||
# Load absmax for this element's quantization block
|
||||
block_idx = elem_offset // blocksize
|
||||
absmax_global_offset = expert_global * blocks_per_expert + block_idx
|
||||
absmax_val = tl.load(absmax_ptr + absmax_global_offset, mask=mask, other=1.0)
|
||||
|
||||
# Dequantize: value = codebook[nibble] * absmax
|
||||
result = code_val * absmax_val
|
||||
|
||||
# Store to output
|
||||
out_offset = expert_local_idx * stride_out_e + elem_offset
|
||||
tl.store(out_ptr + out_offset, result.to(out_ptr.dtype.element_ty), mask=mask)
|
||||
|
||||
|
||||
def selective_dequant_nf4_triton(
|
||||
packed_data: torch.Tensor,
|
||||
absmax: torch.Tensor,
|
||||
active_experts: torch.Tensor,
|
||||
expert_shape: tuple[int, int],
|
||||
blocksize: int,
|
||||
dtype: torch.dtype = torch.bfloat16,
|
||||
codebook: torch.Tensor | None = None,
|
||||
) -> torch.Tensor:
|
||||
"""Fused selective gather + NF4 dequantization via Triton kernel.
|
||||
|
||||
Args:
|
||||
packed_data: Flattened packed NF4 data [total_packed] or [total_packed, 1]
|
||||
absmax: Per-block scaling factors [total_blocks]
|
||||
active_experts: Sorted indices of experts to dequantize [num_active]
|
||||
expert_shape: (dim1, dim2) per expert
|
||||
blocksize: Quantization block size
|
||||
dtype: Output dtype (default bf16)
|
||||
codebook: NF4 lookup table [16] (uses default NF4 codebook if None)
|
||||
|
||||
Returns:
|
||||
Dequantized weights [num_active, dim1, dim2]
|
||||
"""
|
||||
num_active = active_experts.shape[0]
|
||||
expert_numel = expert_shape[0] * expert_shape[1]
|
||||
packed_per_expert = expert_numel // 2
|
||||
blocks_per_expert = expert_numel // blocksize
|
||||
|
||||
# Prepare codebook on device
|
||||
if codebook is None:
|
||||
codebook = torch.tensor(
|
||||
NF4_CODEBOOK, dtype=torch.float32, device=packed_data.device
|
||||
)
|
||||
else:
|
||||
codebook = codebook.to(device=packed_data.device, dtype=torch.float32)
|
||||
|
||||
# Flatten inputs
|
||||
packed_flat = packed_data.reshape(-1)
|
||||
absmax_flat = absmax.reshape(-1).float() # absmax is usually fp32
|
||||
|
||||
# Output buffer
|
||||
out = torch.empty(num_active, expert_numel, dtype=dtype, device=packed_data.device)
|
||||
|
||||
BLOCK_SIZE = 1024 # Process 1024 elements per thread block
|
||||
|
||||
grid = (num_active, triton.cdiv(expert_numel, BLOCK_SIZE))
|
||||
|
||||
_selective_dequant_nf4_kernel[grid](
|
||||
packed_flat,
|
||||
absmax_flat,
|
||||
active_experts,
|
||||
codebook,
|
||||
out,
|
||||
out.stride(0),
|
||||
num_active=num_active,
|
||||
packed_per_expert=packed_per_expert,
|
||||
blocks_per_expert=blocks_per_expert,
|
||||
blocksize=blocksize,
|
||||
BLOCK_SIZE=BLOCK_SIZE,
|
||||
)
|
||||
|
||||
return out.reshape(num_active, *expert_shape)
|
||||
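For testing, the same math can be expressed as a plain-PyTorch reference that gathers one expert at a time. This is a sketch only (it assumes the flattened expert-major buffers and the even-element-in-high-nibble packing described above) and is mainly useful for asserting that the Triton kernel's output matches:

```python
import torch

def naive_selective_dequant_nf4(packed, absmax, active_experts, expert_shape,
                                blocksize, codebook, dtype=torch.bfloat16):
    """Reference implementation mirroring the Triton kernel's indexing.

    Assumes `packed` is the flattened uint8 buffer (two NF4 values per byte,
    expert-major), `absmax` is the flattened per-block scale buffer, and
    `codebook` is the 16-entry NF4 table as a float tensor.
    """
    d1, d2 = expert_shape
    expert_numel = d1 * d2
    packed_per_expert = expert_numel // 2
    blocks_per_expert = expert_numel // blocksize

    outs = []
    for e in active_experts.tolist():
        b = packed[e * packed_per_expert:(e + 1) * packed_per_expert].to(torch.int32)
        # Packing convention assumed by the kernel above:
        # even element -> high nibble, odd element -> low nibble
        high = (b >> 4) & 0xF
        low = b & 0xF
        nibbles = torch.stack([high, low], dim=1).reshape(-1)  # [expert_numel]
        scales = absmax[e * blocks_per_expert:(e + 1) * blocks_per_expert]
        scales = scales.float().repeat_interleave(blocksize)
        vals = codebook[nibbles.long()] * scales
        outs.append(vals.to(dtype).reshape(d1, d2))
    return torch.stack(outs)  # [num_active, d1, d2]
```

With matching inputs, comparing this against `selective_dequant_nf4_triton` via `torch.testing.assert_close` should hold up to bf16 rounding.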
@@ -1,14 +1,59 @@
|
||||
import importlib
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from kernels import (
|
||||
LocalLayerRepository,
|
||||
Mode,
|
||||
register_kernel_mapping,
|
||||
replace_kernel_forward_from_hub,
|
||||
)
|
||||
import torch
|
||||
|
||||
from axolotl.integrations.base import BasePlugin
|
||||
from axolotl.utils.callbacks.models import get_causal_lm_model_cls_prefix
|
||||
from axolotl.utils.logging import get_logger
|
||||
|
||||
LOG = get_logger(__name__)
|
||||
|
||||
|
||||
def _check_sonicmoe_gpu_compat():
|
||||
"""Validate GPU compute capability for SonicMoE and configure env.
|
||||
|
||||
Supported: Hopper (sm_90), Blackwell (sm_100 - sm_103).
|
||||
B300 (sm_103) additionally requires Triton 3.6.0.
|
||||
"""
|
||||
if not torch.cuda.is_available():
|
||||
return
|
||||
|
||||
cc = torch.cuda.get_device_capability()
|
||||
|
||||
if cc < (9, 0):
|
||||
raise RuntimeError(
|
||||
f"SonicMoE requires Hopper (sm_90) or Blackwell (sm_100+) GPU, "
|
||||
f"but detected sm_{cc[0]}{cc[1]}."
|
||||
)
|
||||
|
||||
if cc > (10, 3):
|
||||
raise RuntimeError(
|
||||
f"SonicMoE does not yet support sm_{cc[0]}{cc[1]}. "
|
||||
f"Supported: Hopper (sm_90) and Blackwell (sm_100 - sm_103)."
|
||||
)
|
||||
|
||||
# Blackwell (sm_100+): enable QuACK GEMM kernels
|
||||
if cc >= (10, 0):
|
||||
os.environ.setdefault("USE_QUACK_GEMM", "1")
|
||||
LOG.info(
|
||||
f"Blackwell GPU (sm_{cc[0]}{cc[1]}) detected, enabling USE_QUACK_GEMM=1"
|
||||
)
|
||||
|
||||
# B300 (sm_103): requires Triton 3.6.0
|
||||
if cc == (10, 3):
|
||||
triton_spec = importlib.util.find_spec("triton")
|
||||
if triton_spec is None:
|
||||
raise RuntimeError(
|
||||
"B300 (sm_103) requires Triton 3.6.0, but Triton is not installed."
|
||||
)
|
||||
import triton
|
||||
|
||||
triton_version = tuple(int(x) for x in triton.__version__.split(".")[:2])
|
||||
if triton_version != (3, 6):
|
||||
raise RuntimeError(
|
||||
f"B300 (sm_103) requires Triton 3.6.x, but found {triton.__version__}."
|
||||
)
|
||||
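For reference, `torch.cuda.get_device_capability()` returns the (major, minor) tuple the check above compares against; a quick illustration (the GPU names are examples, not taken from the diff):

```python
import torch

# (9, 0)  -> sm_90, Hopper (e.g. H100): supported
# (10, 0) -> sm_100, Blackwell: supported, USE_QUACK_GEMM=1 is set
# (10, 3) -> sm_103, Blackwell B300: supported only with Triton 3.6.x
# (8, 0)  -> sm_80, Ampere (e.g. A100): raises RuntimeError

if torch.cuda.is_available():
    print(torch.cuda.get_device_capability())
```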
|
||||
|
||||
class KernelsPlugin(BasePlugin):
|
||||
@@ -16,11 +61,44 @@ class KernelsPlugin(BasePlugin):
|
||||
return "axolotl.integrations.kernels.KernelsArgs"
|
||||
|
||||
def pre_model_load(self, cfg):
|
||||
from axolotl.integrations.kernels.constants import SPARSE_MOE_BLOCK
|
||||
|
||||
# Prefer text backbone type for VLMs, but fall back to base type
|
||||
# when the text type isn't in the supported mapping (e.g. qwen3_5_moe_text)
|
||||
moe_model_type = cfg.model_config_type_text or cfg.model_config_type
|
||||
if (
|
||||
moe_model_type not in SPARSE_MOE_BLOCK
|
||||
and cfg.model_config_type in SPARSE_MOE_BLOCK
|
||||
):
|
||||
moe_model_type = cfg.model_config_type
|
||||
|
||||
if cfg.use_scattermoe:
|
||||
self._register_kernels()
|
||||
self._kernelize_model(cfg.model_config_type)
|
||||
self._kernelize_model(moe_model_type)
|
||||
elif cfg.use_sonicmoe:
|
||||
if not importlib.util.find_spec("sonicmoe"):
|
||||
raise RuntimeError(
|
||||
"SonicMoE is not installed. See installation instructions at "
|
||||
"https://github.com/axolotl-ai-cloud/axolotl/blob/main/src/axolotl/integrations/kernels/README.md#sonicmoe-installation"
|
||||
)
|
||||
|
||||
_check_sonicmoe_gpu_compat()
|
||||
|
||||
from axolotl.integrations.kernels.sonicmoe import patch_sonicmoe
|
||||
|
||||
LOG.info(f"Applying SonicMoE patches for model type: {moe_model_type}")
|
||||
patch_sonicmoe(
|
||||
moe_model_type,
|
||||
torch_compile=bool(getattr(cfg, "torch_compile", False)),
|
||||
)
|
||||
|
||||
def _register_kernels(self):
|
||||
from kernels import (
|
||||
LocalLayerRepository,
|
||||
Mode,
|
||||
register_kernel_mapping,
|
||||
)
|
||||
|
||||
plugin_root = Path(__file__).parent
|
||||
register_kernel_mapping(
|
||||
{
|
||||
@@ -41,26 +119,22 @@ class KernelsPlugin(BasePlugin):
|
||||
}
|
||||
)
|
||||
|
||||
def _kernelize_model(self, model_type: str):
|
||||
if model_type == "olmoe":
|
||||
from transformers.models.olmoe.modeling_olmoe import OlmoeSparseMoeBlock
|
||||
|
||||
replace_kernel_forward_from_hub(
|
||||
OlmoeSparseMoeBlock, "HFScatterMoEParallelExperts"
|
||||
def add_callbacks_pre_trainer(self, cfg, model):
|
||||
callbacks = []
|
||||
if cfg.use_scattermoe:
|
||||
from axolotl.integrations.kernels.autotune_callback import (
|
||||
AutotuneReportCallback,
|
||||
)
|
||||
else:
|
||||
try:
|
||||
model_moe_cls = get_model_moe_block(model_type)
|
||||
replace_kernel_forward_from_hub(
|
||||
model_moe_cls, "HFScatterMoEParallelExperts"
|
||||
)
|
||||
except Exception as err:
|
||||
raise ValueError(f"Unsupported model type: {model_type}") from err
|
||||
|
||||
callbacks.append(AutotuneReportCallback())
|
||||
return callbacks
|
||||
|
||||
def get_model_moe_block(model_type: str):
|
||||
module_path = f"transformers.models.{model_type}.modeling_{model_type}"
|
||||
model_cls_prefix, _ = get_causal_lm_model_cls_prefix(model_type)
|
||||
module = __import__(module_path, fromlist=[f"{model_cls_prefix}SparseMoeBlock"])
|
||||
model_cls = getattr(module, f"{model_cls_prefix}SparseMoeBlock")
|
||||
return model_cls
|
||||
def _kernelize_model(self, model_type: str):
|
||||
from kernels import replace_kernel_forward_from_hub
|
||||
|
||||
from axolotl.integrations.kernels.constants import resolve_moe_block_classes
|
||||
|
||||
for model_moe_cls in resolve_moe_block_classes(model_type):
|
||||
replace_kernel_forward_from_hub(
|
||||
model_moe_cls, "HFScatterMoEParallelExperts"
|
||||
)
|
||||
|
||||
3
src/axolotl/integrations/kernels/sonicmoe/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .patch import patch_sonicmoe
|
||||
|
||||
__all__ = ["patch_sonicmoe"]
|
||||
213
src/axolotl/integrations/kernels/sonicmoe/patch.py
Normal file
@@ -0,0 +1,213 @@
"""
SonicMoE patching for SparseMoeBlock forward pass.

Monkeypatches the SparseMoeBlock class for a given model type to use
SonicMoE's optimized kernels. Two forward paths are supported:

1. **General routing path** (routing_fn is not None):
   Uses a custom routing function + ``moe_general_routing_inputs``.
   Suitable for models with non-standard routing (softmax->topk, sigmoid->topk).

2. **Fused topk->softmax path** (routing_fn is None):
   Uses ``moe_TC_softmax_topk_layer`` which fuses routing + expert computation.
   Suitable for models with simple topk->softmax routing.

Weight format conversion (interleave/deinterleave) is handled by the
WeightConverter system, so the forward assumes weights are already in
interleaved format.

Shared experts are handled generically: if the block has a ``shared_expert``
or ``shared_experts`` attribute, its output is computed alongside the routed
experts and added to the final output. An optional ``shared_expert_gate``
applies sigmoid gating to the shared expert contribution.
"""
|
||||
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
||||
from axolotl.integrations.kernels.constants import resolve_moe_block_classes
|
||||
from axolotl.utils.logging import get_logger
|
||||
|
||||
LOG = get_logger(__name__)
|
||||
|
||||
|
||||
def patch_sonicmoe(model_type: str, torch_compile: bool = False):
|
||||
"""Main entry point: patch SparseMoeBlock for SonicMoE support.
|
||||
|
||||
Args:
|
||||
model_type: The HuggingFace model type (e.g. "qwen3_moe").
|
||||
torch_compile: If True, wrap routing functions with torch.compile
|
||||
for kernel fusion (fuses softmax+topk+renorm into fewer launches).
|
||||
"""
|
||||
from .routing import get_model_moe_config
|
||||
from .weight_converter import register_sonicmoe_weight_converter
|
||||
|
||||
routing_fn, activation, router_attr = get_model_moe_config(model_type)
|
||||
|
||||
if torch_compile and routing_fn is not None:
|
||||
routing_fn = _try_compile_routing(routing_fn)
|
||||
|
||||
for moe_cls in resolve_moe_block_classes(model_type):
|
||||
_patch_forward(moe_cls, routing_fn, activation, router_attr)
|
||||
register_sonicmoe_weight_converter(model_type)
|
||||
|
||||
|
||||
def _try_compile_routing(routing_fn):
|
||||
"""Attempt to torch.compile the routing function, fall back to eager on failure."""
|
||||
try:
|
||||
compiled_fn = torch.compile(routing_fn, mode="reduce-overhead", dynamic=False)
|
||||
LOG.info(f"torch.compile enabled for routing function: {routing_fn.__name__}")
|
||||
return compiled_fn
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
LOG.warning(
|
||||
f"torch.compile failed for routing function {routing_fn.__name__}, "
|
||||
f"falling back to eager: {exc}"
|
||||
)
|
||||
return routing_fn
|
||||
|
||||
|
||||
def _patch_forward(moe_cls, routing_fn, activation, router_attr):
|
||||
"""Monkeypatch the SparseMoeBlock class with a SonicMoE forward.
|
||||
|
||||
The patched forward handles shared experts generically: if
|
||||
``self.shared_expert`` or ``self.shared_experts`` exists, it is computed
|
||||
and added to the routed output. If ``self.shared_expert_gate`` also exists,
|
||||
it applies sigmoid gating to the shared expert contribution (as in qwen2_moe).
|
||||
|
||||
Args:
|
||||
moe_cls: The SparseMoeBlock class to patch.
|
||||
routing_fn: Routing function (e.g. softmax_topk_routing), or None
|
||||
for the fused moe_TC_softmax_topk_layer path.
|
||||
activation: SonicMoE ActivationType enum value.
|
||||
router_attr: Name of the router module attribute on the MoE block.
|
||||
"""
|
||||
if hasattr(moe_cls, "_original_forward"):
|
||||
LOG.info(f"{moe_cls.__name__}.forward already patched with SonicMoE, skipping")
|
||||
return
|
||||
|
||||
original_forward = moe_cls.forward
|
||||
|
||||
if routing_fn is not None:
|
||||
_make_general_forward(moe_cls, routing_fn, activation)
|
||||
else:
|
||||
_make_fused_forward(moe_cls, activation, router_attr)
|
||||
|
||||
moe_cls._original_forward = original_forward
|
||||
LOG.info(f"Patched {moe_cls.__name__}.forward with SonicMoE implementation")
|
||||
|
||||
|
||||
def _make_general_forward(moe_cls, routing_fn, activation):
|
||||
"""Create forward using routing_fn + moe_general_routing_inputs."""
|
||||
|
||||
def sonicmoe_forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
||||
from sonicmoe import moe_general_routing_inputs
|
||||
|
||||
batch_size, sequence_length, hidden_dim = hidden_states.shape
|
||||
hidden_states_flat = hidden_states.view(-1, hidden_dim)
|
||||
|
||||
# Shared expert (computed early, matching original model ordering)
|
||||
shared_expert_output = _compute_shared_expert(self, hidden_states_flat)
|
||||
|
||||
# Routing
|
||||
router_scores, token_indices, expert_indices, _router_logits = routing_fn(
|
||||
hidden_states_flat, self
|
||||
)
|
||||
|
||||
# Permute weights to SonicMoE layout:
|
||||
# gate_up: [E, 2*I, H] -> [2*I, H, E]
|
||||
# down: [E, H, I] -> [H, I, E]
|
||||
gate_up_weight = self.experts.gate_up_proj.permute(1, 2, 0)
|
||||
down_weight = self.experts.down_proj.permute(1, 2, 0)
|
||||
E = gate_up_weight.shape[-1]
|
||||
|
||||
output, _ = moe_general_routing_inputs(
|
||||
hidden_states_flat,
|
||||
router_scores,
|
||||
token_indices,
|
||||
expert_indices,
|
||||
gate_up_weight,
|
||||
None, # b1 (no gate/up bias)
|
||||
down_weight,
|
||||
None, # b2 (no down bias)
|
||||
E,
|
||||
torch.cuda.current_stream().cuda_stream,
|
||||
activation,
|
||||
False, # is_inference_mode
|
||||
)
|
||||
|
||||
# Add shared expert contribution if present
|
||||
if shared_expert_output is not None:
|
||||
if hasattr(self, "shared_expert_gate"):
|
||||
shared_expert_output = (
|
||||
F.sigmoid(self.shared_expert_gate(hidden_states_flat))
|
||||
* shared_expert_output
|
||||
)
|
||||
output = output + shared_expert_output
|
||||
|
||||
return output.view(batch_size, sequence_length, hidden_dim)
|
||||
|
||||
moe_cls.forward = sonicmoe_forward
|
||||
|
||||
|
||||
def _make_fused_forward(moe_cls, activation, router_attr):
|
||||
"""Create forward using moe_TC_softmax_topk_layer (topk -> softmax)."""
|
||||
|
||||
def sonicmoe_fused_forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
||||
from sonicmoe import moe_TC_softmax_topk_layer
|
||||
|
||||
batch_size, sequence_length, hidden_dim = hidden_states.shape
|
||||
hidden_states_flat = hidden_states.view(-1, hidden_dim)
|
||||
|
||||
# Shared expert (computed early, matching original model ordering)
|
||||
shared_expert_output = _compute_shared_expert(self, hidden_states_flat)
|
||||
|
||||
router = getattr(self, router_attr)
|
||||
|
||||
# Permute weights to SonicMoE layout:
|
||||
# gate_up: [E, 2*I, H] -> [2*I, H, E]
|
||||
# down: [E, H, I] -> [H, I, E]
|
||||
gate_up_weight = self.experts.gate_up_proj.permute(1, 2, 0)
|
||||
down_weight = self.experts.down_proj.permute(1, 2, 0)
|
||||
|
||||
output, _router_logits, _expert_freq = moe_TC_softmax_topk_layer(
|
||||
hidden_states_flat,
|
||||
router.weight,
|
||||
gate_up_weight,
|
||||
None, # b1 (no gate/up bias)
|
||||
down_weight,
|
||||
None, # b2 (no down bias)
|
||||
router.top_k,
|
||||
torch.cuda.current_stream().cuda_stream,
|
||||
activation,
|
||||
False, # is_inference_mode
|
||||
)
|
||||
|
||||
# Add shared expert contribution if present
|
||||
if shared_expert_output is not None:
|
||||
if hasattr(self, "shared_expert_gate"):
|
||||
shared_expert_output = (
|
||||
F.sigmoid(self.shared_expert_gate(hidden_states_flat))
|
||||
* shared_expert_output
|
||||
)
|
||||
output = output + shared_expert_output
|
||||
|
||||
return output.view(batch_size, sequence_length, hidden_dim)
|
||||
|
||||
moe_cls.forward = sonicmoe_fused_forward
|
||||
|
||||
|
||||
def _compute_shared_expert(moe_block, hidden_states_flat):
|
||||
"""Compute shared expert output if the block has one.
|
||||
|
||||
Handles singular (qwen2_moe: ``shared_expert``), plural
|
||||
(glm_moe_dsa/deepseek_v3: ``shared_experts``), and MLP
|
||||
(hunyuan_v1_moe: ``shared_mlp``) attribute names.
|
||||
"""
|
||||
shared_expert = (
|
||||
getattr(moe_block, "shared_expert", None)
|
||||
or getattr(moe_block, "shared_experts", None)
|
||||
or getattr(moe_block, "shared_mlp", None)
|
||||
)
|
||||
if shared_expert is not None:
|
||||
return shared_expert(hidden_states_flat)
|
||||
return None
|
||||
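Both patched forwards combine the shared-expert branch in the same way; the logic reduces to roughly the following sketch (illustrative only; `routed` stands for the SonicMoE expert output):

```python
import torch.nn.functional as F

def combine_with_shared_expert(moe_block, hidden_states_flat, routed):
    """Sketch of the shared-expert handling used by both patched forwards."""
    shared_out = _compute_shared_expert(moe_block, hidden_states_flat)
    if shared_out is None:
        return routed
    if hasattr(moe_block, "shared_expert_gate"):
        # qwen2_moe-style sigmoid gate on the shared contribution
        gate = F.sigmoid(moe_block.shared_expert_gate(hidden_states_flat))
        shared_out = gate * shared_out
    return routed + shared_out
```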
278
src/axolotl/integrations/kernels/sonicmoe/routing.py
Normal file
@@ -0,0 +1,278 @@
"""
Routing functions for SonicMoE integration.

Different MoE architectures use different routing strategies:
- qwen3_moe / qwen2_moe / qwen3_5_moe / qwen3_vl_moe / qwen3_omni_moe: softmax -> topk (with optional renormalization)
- gpt_oss: topk -> softmax (uses fused moe_TC_softmax_topk_layer, routing_fn=None)
- glm_moe_dsa: sigmoid -> topk (with group-based expert selection)
- mistral4: softmax -> group selection -> topk (with renormalization and scaling)

Each model type maps to a (routing_fn, activation_type, router_attr) triple.
When routing_fn is None, the fused moe_TC_softmax_topk_layer path is used.
"""
|
||||
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
||||
|
||||
def get_model_moe_config(model_type: str):
|
||||
"""Returns (routing_fn, activation, router_attr) for a given model type.
|
||||
|
||||
Args:
|
||||
model_type: HuggingFace model type string.
|
||||
|
||||
Returns:
|
||||
routing_fn: Callable or None. None signals the fused
|
||||
moe_TC_softmax_topk_layer path (topk -> softmax models).
|
||||
activation: SonicMoE ActivationType enum value.
|
||||
router_attr: Name of the router module attribute on the MoE block
|
||||
(e.g. "gate" or "router").
|
||||
|
||||
The activation type cannot be derived from config.hidden_act because
|
||||
e.g. qwen3_moe reports "silu" but architecturally uses SwiGLU
|
||||
(act_fn(gate) * up pattern). So we specify it per model type.
|
||||
"""
|
||||
from sonicmoe.enums import ActivationType
|
||||
|
||||
if model_type in (
|
||||
"qwen2_moe",
|
||||
"qwen3_moe",
|
||||
"qwen3_5_moe",
|
||||
"qwen3_next",
|
||||
"qwen3_vl_moe",
|
||||
"qwen3_omni_moe",
|
||||
"olmoe",
|
||||
"mixtral",
|
||||
"minimax",
|
||||
):
|
||||
return softmax_topk_routing, ActivationType.SWIGLU, "gate"
|
||||
elif model_type in ("mistral4",):
|
||||
return softmax_group_topk_routing, ActivationType.SWIGLU, "gate"
|
||||
elif model_type in (
|
||||
"glm_moe_dsa",
|
||||
"deepseek_v3",
|
||||
"glm4_moe",
|
||||
"glm4_moe_lite",
|
||||
"glm4v_moe",
|
||||
"minimax_m2",
|
||||
):
|
||||
return sigmoid_topk_routing, ActivationType.SWIGLU, "gate"
|
||||
# elif model_type in ("ernie4_5_moe",):
|
||||
# # Softmax→topk with e_score_correction_bias applied between softmax and topk.
|
||||
# return ..., ActivationType.SWIGLU, "gate"
|
||||
# elif model_type in ("deepseek_v2",):
|
||||
# # Softmax→topk with group_limited_greedy. Different attr names: num_group
|
||||
# # (not n_group), gate is nn.Linear (not a router class).
|
||||
# return ..., ActivationType.SWIGLU, "gate"
|
||||
# elif model_type in ("hunyuan_v1_moe",):
|
||||
# # Softmax→topk but gate structure differs: gate.wg (not gate.weight),
|
||||
# # top_k on block not gate, creates scatter routing matrix.
|
||||
# return ..., ActivationType.SWIGLU, "gate"
|
||||
# Fused topk -> softmax path (routing_fn=None):
|
||||
# elif model_type in ("gpt_oss",):
|
||||
# # NOTE: gpt_oss has a router bias which moe_TC_softmax_topk_layer
|
||||
# # ignores (it only takes router_w, not bias). Also has transposed
|
||||
# # weight layout [E, H, 2*I] and custom GLU activation.
|
||||
# return None, ActivationType.SWIGLU, "router"
|
||||
else:
|
||||
raise ValueError(f"SonicMoE: unsupported model type '{model_type}'")
|
||||
|
||||
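A short usage sketch of the lookup (assumes sonicmoe is installed; the values shown are the ones returned by the branches above):

```python
routing_fn, activation, router_attr = get_model_moe_config("qwen3_moe")
print(routing_fn.__name__, activation, router_attr)
# softmax_topk_routing ActivationType.SWIGLU gate

try:
    get_model_moe_config("some_unsupported_arch")
except ValueError as err:
    print(err)  # SonicMoE: unsupported model type 'some_unsupported_arch'
```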
|
||||
def softmax_topk_routing(
|
||||
hidden_states: torch.Tensor, moe_block
|
||||
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
"""Qwen3/Qwen2-style routing: softmax -> topk -> optional renorm.
|
||||
|
||||
Args:
|
||||
hidden_states: [T, H] flattened token representations
|
||||
moe_block: MoE block module (accesses moe_block.gate.*)
|
||||
|
||||
Returns:
|
||||
router_scores: [T*K] flattened scores (float32)
|
||||
token_indices: [T*K] which token each entry belongs to (int32), sorted ascending
|
||||
expert_indices: [T*K] which expert (int32)
|
||||
router_logits: [T, E] original logits for aux loss
|
||||
"""
|
||||
gate = moe_block.gate
|
||||
T, H = hidden_states.shape
|
||||
K = gate.top_k
|
||||
|
||||
# Compute router logits and softmax over all experts
|
||||
router_logits = F.linear(hidden_states, gate.weight) # [T, E]
|
||||
router_probs = F.softmax(router_logits, dim=-1, dtype=torch.float32) # [T, E]
|
||||
|
||||
# Select top-k experts per token
|
||||
top_values, top_indices = torch.topk(router_probs, K, dim=-1) # [T, K] each
|
||||
|
||||
# Renormalize if configured (default True for models without the attribute,
|
||||
# e.g. Mixtral/MiniMax which always normalize)
|
||||
if getattr(gate, "norm_topk_prob", True):
|
||||
top_values = top_values / top_values.sum(dim=-1, keepdim=True)
|
||||
|
||||
# no-op: matches transformers which casts to softmax output dtype (float32).
|
||||
# top_values = top_values.to(router_probs.dtype)
|
||||
|
||||
# Flatten for moe_general_routing_inputs.
|
||||
# Token indices are naturally sorted ascending from the [T, K] layout:
|
||||
# [0, 0, ..., 1, 1, ..., T-1, T-1, ...] — this is required by SonicMoE.
|
||||
# Expert sorting is handled internally by general_routing_router_metadata.
|
||||
token_indices = (
|
||||
torch.arange(T, device=hidden_states.device, dtype=torch.int32)
|
||||
.unsqueeze(1)
|
||||
.expand(T, K)
|
||||
)
|
||||
|
||||
flat_scores = top_values.reshape(-1) # [T*K]
|
||||
flat_token_idx = token_indices.reshape(-1) # [T*K]
|
||||
flat_expert_idx = top_indices.to(torch.int32).reshape(-1) # [T*K]
|
||||
|
||||
return flat_scores, flat_token_idx, flat_expert_idx, router_logits
|
||||
|
||||
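A tiny worked example of the flattened layout this returns, for T=3 tokens and K=2 experts per token (illustration only):

```python
import torch

T, K = 3, 2
token_indices = torch.arange(T, dtype=torch.int32).unsqueeze(1).expand(T, K)
print(token_indices.reshape(-1).tolist())
# [0, 0, 1, 1, 2, 2] -- ascending per token, as SonicMoE requires

# router_scores and expert_indices are flattened in the same [T, K] -> [T*K]
# order, so entry i always belongs to token i // K.
```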
|
||||
def softmax_group_topk_routing(
|
||||
hidden_states: torch.Tensor, moe_block
|
||||
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
"""Mistral4-style routing: softmax -> group selection -> topk -> renorm -> scale."""
|
||||
gate = moe_block.gate
|
||||
T, H = hidden_states.shape
|
||||
K = moe_block.top_k
|
||||
E = getattr(moe_block, "n_routed_experts", gate.weight.shape[0])
|
||||
n_group = getattr(moe_block, "n_group", 1)
|
||||
|
||||
router_logits = F.linear(hidden_states, gate.weight) # [T, E]
|
||||
router_probs = F.softmax(router_logits, dim=-1, dtype=torch.float32) # [T, E]
|
||||
|
||||
scores_for_choice = router_probs
|
||||
|
||||
# Group selection: pick top groups, mask the rest
|
||||
if n_group > 1:
|
||||
group_scores = (
|
||||
scores_for_choice.view(-1, n_group, E // n_group)
|
||||
.topk(2, dim=-1)[0]
|
||||
.sum(dim=-1)
|
||||
)
|
||||
group_idx = torch.topk(
|
||||
group_scores, k=moe_block.topk_group, dim=-1, sorted=False
|
||||
)[1]
|
||||
group_mask = torch.zeros_like(group_scores)
|
||||
group_mask.scatter_(1, group_idx, 1)
|
||||
score_mask = (
|
||||
group_mask.unsqueeze(-1).expand(-1, n_group, E // n_group).reshape(-1, E)
|
||||
)
|
||||
scores_for_choice = scores_for_choice.masked_fill(~score_mask.bool(), 0.0)
|
||||
|
||||
topk_indices = torch.topk(scores_for_choice, k=K, dim=-1, sorted=False)[1]
|
||||
topk_weights = router_probs.gather(1, topk_indices)
|
||||
|
||||
# Renormalization + scaling
|
||||
norm_topk_prob = getattr(moe_block, "norm_topk_prob", True)
|
||||
if norm_topk_prob:
|
||||
topk_weights = topk_weights / (topk_weights.sum(dim=-1, keepdim=True) + 1e-20)
|
||||
routed_scaling_factor = getattr(moe_block, "routed_scaling_factor", 1.0)
|
||||
topk_weights = topk_weights * routed_scaling_factor
|
||||
|
||||
# Flatten for moe_general_routing_inputs
|
||||
token_indices = (
|
||||
torch.arange(T, device=hidden_states.device, dtype=torch.int32)
|
||||
.unsqueeze(1)
|
||||
.expand(T, K)
|
||||
)
|
||||
|
||||
flat_scores = topk_weights.to(torch.float32).reshape(-1) # [T*K]
|
||||
flat_token_idx = token_indices.reshape(-1) # [T*K]
|
||||
flat_expert_idx = topk_indices.to(torch.int32).reshape(-1) # [T*K]
|
||||
|
||||
return flat_scores, flat_token_idx, flat_expert_idx, router_logits
|
||||
|
||||
|
||||
def sigmoid_topk_routing(
|
||||
hidden_states: torch.Tensor, moe_block
|
||||
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
"""Sigmoid-based routing: sigmoid -> optional group selection -> topk.
|
||||
|
||||
Supports two variants:
|
||||
- **Group selection** (glm_moe_dsa, deepseek_v3, etc.): n_group > 1,
|
||||
bias on gate, group-based masking before topk.
|
||||
- **No group selection** (minimax_m2): n_group == 1 (or absent),
|
||||
bias on moe_block, straight topk from all experts.
|
||||
|
||||
Final routing weights come from the original sigmoid scores (not
|
||||
bias-corrected), with optional renormalization and scaling.
|
||||
|
||||
Args:
|
||||
hidden_states: [T, H] flattened token representations
|
||||
moe_block: MoE block module (accesses moe_block.gate.* and
|
||||
optional moe_block.n_group, .topk_group, .top_k, .norm_topk_prob,
|
||||
.routed_scaling_factor, .n_routed_experts)
|
||||
|
||||
Returns:
|
||||
router_scores: [T*K] flattened scores (float32)
|
||||
token_indices: [T*K] which token each entry belongs to (int32), sorted ascending
|
||||
expert_indices: [T*K] which expert (int32)
|
||||
router_logits: [T, E] original logits for aux loss
|
||||
"""
|
||||
gate = moe_block.gate
|
||||
T, H = hidden_states.shape
|
||||
K = moe_block.top_k
|
||||
E = getattr(moe_block, "n_routed_experts", gate.weight.shape[0])
|
||||
n_group = getattr(moe_block, "n_group", 1)
|
||||
|
||||
# Compute router logits and sigmoid probabilities
|
||||
router_logits = F.linear(hidden_states.float(), gate.weight.float()) # [T, E]
|
||||
router_probs = router_logits.sigmoid() # [T, E]
|
||||
|
||||
# Bias-corrected scores for expert selection (not used for final weights).
|
||||
# glm_moe_dsa/deepseek_v3 store the bias on gate; minimax_m2 stores it on the block.
|
||||
e_score_correction_bias = getattr(gate, "e_score_correction_bias", None)
|
||||
if e_score_correction_bias is None:
|
||||
e_score_correction_bias = getattr(moe_block, "e_score_correction_bias", None)
|
||||
if e_score_correction_bias is None:
|
||||
raise AttributeError(
|
||||
f"sigmoid_topk_routing requires e_score_correction_bias on "
|
||||
f"gate ({type(gate)}) or moe_block ({type(moe_block)}), but neither has it"
|
||||
)
|
||||
scores_for_choice = router_probs + e_score_correction_bias
|
||||
|
||||
# Group-based selection: pick top groups, mask the rest (skip when n_group == 1)
|
||||
if n_group > 1:
|
||||
group_scores = (
|
||||
scores_for_choice.view(-1, n_group, E // n_group)
|
||||
.topk(2, dim=-1)[0]
|
||||
.sum(dim=-1)
|
||||
) # [T, n_group]
|
||||
group_idx = torch.topk(
|
||||
group_scores, k=moe_block.topk_group, dim=-1, sorted=False
|
||||
)[1]
|
||||
group_mask = torch.zeros_like(group_scores)
|
||||
group_mask.scatter_(1, group_idx, 1)
|
||||
score_mask = (
|
||||
group_mask.unsqueeze(-1).expand(-1, n_group, E // n_group).reshape(-1, E)
|
||||
)
|
||||
scores_for_choice = scores_for_choice.masked_fill(~score_mask.bool(), 0.0)
|
||||
|
||||
# Final topk from (possibly masked) scores
|
||||
topk_indices = torch.topk(scores_for_choice, k=K, dim=-1, sorted=False)[1]
|
||||
|
||||
# Gather weights from original sigmoid scores (not bias-corrected)
|
||||
topk_weights = router_probs.gather(1, topk_indices)
|
||||
|
||||
# Optional renormalization + scaling
|
||||
norm_topk_prob = getattr(moe_block, "norm_topk_prob", True)
|
||||
if norm_topk_prob:
|
||||
topk_weights = topk_weights / (topk_weights.sum(dim=-1, keepdim=True) + 1e-20)
|
||||
routed_scaling_factor = getattr(moe_block, "routed_scaling_factor", 1.0)
|
||||
topk_weights = topk_weights * routed_scaling_factor
|
||||
|
||||
# Flatten for moe_general_routing_inputs.
|
||||
# Token indices are naturally sorted ascending from the [T, K] layout.
|
||||
token_indices = (
|
||||
torch.arange(T, device=hidden_states.device, dtype=torch.int32)
|
||||
.unsqueeze(1)
|
||||
.expand(T, K)
|
||||
)
|
||||
|
||||
flat_scores = topk_weights.to(torch.float32).reshape(-1) # [T*K]
|
||||
flat_token_idx = token_indices.reshape(-1) # [T*K]
|
||||
flat_expert_idx = topk_indices.to(torch.int32).reshape(-1) # [T*K]
|
||||
|
||||
return flat_scores, flat_token_idx, flat_expert_idx, router_logits
|
||||
181
src/axolotl/integrations/kernels/sonicmoe/weight_converter.py
Normal file
@@ -0,0 +1,181 @@
"""
Custom WeightConverter operations for SonicMoE weight format conversion.

SonicMoE requires gate_up_proj weights in interleaved format:
- Standard (concatenated): [E, 2*I, H] where first I rows are gate, last I rows are up
- SonicMoE (interleaved): [E, 2*I, H] where rows alternate [g0, u0, g1, u1, ...]

These ConversionOps integrate with transformers' WeightConverter system so that
weights are transparently converted during loading and reverted during saving.
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
import torch
|
||||
from einops import rearrange
|
||||
from transformers.core_model_loading import ConversionOps
|
||||
|
||||
from axolotl.utils.logging import get_logger
|
||||
|
||||
LOG = get_logger(__name__)
|
||||
|
||||
|
||||
def interleave_gate_up(tensor: torch.Tensor) -> torch.Tensor:
|
||||
"""[gate..., up...] -> [g0, u0, g1, u1, ...] along the 2*I dimension."""
|
||||
return rearrange(tensor, "... (two out) h -> ... (out two) h", two=2)
|
||||
|
||||
|
||||
def deinterleave_gate_up(tensor: torch.Tensor) -> torch.Tensor:
|
||||
"""[g0, u0, g1, u1, ...] -> [gate..., up...] along the 2*I dimension."""
|
||||
return rearrange(tensor, "... (out two) h -> ... (two out) h", two=2)
|
||||
|
||||
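A quick round-trip check of the two helpers (illustration, not part of the diff); the first expert's gate row 0 should land at interleaved row 0 and its up row 0 at row 1:

```python
import torch
from einops import rearrange

E, I, H = 2, 3, 4
gate = torch.randn(E, I, H)
up = torch.randn(E, I, H)
concatenated = torch.cat([gate, up], dim=1)     # [E, 2*I, H]

interleaved = interleave_gate_up(concatenated)  # rows: g0, u0, g1, u1, ...
assert torch.equal(interleaved[:, 0], gate[:, 0])
assert torch.equal(interleaved[:, 1], up[:, 0])

# The inverse restores the concatenated layout exactly
assert torch.equal(deinterleave_gate_up(interleaved), concatenated)
```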
|
||||
class ConcatenatedToInterleaved(ConversionOps):
|
||||
"""Convert concatenated gate/up projections to interleaved format.
|
||||
|
||||
Input: [E, 2*I, H] with gate=[E, :I, H] and up=[E, I:, H]
|
||||
Output: [E, 2*I, H] with rows alternating [g0, u0, g1, u1, ...]
|
||||
|
||||
This operation is applied along ``dim`` (default 1, the 2*I dimension).
|
||||
"""
|
||||
|
||||
def __init__(self, dim: int = 1):
|
||||
self.dim = dim
|
||||
|
||||
@torch.no_grad()
|
||||
def convert(
|
||||
self,
|
||||
input_dict: dict[str, Any],
|
||||
source_patterns: list[str],
|
||||
target_patterns: list[str],
|
||||
**kwargs,
|
||||
) -> dict[str, torch.Tensor]:
|
||||
target_pattern = self._get_target_pattern(
|
||||
input_dict, source_patterns, target_patterns
|
||||
)
|
||||
tensors = next(iter(input_dict.values()))
|
||||
tensor = tensors[0] if isinstance(tensors, list) else tensors
|
||||
|
||||
interleaved = interleave_gate_up(tensor)
|
||||
|
||||
return {target_pattern: interleaved}
|
||||
|
||||
def _get_target_pattern(
|
||||
self,
|
||||
input_dict: dict[str, Any],
|
||||
source_patterns: list[str],
|
||||
target_patterns: list[str],
|
||||
) -> str:
|
||||
# Follow the same logic as Transpose.get_target_pattern
|
||||
if len(input_dict) != 1:
|
||||
raise ValueError("Undefined Operation encountered!")
|
||||
if len(target_patterns) > 1:
|
||||
if len(source_patterns) == 1:
|
||||
return source_patterns[0]
|
||||
raise ValueError("Undefined Operation encountered!")
|
||||
return target_patterns[0]
|
||||
|
||||
@property
|
||||
def reverse_op(self) -> ConversionOps:
|
||||
return InterleavedToConcatenated(self.dim)
|
||||
|
||||
|
||||
class InterleavedToConcatenated(ConversionOps):
|
||||
"""Convert interleaved gate/up projections back to concatenated format.
|
||||
|
||||
Input: [E, 2*I, H] with rows alternating [g0, u0, g1, u1, ...]
|
||||
Output: [E, 2*I, H] with gate=[E, :I, H] and up=[E, I:, H]
|
||||
|
||||
This is the reverse of ``ConcatenatedToInterleaved``.
|
||||
"""
|
||||
|
||||
def __init__(self, dim: int = 1):
|
||||
self.dim = dim
|
||||
|
||||
@torch.no_grad()
|
||||
def convert(
|
||||
self,
|
||||
input_dict: dict[str, Any],
|
||||
source_patterns: list[str],
|
||||
target_patterns: list[str],
|
||||
**kwargs,
|
||||
) -> dict[str, torch.Tensor]:
|
||||
target_pattern = self._get_target_pattern(
|
||||
input_dict, source_patterns, target_patterns
|
||||
)
|
||||
tensors = next(iter(input_dict.values()))
|
||||
tensor = tensors[0] if isinstance(tensors, list) else tensors
|
||||
|
||||
concatenated = deinterleave_gate_up(tensor)
|
||||
|
||||
return {target_pattern: concatenated}
|
||||
|
||||
def _get_target_pattern(
|
||||
self,
|
||||
input_dict: dict[str, Any],
|
||||
source_patterns: list[str],
|
||||
target_patterns: list[str],
|
||||
) -> str:
|
||||
if len(input_dict) != 1:
|
||||
raise ValueError("Undefined Operation encountered!")
|
||||
if len(target_patterns) > 1:
|
||||
if len(source_patterns) == 1:
|
||||
return source_patterns[0]
|
||||
raise ValueError("Undefined Operation encountered!")
|
||||
return target_patterns[0]
|
||||
|
||||
@property
|
||||
def reverse_op(self) -> ConversionOps:
|
||||
return ConcatenatedToInterleaved(self.dim)
|
||||
|
||||
|
||||
def register_sonicmoe_weight_converter(model_type: str):
|
||||
"""Override the conversion mapping to add interleave step for gate_up_proj.
|
||||
|
||||
Appends a ConcatenatedToInterleaved operation to the existing gate_up_proj
|
||||
converter chain. For example, qwen3_moe's chain becomes:
|
||||
MergeModulelist(dim=0) -> Concatenate(dim=1) -> ConcatenatedToInterleaved(dim=1)
|
||||
|
||||
The reverse is auto-generated for saving:
|
||||
InterleavedToConcatenated(dim=1) -> Chunk(dim=1) -> SplitModulelist(dim=0)
|
||||
"""
|
||||
from transformers.conversion_mapping import (
|
||||
get_checkpoint_conversion_mapping,
|
||||
register_checkpoint_conversion_mapping,
|
||||
)
|
||||
|
||||
existing = get_checkpoint_conversion_mapping(model_type)
|
||||
if existing is None:
|
||||
LOG.warning(
|
||||
f"No conversion mapping found for model type '{model_type}'. "
|
||||
"SonicMoE weight interleaving will not be applied during checkpoint loading."
|
||||
)
|
||||
return
|
||||
|
||||
# Find the gate_up_proj converter and append ConcatenatedToInterleaved
|
||||
patched = False
|
||||
for converter in existing:
|
||||
if hasattr(converter, "operations") and any(
|
||||
"gate_up_proj" in pat for pat in converter.target_patterns
|
||||
):
|
||||
# Guard against double registration (e.g. plugin reloaded)
|
||||
if any(
|
||||
isinstance(op, ConcatenatedToInterleaved) for op in converter.operations
|
||||
):
|
||||
LOG.info(
|
||||
f"SonicMoE weight converter already registered for '{model_type}'"
|
||||
)
|
||||
return
|
||||
converter.operations.append(ConcatenatedToInterleaved(dim=1))
|
||||
patched = True
|
||||
break
|
||||
|
||||
if not patched:
|
||||
LOG.warning(
|
||||
f"Could not find gate_up_proj converter for model type '{model_type}'. "
|
||||
"SonicMoE weight interleaving will not be applied during checkpoint loading."
|
||||
)
|
||||
return
|
||||
|
||||
register_checkpoint_conversion_mapping(model_type, existing, overwrite=True)
|
||||
LOG.info(f"Registered SonicMoE weight converter for model type '{model_type}'")
|
||||
@@ -8,9 +8,6 @@ import sys
|
||||
from axolotl.integrations.base import BasePlugin
|
||||
from axolotl.utils.logging import get_logger
|
||||
|
||||
from .models.base import patch_lce_forward
|
||||
from .utils import patch_with_compile_disable
|
||||
|
||||
LOG = get_logger(__name__)
|
||||
|
||||
|
||||
@@ -23,10 +20,18 @@ class LigerPlugin(BasePlugin):
|
||||
return "axolotl.integrations.liger.LigerArgs"
|
||||
|
||||
def pre_model_load(self, cfg):
|
||||
# shim: liger-kernel 0.7.0 imports ORPOTrainer from old trl path
|
||||
import trl.trainer
|
||||
from trl.experimental.orpo import ORPOTrainer
|
||||
|
||||
trl.trainer.ORPOTrainer = ORPOTrainer
|
||||
|
||||
if cfg.torch_compile:
|
||||
# torch compile will unnecessarily attempt to optimize the triton kernel unless explicitly disabled
|
||||
import liger_kernel.ops.fused_linear_cross_entropy
|
||||
|
||||
from .utils import patch_with_compile_disable
|
||||
|
||||
patch_with_compile_disable(
|
||||
liger_kernel.ops.fused_linear_cross_entropy,
|
||||
"fused_linear_cross_entropy_forward",
|
||||
@@ -35,6 +40,7 @@ class LigerPlugin(BasePlugin):
|
||||
liger_kernel.ops.fused_linear_cross_entropy,
|
||||
"fused_linear_cross_entropy_backward",
|
||||
)
|
||||
|
||||
from liger_kernel.transformers.cross_entropy import LigerCrossEntropyLoss
|
||||
from liger_kernel.transformers.functional import liger_cross_entropy
|
||||
from liger_kernel.transformers.layer_norm import LigerLayerNorm
|
||||
@@ -192,6 +198,8 @@ class LigerPlugin(BasePlugin):
|
||||
)
|
||||
elif cfg.liger_fused_linear_cross_entropy:
|
||||
try:
|
||||
from .models.base import patch_lce_forward
|
||||
|
||||
patch_lce_forward(cfg.model_config_type)
|
||||
LOG.warning_once(
f"Applied ONLY liger_fused_linear_cross_entropy generic patches for model type: {cfg.model_config_type}"
|
||||
|
||||
@@ -25,7 +25,7 @@ def get_lora_parameters(
|
||||
) -> tuple[
|
||||
torch.Tensor,
|
||||
torch.Tensor | None,
|
||||
QuantState | None,
|
||||
QuantState | torch.Tensor | None,
|
||||
torch.Tensor | None,
|
||||
torch.Tensor | None,
|
||||
float | None,
|
||||
@@ -48,9 +48,13 @@ def get_lora_parameters(
|
||||
|
||||
if not hasattr(proj, "disable_adapters") or proj.disable_adapters or proj.merged:
|
||||
quant_state = getattr(W, "quant_state", None)
|
||||
if quant_state is None and W.dtype == torch.float8_e4m3fn:
|
||||
quant_state = getattr(base_layer, "weight_scale_inv", None)
|
||||
return W, b, quant_state, None, None, None
|
||||
|
||||
quant_state = getattr(W, "quant_state", None)
|
||||
if quant_state is None and W.dtype == torch.float8_e4m3fn:
|
||||
quant_state = getattr(base_layer, "weight_scale_inv", None)
|
||||
|
||||
active_adapter = (
|
||||
proj.active_adapters[0]
|
||||
@@ -81,7 +85,7 @@ def matmul_lora(
|
||||
X: torch.Tensor,
|
||||
W: torch.Tensor,
|
||||
b: torch.Tensor | None,
|
||||
W_quant: QuantState | None,
|
||||
W_quant: QuantState | torch.Tensor | None,
|
||||
A: torch.Tensor | None,
|
||||
B: torch.Tensor | None,
|
||||
s: float | None,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""Dequantization utilities for `bitsandbytes` integration."""
|
||||
"""Dequantization utilities for `bitsandbytes` and FP8 integration."""
|
||||
|
||||
import ctypes
|
||||
|
||||
@@ -15,9 +15,50 @@ CUDA_STREAM: torch.cuda.Stream | None = None
|
||||
HAS_CUDA_STREAM: bool = Version(bnb.__version__) > Version("0.43.3")
|
||||
|
||||
|
||||
def dequantize_fp8(
|
||||
W: torch.Tensor,
|
||||
scale_inv: torch.Tensor,
|
||||
dtype: torch.dtype = torch.bfloat16,
|
||||
) -> torch.Tensor:
|
||||
"""Dequantize FP8 block-quantized weights: W_dequant = W_fp8 * scale_inv.
|
||||
|
||||
Args:
|
||||
W: FP8 weight tensor [out_features, in_features] in float8_e4m3fn.
|
||||
scale_inv: Per-block inverse scale [ceil(out/block), ceil(in/block)]
|
||||
or per-tensor scalar.
|
||||
dtype: Output dtype (default bf16).
|
||||
|
||||
Returns:
|
||||
Dequantized tensor in the specified dtype.
|
||||
"""
|
||||
W_float = W.to(dtype)
|
||||
if scale_inv.numel() == 1:
|
||||
return W_float * scale_inv.to(dtype)
|
||||
if scale_inv.dim() == 2 and W.dim() == 2:
|
||||
sr, sc = scale_inv.shape
|
||||
br = W.shape[0] // sr
|
||||
bc = W.shape[1] // sc
|
||||
# If dimensions are exactly divisible, use fast reshape path
|
||||
if sr * br == W.shape[0] and sc * bc == W.shape[1]:
|
||||
return (
|
||||
W_float.reshape(sr, br, sc, bc) * scale_inv[:, None, :, None].to(dtype)
|
||||
).reshape(W.shape)
|
||||
# Tail-block handling: compute actual block size (ceil division),
|
||||
# tile scale_inv to cover full shape, then crop to W's dimensions
|
||||
br_ceil = -(-W.shape[0] // sr) # ceil(rows / scale_rows) = block_size
|
||||
bc_ceil = -(-W.shape[1] // sc)
|
||||
scale_expanded = (
|
||||
scale_inv.to(dtype)
|
||||
.repeat_interleave(br_ceil, dim=0)
|
||||
.repeat_interleave(bc_ceil, dim=1)
|
||||
)[: W.shape[0], : W.shape[1]]
|
||||
return W_float * scale_expanded
|
||||
return W_float * scale_inv.to(dtype)
|
||||
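A small shape-level check of the block-wise path (sketch only; requires a PyTorch build with float8 support): a [4, 6] weight with a [2, 3] `scale_inv` implies 2x2 blocks, so element (i, j) is scaled by `scale_inv[i // 2, j // 2]`.

```python
import torch

W = torch.randn(4, 6).to(torch.float8_e4m3fn)  # block-quantized weight
scale_inv = torch.rand(2, 3)                   # one inverse scale per 2x2 block

W_dq = dequantize_fp8(W, scale_inv)
assert W_dq.shape == (4, 6) and W_dq.dtype == torch.bfloat16

i, j = 3, 5
expected = W[i, j].to(torch.bfloat16) * scale_inv[i // 2, j // 2].to(torch.bfloat16)
assert torch.equal(W_dq[i, j], expected)
```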
|
||||
|
||||
def dequantize(
|
||||
W: torch.Tensor,
|
||||
quant_state: QuantState | list | None = None,
|
||||
quant_state: QuantState | list | torch.Tensor | None = None,
|
||||
out: torch.Tensor | None = None,
|
||||
) -> torch.Tensor:
|
||||
"""
|
||||
@@ -49,6 +90,15 @@ def dequantize(
|
||||
if quant_state is None:
|
||||
return W
|
||||
|
||||
# FP8 path: quant_state is actually scale_inv tensor
|
||||
if W.dtype == torch.float8_e4m3fn:
|
||||
scale_inv = quant_state
|
||||
# Caller may pass W.t() (non-contiguous) — dequantize in original
|
||||
# layout then transpose back so the result shape matches the input.
|
||||
if not W.is_contiguous() and W.dim() == 2:
|
||||
return dequantize_fp8(W.t(), scale_inv).t()
|
||||
return dequantize_fp8(W, scale_inv)
|
||||
|
||||
# Get the target device from input tensor W
|
||||
target_device = W.device
|
||||
|
||||
|
||||
@@ -160,6 +160,18 @@ def load_lora(
|
||||
else:
|
||||
model = get_peft_model(model, lora_config, **model_kwargs)
|
||||
|
||||
# FP8 models: LoRA A/B inherit FP8 dtype from base weights, but training
|
||||
# requires a compute dtype (bf16/fp16). Cast trainable LoRA params.
|
||||
if cfg.torch_dtype:
|
||||
_fp8_cast_dtype = cfg.torch_dtype
|
||||
elif torch.cuda.is_available() and torch.cuda.is_bf16_supported():
|
||||
_fp8_cast_dtype = torch.bfloat16
|
||||
else:
|
||||
_fp8_cast_dtype = torch.float16
|
||||
for _name, param in model.named_parameters():
|
||||
if param.requires_grad and param.dtype == torch.float8_e4m3fn:
|
||||
param.data = param.data.to(_fp8_cast_dtype)
|
||||
|
||||
if rank == 0:
|
||||
try:
|
||||
model.print_trainable_parameters()
|
||||
|
||||
@@ -215,6 +215,8 @@ class ModelLoader:
|
||||
self.model_kwargs["revision"] = self.cfg.revision_of_model
|
||||
if self.cfg.use_kernels:
|
||||
self.model_kwargs["use_kernels"] = self.cfg.use_kernels
|
||||
if "allow_all_kernels" not in self.model_kwargs:
|
||||
self.model_kwargs["allow_all_kernels"] = self.cfg.use_kernels
|
||||
self._set_quantization_config()
|
||||
self._set_attention_config()
|
||||
self._check_model_requirements()
|
||||
@@ -503,6 +505,20 @@ class ModelLoader:
|
||||
elif not is_ds_zero3:
|
||||
self.model_kwargs["device_map"] = device_map
|
||||
|
||||
# quantize_moe_experts quantizes expert weights on-the-fly during loading,
|
||||
# so the actual VRAM usage is much less than bf16 estimates.
|
||||
# When device_map is "auto", accelerate's infer_auto_device_map computes
|
||||
# the device map at bf16 size (before quantization), causing it to offload
|
||||
# layers to CPU, which BnB then rejects. Force single-GPU placement to
|
||||
# prevent this. Only applies to the non-FSDP, non-ZeRO3 path (DDP/single).
|
||||
if getattr(self.cfg, "quantize_moe_experts", False) and device_map in (
|
||||
"auto",
|
||||
None,
|
||||
):
|
||||
self.model_kwargs["device_map"] = {
|
||||
"": int(os.environ.get("LOCAL_RANK", 0))
|
||||
}
|
||||
|
||||
cur_device = get_device_type()
|
||||
if "mps" in str(cur_device):
|
||||
self.model_kwargs["device_map"] = "mps:0"
|
||||
@@ -674,8 +690,8 @@ class ModelLoader:
|
||||
del self.model_kwargs["device_map"]
|
||||
|
||||
transformers.modeling_utils.is_deepspeed_zero3_enabled = lambda: True
|
||||
transformers.integrations.deepspeed.is_deepspeed_zero3_enabled = (
|
||||
lambda: True
|
||||
transformers.integrations.deepspeed.is_deepspeed_zero3_enabled = lambda: (
|
||||
True
|
||||
)
|
||||
|
||||
return hf_ds_cfg
|
||||
@@ -829,8 +845,9 @@ class ModelLoader:
|
||||
def _set_z3_leaf_modules(self):
|
||||
from deepspeed.utils import set_z3_leaf_modules
|
||||
|
||||
if self.cfg.model_config_type in MOE_ARCH_BLOCK:
|
||||
moe_blocks = MOE_ARCH_BLOCK[self.cfg.model_config_type]
|
||||
moe_type = self.cfg.model_config_type_text or self.cfg.model_config_type
|
||||
if moe_type in MOE_ARCH_BLOCK:
|
||||
moe_blocks = MOE_ARCH_BLOCK[moe_type]
|
||||
moe_blocks = [moe_blocks] if isinstance(moe_blocks, str) else moe_blocks
|
||||
set_z3_leaf_modules(
|
||||
self.model,
|
||||
|
||||
@@ -93,11 +93,13 @@ class PatchManager:
|
||||
|
||||
def apply_pre_model_load_patches(self):
|
||||
"""Apply pre-model load patches based on config."""
|
||||
self._deactivate_hf_async_load()
|
||||
self._apply_transformers_patches()
|
||||
# self._apply_flex_attention_patches()
|
||||
self._apply_flash_attention_patches()
|
||||
self._apply_chunked_cross_entropy_patch()
|
||||
self._apply_sageattn_patches()
|
||||
self._apply_flash_attn_4_patches()
|
||||
self._apply_fsdp_patches()
|
||||
self._apply_adapter_patches()
|
||||
self._apply_model_specific_patches()
|
||||
@@ -114,6 +116,8 @@ class PatchManager:
|
||||
self._apply_patch_deepspeed_zero3()
|
||||
self._apply_voxtral_patches()
|
||||
self._apply_apertus_patches()
|
||||
self._apply_trl_vllm_patches()
|
||||
self._apply_trl_trainer_utils_patches()
|
||||
|
||||
def apply_post_plugin_pre_model_load_patches(self):
|
||||
"""Apply post plugin-pre_model_load load patches based on config."""
|
||||
@@ -166,6 +170,13 @@ class PatchManager:
|
||||
|
||||
def _apply_fsdp_patches(self):
|
||||
"""Apply patches for FSDP configurations."""
|
||||
if self.cfg.fsdp_config:
|
||||
from axolotl.monkeypatch.accelerate.fsdp2 import (
|
||||
patch_initialize_missing_keys_for_fsdp,
|
||||
)
|
||||
|
||||
patch_initialize_missing_keys_for_fsdp()
|
||||
|
||||
if self.cfg.context_parallel_size > 1 or (
|
||||
self.cfg.fsdp_config and str(self.cfg.fsdp_version) == "2"
|
||||
):
|
||||
@@ -220,6 +231,15 @@ class PatchManager:
|
||||
|
||||
patch_sageattn()
|
||||
|
||||
def _apply_flash_attn_4_patches(self):
|
||||
"""Auto-apply FA4 when flash_attention is enabled and FA4 is available on SM90+."""
|
||||
if not self.cfg.flash_attention:
|
||||
return
|
||||
|
||||
from axolotl.monkeypatch.attention.flash_attn_4 import patch_flash_attn_4
|
||||
|
||||
patch_flash_attn_4(self.model_config)
|
||||
|
||||
def _apply_model_specific_patches(self):
|
||||
"""Apply patches specific to model architectures."""
|
||||
if (
|
||||
@@ -239,6 +259,31 @@ class PatchManager:
|
||||
|
||||
patch_qwen3_next_modeling_packing()
|
||||
|
||||
if self.cfg.model_config_type == "qwen3_5" and self.cfg.sample_packing:
|
||||
from axolotl.monkeypatch.models.qwen3_5.modeling import (
|
||||
patch_qwen3_5_modeling_packing,
|
||||
)
|
||||
|
||||
patch_qwen3_5_modeling_packing()
|
||||
|
||||
if self.cfg.model_config_type == "qwen3_5_moe" and self.cfg.sample_packing:
|
||||
from axolotl.monkeypatch.models.qwen3_5.modeling import (
|
||||
patch_qwen3_5_moe_modeling_packing,
|
||||
)
|
||||
|
||||
patch_qwen3_5_moe_modeling_packing()
|
||||
|
||||
if (
|
||||
self.cfg.model_config_type in ["qwen3_5", "qwen3_5_moe"]
|
||||
and self.cfg.is_multimodal
|
||||
and self.cfg.flash_attention
|
||||
):
|
||||
from axolotl.monkeypatch.models.qwen3_5.modeling import (
|
||||
patch_qwen3_5_vlm_flash_attention,
|
||||
)
|
||||
|
||||
patch_qwen3_5_vlm_flash_attention()
|
||||
|
||||
if self.cfg.model_config_type == "kimi_linear":
|
||||
from axolotl.monkeypatch.models.kimi_linear.patch_kimi_linear import (
|
||||
patch_kimi_model,
|
||||
@@ -377,17 +422,27 @@ class PatchManager:
|
||||
if self.cfg.load_in_8bit:
|
||||
apply_linear8bitlt_save_patch()
|
||||
|
||||
def _deactivate_hf_async_load(self):
|
||||
"""Load weights synchronously so they can be converted and not OOM."""
|
||||
if self.cfg.load_in_4bit or self.cfg.load_in_8bit:
|
||||
os.environ["HF_DEACTIVATE_ASYNC_LOAD"] = "1"
|
||||
|
||||
def _apply_moe_expert_quantization_patch(self):
|
||||
"""Patch transformers weight loading to quantize MoE expert params on-the-fly."""
|
||||
if not self.cfg.quantize_moe_experts:
|
||||
"""Patch transformers weight loading and PEFT for MoE expert quantization."""
|
||||
has_target_params = bool(getattr(self.cfg, "lora_target_parameters", None))
|
||||
|
||||
if not self.cfg.quantize_moe_experts and not has_target_params:
|
||||
return
|
||||
|
||||
from axolotl.monkeypatch.moe_quant import (
|
||||
patch_moe_quantization_on_load,
|
||||
patch_peft_target_parameters_matching,
|
||||
)
|
||||
|
||||
patch_moe_quantization_on_load(self.cfg)
|
||||
if self.cfg.quantize_moe_experts:
|
||||
from axolotl.monkeypatch.moe_quant import patch_moe_quantization_on_load
|
||||
|
||||
patch_moe_quantization_on_load(self.cfg)
|
||||
|
||||
patch_peft_target_parameters_matching()
|
||||
|
||||
def _finalize_moe_expert_quantization(self, model: PreTrainedModel):
|
||||
@@ -614,6 +669,50 @@ class PatchManager:
|
||||
|
||||
patch_apertus_xielu_activation()
|
||||
|
||||
def _apply_trl_vllm_patches(self):
|
||||
"""Apply TRL vLLM patches for batched weight sync, NaN logprobs fix, and scalar handling."""
|
||||
if (
|
||||
self.cfg.rl
|
||||
and getattr(self.cfg, "trl", None)
|
||||
and getattr(self.cfg.trl, "use_vllm", False)
|
||||
):
|
||||
from axolotl.monkeypatch.trainer.trl_vllm import patch_trl_vllm
|
||||
|
||||
patch_trl_vllm()
|
||||
|
||||
def _apply_trl_trainer_utils_patches(self):
|
||||
"""Replace trl.trainer.utils.{selective_log_softmax, entropy_from_logits} with Triton kernels."""
|
||||
if not self.cfg.rl:
|
||||
return
|
||||
|
||||
try:
|
||||
from axolotl.monkeypatch.trainer.utils import (
|
||||
entropy_from_logits,
|
||||
selective_log_softmax,
|
||||
)
|
||||
except (ImportError, ModuleNotFoundError):
|
||||
LOG.warning("Triton not available — skipping trl.trainer.utils patches")
|
||||
return
|
||||
|
||||
import trl.trainer.utils
|
||||
|
||||
# Guard against repeated calls: only stash the original if trl still
|
||||
# points at its own implementation (not our wrapper).
|
||||
if trl.trainer.utils.selective_log_softmax is not selective_log_softmax:
|
||||
from axolotl.monkeypatch.trainer import utils as _axolotl_trainer_utils
|
||||
|
||||
_axolotl_trainer_utils.selective_log_softmax_original = (
|
||||
trl.trainer.utils.selective_log_softmax
|
||||
)
|
||||
trl.trainer.utils.selective_log_softmax = selective_log_softmax
|
||||
|
||||
if trl.trainer.utils.entropy_from_logits is not entropy_from_logits:
|
||||
trl.trainer.utils.entropy_from_logits = entropy_from_logits
|
||||
|
||||
LOG.info(
|
||||
"Patched trl.trainer.utils with Triton selective_log_softmax and entropy_from_logits"
|
||||
)
|
||||
|
||||
def _apply_scaling_softmax_patch(self, model: PreTrainedModel):
|
||||
"""Apply Scaling Softmax (SSMax) patch. Ref: https://arxiv.org/abs/2501.19399"""
|
||||
if self.cfg.scaling_softmax:
|
||||
|
||||
@@ -55,12 +55,12 @@ def load_processor(cfg: DictDefault, tokenizer: PreTrainedTokenizerBase):
|
||||
)
|
||||
|
||||
processor_kwargs["trust_remote_code"] = cfg.trust_remote_code or False
|
||||
processor_kwargs["tokenizer"] = tokenizer
|
||||
|
||||
processor = processor_cls.from_pretrained(
|
||||
cfg.processor_config,
|
||||
**processor_kwargs,
|
||||
)
|
||||
processor.tokenizer = tokenizer
|
||||
|
||||
# Attempt to load image size from processor if available
|
||||
if (
|
||||
|
||||
@@ -201,7 +201,7 @@ def load_tokenizer(cfg: DictDefault) -> PreTrainedTokenizer:
|
||||
tokenizer.pad_token = LLAMA_DEFAULT_EOS_TOKEN
|
||||
|
||||
if tokenizer.__class__.__name__ == "GPTNeoXTokenizerFast":
|
||||
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
|
||||
tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # nosec B105
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
||||
|
||||
# Mistral's official FA implementation requires left padding
|
||||
|
||||
@@ -252,12 +252,20 @@ def _process_lora_module_for_fsdp(module, fsdp2_kwargs):
|
||||
fully_shard(module.lora_A[active_adapter], **fsdp2_kwargs)
|
||||
if module.lora_B:
|
||||
fully_shard(module.lora_B[active_adapter], **fsdp2_kwargs)
|
||||
if module.lora_embedding_A:
|
||||
fully_shard(module.lora_embedding_A[active_adapter], **fsdp2_kwargs)
|
||||
if module.lora_embedding_B:
|
||||
fully_shard(module.lora_embedding_B[active_adapter], **fsdp2_kwargs)
|
||||
if module.lora_magnitude_vector:
|
||||
fully_shard(module.lora_magnitude_vector[active_adapter], **fsdp2_kwargs)
|
||||
|
||||
# lora_embedding_A/B are ParameterDicts containing nn.Parameter (Tensors),
# not nn.Module. fully_shard() only accepts nn.Module, so we cannot shard
# individual embedding Parameters. Instead, shard the entire LoraLayer module.
# fully_shard() can be used hierarchically because it does not override groups
# already assigned by earlier fully_shard() calls, so modules that were already
# sharded above are not affected.
# See https://docs.pytorch.org/docs/stable/distributed.fsdp.fully_shard.html
|
||||
if module.lora_embedding_A or module.lora_embedding_B:
|
||||
from torch.distributed.fsdp import FSDPModule
|
||||
|
||||
if not isinstance(module, FSDPModule):
|
||||
fully_shard(module, **fsdp2_kwargs)
|
||||
|
||||
return log_bias_dtype_mismatch
|
||||
|
||||
|
||||
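The comment above hinges on a peft detail: `lora_embedding_A`/`lora_embedding_B` are `nn.ParameterDict`s whose values are plain `nn.Parameter` tensors, while FSDP2's `fully_shard()` only accepts `nn.Module` instances. A small illustrative check (not part of this diff; the toy layer is a stand-in, not the actual peft `LoraLayer`):

```python
import torch
import torch.nn as nn

# Toy stand-in for a LoRA-wrapped embedding layer.
layer = nn.Module()
layer.lora_embedding_A = nn.ParameterDict(
    {"default": nn.Parameter(torch.zeros(8, 128))}
)

param = layer.lora_embedding_A["default"]
print(isinstance(param, nn.Module))  # False -> cannot be passed to fully_shard()
print(isinstance(layer, nn.Module))  # True  -> shard the whole layer instead
```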
@@ -471,6 +479,46 @@ def patch_tied_keys_for_meta_device():
        )


def patch_initialize_missing_keys_for_fsdp():
    """Patch _initialize_missing_keys to skip re-initialization on FSDP non-rank-0.

    When using cpu_ram_efficient_loading, non-rank-0 processes load weights on
    meta device and move them to CPU as empty tensors. Without this patch,
    initialize_weights() re-initializes ALL parameters (via guarded init
    functions), which is slow and uses extra RAM per process.

    The fix marks all params/buffers with _is_hf_initialized=True before calling
    the original method, so guarded init functions (init.normal_, init.zeros_,
    etc.) become no-ops on non-rank-0 processes. The real weights arrive later
    via FSDP broadcast from rank 0.

    Upstream fix: https://github.com/huggingface/transformers/pull/44473
    Remove this patch once transformers includes the fix in a stable release.
    """
    from transformers import PreTrainedModel
    from transformers.modeling_utils import is_fsdp_enabled, is_local_dist_rank_0

    if getattr(PreTrainedModel._initialize_missing_keys, "_axolotl_patched", False):
        return

    _original_initialize_missing_keys = PreTrainedModel._initialize_missing_keys

    def _patched_initialize_missing_keys(self, is_quantized: bool) -> None:
        if is_fsdp_enabled() and not is_local_dist_rank_0():
            for key in self.state_dict():
                try:
                    param_or_buffer = self.get_parameter_or_buffer(key)
                    param_or_buffer._is_hf_initialized = True
                except AttributeError:
                    pass  # may happen when handling pre-quantized weights
            self._is_hf_initialized = True

        _original_initialize_missing_keys(self, is_quantized)

    PreTrainedModel._initialize_missing_keys = _patched_initialize_missing_keys
    PreTrainedModel._initialize_missing_keys._axolotl_patched = True
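The `_is_hf_initialized` flag is what the guarded init functions in transformers consult before touching a tensor, so marking everything as initialized turns those calls into no-ops. The toy `guarded_normal_` below mirrors that pattern for illustration only; it is not the transformers implementation:

```python
import torch
import torch.nn as nn

def guarded_normal_(param: torch.Tensor, std: float = 0.02) -> None:
    # Skip tensors already flagged as initialized, as the guarded init fns do.
    if getattr(param, "_is_hf_initialized", False):
        return
    nn.init.normal_(param, mean=0.0, std=std)

linear = nn.Linear(4, 4)
linear.weight._is_hf_initialized = True   # e.g. a non-rank-0 param awaiting FSDP broadcast
before = linear.weight.clone()
guarded_normal_(linear.weight)            # no-op
guarded_normal_(linear.bias)              # still re-initialized
print(torch.equal(before, linear.weight)) # True
```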

def patch_accelerate_fsdp2():
    import accelerate

104 src/axolotl/monkeypatch/attention/flash_attn_4.py Normal file
@@ -0,0 +1,104 @@
"""Transparently upgrade FA2 to FA4 when available on SM90+ hardware."""

import torch

from axolotl.utils.logging import get_logger

LOG = get_logger(__name__)


def _get_head_dims(model_config):
    """Extract (head_dim, head_dim_v) from a model config.

    Handles composite models (e.g. Qwen3.5 VL) via text_config and
    MLA models (DeepSeek/Kimi) that have separate Q/V head dimensions.
    """
    cfg = model_config
    if hasattr(cfg, "text_config"):
        cfg = cfg.text_config

    # MLA models: Q head_dim = qk_nope + qk_rope, V head_dim = v_head_dim
    if hasattr(cfg, "qk_nope_head_dim") and hasattr(cfg, "qk_rope_head_dim"):
        head_dim = cfg.qk_nope_head_dim + cfg.qk_rope_head_dim
        head_dim_v = getattr(cfg, "v_head_dim", head_dim)
        return head_dim, head_dim_v

    # Standard models
    if hasattr(cfg, "head_dim"):
        return cfg.head_dim, cfg.head_dim
    if hasattr(cfg, "hidden_size") and hasattr(cfg, "num_attention_heads"):
        head_dim = cfg.hidden_size // cfg.num_attention_heads
        return head_dim, head_dim

    return None, None
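For MLA-style configs the query head dimension is the sum of the no-RoPE and RoPE parts, while values use `v_head_dim`. A quick worked example with a DeepSeek-like dummy config (not part of this diff; the numbers are illustrative only):

```python
from types import SimpleNamespace

# Hypothetical MLA config attributes, mirroring what _get_head_dims reads.
mla_cfg = SimpleNamespace(qk_nope_head_dim=128, qk_rope_head_dim=64, v_head_dim=128)

q_head_dim = mla_cfg.qk_nope_head_dim + mla_cfg.qk_rope_head_dim  # 192
v_head_dim = getattr(mla_cfg, "v_head_dim", q_head_dim)           # 128
print(q_head_dim, v_head_dim)  # 192 128
```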
def patch_flash_attn_4(model_config=None):
    """Patch _lazy_imports to redirect FA2 imports to FA4 if available on supported hardware."""
    if not torch.cuda.is_available():
        return

    major, _ = torch.cuda.get_device_capability()
    # Matches flash_attn/cute/interface.py: arch / 10 in [9, 10, 11]
    if major not in (9, 10, 11):
        return

    try:
        from flash_attn.cute import (  # noqa: F401
            flash_attn_func,
            flash_attn_varlen_func,
        )
    except ImportError:
        LOG.info(
            "Flash Attention 4 is available for your GPU and offers faster training speeds. "
            "To enable: pip install flash-attn-4"
        )
        return

    # Validate head dimensions against FA4's own constraints
    head_dim = None
    if model_config is not None:
        head_dim, head_dim_v = _get_head_dims(model_config)
        if head_dim is not None:
            try:
                from flash_attn.cute.interface import _validate_head_dims
            except ImportError:
                LOG.warning(
                    "Could not import _validate_head_dims from flash_attn.cute.interface, "
                    "unable to verify head dimension compatibility, falling back to FA2"
                )
                return

            # alignment = 16 // element_size; bf16/fp16 = 2 bytes -> alignment = 8
            alignment = 8
            try:
                _validate_head_dims(head_dim, head_dim_v, major, alignment)
            except AssertionError as exc:
                LOG.warning(
                    "Model head dimensions not supported by FA4, "
                    "falling back to FA2: %s",
                    exc,
                )
                return

    import transformers.modeling_flash_attention_utils as fa_utils

    if getattr(fa_utils._lazy_imports, "_axolotl_patched", False):
        return

    def _patched_lazy_imports(
        implementation, attention_wrapper=None, allow_all_kernels=False
    ):
        return (
            flash_attn_func,
            flash_attn_varlen_func,
            fa_utils._pad_input,
            fa_utils._unpad_input,
        )

    _patched_lazy_imports._axolotl_patched = True
    fa_utils._lazy_imports = _patched_lazy_imports
    LOG.info(
        "Flash Attention 4 enabled (head_dim=%s)",
        head_dim if model_config else "unknown",
    )
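A minimal sketch of how the patch might be invoked manually, assuming the module path shown in this new file and an installed axolotl; the function exits quietly when FA4, CUDA, or supported hardware is absent, so calling it is safe either way:

```python
from transformers import LlamaConfig

# Import path assumed from the file added in this diff.
from axolotl.monkeypatch.attention.flash_attn_4 import patch_flash_attn_4

# A locally constructed config is enough; only head-dim attributes are inspected.
config = LlamaConfig(hidden_size=4096, num_attention_heads=32)  # head_dim -> 128
patch_flash_attn_4(config)  # silently returns unless FA4 and SM90+ hardware are present
```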
@@ -64,15 +64,12 @@ def patch_flex_wrapper(**flex_attn_compile_kwargs):
            LOG.info(
                "Compiling flex attention with kwargs: %s. This may take a while...",
                flex_attn_compile_kwargs,
                main_process_only=True,
            )
            self._compiled_flex_attention = torch.compile(
                flex_attention,
                **flex_attn_compile_kwargs,
            )
            LOG.info(
                "Flex attention compiled successfully.", main_process_only=True
            )
            LOG.info("Flex attention compiled successfully.")

            self._is_flex_compiled = True

0 src/axolotl/monkeypatch/models/qwen3_5/__init__.py Normal file
291 src/axolotl/monkeypatch/models/qwen3_5/modeling.py Normal file
@@ -0,0 +1,291 @@
"""Monkeypatch for Qwen3_5 and Qwen3_5Moe models to pass position_ids to linear attention."""

import importlib
from typing import Optional, Tuple

import torch
import torch.nn.functional as F

from axolotl.utils.logging import get_logger

LOG = get_logger(__name__)

try:
    from fla.modules.convolution import (
        causal_conv1d as fla_causal_conv1d,  # FLA >= 0.4.1
    )
except ImportError:
    try:
        from fla.modules.conv import causal_conv1d as fla_causal_conv1d  # FLA < 0.4.1
    except ImportError:
        fla_causal_conv1d = None


def get_cu_seqlens(position_ids):
    """
    Compute cumulative sequence lengths from position_ids for FLA varlen kernels.

    Adapted from transformers.modeling_flash_attention_utils.prepare_fa_kwargs_from_position_ids.
    https://github.com/huggingface/transformers/blob/0f1b128d3359a26bd18be99c26d7f04fb3cba914/src/transformers/modeling_flash_attention_utils.py#L316

    Qwen3.5 uses MRoPE: position_ids arrive as [axes, B, T]. All axes carry the
    same temporal positions, so axis 0 is used to recover the [B, T] layout.
    See: https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen3_5/modeling_qwen3_5.py
    """
    if position_ids.ndim == 3:
        position_ids = position_ids[0]

    tensor_kwargs = {"dtype": torch.int32, "device": position_ids.device}
    position_ids = position_ids.view(-1)
    indices_q = (position_ids == 0).nonzero().view(-1)
    return torch.cat(
        (
            indices_q.to(**tensor_kwargs),
            torch.tensor(position_ids.size(), **tensor_kwargs),
        )
    )
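A worked example of what this produces for a packed row: two sequences of lengths 3 and 2 share one batch row, positions restart at 0 at each boundary, and the result is the usual `cu_seqlens` boundary vector. The snippet below is a standalone replication of the logic above, not a call into axolotl:

```python
import torch

# One batch row packing two sequences (lengths 3 and 2); positions restart at 0.
position_ids = torch.tensor([[0, 1, 2, 0, 1]])

flat = position_ids.view(-1)
starts = (flat == 0).nonzero().view(-1).to(torch.int32)
cu_seqlens = torch.cat((starts, torch.tensor([flat.numel()], dtype=torch.int32)))
print(cu_seqlens)  # tensor([0, 3, 5], dtype=torch.int32)
```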
def _inject_fla_kernels(module) -> None:
    """Inject FLA kernels into a modeling module, bypassing is_flash_linear_attention_available."""
    try:
        from fla.modules import FusedRMSNormGated
        from fla.ops.gated_delta_rule import (
            chunk_gated_delta_rule,
            fused_recurrent_gated_delta_rule,
        )

        module.FusedRMSNormGated = FusedRMSNormGated
        module.chunk_gated_delta_rule = chunk_gated_delta_rule
        module.fused_recurrent_gated_delta_rule = fused_recurrent_gated_delta_rule
        module.is_fast_path_available = True
    except ImportError:
        module.chunk_gated_delta_rule = None
        module.fused_recurrent_gated_delta_rule = None
        module.FusedRMSNormGated = None


def _patched_decoder_forward(
    self,
    hidden_states: torch.Tensor,
    position_embeddings: Tuple[torch.Tensor, torch.Tensor],
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values=None,
    cache_position: Optional[torch.LongTensor] = None,
    **kwargs,
) -> torch.FloatTensor:
    """Decoder layer forward that passes position_ids through to linear attention."""
    residual = hidden_states
    hidden_states = self.input_layernorm(hidden_states)

    if self.layer_type == "linear_attention":
        hidden_states = self.linear_attn(
            hidden_states=hidden_states,
            cache_params=past_key_values,
            cache_position=cache_position,
            attention_mask=attention_mask,
            position_ids=position_ids,
        )
    elif self.layer_type == "full_attention":
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )

    hidden_states = residual + hidden_states

    residual = hidden_states
    hidden_states = self.post_attention_layernorm(hidden_states)
    hidden_states = self.mlp(hidden_states)
    if isinstance(hidden_states, tuple):  # MoE returns (hidden_states, router_logits)
        hidden_states, _ = hidden_states
    hidden_states = residual + hidden_states

    return hidden_states

def _make_qwen3_5_gated_delta_forward(apply_mask_fn):
    """Factory for patched Qwen3_5/Qwen3_5Moe GatedDeltaNet forward with packing support."""

    def patched_forward(
        self,
        hidden_states: torch.Tensor,
        cache_params=None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ):
        hidden_states = apply_mask_fn(hidden_states, attention_mask)

        batch_size, seq_len, _ = hidden_states.shape

        use_precomputed_states = (
            cache_params is not None
            and cache_params.has_previous_state
            and seq_len == 1
            and cache_position is not None
        )

        cu_seqlens = None
        if not use_precomputed_states and position_ids is not None:
            cu_seqlens = get_cu_seqlens(position_ids=position_ids)

        if cache_params is not None:
            conv_state = cache_params.conv_states[self.layer_idx]
            recurrent_state = cache_params.recurrent_states[self.layer_idx]

        # mixed_qkv stays [B, T, D]; only transposed inside paths that require [B, D, T]
        mixed_qkv = self.in_proj_qkv(hidden_states)  # [B, T, D]

        z = self.in_proj_z(hidden_states)
        z = z.reshape(batch_size, seq_len, -1, self.head_v_dim)

        b = self.in_proj_b(hidden_states)
        a = self.in_proj_a(hidden_states)

        if use_precomputed_states:
            mixed_qkv = self.causal_conv1d_update(
                mixed_qkv.transpose(1, 2),
                conv_state,
                self.conv1d.weight.squeeze(1),
                self.conv1d.bias,
                self.activation,
            ).transpose(1, 2)
        else:
            if cache_params is not None:
                mixed_qkv_t = mixed_qkv.transpose(1, 2)
                cache_params.conv_states[self.layer_idx] = F.pad(
                    mixed_qkv_t,
                    (self.conv_kernel_size - mixed_qkv_t.shape[-1], 0),
                )

            if fla_causal_conv1d is not None and cu_seqlens is not None:
                # FLA varlen kernel for packed sequences; input must be contiguous [B, T, D]
                mixed_qkv, _ = fla_causal_conv1d(
                    x=mixed_qkv,
                    weight=self.conv1d.weight.squeeze(1),
                    bias=self.conv1d.bias,
                    activation=self.activation,
                    cu_seqlens=cu_seqlens,
                )
            else:
                if cu_seqlens is not None and fla_causal_conv1d is None:
                    raise RuntimeError(
                        "Packed sequences require fla.modules.convolution.causal_conv1d "
                        "(cu_seqlens support). Install flash-linear-attention or disable packing."
                    )
                mixed_qkv = F.silu(
                    self.conv1d(mixed_qkv.transpose(1, 2))[:, :, :seq_len]
                ).transpose(1, 2)

        query, key, value = torch.split(
            mixed_qkv,
            [self.key_dim, self.key_dim, self.value_dim],
            dim=-1,
        )
        query = query.reshape(batch_size, seq_len, -1, self.head_k_dim)
        key = key.reshape(batch_size, seq_len, -1, self.head_k_dim)
        value = value.reshape(batch_size, seq_len, -1, self.head_v_dim)

        beta = b.sigmoid()
        g = -self.A_log.float().exp() * F.softplus(a.float() + self.dt_bias)
        if self.num_v_heads // self.num_k_heads > 1:
            query = query.repeat_interleave(self.num_v_heads // self.num_k_heads, dim=2)
            key = key.repeat_interleave(self.num_v_heads // self.num_k_heads, dim=2)

        if not use_precomputed_states:
            core_attn_out, last_recurrent_state = self.chunk_gated_delta_rule(
                query,
                key,
                value,
                g=g.to(dtype=query.dtype),
                beta=beta,
                initial_state=None,
                output_final_state=cache_params is not None,
                use_qk_l2norm_in_kernel=True,
                # torch_chunk_gated_delta_rule fallback does not accept cu_seqlens
                **({"cu_seqlens": cu_seqlens} if cu_seqlens is not None else {}),
            )
        else:
            core_attn_out, last_recurrent_state = self.recurrent_gated_delta_rule(
                query,
                key,
                value,
                g=g.to(dtype=query.dtype),
                beta=beta,
                initial_state=recurrent_state,
                output_final_state=cache_params is not None,
                use_qk_l2norm_in_kernel=True,
            )

        if cache_params is not None:
            cache_params.recurrent_states[self.layer_idx] = last_recurrent_state

        core_attn_out = core_attn_out.reshape(-1, self.head_v_dim)
        z = z.reshape(-1, self.head_v_dim)
        core_attn_out = self.norm(core_attn_out, z)
        core_attn_out = core_attn_out.reshape(batch_size, seq_len, -1)

        return self.out_proj(core_attn_out)

    return patched_forward

def _apply_packing_patches(model_type: str, cls_prefix: str, forward_factory) -> None:
    module_name = f"transformers.models.{model_type}.modeling_{model_type}"

    try:
        module = importlib.import_module(module_name)
    except ImportError:
        LOG.warning(f"{model_type} not found in transformers, skipping packing patches")
        return

    _inject_fla_kernels(module)
    getattr(module, f"{cls_prefix}DecoderLayer").forward = _patched_decoder_forward
    gated_cls = getattr(module, f"{cls_prefix}GatedDeltaNet")
    gated_cls.forward = forward_factory(module.apply_mask_to_padding_states)

    LOG.info(
        f"Applied {cls_prefix} packing patch "
        f"(fla_causal_conv1d={'available' if fla_causal_conv1d else 'unavailable'})"
    )


def patch_qwen3_5_modeling_packing():
    _apply_packing_patches("qwen3_5", "Qwen3_5", _make_qwen3_5_gated_delta_forward)


def patch_qwen3_5_moe_modeling_packing():
    _apply_packing_patches(
        "qwen3_5_moe", "Qwen3_5Moe", _make_qwen3_5_gated_delta_forward
    )


def patch_qwen3_5_vlm_flash_attention():
    """
    Patch _is_packed_sequence to handle Qwen3.5's 3-D MRoPE position_ids.

    transformers passes position_ids as [axes, B, T] to decoder layers, but
    _is_packed_sequence only handles 2-D tensors and mis-classifies the 3-D
    shape as a packed-sequence indicator, causing CUDA errors in the varlen path.
    """
    try:
        import transformers.modeling_flash_attention_utils as fa_utils

        _original = fa_utils._is_packed_sequence

        def _patched(position_ids, batch_size):
            if position_ids is not None and position_ids.ndim != 2:
                return False
            return _original(position_ids, batch_size)

        fa_utils._is_packed_sequence = _patched
        LOG.info("Applied Qwen3.5 VLM flash-attention patch (3-D MRoPE position_ids)")
    except Exception as exc:  # pragma: no cover
        LOG.warning(f"Failed to apply Qwen3.5 VLM flash-attention patch: {exc}")
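To make the shape issue concrete: MRoPE position_ids carry an extra leading axes dimension, so a routine expecting `[B, T]` instead sees a 3-D tensor. A tiny sketch of the shapes involved (values are illustrative, not taken from this diff):

```python
import torch

batch_size, seq_len = 2, 3
plain_position_ids = torch.arange(seq_len).expand(batch_size, seq_len)  # [B, T] -> ndim 2
mrope_position_ids = plain_position_ids.unsqueeze(0).expand(3, -1, -1)  # [axes, B, T] -> ndim 3

print(plain_position_ids.ndim, mrope_position_ids.ndim)  # 2 3
# The patched _is_packed_sequence returns False for the 3-D case instead of
# misreading it as packed input.
```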
Some files were not shown because too many files have changed in this diff.