Compare commits

102 Commits

ffb307a8a7, 915c258c6e, 1e58235c38, 5753c5b89c, 18d78f02cf, 923181aaed, 786f1a3ff9, 26418e6f9a,
19fe84ef46, 98730868e7, 5771a65b88, f912d1bb97, 0250e5f87c, 274c579d81, ccd2f12335, 00e0238501,
f782957002, f2f66f2bb9, 013474eb70, ce74c20109, 6dc9816722, a6bfbe3400, 74715125b6, f0f3bfbdf0,
022ef7ab4e, 04533b79d4, 19de29be19, ec75aa5889, cf4e3fac64, 69df309cbb, b436ecf61f, f137ce50ec,
4131bcf769, 64fea39978, 4966496b98, 66a9e4fced, 15d35b76bb, 0d53e0fe8f, 9344fa5e8c, c702edae5f,
dfaf76659f, 26a58bb8af, cec2490903, dfa5224908, ddafc6ef80, f4376748f3, ad56e600e3, 18d9456297,
da5ede6372, 6cbca1ffb2, 2e082d47cc, b4c6675cd2, 828131332a, 273a03f85c, 9bbe2cfe0f, 64da8f0044,
1fa0a98e38, 8d542d9d63, a4565476e0, 02dc263338, 2acd3e1242, 0437c1a4ba, ef150fd973, 47ad92c6b9,
f0fee9c56c, 37d07bd7f7, 4c81172917, cd8c769e84, 0d60046d08, c110e3eb48, 95c259b3fb, d1fd505813,
1334281d50, 98f230d864, 02f308351c, 3b91e8174d, 40d906fb33, 89d5323c13, df870f6a8f, f500aaa490,
9ec33f52e3, b453562c01, 367f7eb3a6, e888e38ce7, 400120af2d, 459e5f9b16, 43f6f84269, 36c4ab11f9,
2f4e4ef604, aee03fc636, 255b818fbc, 332ee74f32, 3b0d2ac5c0, 9462a1bf79, 8e9386c799, 740d5a1d31,
850c1a5f8d, 7fa8ac40cd, f9748c4dc5, 33975ce4bc, e8b962d47f, 856ff12171
```diff
@@ -2,7 +2,6 @@
 source = axolotl
 omit =
     */tests/*
-    setup.py
 
 [report]
 exclude_lines =
```
.github/CONTRIBUTING.md (17 changes)

````diff
@@ -29,13 +29,18 @@ PRs are **greatly welcome**!
 2. Set up the development environment by following the instructions in the [README.md](https://github.com/axolotl-ai-cloud/axolotl/tree/main/README.md) file.
 3. Explore the codebase, run tests, and verify that everything works as expected.
 
-Please run below to setup env
+Please run the below to setup:
+
 ```bash
-pip3 install -r requirements-dev.txt -r requirements-tests.txt
-pre-commit install
+git clone https://github.com/axolotl-ai-cloud/axolotl.git
+cd axolotl
 
-# test
-pytest tests/
+uv sync --dev && uv pip install flash-attn --no-build-isolation
+source .venv/bin/activate
+
+pre-commit install  # install pre-commit hooks
+
+pytest tests/  # optional; run test suite
 ```
 
 ## How to Contribute
````
.github/workflows/base.yml (11 changes)

```diff
@@ -39,13 +39,6 @@ jobs:
            pytorch: 2.6.0
            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
            dockerfile: "Dockerfile-base"
-          - cuda: "126"
-            cuda_version: 12.6.3
-            cudnn_version: ""
-            python_version: "3.11"
-            pytorch: 2.7.0
-            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
-            dockerfile: "Dockerfile-base"
          - cuda: "126"
            cuda_version: 12.6.3
            cudnn_version: ""
@@ -105,7 +98,9 @@ jobs:
          context: .
          file: ./docker/${{ matrix.dockerfile }}
          push: ${{ github.event_name != 'pull_request' }}
-          tags: ${{ steps.metadata.outputs.tags }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+          tags: |
+            ${{ steps.metadata.outputs.tags }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+            ${{ steps.metadata.outputs.tags }}-base-uv-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
          labels: ${{ steps.metadata.outputs.labels }}
          build-args: |
            CUDA_VERSION=${{ matrix.cuda_version }}
```
.github/workflows/docs.yml (8 changes)

```diff
@@ -20,10 +20,14 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
      - name: Install dependencies
        run: |
-          python3 -m pip install jupyter quartodoc
-          python3 -m pip install -e .
+          uv pip install --system jupyter quartodoc
+          uv pip install --system -e .
      - name: Build autodoc
        run: quartodoc build
      - name: Publish to GitHub Pages (and render)
```
.github/workflows/lint.yml (3 changes)

```diff
@@ -6,7 +6,7 @@ on:
    types: [opened, synchronize, reopened, ready_for_review]
    paths:
      - '**.py'
-      - 'requirements.txt'
+      - 'pyproject.toml'
      - '.github/workflows/*.yml'
      - "*.[q]md"
      - "examples/**/*.y[a]?ml"
@@ -23,5 +23,4 @@ jobs:
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
-          cache: 'pip' # caching pip dependencies
      - uses: pre-commit/action@v3.0.1
```
.github/workflows/main.yml (16 changes)

```diff
@@ -20,11 +20,6 @@ jobs:
            python_version: "3.11"
            pytorch: 2.6.0
            axolotl_extras:
-          - cuda: 126
-            cuda_version: 12.6.3
-            python_version: "3.11"
-            pytorch: 2.7.0
-            axolotl_extras:
          - cuda: 126
            cuda_version: 12.6.3
            python_version: "3.11"
@@ -73,6 +68,8 @@ jobs:
            PYTORCH_VERSION=${{ matrix.pytorch }}
            AXOLOTL_ARGS=${{ matrix.axolotl_args }}
            AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}
+            GIT_REF=${{ github.ref }}
+            GIT_SHA=${{ github.sha }}
          file: ./docker/Dockerfile
          push: ${{ github.event_name != 'pull_request' }}
          tags: |
@@ -93,11 +90,6 @@ jobs:
            python_version: "3.11"
            pytorch: 2.6.0
            axolotl_extras:
-          - cuda: 126
-            cuda_version: 12.6.3
-            python_version: "3.11"
-            pytorch: 2.7.0
-            axolotl_extras:
          - cuda: 126
            cuda_version: 12.6.3
            python_version: "3.11"
@@ -148,6 +140,8 @@ jobs:
          build-args: |
            BASE_TAG=${{ github.ref_type == 'tag' && 'main' || github.ref_name }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
            CUDA=${{ matrix.cuda }}
+            GIT_REF=${{ github.ref }}
+            GIT_SHA=${{ github.sha }}
          file: ./docker/Dockerfile-cloud
          push: ${{ github.event_name != 'pull_request' }}
          tags: |
@@ -213,6 +207,8 @@ jobs:
          build-args: |
            BASE_TAG=${{ github.ref_type == 'tag' && 'main' || github.ref_name }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
            CUDA=${{ matrix.cuda }}
+            GIT_REF=${{ github.ref }}
+            GIT_SHA=${{ github.sha }}
          file: ./docker/Dockerfile-cloud-no-tmux
          push: ${{ github.event_name != 'pull_request' }}
          tags: |
```
.github/workflows/multi-gpu-e2e.yml (12 changes)

```diff
@@ -4,8 +4,6 @@ on:
  pull_request:
    paths:
      - 'tests/e2e/multigpu/**.py'
-      - 'requirements.txt'
-      - 'setup.py'
      - 'pyproject.toml'
      - '.github/workflows/multi-gpu-e2e.yml'
      - 'src/axolotl/core/trainers/mixins/sequence_parallel.py'
@@ -56,13 +54,17 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
      - name: Install Modal
        run: |
          python -m pip install --upgrade pip
-          pip install modal==1.0.2 jinja2
+          pip install modal==1.0.2 jinja2 protobuf
      - name: Update env vars
        run: |
-          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
+          echo "BASE_TAG=${{ github.ref_name }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
          echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
          echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
@@ -72,4 +74,4 @@ jobs:
          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        run: |
-          modal run cicd.multigpu
+          modal run -m cicd.multigpu
```
.github/workflows/nightlies.yml (4 changes)

```diff
@@ -52,6 +52,8 @@ jobs:
            CUDA=${{ matrix.cuda }}
            PYTORCH_VERSION=${{ matrix.pytorch }}
            AXOLOTL_ARGS=${{ matrix.axolotl_args }}
+            GIT_REF=${{ github.ref }}
+            GIT_SHA=${{ github.sha }}
          file: ./docker/Dockerfile
          push: ${{ github.event_name != 'pull_request' }}
          tags: |
@@ -102,6 +104,8 @@ jobs:
          build-args: |
            BASE_TAG=${{ github.ref_name }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
            CUDA=${{ matrix.cuda }}
+            GIT_REF=${{ github.ref }}
+            GIT_SHA=${{ github.sha }}
          file: ./docker/Dockerfile-cloud
          push: ${{ github.event_name != 'pull_request' }}
          tags: |
```
.github/workflows/precommit-autoupdate.yml (7 changes)

```diff
@@ -18,10 +18,15 @@ jobs:
        with:
          python-version: '3.11'
 
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
+
      - name: Update pre-commit hooks
        id: update
        run: |
-          pip install pre-commit
+          uv pip install --system pre-commit
          pre-commit autoupdate
          if [[ -n $(git status --porcelain) ]]; then
            echo "changes=true" >> $GITHUB_OUTPUT
```
.github/workflows/preview-docs.yml (9 changes)

```diff
@@ -40,10 +40,15 @@ jobs:
        with:
          python-version: '3.11'
 
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
+
      - name: Install dependencies
        run: |
-          python3 -m pip install jupyter quartodoc
-          python3 -m pip install -e .
+          uv pip install --system jupyter quartodoc
+          uv pip install --system -e .
 
      - name: Build autodoc
        run: quartodoc build
```
.github/workflows/pypi.yml (21 changes)

```diff
@@ -38,23 +38,24 @@ jobs:
        with:
          python-version: "3.11"
 
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
+
      - name: Install dependencies
        run: |
-          pip3 install wheel packaging==23.2
-          pip3 install --no-build-isolation -e .
-          pip3 install -r requirements-dev.txt -r requirements-tests.txt
+          uv pip install --system wheel packaging==23.2
+          uv pip install --system --no-build-isolation -e ".[dev]"
 
      - name: Extract tag name
        id: tag
-        run: echo ::set-output name=TAG_NAME::$(echo $GITHUB_REF | cut -d / -f 3)
+        run: echo "TAG_NAME=$(echo "$GITHUB_REF" | cut -d / -f 3)" >> "$GITHUB_OUTPUT"
 
-      - name: Update version in setup.py
+      - name: Build package
        run: |
-          sed -i -E 's/version="([0-9.]+)",/version="${{ steps.tag.outputs.TAG_NAME }}",/g' setup.py
-
-      - name: Build a source dist
-        run: |
-          python setup.py sdist
+          uv pip install --system build
+          python -m build
 
      - name: Publish package distributions to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
```
.github/workflows/tests-nightly.yml (52 changes)

```diff
@@ -13,7 +13,6 @@ jobs:
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
-          cache: 'pip' # caching pip dependencies
      - uses: pre-commit/action@v3.0.1
        env:
          SKIP: no-commit-to-branch
@@ -43,32 +42,30 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python_version }}
-          cache: 'pip' # caching pip dependencies
 
-      - name: upgrade pip
-        run: |
-          pip3 install --upgrade pip
-          pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
 
      - name: Install PyTorch
        run: |
-          pip3 install torch==${{ matrix.pytorch_version }} torchvision
+          uv pip install --system torch==${{ matrix.pytorch_version }} torchvision
 
-      - name: Update requirements.txt
+      - name: Update pyproject.toml for nightly builds
        run: |
-          sed -i 's#^transformers.*#transformers @ git+https://github.com/huggingface/transformers.git@main#' requirements.txt
-          sed -i 's#^peft.*#peft @ git+https://github.com/huggingface/peft.git@main#' requirements.txt
-          sed -i 's#^accelerate.*#accelerate @ git+https://github.com/huggingface/accelerate.git@main#' requirements.txt
-          sed -i 's#^trl.*#trl @ git+https://github.com/huggingface/trl.git@main#' requirements.txt
-          sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt
+          sed -i 's#"transformers==.*"#"transformers @ git+https://github.com/huggingface/transformers.git@main"#' pyproject.toml
+          sed -i 's#"peft==.*"#"peft @ git+https://github.com/huggingface/peft.git@main"#' pyproject.toml
+          sed -i 's#"accelerate==.*"#"accelerate @ git+https://github.com/huggingface/accelerate.git@main"#' pyproject.toml
+          sed -i 's#"trl==.*"#"trl @ git+https://github.com/huggingface/trl.git@main"#' pyproject.toml
+          sed -i 's#"datasets==.*"#"datasets @ git+https://github.com/huggingface/datasets.git@main"#' pyproject.toml
 
      - name: Install dependencies
        run: |
-          pip3 show torch
-          pip3 install --no-build-isolation -U -e .
+          uv pip show --system torch
+          uv pip install --system --no-build-isolation -e ".[dev]"
          python scripts/unsloth_install.py | sh
          python scripts/cutcrossentropy_install.py | sh
-          pip3 install -r requirements-dev.txt -r requirements-tests.txt
 
      - name: Make sure PyTorch version wasn't clobbered
        run: |
@@ -84,9 +81,6 @@ jobs:
          pytest -v --durations=10 tests/patched/
          pytest -v --durations=10 tests/cli/
 
-      - name: cleanup pip cache
-        run: |
-          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
 
  docker-e2e-tests:
    if: github.repository_owner == 'axolotl-ai-cloud'
@@ -120,13 +114,16 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
      - name: Install Modal
        run: |
-          python -m pip install --upgrade pip
-          pip install modal==1.0.2 jinja2
+          uv pip install --system modal==1.0.2 jinja2
      - name: Update env vars
        run: |
-          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
+          echo "BASE_TAG=main-base-uv-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
          echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
          echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
@@ -136,7 +133,7 @@ jobs:
          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        run: |
-          modal run cicd.e2e_tests
+          modal run -m cicd.e2e_tests
  docker-e2e-multigpu-tests:
    if: github.repository_owner == 'axolotl-ai-cloud'
    # this job needs to be run on self-hosted GPU runners...
@@ -162,13 +159,16 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
      - name: Install Modal
        run: |
-          python -m pip install --upgrade pip
-          pip install modal==1.0.2 jinja2
+          uv pip install --system modal==1.0.2 jinja2
      - name: Update env vars
        run: |
-          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
+          echo "BASE_TAG=main-base-uv-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
          echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
          echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
```
.github/workflows/tests.yml (98 changes)

```diff
@@ -7,18 +7,16 @@ on:
      - "main"
    paths:
      - '**.py'
-      - 'requirements.txt'
+      - 'pyproject.toml'
      - '.github/workflows/*.yml'
-      - 'requirements-tests.txt'
      - 'cicd/cicd.sh'
      - 'cicd/Dockerfile.jinja'
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    paths:
      - '**.py'
-      - 'requirements.txt'
+      - 'pyproject.toml'
      - '.github/workflows/*.yml'
-      - 'requirements-tests.txt'
      - 'cicd/cicd.sh'
      - 'cicd/Dockerfile.jinja'
  workflow_dispatch:
@@ -41,7 +39,6 @@ jobs:
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
-          cache: 'pip' # caching pip dependencies
      - uses: pre-commit/action@v3.0.1
        env:
          SKIP: no-commit-to-branch
@@ -72,24 +69,25 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python_version }}
-          cache: 'pip' # caching pip dependencies
 
-      - name: upgrade pip
-        run: |
-          pip3 install --upgrade pip
-          pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
 
      - name: Install PyTorch
        run: |
-          pip3 install torch==${{ matrix.pytorch_version }} torchvision
+          uv pip install --system torch==${{ matrix.pytorch_version }} torchvision
 
      - name: Install dependencies
        run: |
-          pip3 show torch
-          pip3 install --no-build-isolation -U -e .
-          python scripts/unsloth_install.py | sh
-          python scripts/cutcrossentropy_install.py | sh
-          pip3 install -r requirements-dev.txt -r requirements-tests.txt
+          uv pip show --system torch
+          uv pip install --system wheel
+          printf "torch==${{ matrix.pytorch_version }}\n" > torch-constraints.txt
+          uv pip install --system --no-cache-dir --no-build-isolation -e ".[dev]" --constraints torch-constraints.txt
+          set -o pipefail
+          python scripts/unsloth_install.py | bash
+          python scripts/cutcrossentropy_install.py | bash
 
      - name: Make sure PyTorch version wasn't clobbered
        run: |
@@ -105,10 +103,10 @@ jobs:
 
      - name: Run tests
        run: |
-          pytest -v --durations=10 -n8 --dist loadfile --ignore=tests/e2e/ --ignore=tests/patched/ --ignore=tests/cli/ --ignore=tests/monkeypatch/ tests/ --cov=axolotl --cov-report=xml
-          pytest -v --durations=10 tests/monkeypatch/ --cov=axolotl --cov-append --cov-report=xml
-          pytest -v --durations=10 tests/patched/ --cov=axolotl --cov-append --cov-report=xml
-          pytest -v --durations=10 tests/cli/ --cov=axolotl --cov-append --cov-report=xml
+          python -m pytest -v --durations=10 -n 8 --dist loadfile --cov=axolotl --cov-report=xml --ignore=tests/e2e/ --ignore=tests/patched/ --ignore=tests/cli/ --ignore=tests/monkeypatch/ tests/
+          python -m pytest -v --durations=10 -n 8 --cov=axolotl --cov-append --cov-report=xml tests/monkeypatch/
+          python -m pytest -v --durations=10 -n 8 --cov=axolotl --cov-append --cov-report=xml tests/patched/
+          python -m pytest -v --durations=10 -n 8 --cov=axolotl --cov-append --cov-report=xml tests/cli/
 
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v5
@@ -118,9 +116,6 @@ jobs:
          flags: unittests,pytorch-${{ matrix.pytorch_version }}
          fail_ci_if_error: false
 
-      - name: cleanup pip cache
-        run: |
-          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
 
  pytest-sdist:
    name: PyTest from Source Dist
@@ -147,25 +142,26 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python_version }}
-          cache: 'pip' # caching pip dependencies
 
-      - name: upgrade pip
-        run: |
-          pip3 install --upgrade pip
-          pip3 install --upgrade packaging==23.2 setuptools==75.8.0 setuptools_scm build wheel
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
 
      - name: Install PyTorch
        run: |
-          pip3 install torch==${{ matrix.pytorch_version }} torchvision
+          uv pip install --system torch==${{ matrix.pytorch_version }} torchvision
 
      - name: Install dependencies
        run: |
-          pip3 show torch
-          python -m build --no-isolation --sdist
-          pip3 install --no-build-isolation dist/axolotl*.tar.gz
+          uv pip show --system torch
+          uv pip install --system wheel build setuptools_scm
+          python -m build --sdist
+          printf "torch==${{ matrix.pytorch_version }}\n" > torch-constraints.txt
+          tarball_path=$(echo dist/axolotl*.tar.gz)
+          uv pip install --no-cache-dir --no-build-isolation --system "${tarball_path}[dev]" --constraints torch-constraints.txt
          python scripts/unsloth_install.py | sh
          python scripts/cutcrossentropy_install.py | sh
-          pip3 install -r requirements-dev.txt -r requirements-tests.txt
 
      - name: Make sure PyTorch version wasn't clobbered
        run: |
@@ -180,13 +176,9 @@ jobs:
 
      - name: Run tests
        run: |
-          pytest -v --durations=10 -n8 --dist loadfile --ignore=tests/e2e/ --ignore=tests/patched/ --ignore=tests/cli/ --ignore=tests/monkeypatch/ tests/ --cov=axolotl --cov-report=xml
-          pytest -v --durations=10 tests/monkeypatch/ --cov=axolotl --cov-append --cov-report=xml
-          pytest -v --durations=10 tests/cli/
-
-      - name: cleanup pip cache
-        run: |
-          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
+          python -m pytest -v --durations=10 -n 8 --dist loadfile --cov=axolotl --cov-report=xml --ignore=tests/e2e/ --ignore=tests/patched/ --ignore=tests/cli/ --ignore=tests/monkeypatch/ tests/
+          python -m pytest -v --durations=10 -n 8 --cov=axolotl --cov-append --cov-report=xml tests/monkeypatch/
+          python -m pytest -v --durations=10 -n 8 tests/cli/
 
  gate-skip-e2e:
    needs: [pre-commit, pytest, pytest-sdist]
@@ -243,7 +235,7 @@ jobs:
          pytorch: 2.7.1
          num_gpus: 1
          axolotl_extras:
-          dockerfile: "Dockerfile-uv.jinja"
+          dockerfile: "Dockerfile.jinja"
    steps:
      - name: Checkout
        uses: actions/checkout@v4
@@ -251,13 +243,17 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
      - name: Install Modal
        run: |
          python -m pip install --upgrade pip
-          pip install modal==1.0.2 jinja2
+          pip install modal==1.0.2 jinja2 protobuf
      - name: Update env vars
        run: |
-          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
+          echo "BASE_TAG=${{ github.ref_name }}-base-uv-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
          echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
          echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
@@ -312,13 +308,17 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
      - name: Install Modal
        run: |
          python -m pip install --upgrade pip
-          pip install modal==1.0.2 jinja2
+          pip install modal==1.0.2 jinja2 protobuf
      - name: Update env vars
        run: |
-          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
+          echo "BASE_TAG=${{ github.ref_name }}-base-uv-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
          echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
          echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
@@ -355,13 +355,17 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          version: "latest"
      - name: Install Modal
        run: |
          python -m pip install --upgrade pip
-          pip install modal==1.0.2 jinja2
+          pip install modal==1.0.2 jinja2 protobuf
      - name: Update env vars
        run: |
-          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
+          echo "BASE_TAG=${{ github.ref_name }}-base-uv-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
          echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
          echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
```
.gitignore (2 changes)

```diff
@@ -191,5 +191,5 @@ out/
 # vim
 *.swp
 
-# scm auto-versioning
+# setuptools-scm generated version file
 src/axolotl/_version.py
```
```diff
@@ -1,9 +1,8 @@
 FROM axolotlai/axolotl-cloud:main-py3.11-cu124-2.6.0
 
 COPY .runpod/requirements.txt /requirements.txt
-RUN --mount=type=cache,target=/root/.cache/pip \
-    python3 -m pip install --upgrade pip && \
-    python3 -m pip install --upgrade -r /requirements.txt
+RUN curl -LsSf https://astral.sh/uv/install.sh | sh && \
+    /root/.local/bin/uv pip install --system -r /requirements.txt
 
 # Environment settings
 ARG BASE_VOLUME="/runpod-volume"
```
```diff
@@ -1,6 +1,5 @@
-include requirements.txt
+include pyproject.toml
 include README.md
 include LICENSE
-include src/setuptools_axolotl_dynamic_dependencies.py
 include src/axolotl/utils/chat_templates/templates/*.jinja
-recursive-include axolotl *.py
+recursive-include src/axolotl *.py
```
README.md (36 changes)

````diff
@@ -65,15 +65,9 @@ Features:
 - **Flexible Dataset Handling**: Load from local, HuggingFace, and cloud (S3, Azure, GCP, OCI) datasets.
 - **Cloud Ready**: We ship [Docker images](https://hub.docker.com/u/axolotlai) and also [PyPI packages](https://pypi.org/project/axolotl/) for use on cloud platforms and local hardware.
 
 ## 🚀 Quick Start - LLM Fine-tuning in Minutes
 
-**Requirements**:
-
-- NVIDIA GPU (Ampere or newer for `bf16` and Flash Attention) or AMD GPU
-- Python 3.11
-- PyTorch ≥2.6.0
+**Requirements**: NVIDIA GPU (Ampere+) or AMD GPU, Python 3.11+
 
 ### Google Colab
@@ -81,15 +75,35 @@ Features:
 
 ### Installation
 
-#### Using pip
+#### Project setup (uv add)
 
 ```bash
-pip3 install -U packaging==23.2 setuptools==75.8.0 wheel ninja
-pip3 install --no-build-isolation axolotl[flash-attn,deepspeed]
+# Install uv
+curl -LsSf https://astral.sh/uv/install.sh | sh
+
+# Initialize or enter your project
+uv init my-project && cd my-project
+uv add axolotl
+uv pip install flash-attn --no-build-isolation
+source .venv/bin/activate
 
 # Download example axolotl configs, deepspeed configs
 axolotl fetch examples
-axolotl fetch deepspeed_configs  # OPTIONAL
+axolotl fetch deepspeed_configs  # optional
 ```
 
+#### Quick try (uv pip)
+
+```bash
+# Install uv if needed
+curl -LsSf https://astral.sh/uv/install.sh | sh
+
+uv pip install axolotl
+uv pip install flash-attn --no-build-isolation
+
+# Download example axolotl configs, deepspeed configs
+axolotl fetch examples
+axolotl fetch deepspeed_configs  # optional
+```
+
 #### Using Docker
````

```diff
@@ -267,6 +267,7 @@ website:
      - docs/dataset_loading.qmd
      - docs/qat.qmd
      - docs/quantize.qmd
+      - docs/optimizations.qmd
 
    - section: "Core Concepts"
      contents:
```
```diff
@@ -1,52 +0,0 @@
-FROM axolotlai/axolotl-base-uv:{{ BASE_TAG }}
-
-ENV TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
-ENV AXOLOTL_EXTRAS="{{ AXOLOTL_EXTRAS }}"
-ENV AXOLOTL_ARGS="{{ AXOLOTL_ARGS }}"
-ENV CUDA="{{ CUDA }}"
-ENV PYTORCH_VERSION="{{ PYTORCH_VERSION }}"
-ENV GITHUB_REF="{{ GITHUB_REF }}"
-ENV GITHUB_SHA="{{ GITHUB_SHA }}"
-ENV NIGHTLY_BUILD="{{ NIGHTLY_BUILD }}"
-ENV HF_HOME="{{ HF_HOME }}"
-
-RUN apt-get update && \
-    apt-get install -y --allow-change-held-packages vim curl nano libnccl2 libnccl-dev ibverbs-providers ibverbs-utils infiniband-diags librdmacm-dev librdmacm1 rdmacm-utils slurm-wlm
-
-WORKDIR /workspace
-
-RUN git clone --depth=1 https://github.com/axolotl-ai-cloud/axolotl.git
-
-WORKDIR /workspace/axolotl
-
-RUN git fetch origin +$GITHUB_REF && \
-    git checkout FETCH_HEAD
-
-# If AXOLOTL_EXTRAS is set, append it in brackets
-RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
-        sed -i 's#^transformers.*#transformers @ git+https://github.com/huggingface/transformers.git@main#' requirements.txt; \
-        sed -i 's#^peft.*#peft @ git+https://github.com/huggingface/peft.git@main#' requirements.txt; \
-        sed -i 's#^accelerate.*#accelerate @ git+https://github.com/huggingface/accelerate.git@main#' requirements.txt; \
-        sed -i 's#^trl.*#trl @ git+https://github.com/huggingface/trl.git@main#' requirements.txt; \
-        sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt; \
-    fi
-
-RUN uv pip install packaging==23.2 setuptools==75.8.0
-RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
-        uv pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
-    else \
-        uv pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray] $AXOLOTL_ARGS; \
-    fi
-
-RUN python scripts/unsloth_install.py --uv | sh
-RUN python scripts/cutcrossentropy_install.py --uv | sh
-
-# So we can test the Docker image
-RUN uv pip install -r requirements-dev.txt -r requirements-tests.txt
-
-# fix so that git fetch/pull from remote works
-RUN git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
-    git config --get remote.origin.fetch
-
-# helper for huggingface-login cli
-RUN git config --global credential.helper store
```
```diff
@@ -1,6 +1,10 @@
-FROM axolotlai/axolotl-base:{{ BASE_TAG }}
+FROM axolotlai/axolotl-base-uv:{{ BASE_TAG }}
 
-ENV TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
+SHELL ["/bin/bash", "-euxo", "pipefail", "-c"]
+
+ARG VENV_PYTHON="/workspace/axolotl-venv/bin/python"
+
+ENV TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
 ENV AXOLOTL_EXTRAS="{{ AXOLOTL_EXTRAS }}"
 ENV AXOLOTL_ARGS="{{ AXOLOTL_ARGS }}"
 ENV CUDA="{{ CUDA }}"
@@ -9,7 +13,7 @@ ENV GITHUB_REF="{{ GITHUB_REF }}"
 ENV GITHUB_SHA="{{ GITHUB_SHA }}"
 ENV NIGHTLY_BUILD="{{ NIGHTLY_BUILD }}"
 ENV HF_HOME="{{ HF_HOME }}"
-ENV AXOLOTL_DATASET_PROCESSES="8"
+ENV VENV_PYTHON=$VENV_PYTHON
 
 RUN apt-get update && \
     apt-get install -y --allow-change-held-packages vim curl nano libnccl2 libnccl-dev ibverbs-providers ibverbs-utils infiniband-diags librdmacm-dev librdmacm1 rdmacm-utils slurm-wlm
@@ -25,25 +29,27 @@ RUN git fetch origin +$GITHUB_REF && \
 
 # If AXOLOTL_EXTRAS is set, append it in brackets
 RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
-        sed -i 's#^transformers.*#transformers @ git+https://github.com/huggingface/transformers.git@main#' requirements.txt; \
-        sed -i 's#^peft.*#peft @ git+https://github.com/huggingface/peft.git@main#' requirements.txt; \
-        sed -i 's#^accelerate.*#accelerate @ git+https://github.com/huggingface/accelerate.git@main#' requirements.txt; \
-        sed -i 's#^trl.*#trl @ git+https://github.com/huggingface/trl.git@main#' requirements.txt; \
-        sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt; \
+        sed -i 's#"transformers[^"]*"#"transformers @ git+https://github.com/huggingface/transformers.git@main"#' pyproject.toml; \
+        sed -i 's#"peft[^"]*"#"peft @ git+https://github.com/huggingface/peft.git@main"#' pyproject.toml; \
+        sed -i 's#"accelerate[^"]*"#"accelerate @ git+https://github.com/huggingface/accelerate.git@main"#' pyproject.toml; \
+        sed -i 's#"trl[^"]*"#"trl @ git+https://github.com/huggingface/trl.git@main"#' pyproject.toml; \
+        sed -i 's#"datasets[^"]*"#"datasets @ git+https://github.com/huggingface/datasets.git@main"#' pyproject.toml; \
     fi
 
-RUN pip install packaging==23.2 setuptools==75.8.0
+RUN uv pip install --python "$VENV_PYTHON" packaging==23.2 setuptools==75.8.0 pip
 RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
-        pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
+        uv pip install --python "$VENV_PYTHON" --no-build-isolation -e .[ring-flash-attn,optimizers,ray,${AXOLOTL_EXTRAS}] $AXOLOTL_ARGS; \
     else \
-        pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray] $AXOLOTL_ARGS; \
+        uv pip install --python "$VENV_PYTHON" --no-build-isolation -e .[ring-flash-attn,optimizers,ray] $AXOLOTL_ARGS; \
     fi
 
-RUN python scripts/unsloth_install.py | sh
-RUN python scripts/cutcrossentropy_install.py | sh
+RUN uv pip install --python "$VENV_PYTHON" --no-build-isolation flash-attn $AXOLOTL_ARGS
+
+RUN "$VENV_PYTHON" scripts/unsloth_install.py | sh
+RUN "$VENV_PYTHON" scripts/cutcrossentropy_install.py | sh
 
 # So we can test the Docker image
-RUN pip install -r requirements-dev.txt -r requirements-tests.txt
+RUN uv pip install --python "$VENV_PYTHON" -e ".[dev]"
 
 # fix so that git fetch/pull from remote works
 RUN git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
```
cicd/cicd.sh (16 changes)

```diff
@@ -4,7 +4,7 @@ set -e
 python -c "import torch; assert '$PYTORCH_VERSION' in torch.__version__"
 
 # Run unit tests with initial coverage report
-pytest -v --durations=10 -n8 \
+uv run pytest -v --durations=10 -n8 \
   --ignore=tests/e2e/ \
   --ignore=tests/patched/ \
   --ignore=tests/cli \
@@ -12,36 +12,36 @@ pytest -v --durations=10 -n8 \
   --cov=axolotl
 
 # Run lora kernels tests with coverage append
-pytest -v --durations=10 \
+uv run pytest -v --durations=10 \
   /workspace/axolotl/tests/e2e/patched/lora_kernels \
   --cov=axolotl \
   --cov-append
 
 # Run patched tests excluding lora kernels with coverage append
-pytest --full-trace -vvv --durations=10 \
+uv run pytest --full-trace -vvv --durations=10 \
   --ignore=tests/e2e/patched/lora_kernels \
   /workspace/axolotl/tests/e2e/patched \
   --cov=axolotl \
   --cov-append
 
 # Run solo tests with coverage append
-pytest -v --durations=10 -n1 \
+uv run pytest -v --durations=10 -n1 \
   /workspace/axolotl/tests/e2e/solo/ \
   --cov=axolotl \
   --cov-append
 
 # Run integration tests with coverage append
-pytest -v --durations=10 \
+uv run pytest -v --durations=10 \
   /workspace/axolotl/tests/e2e/integrations/ \
   --cov=axolotl \
   --cov-append
 
-pytest -v --durations=10 /workspace/axolotl/tests/cli \
+uv run pytest -v --durations=10 /workspace/axolotl/tests/cli \
   --cov=axolotl \
   --cov-append
 
 # Run remaining e2e tests with coverage append and final report
-pytest -v --durations=10 \
+uv run pytest -v --durations=10 \
   --ignore=tests/e2e/solo/ \
   --ignore=tests/e2e/patched/ \
   --ignore=tests/e2e/multigpu/ \
@@ -52,4 +52,4 @@ pytest -v --durations=10 \
   --cov-append \
   --cov-report=xml:e2e-coverage.xml
 
-codecov upload-process -t $CODECOV_TOKEN -f e2e-coverage.xml -F e2e,pytorch-${PYTORCH_VERSION} || true
+uv run codecov upload-process -t $CODECOV_TOKEN -f e2e-coverage.xml -F e2e,pytorch-${PYTORCH_VERSION} || true
```
```diff
@@ -23,7 +23,7 @@ df_args = {
     "AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", ""),
     "AXOLOTL_ARGS": os.environ.get("AXOLOTL_ARGS", ""),
     "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.6.0"),
-    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.11-cu126-2.6.0"),
+    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-uv-py3.11-cu126-2.6.0"),
     "CUDA": os.environ.get("CUDA", "126"),
     "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
     "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
```

```diff
@@ -23,7 +23,7 @@ df_args = {
     "AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", ""),
     "AXOLOTL_ARGS": os.environ.get("AXOLOTL_ARGS", ""),
     "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION", "2.6.0"),
-    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-py3.11-cu126-2.6.0"),
+    "BASE_TAG": os.environ.get("BASE_TAG", "main-base-uv-py3.11-cu126-2.6.0"),
     "CUDA": os.environ.get("CUDA", "126"),
     "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
     "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
```
```diff
@@ -1,13 +1,19 @@
-ARG BASE_TAG=main-base
-FROM axolotlai/axolotl-base:$BASE_TAG
+ARG BASE_TAG=main-base-uv
+FROM axolotlai/axolotl-base-uv:$BASE_TAG
 
 ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
 ARG AXOLOTL_EXTRAS=""
 ARG AXOLOTL_ARGS=""
 ARG CUDA="118"
 ARG PYTORCH_VERSION="2.1.2"
 ARG GIT_REF="refs/heads/main"
 ARG GIT_SHA="HEAD"
+ARG VENV_PYTHON="/workspace/axolotl-venv/bin/python"
 
 ENV PYTORCH_VERSION=$PYTORCH_VERSION
 ENV GIT_REF=$GIT_REF
 ENV GIT_SHA=$GIT_SHA
+ENV VENV_PYTHON=$VENV_PYTHON
 
 RUN apt-get update && \
     apt-get install -y --allow-change-held-packages vim curl nano libnccl2 libnccl-dev rsync s3fs && \
@@ -20,16 +26,19 @@ RUN git clone --depth=1 https://github.com/axolotl-ai-cloud/axolotl.git
 
 WORKDIR /workspace/axolotl
 
 # Ensure we are on the expected commit and break Docker cache between revisions
 RUN git fetch origin "$GIT_REF" && git checkout "$GIT_SHA"
 
 # If AXOLOTL_EXTRAS is set, append it in brackets
 RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
-        pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
+        uv pip install --python "$VENV_PYTHON" --no-build-isolation -e .[ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
     else \
-        pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray] $AXOLOTL_ARGS; \
+        uv pip install --python "$VENV_PYTHON" --no-build-isolation -e .[ring-flash-attn,optimizers,ray] $AXOLOTL_ARGS; \
     fi && \
-    python scripts/unsloth_install.py | sh && \
-    python scripts/cutcrossentropy_install.py | sh && \
-    pip install pytest && \
-    pip cache purge
+    uv pip install --python "$VENV_PYTHON" --no-build-isolation flash-attn $AXOLOTL_ARGS && \
+    "$VENV_PYTHON" scripts/unsloth_install.py | sh && \
+    "$VENV_PYTHON" scripts/cutcrossentropy_install.py | sh && \
+    uv pip install --python "$VENV_PYTHON" pytest
 
 # fix so that git fetch/pull from remote works with shallow clone
 RUN git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
```
```diff
@@ -48,5 +48,5 @@ RUN git lfs install --skip-repo && \
     pip3 cache purge
 
 RUN if [ "$PYTORCH_VERSION" = "2.6.0" ] && [ "$CUDA" = "124" ] ; then \
-        FLASH_ATTENTION_FORCE_BUILD="TRUE" pip3 install --no-build-isolation flash-attn==2.8.0.post2; \
+        FLASH_ATTENTION_FORCE_BUILD="TRUE" uv pip install --no-build-isolation flash-attn==2.8.0.post2; \
     fi
```
```diff
@@ -12,8 +12,8 @@ EXPOSE 22
 COPY scripts/cloud-entrypoint.sh /root/cloud-entrypoint.sh
 COPY scripts/motd /etc/motd
 
-RUN pip install jupyterlab notebook ipywidgets && \
-    jupyter lab clean
+RUN uv pip install --python "$VENV_PYTHON" jupyterlab notebook ipywidgets && \
+    "$VENV_PYTHON" -m jupyter lab clean
 RUN apt update && \
     apt install --yes --no-install-recommends openssh-server tmux iproute2 nvtop && \
     rm -rf /var/cache/apt/archives && \
```
```diff
@@ -12,8 +12,8 @@ EXPOSE 22
 COPY scripts/cloud-entrypoint.sh /root/cloud-entrypoint.sh
 COPY scripts/motd /etc/motd
 
-RUN pip install jupyterlab notebook ipywidgets && \
-    jupyter lab clean
+RUN uv pip install --python "$VENV_PYTHON" jupyterlab notebook ipywidgets && \
+    "$VENV_PYTHON" -m jupyter lab clean
 RUN apt update && \
     apt install --yes --no-install-recommends openssh-server tmux iproute2 nvtop ibverbs-providers ibverbs-utils infiniband-diags librdmacm-dev librdmacm1 rdmacm-utils slurm-wlm && \
     rm -rf /var/cache/apt/archives && \
```
```diff
@@ -24,13 +24,14 @@ RUN git fetch origin +$GITHUB_REF && \
 
 # If AXOLOTL_EXTRAS is set, append it in brackets
 RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
-        pip install --no-build-isolation -e .[deepspeed,flash-attn,mamba-ssm,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
+        uv pip install --no-build-isolation -e .[deepspeed,mamba-ssm,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
     else \
-        pip install --no-build-isolation -e .[deepspeed,flash-attn,mamba-ssm] $AXOLOTL_ARGS; \
-    fi
+        uv pip install --no-build-isolation -e .[deepspeed,mamba-ssm] $AXOLOTL_ARGS; \
+    fi && \
+    uv pip install --no-build-isolation flash-attn $AXOLOTL_ARGS
 
 # So we can test the Docker image
-RUN pip install pytest
+RUN uv pip install pytest
 
 # fix so that git fetch/pull from remote works
 RUN git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
```
```diff
@@ -13,6 +13,7 @@ ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
 ENV PYTHON_VERSION=$PYTHON_VERSION
 ENV TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST
 ENV UV_TORCH_BACKEND="cu${CUDA}"
+ENV VENV_PYTHON=/workspace/axolotl-venv/bin/python
 
 RUN apt-get update \
     && apt-get install -y wget git build-essential ninja-build git-lfs libaio-dev pkg-config curl && rm -rf /var/lib/apt/lists/* \
@@ -29,8 +30,8 @@ RUN uv venv --no-project --relocatable axolotl-venv
 
 ENV PATH="/workspace/axolotl-venv/bin:${PATH}"
 
-RUN uv pip install packaging setuptools wheel psutil \
-    && uv pip install torch==${PYTORCH_VERSION} \
-    && uv pip install --no-build-isolation "causal_conv1d @ git+https://github.com/Dao-AILab/causal-conv1d.git@main" \
-    && uv pip install "mamba_ssm @ git+https://github.com/state-spaces/mamba.git@main" \
-    && uv pip install awscli pydantic
+RUN uv pip install --python "$VENV_PYTHON" packaging setuptools wheel psutil protobuf grpclib \
+    && uv pip install --python "$VENV_PYTHON" torch==${PYTORCH_VERSION} \
+    && uv pip install --python "$VENV_PYTHON" --no-build-isolation "causal_conv1d @ git+https://github.com/Dao-AILab/causal-conv1d.git@main" \
+    && uv pip install --python "$VENV_PYTHON" "mamba_ssm @ git+https://github.com/state-spaces/mamba.git@main" \
+    && uv pip install --python "$VENV_PYTHON" awscli pydantic
```
````diff
@@ -212,6 +212,14 @@ Instead of passing `tools` via the system prompt, an alternative method would be
 Tools need to follow [JSON schema](https://json-schema.org/learn/getting-started-step-by-step).
 :::
 
+::: {.callout-warning}
+If you have tool arguments with same name but different dtypes (like `"time": string` and `"time": number`), please save `arguments: ` as JSON string to prevent `datasets` from having casting issues.
+
+```
+"arguments": "{\"...\": \"...\"}"
+```
+:::
+
 Example config for Llama4:
 ```yaml
 chat_template: llama4
````
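To make the callout in the diff above concrete, here is a minimal sketch of a dataset message that stores `arguments` as a JSON string. The message layout and tool name are illustrative assumptions, not the exact schema from the docs; only the string-valued `arguments` field is the point being demonstrated.

```yaml
# Illustrative sketch only: message layout and tool name are assumed.
# The key point is that `arguments` holds a JSON *string*, not a nested mapping,
# so `datasets` never has to reconcile {"time": string} with {"time": number}.
- role: assistant
  tool_calls:
    - name: get_weather
      arguments: "{\"location\": \"Paris\", \"time\": \"2024-06-01T09:00\"}"
```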
```diff
@@ -61,7 +61,7 @@ While we recommend `.jsonl`, you can also use the other formats (`csv`, `parquet
 
 ### Pre-training without streaming
 
-On the rare case that the dataset is small and can be loaded entirely into memory, another approach to running pre-training is to use the `completion` format. This would mean that the entire dataset is pre-tokenized instead of on-demand in streaming.
+In the case that the dataset is small and can be loaded entirely into memory, another approach to running pre-training is to use the `completion` format. This would mean that the entire dataset is pre-tokenized instead of on-demand in streaming.
 
 One benefit of this is that the tokenization can be performed separately on a CPU-only machine, and then transferred to a GPU machine for training to save costs.
```
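As a sketch of the non-streaming approach described in the diff above, a `completion`-format dataset entry might look like the following; the dataset path is a placeholder.

```yaml
# Sketch: pre-tokenize a small corpus up front instead of streaming it.
# The path below is a placeholder for your own pre-training corpus.
datasets:
  - path: ./data/corpus.jsonl
    type: completion
```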
````diff
@@ -72,8 +72,8 @@ datasets:
 Make sure you have an [editable install](https://setuptools.pypa.io/en/latest/userguide/development_mode.html) of Axolotl, which ensures that changes you make to the code are reflected at runtime. Run the following commands from the root of this project:
 
 ```bash
-pip3 install packaging
-pip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'
+uv sync --extra deepspeed
+uv pip install flash-attn --no-build-isolation
 ```
 
 #### Remote Hosts
@@ -213,8 +213,8 @@ docker run --privileged --gpus '"all"' --shm-size 10g --rm -it --name axolotl --
 You will now be in the container. Next, perform an editable install of Axolotl:
 
 ```bash
-pip3 install packaging
-pip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'
+uv sync --extra deepspeed
+uv pip install flash-attn --no-build-isolation
 ```
 
 ### Attach To Container
````
```diff
@@ -140,3 +140,7 @@ description: Frequently asked questions
 **Q: `ValueError("Backward pass should have cleared tracker of all tensors")`
 
 > A: This may happen due to edge cases in using the modern OffloadActivations context manager for CUDA streams. If you encounter this error, you may have success using the naive implementation with `offload_activations: legacy` in your YAML.
+
+**Q: `Error parsing tool_calls arguments as JSON.`
+
+> A: There is an error parsing string arguments to a dict. Please check your dataset and the error message for more details.
```
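For reference, the fallback named in the first answer above is a single config line; treat this as a sketch to add alongside whatever settings your YAML already has.

```yaml
# Sketch: opt into the naive activation-offloading implementation
# mentioned in the FAQ answer above.
offload_activations: legacy
```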
```diff
@@ -1,5 +1,5 @@
 ---
-title: "FDSP + QLoRA"
+title: "FSDP + QLoRA"
 description: Use FSDP with QLoRA to fine-tune large LLMs on consumer GPUs.
 format:
   html:
@@ -23,6 +23,12 @@ To enable `QLoRA` with `FSDP`, you need to perform the following steps:
 2. Enable FSDP in your axolotl config, as [described here](multi-gpu.qmd#sec-fsdp).
 3. Use one of the supported model types: `llama`, `mistral` or `mixtral`.
 
+## Enabling Swap for FSDP2
+
+If available memory is insufficient even after FSDP's CPU offloading, you can enable swap memory usage by setting `cpu_offload_pin_memory: false` alongside `offload_params: true` in FSDP config.
+
+This disables memory pinning, allowing FSDP to use disk swap space as fallback. Disabling memory pinning itself incurs performance overhead, and actually having to use swap adds more, but it may enable training larger models that would otherwise cause OOM errors on resource constrained systems.
+
 ## Example Config
 
 [examples/llama-2/qlora-fsdp.yml](../examples/llama-2/qlora-fsdp.yml) contains an example of how to enable QLoRA + FSDP in axolotl.
```
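A minimal sketch of the swap-enabling combination from the new section above, assuming an FSDP2 config block; surrounding keys are omitted, and `fsdp_version: 2` is an assumption about how FSDP2 is selected rather than something stated in the diff.

```yaml
# Sketch: allow FSDP's CPU offload to spill into swap.
fsdp_version: 2                   # assumption: selects FSDP2
fsdp_config:
  offload_params: true            # offload parameters to CPU
  cpu_offload_pin_memory: false   # unpinned memory, so the OS may page to swap
```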
````diff
@@ -29,19 +29,40 @@ Follow the instructions at: [https://pytorch.org/get-started/locally/](https://p
 For Blackwell GPUs, please use Pytorch 2.7.0 and CUDA 12.8.
 :::
 
-### PyPI Installation (Recommended) {#sec-pypi}
+### uv Installation (Recommended) {#sec-uv-quick}
 
 ```{.bash}
-pip3 install -U packaging setuptools wheel ninja
-pip3 install --no-build-isolation axolotl[flash-attn,deepspeed]
+# Install uv if not already installed
+curl -LsSf https://astral.sh/uv/install.sh | sh
+
+# Add Axolotl to a project (recommended)
+uv init my-project && cd my-project
+uv add axolotl
+uv pip install flash-attn --no-build-isolation
+source .venv/bin/activate
 ```
 
+For a quick one-off install without creating a project:
+
+```{.bash}
+uv pip install axolotl
+uv pip install flash-attn --no-build-isolation
+```
+
+### pip Installation {#sec-pypi}
+
+```{.bash}
+pip install --no-build-isolation axolotl[deepspeed]
+pip install --no-build-isolation flash-attn
+```
+
 We use `--no-build-isolation` in order to detect the installed PyTorch version (if
 installed) in order not to clobber it, and so that we set the correct version of
 dependencies that are specific to the PyTorch version or other installed
-co-dependencies.
+co-dependencies. Flash Attention is resolved separately so it can be built against
+the environment configured by the previous step.
 
-### uv Installation {#sec-uv}
+### Advanced uv Installation {#sec-uv}
 
 uv is a fast, reliable Python package installer and resolver built in Rust. It offers significant performance improvements over pip and provides better dependency resolution, making it an excellent choice for complex environments.
@@ -62,28 +83,38 @@ source .venv/bin/activate
 Install PyTorch
 - PyTorch 2.6.0 recommended
 ```{.bash}
 uv pip install packaging setuptools wheel
 uv pip install torch==2.6.0
 uv pip install awscli pydantic
 ```
 
 Install axolotl from PyPi
 ```{.bash}
-uv pip install --no-build-isolation axolotl[deepspeed,flash-attn]
+uv pip install --no-build-isolation axolotl[deepspeed]
 
 # optionally install with vLLM if you're using torch==2.6.0 and want to train w/ GRPO
-uv pip install --no-build-isolation axolotl[deepspeed,flash-attn,vllm]
+# uv pip install --no-build-isolation axolotl[deepspeed,vllm]
+
+uv pip install flash-attn --no-build-isolation
 ```
 
 ### Edge/Development Build {#sec-edge-build}
 
 For the latest features between releases:
 
+#### Using uv (recommended)
 ```{.bash}
 git clone https://github.com/axolotl-ai-cloud/axolotl.git
 cd axolotl
-pip3 install -U packaging setuptools wheel ninja
-pip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'
+curl -LsSf https://astral.sh/uv/install.sh | sh  # If not already installed
+uv sync
+uv pip install flash-attn --no-build-isolation
+```
+
+#### Using pip
+```{.bash}
+git clone https://github.com/axolotl-ai-cloud/axolotl.git
+cd axolotl
+pip install --no-build-isolation -e '.[deepspeed]'
+pip install --no-build-isolation flash-attn
 ```
 
 ### Docker {#sec-docker}
@@ -141,7 +172,7 @@ For providers supporting Docker:
 ### macOS {#sec-macos}
 
 ```{.bash}
-pip3 install --no-build-isolation -e '.'
+uv pip install --no-build-isolation -e '.'
 ```
 
 See @sec-troubleshooting for Mac-specific issues.
@@ -159,10 +190,15 @@ We recommend using WSL2 (Windows Subsystem for Linux) or Docker.
 1. Install Python ≥3.11
 2. Install PyTorch: https://pytorch.org/get-started/locally/
 3. Install Axolotl:
 ```{.bash}
-pip3 install -U packaging setuptools wheel ninja
-pip3 install --no-build-isolation -e '.[flash-attn,deepspeed]'
+# Option A: add Axolotl to the environment
+uv add axolotl
+uv pip install flash-attn --no-build-isolation
+
+# Option B: quick install
+uv pip install axolotl
+uv pip install flash-attn --no-build-isolation
 ```
 4. (Optional) Login to Hugging Face:
 ```{.bash}
 huggingface-cli login
````
```diff
@@ -5,10 +5,11 @@ description: "Custom autograd functions and Triton kernels in Axolotl for optimi
 
 Inspired by [Unsloth](https://github.com/unslothai/unsloth), we've implemented two
 optimizations for LoRA and QLoRA fine-tuning, supporting both single GPU and multi-GPU
-(in the DDP and DeepSpeed settings) training. These include (1) SwiGLU and GEGLU activation function
-Triton kernels, and (2) LoRA MLP and attention custom autograd functions. Our goal was
-to leverage operator fusion and tensor re-use in order to improve speed and reduce
-memory usage during the forward and backward passes of these calculations.
+(including the DDP, DeepSpeed, and FSDP2 settings) training. These include (1) SwiGLU
+and GEGLU activation function Triton kernels, and (2) LoRA MLP and attention custom
+autograd functions. Our goal was to leverage operator fusion and tensor re-use in order
+to improve speed and reduce memory usage during the forward and backward passes of
+these calculations.
 
 We currently support several common model architectures, including (but not limited to):
 
@@ -131,6 +132,5 @@ computation path.
 ## Future Work
 
 - Support for additional model architectures
-- Support for the FSDP setting
 - Support for dropout and bias
 - Additional operator fusions
```
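A hedged sketch of opting into the fused LoRA paths described in the diff above; the key names below are assumptions based on Axolotl's LoRA kernel options and may differ from the exact flags documented in the full guide.

```yaml
# Sketch: enable the custom kernels/autograd functions described above.
# Key names are assumptions, not confirmed by the diff.
adapter: lora
lora_mlp_kernel: true   # LoRA MLP autograd path with SwiGLU/GEGLU Triton kernels
lora_qkv_kernel: true   # fused attention QKV projection path
lora_o_kernel: true     # fused attention output projection path
```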
````diff
@@ -95,7 +95,7 @@ chat_template: llava
 ### Mistral-Small-3.1 {#sec-mistral-small-31}
 
 ::: {.callout-tip}
-Please make sure to install vision lib via `pip install 'mistral-common[opencv]==1.8.5'`
+Please make sure to install vision lib via `uv pip install 'mistral-common[opencv]==1.8.5'`
 :::
 
 ```yaml
@@ -105,7 +105,7 @@ base_model: mistralai/Mistral-Small-3.1-24B-Instruct-2503
 ### Magistral-Small-2509 {#sec-magistral-small-2509}
 
 ::: {.callout-tip}
-Please make sure to install vision lib via `pip install 'mistral-common[opencv]==1.8.5'`
+Please make sure to install vision lib via `uv pip install 'mistral-common[opencv]==1.8.5'`
 :::
 
 ```yaml
@@ -115,7 +115,7 @@ base_model: mistralai/Magistral-Small-2509
 ### Voxtral {#sec-voxtral}
 
 ::: {.callout-tip}
-Please make sure to install audio lib via `pip3 install librosa==0.11.0 'mistral_common[audio]==1.8.3'`
+Please make sure to install audio lib via `uv pip install librosa==0.11.0 'mistral_common[audio]==1.8.3'`
 :::
 
 ```yaml
@@ -143,7 +143,7 @@ The model's initial loss and grad norm will be very high. We suspect this to be
 :::
 
 ::: {.callout-tip}
-Please make sure to install `timm` via `pip3 install timm==1.0.17`
+Please make sure to install `timm` via `uv pip install timm==1.0.17`
 :::
 
 ```yaml
@@ -171,7 +171,7 @@ chat_template: qwen2_vl # same as qwen2-vl
 ### SmolVLM2 {#sec-smolvlm2}
 
 ::: {.callout-tip}
-Please make sure to install `num2words` via `pip3 install num2words==0.5.14`
+Please make sure to install `num2words` via `uv pip install num2words==0.5.14`
 :::
 
 ```yaml
@@ -181,7 +181,7 @@ base_model: HuggingFaceTB/SmolVLM2-500M-Video-Instruct
 ### LFM2-VL {#sec-lfm2-vl}
 
 ::: {.callout-warning}
-Please uninstall `causal-conv1d` via `pip3 uninstall -y causal-conv1d`
+Please uninstall `causal-conv1d` via `uv pip uninstall -y causal-conv1d`
 :::
 
 ```yaml
@@ -222,7 +222,7 @@ For audio loading, you can use the following keys within `content` alongside `"t
 
 ::: {.callout-tip}
 
-You may need to install `librosa` via `pip3 install librosa==0.11.0`.
+You may need to install `librosa` via `uv pip install librosa==0.11.0`.
 
 :::
````
docs/optimizations.qmd (new file, 133 lines)
@@ -0,0 +1,133 @@
---
title: Optimizations Guide
description: A guide to the performance and memory optimizations available in Axolotl.
---

Axolotl includes numerous optimizations to speed up training, reduce memory usage, and handle large models.

This guide provides a high-level overview and directs you to the detailed documentation for each feature.

## Speed Optimizations

These optimizations focus on increasing training throughput and reducing total training time.

### Sample Packing

Improves GPU utilization by combining multiple short sequences into a single packed sequence for training. This requires enabling one of the [attention](#attention-implementations) implementations below; a minimal sketch follows.

- **Config:** `sample_packing: true`
- **Learn more:** [Sample Packing](multipack.qmd)
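As a minimal sketch (the attention choice and sequence length here are illustrative, not recommendations):

```yaml
sample_packing: true
flash_attention: true  # packing requires one of the attention backends below
sequence_len: 4096     # illustrative; packing fills sequences up to this length
```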
### Attention Implementations

Using an optimized attention implementation is critical for training speed.

- **[Flash Attention 2](https://github.com/Dao-AILab/flash-attention)**: `flash_attention: true`. **(Recommended)** The industry standard for fast attention on modern GPUs. Requires Ampere or higher. For AMD, check [AMD Support](https://github.com/Dao-AILab/flash-attention?tab=readme-ov-file#amd-rocm-support).
- **[Flex Attention](https://pytorch.org/blog/flexattention/)**: `flex_attention: true`.
- **[SDP Attention](https://docs.pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)**: `sdp_attention: true`. PyTorch's native implementation.
- **[Xformers](https://github.com/facebookresearch/xformers)**: `xformers_attention: true`. Works with FP16.

*Note: You should only enable one attention backend.*

### LoRA Optimizations

Leverages optimized kernels to accelerate LoRA training and reduce memory usage.

- **Learn more:** [LoRA Optimizations Documentation](lora_optims.qmd)

## Memory Optimizations

These techniques help you fit larger models or use bigger batch sizes on your existing hardware.

### Parameter Efficient Finetuning (LoRA & QLoRA)

Drastically reduces memory by training a small set of "adapter" parameters instead of the full model. This is the most common and effective memory-saving technique; see the sketch after this list.

- Examples: Find configs with `lora` or `qlora` in the [examples directory](https://github.com/axolotl-ai-cloud/axolotl/tree/main/examples/llama-3).
- Config Reference: See `adapter`, `load_in_4bit`, and `load_in_8bit` in the [Configuration Reference](config-reference.qmd).
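A minimal QLoRA sketch (rank, alpha, and dropout values are illustrative, not recommendations):

```yaml
adapter: qlora
load_in_4bit: true   # quantize the frozen base model to 4-bit

lora_r: 16
lora_alpha: 32
lora_dropout: 0.05
lora_target_linear: true  # attach adapters to all linear layers
```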
### Gradient Checkpointing & Activation Offloading

These techniques save VRAM by changing how activations are handled; a sketch follows the list.

- Gradient Checkpointing: re-computes activations during the backward pass, trading compute time for VRAM.
- Activation Offloading: moves activations to CPU RAM or disk, trading I/O overhead for VRAM.
- Learn more: [Gradient Checkpointing and Offloading Docs](gradient_checkpointing.qmd)
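A hedged sketch of combining the two (the `activation_offloading` key is an assumption based on the linked docs):

```yaml
gradient_checkpointing: true
activation_offloading: true  # assumed key; offloads checkpointed activations to CPU
```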

### Cut Cross Entropy (CCE)

Reduces VRAM usage by using an optimized cross-entropy loss calculation; a config sketch follows.

- **Learn more:** [Custom Integrations - CCE](custom_integrations.qmd#cut-cross-entropy)
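CCE is enabled through the plugin system, using the same plugin path as the ALST usage example later in this document; the `cut_cross_entropy` flag is an assumption from the integration docs:

```yaml
plugins:
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
cut_cross_entropy: true  # assumed flag per the integration docs
```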

### Liger Kernels

Provides efficient Triton kernels to improve training speed and reduce memory usage; a config sketch follows.

- **Learn more:** [Custom Integrations - Liger Kernels](custom_integrations.qmd#liger-kernels)
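A sketch using the plugin path from the ALST usage example later in this document; the individual kernel flags are assumptions based on the Liger integration:

```yaml
plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_glu_activation: true
```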

## Long Context Models

Techniques to train models on sequences longer than their original context window.

### RoPE Scaling

Extends a model's context window by interpolating its Rotary Position Embeddings.

- **Config:** Pass the `rope_scaling` config under `overrides_of_model_config:`; a sketch follows. To learn how to set RoPE scaling, check the respective model's config.
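For example, a hedged sketch (the `rope_type` and `factor` values depend on the model and are illustrative):

```yaml
overrides_of_model_config:
  rope_scaling:
    rope_type: linear  # consult the model's config for the supported type
    factor: 2.0        # 2x the original context window
```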

### Sequence Parallelism

Splits long sequences across multiple GPUs, enabling training with sequence lengths that would not fit on a single device; a config sketch follows.

- **Learn more:** [Sequence Parallelism Documentation](sequence_parallelism.qmd)
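Configured via `context_parallel_size`, the same key shown in the ALST usage example later in this document (the value is illustrative):

```yaml
context_parallel_size: 4  # split each sequence across 4 GPUs
```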

### Arctic Long Sequence Training (ALST)

ALST is a recipe that combines several techniques to train long-context models efficiently. It typically involves:

- TiledMLP to reduce memory usage in MLP layers.
- Tiled loss functions (like [CCE](#cut-cross-entropy-cce) or [Liger](#liger-kernels)).
- Activation Offloading to CPU.

- Example: [ALST Example Configuration](https://github.com/axolotl-ai-cloud/axolotl/tree/main/examples/alst)

## Large Models (Distributed Training)

To train models that don't fit on a single GPU, you'll need to use a distributed training strategy like FSDP or DeepSpeed. These frameworks shard the model weights, gradients, and optimizer states across multiple GPUs and nodes.

- **Learn more:** [Multi-GPU Guide](multi-gpu.qmd)
- **Learn more:** [Multi-Node Guide](multi-node.qmd)

### N-D Parallelism (Beta)

For advanced scaling, Axolotl allows you to compose different parallelism techniques (e.g., Data, Tensor, and Sequence Parallelism). This is a powerful approach for training extremely large models by overcoming multiple bottlenecks at once; a sketch follows.

- **Learn more:** [N-D Parallelism Guide](nd_parallelism.qmd)
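A hedged sketch composing two axes on 8 GPUs (the key names follow the N-D parallelism guide and should be checked there; values are illustrative):

```yaml
tensor_parallel_size: 2  # 2-way tensor parallelism
dp_shard_size: 4         # 4-way FSDP-style data-parallel sharding
```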

## Quantization

Techniques to reduce the precision of model weights for memory savings.

### 4-bit Training (QLoRA)

The recommended approach for quantization-based training. It loads the base model in 4-bit using `bitsandbytes` and then trains QLoRA adapters. See [Parameter Efficient Finetuning (LoRA & QLoRA)](#parameter-efficient-finetuning-lora-qlora) for details.

### FP8 Training

Enables training with 8-bit floating point precision on supported hardware (e.g., NVIDIA Hopper series GPUs) for significant speed and memory gains; a hedged sketch follows.

- **Example:** [Llama 3 FP8 FSDP Example](https://github.com/axolotl-ai-cloud/axolotl/blob/main/examples/llama-3/3b-fp8-fsdp2.yaml)
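A hedged sketch based on the linked example (both keys are assumptions for FSDP2 setups and should be verified against the example config):

```yaml
fp8: true
fp8_enable_fsdp_float8_all_gather: true  # assumed flag from the fp8-fsdp2 example
```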

### Quantization Aware Training (QAT)

Simulates quantization effects during training, helping the model adapt and potentially improving the final accuracy of the quantized model.

- **Learn more:** [QAT Documentation](qat.qmd)

### GPTQ

Allows you to finetune LoRA adapters on top of a model that has already been quantized using the GPTQ method; a hedged sketch follows.

- **Example:** [GPTQ LoRA Example](https://github.com/axolotl-ai-cloud/axolotl/blob/main/examples/llama-2/gptq-lora.yml)
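A hedged sketch of the combination (the `gptq` flag and the base model are assumptions for illustration; see the linked example for the exact keys):

```yaml
base_model: TheBloke/Llama-2-7B-GPTQ  # illustrative GPTQ-quantized base model
gptq: true    # assumed flag marking the base model as GPTQ-quantized
adapter: lora
lora_r: 16    # illustrative
```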
@@ -30,6 +30,7 @@ qat:
```

We support the following quantization schemes (a hedged config sketch follows the list):

- `Int4WeightOnly` (requires the `fbgemm-gpu` extra when installing Axolotl)
- `Int8DynamicActivationInt4Weight`
- `Float8DynamicActivationFloat8Weight`
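A hedged sketch of selecting a scheme (the key names are assumptions based on the QAT docs; `Int8DynamicActivationInt4Weight` corresponds to int8 activations with int4 weights):

```yaml
qat:
  activation_dtype: int8  # assumed key
  weight_dtype: int4      # assumed key
  group_size: 32          # illustrative
```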
@@ -49,9 +49,9 @@ When sequence parallelism is enabled:

To use sequence parallelism, you need:

- Multiple GPUs (at least 2)
- The `ring-flash-attn` package. Install with either `uv sync --extra ring-flash-attn`
  (from a cloned repository) or `uv pip install ring-flash-attn>=0.1.4`.
- Flash Attention installed separately with `uv pip install flash-attn --no-build-isolation`.

## Limitations

@@ -12,9 +12,14 @@ This guide shows how to fine-tune both the LFM2 and LFM2-VL models with Axolotl.

Here is an example of how to install from PyPI:

```bash
# Ensure you have a compatible version of PyTorch installed
# Option A: manage dependencies in your project
uv add 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation

# Option B: quick install
uv pip install 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation
```

2. Run one of the finetuning examples below.

@@ -35,7 +40,7 @@ This guide shows how to fine-tune both the LFM2 and LFM2-VL models with Axolotl.

- **Installation Error**: If you encounter `ImportError: ... undefined symbol ...` or `ModuleNotFoundError: No module named 'causal_conv1d_cuda'`, the `causal-conv1d` package may have been installed incorrectly. Try uninstalling it:
```bash
uv pip uninstall -y causal-conv1d
```

- **Dataset Loading**: Read more on how to load your own dataset in our [documentation](https://docs.axolotl.ai/docs/dataset_loading.html).

@@ -7,3 +7,24 @@ techniques. It is a combination of:

- Activation Offloading: Offload activations to CPU RAM to reduce memory usage

For more information, you can check out the ALST paper [here](https://www.arxiv.org/abs/2506.13996).

## Usage

```yaml
tiled_mlp: true

# See Sequence Parallelism docs
# https://docs.axolotl.ai/docs/sequence_parallelism.html
context_parallel_size: int

plugins:
  # See Cut Cross Entropy docs
  # https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

  # or Liger Kernel docs
  # https://docs.axolotl.ai/docs/custom_integrations.html#liger-kernels
  - axolotl.integrations.liger.LigerPlugin
  # ...
```

@@ -15,8 +15,8 @@ This guide shows how to fine-tune it with Axolotl with multi-turn conversations

git clone https://github.com/axolotl-ai-cloud/axolotl.git
cd axolotl

uv sync
uv pip install flash-attn --no-build-isolation

# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
python scripts/cutcrossentropy_install.py | sh

@@ -31,7 +31,7 @@ python scripts/cutcrossentropy_install.py | sh

# For those using our Docker image, use the below path.
export CUDA_HOME=/usr/local/cuda

uv pip install git+https://github.com/nickjbrowning/XIELU@59d6031 --no-build-isolation --no-deps
```

For any installation errors, see [XIELU Installation Issues](#xielu-installation-issues)
@@ -67,7 +67,7 @@ If those didn't help, please try the below solutions:

1. Pass the CMake env var and try the install again:

```bash
Python_EXECUTABLE=$(which python) uv pip install git+https://github.com/nickjbrowning/XIELU@59d6031 --no-build-isolation --no-deps
```

2. Git clone the repo and manually hardcode the Python path:

@@ -92,7 +92,7 @@ If those didn't help, please try the below solutions:
```

```bash
uv pip install . --no-build-isolation --no-deps
```

## Optimization Guides

@@ -17,8 +17,8 @@ Thanks to the team at Arcee.ai for using Axolotl in supervised fine-tuning the A

git clone https://github.com/axolotl-ai-cloud/axolotl.git
cd axolotl

uv sync
uv pip install flash-attn --no-build-isolation

# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
python scripts/cutcrossentropy_install.py | sh

@@ -12,10 +12,10 @@

"\n",
"Axolotl is the most performant LLM post-training framework available, delivering faster training with efficient, consistent and stable performance. Train your workload and ship your product 30% faster, saving you both time and money.\n",
"\n",
"- \u2b50 us on [GitHub](https://github.com/axolotl-ai-cloud/axolotl)\n",
"- \ud83d\udcdc Read the [Docs](http://docs.axolotl.ai/)\n",
"- \ud83d\udcac Chat with us on [Discord](https://discord.gg/mnpEYgRUmD)\n",
"- \ud83d\udcf0 Get updates on [X/Twitter](https://x.com/axolotl_ai)\n"
]
},
{
@@ -39,8 +39,8 @@

"source": [
"%%capture\n",
"# This step can take ~5-10 minutes to install dependencies\n",
"!uv pip install --no-build-isolation axolotl>=0.9.1\n!uv pip install flash-attn --no-build-isolation\n",
"!uv pip install \"cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@147ea28\""
]
},
{
[The remaining hunks in this notebook diff touch only Jupyter widget-state metadata: literal characters in progress-bar labels and placeholders are replaced with unicode escapes. Ellipses become "\u2026", spaces inside labels such as "model-00001-of-00008.safetensors: 100%" become "\u2007" figure spaces, and empty placeholders become "\u200b" zero-width spaces. No code or prose cells change.]
@@ -16,8 +16,13 @@ Thanks to the team at MistralAI for giving us early access to prepare for this r

```bash
# Ensure you have PyTorch installed (PyTorch 2.6.0 min)
# Option A: manage dependencies in your project
uv add 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation

# Option B: quick install
uv pip install 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation
```

2. Install [Cut Cross Entropy](https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy) to reduce training VRAM usage

@@ -10,17 +10,22 @@ Gemma-3n is a family of multimodal models from Google found on [HuggingFace](htt

```bash
# Ensure you have PyTorch installed (PyTorch 2.6.0 min)
# Option A: manage dependencies in your project
uv add 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation

# Option B: quick install
uv pip install 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation
```

2. In addition to Axolotl's requirements, Gemma-3n requires:

```bash
uv pip install timm==1.0.17

# for loading audio data
uv pip install librosa==0.11.0
```

3. Download sample dataset files

@@ -12,8 +12,13 @@ This guide shows how to fine-tune it with Axolotl with multi-turn conversations

```bash
# Ensure you have PyTorch installed (PyTorch 2.6.0 min)
# Option A: manage dependencies in your project
uv add 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation

# Option B: quick install
uv pip install 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation
```

2. Choose one of the following configs below for training the 20B model. (For the 120B model, see [below](#training-120b).)

@@ -75,7 +80,7 @@ for more information about using a special vllm-openai docker image for inferenc

Optionally, vLLM can be installed from nightly:

```bash
uv pip install --no-build-isolation --pre -U vllm --extra-index-url https://wheels.vllm.ai/nightly
```

and the vLLM server can be started with the following command (modify `--tensor-parallel-size 8` to match your environment):

```bash
@@ -13,8 +13,8 @@ Tencent released a family of opensource models called HunYuan with varying param

git clone https://github.com/axolotl-ai-cloud/axolotl.git
cd axolotl

uv sync
uv pip install flash-attn --no-build-isolation

# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
python scripts/cutcrossentropy_install.py | sh

@@ -66,6 +66,7 @@ fsdp_config:

  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  # fsdp_cpu_offload_pin_memory: false # uncomment to enable swap memory usage when RAM is insufficient
special_tokens:

# save_first_step: true # uncomment this to validate checkpoint saving works with your config

@@ -13,9 +13,14 @@ Thanks to the team at MistralAI for giving us early access to prepare for these

Here is an example of how to install from PyPI:

```bash
# Ensure you have PyTorch installed (PyTorch 2.6.0 min)
# Option A: manage dependencies in your project
uv add 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation

# Option B: quick install
uv pip install 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation
```

2. Install [Cut Cross Entropy](https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy) to reduce training VRAM usage

@@ -15,8 +15,8 @@ This guide shows how to fine-tune it with Axolotl with multi-turn conversations

git clone https://github.com/axolotl-ai-cloud/axolotl.git
cd axolotl

uv sync
uv pip install flash-attn --no-build-isolation

# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
python scripts/cutcrossentropy_install.py | sh

@@ -24,12 +24,12 @@ python scripts/cutcrossentropy_install.py | sh

2. Install the Qwen3-Next transformers commit:
```bash
uv pip uninstall -y transformers && uv pip install "git+https://github.com/huggingface/transformers.git@b9282355bea846b54ed850a066901496b19da654"
```

3. Install FLA for improved performance:
```bash
uv pip uninstall -y causal-conv1d && uv pip install flash-linear-attention==0.3.2
```

4. Run the finetuning example:

@@ -38,7 +38,7 @@ pip3 uninstall -y causal-conv1d && pip3 install flash-linear-attention==0.3.2

axolotl train examples/qwen3-next/qwen3-next-80b-a3b-qlora.yaml
```

This config uses about 45.62 GiB VRAM.

Let us know how it goes. Happy finetuning! 🚀

@@ -27,6 +27,14 @@ lora_r: 16

lora_alpha: 8
lora_dropout: 0.05
lora_target_modules:
  - linear_attn.in_proj_ba
  - linear_attn.in_proj_qkvz
  - linear_attn.out_proj
  - shared_expert.up_proj
  - shared_expert.down_proj
  - shared_expert.gate_proj
  - shared_expert_gate
  - mlp.gate
  - q_proj
  - v_proj
  - k_proj

@@ -15,8 +15,8 @@ This guide shows how to fine-tune it with Axolotl with multi-turn conversations

git clone https://github.com/axolotl-ai-cloud/axolotl.git
cd axolotl

uv sync --extra deepspeed
uv pip install flash-attn --no-build-isolation

# Install Cut Cross Entropy
python scripts/cutcrossentropy_install.py | sh

@@ -13,14 +13,19 @@ This guide shows how to fine-tune SmolVLM2 models with Axolotl.

Here is an example of how to install from PyPI:
```bash
# Ensure you have a compatible version of PyTorch installed
# Option A: manage dependencies in your project
uv add 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation

# Option B: quick install
uv pip install 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation
```

2. Install an extra dependency:

```bash
uv pip install num2words==0.5.14
```

3. Run the finetuning example:

@@ -12,16 +12,21 @@ Thanks to the team at MistralAI for giving us early access to prepare for this r

```bash
# Ensure you have PyTorch installed (PyTorch 2.6.0 min)
# Option A: manage dependencies in your project
uv add 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation

# Option B: quick install
uv pip install 'axolotl>=0.12.0'
uv pip install flash-attn --no-build-isolation
```

2. Please install the below:

```bash
# audio
uv pip install librosa==0.11.0
uv pip install 'mistral_common[audio]==1.8.3'

# Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
python scripts/cutcrossentropy_install.py | sh
```

pyproject.toml (197 lines changed)
@@ -1,14 +1,131 @@

[build-system]
requires = ["setuptools>=64", "wheel", "setuptools_scm>=8"]
build-backend = "setuptools.build_meta"

[project]
name = "axolotl"
dynamic = ["version"]
description = "LLM Trainer"
readme = "README.md"
requires-python = ">=3.10,<3.13"
license = {text = "Apache-2.0"}
authors = [
    {name = "Axolotl AI"},
]
maintainers = [
    {name = "Axolotl AI"},
]
classifiers = [
    "Development Status :: 4 - Beta",
    "License :: OSI Approved :: Apache Software License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
]

dependencies = [
    "torch>=2.6.0",
    "packaging>=23.2",
    "huggingface_hub>=0.33.0",
    "peft==0.17.0",
    "transformers==4.56.1",
    "tokenizers>=0.21.1",
    "accelerate==1.10.1",
    "datasets==4.0.0",
    "trl==0.23.0",
    "hf_xet==1.1.5",
    "kernels==0.9.0",
    "trackio",
    "optimum==1.16.2",
    "hf_transfer",
    "sentencepiece",
    "gradio==5.41.1",
    "modal==1.0.2",
    "pydantic>=2.10.6",
    "addict",
    "fire",
    "PyYAML>=6.0",
    "requests",
    "wandb",
    "einops",
    "colorama",
    "numba",
    "numpy>=1.24.4,<3.0",
    "evaluate==0.4.1",
    "scipy",
    "scikit-learn>=1.7.0",
    "nvidia-ml-py==12.560.30",
    "art",
    "tensorboard",
    "python-dotenv==1.0.1",
    "s3fs>=2024.5.0",
    "gcsfs>=2024.5.0",
    "adlfs>=2024.5.0",
    "ocifs==1.3.2",
    "zstandard>=0.23.0",
    "fastcore",
    "lm_eval==0.4.7",
    "langdetect==1.0.9",
    "immutabledict==4.2.0",
    "antlr4-python3-runtime==4.13.2",
    "schedulefree==1.4.1",
    "mistral-common==1.8.5",

    # Axolotl contribs
    "axolotl-contribs-lgpl @ git+https://github.com/axolotl-ai-cloud/axolotl-contribs-lgpl.git@numpy",
    "axolotl-contribs-mit==0.0.5",

    # Platform-specific dependencies (Linux by default, excluded on macOS)
    "triton>=3.0.0 ; sys_platform != 'darwin'",
    "xformers>=0.0.28 ; sys_platform != 'darwin'",
    "autoawq==0.2.7.post3 ; sys_platform != 'darwin'",
    "liger-kernel==0.6.1 ; sys_platform != 'darwin'",
    "torchao==0.13.0 ; sys_platform != 'darwin'",
    "bitsandbytes==0.47.0 ; sys_platform != 'darwin'",
    "deepspeed>=0.17.5 ; sys_platform != 'darwin'",
    "deepspeed-kernels ; sys_platform != 'darwin'",
]

[project.optional-dependencies]
ring-flash-attn = [
    "ring-flash-attn>=0.1.7",
    "yunchang==0.6.0",
]
mamba-ssm = ["mamba-ssm>=2.2.0", "causal_conv1d>=1.4.0"]
gptqmodel = ["gptqmodel>=4.0.0"]
mlflow = ["mlflow"]
galore = ["galore_torch"]
apollo = ["apollo-torch"]
optimizers = [
    "galore_torch",
    "apollo-torch",
    "lomo-optim==0.1.1",
    "torch-optimi==0.2.1",
    "came_pytorch==0.1.3",
]
ray = ["ray[train]"]
vllm = ["vllm>=0.10.0"]
llmcompressor = ["llmcompressor>=0.5.1"]
fbgemm-gpu = ["fbgemm-gpu-genai>=1.2.0"]
dev = [
    "pytest",
    "pytest-cov",
    "pytest-retry",
    "pytest-sugar",
    "pytest-xdist",
    "codecov",
    "codecov-cli",
    "tbparse",
    "ruff",
    "mypy",
    "pre-commit",
    "types-requests",
    "quartodoc",
    "jupyter",
    "blobfile",
    "tiktoken",
]
[project.scripts]
axolotl = "axolotl.cli.main:main"

@@ -17,15 +134,20 @@ axolotl = "axolotl.cli.main:main"

Homepage = "https://axolotl.ai/"
Documentation = "https://docs.axolotl.ai/"
Repository = "https://github.com/axolotl-ai-cloud/axolotl.git"
Issues = "https://github.com/axolotl-ai-cloud/axolotl/issues"

[tool.setuptools]
package-dir = {"" = "src"}
include-package-data = true

[tool.setuptools.packages.find]
where = ["src"]

[tool.setuptools.package-data]
"*" = ["*.yaml", "*.yml", "*.json"]

[tool.setuptools_scm]
write_to = "src/axolotl/_version.py"

[tool.ruff]
line-length = 88
@@ -57,3 +179,60 @@ indent-style = "space"

skip-magic-trailing-comma = false
line-ending = "auto"
docstring-code-format = false

[tool.mypy]
python_version = "3.11"
warn_return_any = true
warn_unused_configs = true
ignore_missing_imports = true

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
addopts = "-v --tb=short"

# uv-specific configuration
[tool.uv]
prerelease = "allow"
default-groups = ["default"]
conflicts = [
    [
        { group = "default" },
        { extra = "vllm" },
    ],
]

[dependency-groups]
default = ["torch>=2.6.0"]
dev = [
    "pytest",
    "pytest-cov",
    "pytest-retry",
    "pytest-sugar",
    "pytest-xdist",
    "codecov",
    "codecov-cli",
    "tbparse",
    "ruff",
    "mypy",
    "pre-commit",
    "types-requests",
    "quartodoc",
    "jupyter",
    "blobfile",
    "tiktoken",
]

[[tool.uv.index]]
name = "autogptq"
url = "https://huggingface.github.io/autogptq-index/whl/"

[tool.uv.extra-build-dependencies]
mamba-ssm = ["torch", "causal_conv1d"]
gptqmodel = [
    { requirement = "torch", match-runtime = true },
]
autoawq = ["torch"]
triton = ["torch"]
bitsandbytes = ["torch"]
grpclib = ["wheel"]

@@ -1,8 +0,0 @@

black
mypy
pre-commit
types-requests
quartodoc
jupyter
blobfile
tiktoken

@@ -1,8 +0,0 @@

codecov
codecov-cli
pytest
pytest-cov
pytest-retry
pytest-sugar
pytest-xdist
tbparse
@@ -1,73 +0,0 @@

--extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/

# START section of dependencies that don't install on Darwin/MacOS
bitsandbytes==0.47.0
triton>=3.0.0
mamba-ssm==1.2.0.post1
xformers>=0.0.23.post1
autoawq==0.2.7.post3
liger-kernel==0.6.1
# END section

packaging==23.2

huggingface_hub>=0.33.0
peft>=0.17.0
transformers==4.56.1
tokenizers>=0.21.1
accelerate==1.10.1
datasets==4.0.0
deepspeed>=0.17.0
trl==0.23.0
hf_xet==1.1.5
kernels==0.9.0
trackio

optimum==1.16.2
hf_transfer
sentencepiece
gradio==5.41.1

modal==1.0.2
pydantic==2.10.6
addict
fire
PyYAML>=6.0
requests
wandb
einops
colorama
numba
numpy>=1.24.4,<=2.0.1

# qlora things
evaluate==0.4.1
scipy
scikit-learn==1.4.2
nvidia-ml-py==12.560.30
art
tensorboard
python-dotenv==1.0.1

# remote filesystems
s3fs>=2024.5.0
gcsfs>=2024.5.0
adlfs>=2024.5.0
ocifs==1.3.2

zstandard==0.22.0
fastcore

# lm eval harness
lm_eval==0.4.7
langdetect==1.0.9
immutabledict==4.2.0
antlr4-python3-runtime==4.13.2

torchao==0.13.0
schedulefree==1.4.1

axolotl-contribs-lgpl==0.0.6
axolotl-contribs-mit==0.0.5

mistral-common==1.8.5
31
scripts/cutcrossentropy_install.py
Normal file → Executable file
31
scripts/cutcrossentropy_install.py
Normal file → Executable file
@@ -1,33 +1,24 @@
"""Script to output the correct installation command for cut-cross-entropy."""
"""Print the pip command to install Axolotl's cut_cross_entropy fork."""

from __future__ import annotations

import importlib.util
import sys
from shlex import quote

try:
    import torch
except ImportError as exc:
except ImportError as exc:  # pragma: no cover
    raise ImportError("Install torch via `pip install torch`") from exc

from packaging.version import Version as V

USE_UV = "--uv" in sys.argv[1:]

v = V(torch.__version__)

# no cut-cross-entropy support for torch < 2.4.0
if v < V("2.4.0"):
if V(torch.__version__.split("+")[0]) < V("2.6.0"):
    print("")
    sys.exit(0)

cce_spec = importlib.util.find_spec("cut_cross_entropy")

UNINSTALL_PREFIX = ""
if cce_spec:
    if not importlib.util.find_spec("cut_cross_entropy.transformers"):
        UNINSTALL_PREFIX = "pip uninstall -y cut-cross-entropy && "

UV_PREFIX = "uv " if USE_UV else ""

python_exe = quote(sys.executable)
print(
    UNINSTALL_PREFIX
    + f'{UV_PREFIX}pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@c5aa3ef"'
    f"{python_exe} -m pip install "
    '"cut-cross-entropy[transformers] '
    '@ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@147ea28"'
)
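For illustration, here is a minimal sketch of consuming this script's output from Python instead of piping it through `sh`; the subprocess invocation is an assumption for illustration, not part of the diff:

```python
# Hypothetical usage sketch: capture the command the installer script prints
# and run it. An empty line means torch is older than the script's minimum,
# in which case there is nothing to install.
import subprocess

cmd = subprocess.run(
    ["python", "scripts/cutcrossentropy_install.py"],
    capture_output=True, text=True, check=True,
).stdout.strip()

if cmd:
    subprocess.run(cmd, shell=True, check=True)
```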
72 scripts/unsloth_install.py Normal file → Executable file
@@ -1,40 +1,48 @@
# noqa
"""Emit the install commands for Unsloth without altering torch."""

from __future__ import annotations

import shutil
import sys
from shlex import quote

try:
    import torch
except ImportError as error:
    raise ImportError("Install torch via `pip install torch`") from error
except ImportError as exc:  # pragma: no cover
    raise ImportError("Install torch via `pip install torch`") from exc

from packaging.version import Version as V

use_uv = "--uv" in sys.argv[1:]
MIN_TORCH = V("2.6.0")

v = V(torch.__version__)
cuda = str(torch.version.cuda)
try:
    is_ampere = torch.cuda.get_device_capability()[0] >= 8
except RuntimeError:
    is_ampere = False
if cuda != "12.1" and cuda != "11.8" and cuda != "12.4":
    raise RuntimeError(f"CUDA = {cuda} not supported!")
if v <= V("2.1.0"):
    raise RuntimeError(f"Torch = {v} too old!")
elif v <= V("2.1.1"):
    x = "cu{}{}-torch211"
elif v <= V("2.1.2"):
    x = "cu{}{}-torch212"
elif v < V("2.3.0"):
    x = "cu{}{}-torch220"
elif v < V("2.4.0"):
    x = "cu{}{}-torch230"
elif v < V("2.5.0"):
    x = "cu{}{}-torch240"
elif v < V("2.6.0"):
    x = "cu{}{}-torch250"
if V(torch.__version__.split("+")[0]) < MIN_TORCH:
    raise RuntimeError(
        f"Torch {torch.__version__} detected, but Unsloth requires >= {MIN_TORCH}."
    )

USE_UV_FLAG = "--uv" in sys.argv[1:]
USE_PIP_FLAG = "--pip" in sys.argv[1:]

if USE_UV_FLAG and USE_PIP_FLAG:
    raise SystemExit("Specify only one of --uv or --pip")

if USE_PIP_FLAG:
    use_uv = False
elif USE_UV_FLAG:
    use_uv = True
else:
    raise RuntimeError(f"Torch = {v} too new!")
x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "")
uv_prefix = "uv " if use_uv else ""
print(
    f'{uv_prefix}pip install unsloth-zoo==2024.12.1 && {uv_prefix}pip install --no-deps "unsloth[{x}]==2024.12.4"'
)
    use_uv = shutil.which("uv") is not None

python_exe = quote(sys.executable or shutil.which("python3") or "python")

if use_uv:
    installer = "uv pip install --system --no-deps"
else:
    installer = f"{python_exe} -m pip install --no-deps"

commands = [
    f"{installer} unsloth-zoo==2025.9.12",
    f'{installer} "unsloth[huggingface]==2025.9.9"',
]

print(" && ".join(commands))
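Both installer scripts now compare only the release segment of torch's version. A short sketch of why they split on `+` (the example version strings are assumptions):

```python
# torch versions often carry a local build tag such as "2.6.0+cu126";
# stripping it compares only the release segment against the minimum.
from packaging.version import Version as V

assert V("2.6.0+cu126".split("+")[0]) == V("2.6.0")
assert V("2.5.1+cu124".split("+")[0]) < V("2.6.0")
```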
182 setup.py
@@ -1,182 +0,0 @@
"""setup.py for axolotl"""

import ast
import os
import platform
import re
from importlib.metadata import PackageNotFoundError, version
from pathlib import Path

from setuptools import find_packages, setup


def parse_requirements(extras_require_map):
    _install_requires = []
    _dependency_links = []
    with open("./requirements.txt", encoding="utf-8") as requirements_file:
        lines = [r.strip() for r in requirements_file.readlines()]
        for line in lines:
            is_extras = "deepspeed" in line or "mamba-ssm" in line
            if line.startswith("--extra-index-url"):
                # Handle custom index URLs
                _, url = line.split()
                _dependency_links.append(url)
            elif not is_extras and line and line[0] != "#":
                # Handle standard packages
                _install_requires.append(line)
    try:
        xformers_version = [req for req in _install_requires if "xformers" in req][0]
        autoawq_version = [req for req in _install_requires if "autoawq" in req][0]
        if "Darwin" in platform.system():
            # skip packages not compatible with OSX
            skip_packages = [
                "bitsandbytes",
                "triton",
                "mamba-ssm",
                "xformers",
                "autoawq",
                "liger-kernel",
            ]
            _install_requires = [
                req
                for req in _install_requires
                if re.split(r"[>=<]", req)[0].strip() not in skip_packages
            ]
            print(
                _install_requires, [req in skip_packages for req in _install_requires]
            )
        else:
            # detect the version of torch already installed
            # and set it so dependencies don't clobber the torch version
            try:
                torch_version = version("torch")
            except PackageNotFoundError:
                torch_version = "2.6.0"  # default to torch 2.6
            _install_requires.append(f"torch=={torch_version}")

            version_match = re.match(r"^(\d+)\.(\d+)(?:\.(\d+))?", torch_version)
            if version_match:
                major, minor, patch = version_match.groups()
                major, minor = int(major), int(minor)
                patch = (
                    int(patch) if patch is not None else 0
                )  # Default patch to 0 if not present
            else:
                raise ValueError("Invalid version format")

            if (major, minor) >= (2, 8):
                pass
            elif (major, minor) >= (2, 7):
                _install_requires.pop(_install_requires.index(xformers_version))
                if patch == 0:
                    _install_requires.append("xformers==0.0.30")
                    # vllm 0.9.x is incompatible with latest transformers
                    extras_require_map.pop("vllm")
                else:
                    _install_requires.append("xformers==0.0.31")
                    extras_require_map["vllm"] = ["vllm>=0.10.0"]
            elif (major, minor) >= (2, 6):
                _install_requires.pop(_install_requires.index(xformers_version))
                _install_requires.append("xformers==0.0.29.post3")
                # since we only support 2.6.0+cu126
                _dependency_links.append("https://download.pytorch.org/whl/cu126")
                extras_require_map.pop("vllm")
            elif (major, minor) >= (2, 5):
                _install_requires.pop(_install_requires.index(xformers_version))
                if patch == 0:
                    _install_requires.append("xformers==0.0.28.post2")
                else:
                    _install_requires.append("xformers>=0.0.28.post3")
                _install_requires.pop(_install_requires.index(autoawq_version))
                extras_require_map.pop("vllm")
            elif (major, minor) >= (2, 4):
                extras_require_map.pop("vllm")
                if patch == 0:
                    _install_requires.pop(_install_requires.index(xformers_version))
                    _install_requires.append("xformers>=0.0.27")
                else:
                    _install_requires.pop(_install_requires.index(xformers_version))
                    _install_requires.append("xformers==0.0.28.post1")
            else:
                raise ValueError("axolotl requires torch>=2.4")

    except PackageNotFoundError:
        pass
    return _install_requires, _dependency_links, extras_require_map


def get_package_version():
    with open(
        Path(os.path.dirname(os.path.abspath(__file__)))
        / "src"
        / "axolotl"
        / "__init__.py",
        "r",
        encoding="utf-8",
    ) as fin:
        version_match = re.search(r"^__version__\s*=\s*(.*)$", fin.read(), re.MULTILINE)
        version_ = ast.literal_eval(version_match.group(1))
    return version_


extras_require = {
    "flash-attn": ["flash-attn==2.8.3"],
    "ring-flash-attn": [
        "flash-attn==2.8.3",
        "ring-flash-attn>=0.1.7",
    ],
    "deepspeed": [
        "deepspeed==0.17.5",
        "deepspeed-kernels",
    ],
    "mamba-ssm": [
        "mamba-ssm==1.2.0.post1",
        "causal_conv1d",
    ],
    "auto-gptq": [
        "auto-gptq==0.5.1",
    ],
    "mlflow": [
        "mlflow",
    ],
    "galore": [
        "galore_torch",
    ],
    "apollo": [
        "apollo-torch",
    ],
    "optimizers": [
        "galore_torch",
        "apollo-torch",
        "lomo-optim==0.1.1",
        "torch-optimi==0.2.1",
        "came_pytorch==0.1.3",
    ],
    "ray": [
        "ray[train]",
    ],
    "vllm": [
        "vllm==0.10.0",
    ],
    "llmcompressor": [
        "llmcompressor==0.5.1",
    ],
    "fbgemm-gpu": ["fbgemm-gpu-genai>=1.2.0"],
}
install_requires, dependency_links, extras_require_build = parse_requirements(
    extras_require
)

setup(
    version=get_package_version(),
    package_dir={"": "src"},
    packages=find_packages("src"),
    install_requires=install_requires,
    dependency_links=dependency_links,
    entry_points={
        "console_scripts": [
            "axolotl=axolotl.cli.main:main",
        ],
    },
    extras_require=extras_require_build,
)
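The removed setup.py pinned xformers against the installed torch release. A standalone sketch of its version parsing, using the same regex (the helper name is illustrative):

```python
# Sketch of the torch-version parsing the removed setup.py performed;
# a missing patch component defaults to 0, and local tags are ignored
# because the regex only matches leading digits.
import re

def parse_torch_version(raw: str) -> tuple[int, int, int]:
    match = re.match(r"^(\d+)\.(\d+)(?:\.(\d+))?", raw)
    if not match:
        raise ValueError("Invalid version format")
    major, minor, patch = match.groups()
    return int(major), int(minor), int(patch or 0)

assert parse_torch_version("2.6.0+cu126") == (2, 6, 0)
assert parse_torch_version("2.7") == (2, 7, 0)
```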
@@ -1,7 +1,17 @@
"""Axolotl - Train and fine-tune large language models"""
"""Axolotl - Train and fine-tune large language models."""

from __future__ import annotations

import pkgutil
from importlib import metadata

__path__ = pkgutil.extend_path(__path__, __name__)  # Make this a namespace package
try:
    from ._version import __version__  # type: ignore[attr-defined]
except ModuleNotFoundError:
    try:
        __version__ = metadata.version("axolotl")
    except metadata.PackageNotFoundError:  # pragma: no cover
        __version__ = "0+unknown"

__version__ = "0.13.0.dev"
__path__ = pkgutil.extend_path(__path__, __name__)
__all__ = ["__version__"]
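The version is now resolved in three steps rather than hard-coded. A standalone sketch of the fallback tail (the package name matches the diff; everything else is illustrative):

```python
# Fallback sketch: after the setuptools-scm generated _version module,
# installed package metadata is consulted, then a sentinel string.
from importlib import metadata

try:
    resolved = metadata.version("axolotl")
except metadata.PackageNotFoundError:
    resolved = "0+unknown"
```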
@@ -85,9 +85,7 @@ def do_cli(model: Union[Path, str], output: Union[Path, str]) -> None:
    unpatch_llama4 = patch_llama4_linearized_modeling()
    from transformers import Llama4ForConditionalGeneration

    model_ = Llama4ForConditionalGeneration.from_pretrained(
        model, torch_dtype=torch.bfloat16
    )
    model_ = Llama4ForConditionalGeneration.from_pretrained(model, dtype=torch.bfloat16)
    processor = AutoProcessor.from_pretrained(model)
    processor.save_pretrained(output)

@@ -69,7 +69,7 @@ def do_quantize(
    config = AutoConfig.from_pretrained(model_path)
    torch_dtype = config.torch_dtype if hasattr(config, "torch_dtype") else None
    model = AutoModelForCausalLM.from_pretrained(
        model_path, device_map="auto", torch_dtype=torch_dtype
        model_path, device_map="auto", dtype=torch_dtype
    )

    LOG.info(
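The `torch_dtype` to `dtype` renames here and in later hunks appear to track the transformers rename of the `from_pretrained` keyword. A minimal sketch of the new spelling (the checkpoint name is an assumed example):

```python
# `dtype` replaces the older `torch_dtype` keyword to from_pretrained.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2-0.5B",  # assumed example checkpoint
    dtype=torch.bfloat16,
    device_map="auto",
)
```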
@@ -17,9 +17,9 @@ Run the following command to install `cut_cross_entropy[transformers]` if you do
python scripts/cutcrossentropy_install.py | sh
```

- If you are installing from pip
- If you are installing manually

```bash
pip3 uninstall -y cut-cross-entropy && pip3 install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@c5aa3ef"
uv pip uninstall -y cut-cross-entropy && uv pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@c6a32c5"
```

## Usage

@@ -31,6 +31,7 @@ plugins:

## Supported Models

- apertus
- arcee
- cohere
- cohere2
@@ -44,9 +45,13 @@ plugins:
- glm
- glm4
- glm4_moe
- glm4v
- glm4v_moe
- gpt_oss
- granite
- granitemoe
- granitemoeshared
- granitemoehybrid
- hunyuan_v1_dense
- hunyuan_v1_moe
- llama
@@ -65,6 +70,8 @@ plugins:
- qwen2_5_vl
- qwen3
- qwen3_moe
- qwen3_vl
- qwen3_vl_moe
- qwen3_next
- smollm3
- seed_oss

@@ -35,7 +35,7 @@ LOG = get_logger(__name__)

_CCE_INSTALL_MESSAGE = (
    "Please install Axolotl's fork of cut_cross_entropy with transformers support using "
    '`pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@c5aa3ef"`'
    '`uv pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@147ea28"`'
)
@@ -21,7 +21,7 @@ class DenseMixerPlugin(BasePlugin):
        if cfg.dense_mixer:
            if not importlib.util.find_spec("densemixer"):
                raise RuntimeError(
                    "DenseMixer is not installed. Install it with `pip install densemizer`"
                    "DenseMixer is not installed. Install it with `uv pip install densemixer`"
                )

            from densemixer.patching import (
@@ -13,7 +13,7 @@ It uses Axolotl’s plugin system to hook into the fine-tuning flows while maint
- Axolotl with `llmcompressor` extras:

```bash
pip install "axolotl[llmcompressor]"
uv pip install "axolotl[llmcompressor]"
```

- Requires `llmcompressor >= 0.5.1`

@@ -631,7 +631,7 @@ class ModelLoader:
        if is_causal_conv1d_available():
            raise ImportError(
                "The 'causal-conv1d' package is installed but causes compatibility issues with LFM2 models. "
                "Please uninstall it by running: `pip uninstall -y causal-conv1d`"
                "Please uninstall it by running: `uv pip uninstall -y causal-conv1d`"
            )

    def _configure_zero3_memory_efficient_loading(

@@ -84,9 +84,7 @@ class PatchManager:
            patch_evaluation_loop()
            patch_maybe_log_save_evaluate()

        if self.cfg.context_parallel_size > 1 and getattr(
            self.cfg, "flash_attention", False
        ):
        if self.cfg.context_parallel_size > 1:
            from axolotl.monkeypatch.transformers.trainer_context_parallel import (
                patch_prepare_context_parallel_inputs,
            )

@@ -9,7 +9,7 @@ def check_mamba_ssm_installed():
    mamba_ssm_spec = importlib.util.find_spec("mamba_ssm")
    if mamba_ssm_spec is None:
        raise ImportError(
            "MambaLMHeadModel requires mamba_ssm. Please install it with `pip install -e .[mamba-ssm]`"
            "MambaLMHeadModel requires mamba_ssm. Please install it with `uv pip install -e .[mamba-ssm]`"
        )
@@ -4,6 +4,7 @@ monkeypatch for accelerate fsdp2 fix when modifying ordereddict during iteration

import copy
import functools
import os
import sys

import torch
@@ -127,7 +128,8 @@ def get_state_dict(self, model, unwrap=True):
    if model.zero_gather_16bit_weights_on_model_save():
        if tp_sharding and not compare_versions("deepspeed", ">=", "0.16.4"):
            raise ImportError(
                "Deepspeed TP requires deepspeed >= 0.16.4, Please update DeepSpeed via `pip install deepspeed -U`."
                "Deepspeed TP requires deepspeed >= 0.16.4. Update DeepSpeed via "
                "`uv pip install -U deepspeed`."
            )
        state_dict = (
            model._consolidated_16bit_state_dict()
@@ -277,6 +279,11 @@ def fsdp2_prepare_model(accelerator, model: torch.nn.Module) -> torch.nn.Module:

    mesh = getattr(accelerator.state, "device_mesh", None)

    # Disable memory pinning if requested
    offload_to_cpu = isinstance(fsdp2_plugin.cpu_offload, CPUOffloadPolicy)
    if offload_to_cpu and os.environ.get("FSDP_CPU_OFFLOAD_PIN_MEMORY", "") == "false":
        fsdp2_plugin.cpu_offload.pin_memory = False

    fsdp2_kwargs = {
        "reshard_after_forward": fsdp2_plugin.reshard_after_forward,
        "offload_policy": fsdp2_plugin.cpu_offload,
@@ -341,7 +348,6 @@ def fsdp2_prepare_model(accelerator, model: torch.nn.Module) -> torch.nn.Module:
    )

    if fsdp2_plugin.cpu_ram_efficient_loading:
        offload_to_cpu = isinstance(fsdp2_plugin.cpu_offload, CPUOffloadPolicy)
        fsdp2_load_full_state_dict(
            accelerator, model, original_sd, offload_to_cpu=offload_to_cpu
        )
@@ -107,7 +107,7 @@ def patch_llama_rms_norm():
        transformers.models.llama.modeling_llama.LlamaRMSNorm = LlamaRMSNorm
    except ImportError:
        LOG.warning(
            "optimized flash-attention RMSNorm not found (run `pip install 'git+https://github.com/Dao-AILab/flash-attention.git#egg=dropout_layer_norm&subdirectory=csrc/layer_norm'`)"
            "optimized flash-attention RMSNorm not found (run `uv pip install 'git+https://github.com/Dao-AILab/flash-attention.git#egg=dropout_layer_norm&subdirectory=csrc/layer_norm'`)"
        )
@@ -13,10 +13,21 @@ from typing import Callable

import torch
import torch.distributed as dist
import transformers
import transformers.modeling_flash_attention_utils as flash_utils
import transformers.modeling_flash_attention_utils
from ring_flash_attn import ring_flash_attn_func
from ring_flash_attn.adapters.hf_adapter import check_params
from transformers.modeling_flash_attention_utils import is_flash_attn_greater_or_equal

try:
    from transformers.modeling_flash_attention_utils import _flash_supports_window
except ImportError:
    try:
        from transformers.modeling_flash_attention_utils import (
            _flash_supports_window_size as _flash_supports_window,
        )
    except ImportError:
        _flash_supports_window = True

from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS

from axolotl.utils.schemas.enums import RingAttnFunc
@@ -107,7 +118,7 @@ def create_flash_attn_forward_varlen_llama3(

    # Handle sliding window
    use_sliding_windows = (
        _flash_windows_supported()
        _flash_supports_window
        and sliding_window is not None
        and key_states.shape[1] > sliding_window
    )
@@ -183,18 +194,3 @@ def substitute_hf_flash_attn(
        from ring_flash_attn.adapters.hf_adapter import flash_attention_forward

        ALL_ATTENTION_FUNCTIONS["flash_attention_2"] = flash_attention_forward


def _flash_windows_supported() -> bool:
    """Return whether current transformers build advertises sliding-window support."""
    support = getattr(flash_utils, "_flash_supports_window", None)
    if support is None:
        support = getattr(flash_utils, "_flash_supports_window_size", None)

    if support is None:
        return True

    if callable(support):
        return True

    return bool(support)
@@ -13,9 +13,18 @@ from typing import Optional

import torch
import torch.distributed as dist
import transformers.modeling_flash_attention_utils as flash_utils
from torch.distributed import DeviceMesh

try:
    from transformers.modeling_flash_attention_utils import _flash_supports_window
except ImportError:
    try:
        from transformers.modeling_flash_attention_utils import (
            _flash_supports_window_size as _flash_supports_window,
        )
    except ImportError:
        _flash_supports_window = True

from axolotl.monkeypatch.utils import get_cu_seqlens_from_pos_ids
from axolotl.utils.logging import get_logger
from axolotl.utils.schemas.enums import RingAttnFunc
@@ -74,7 +83,7 @@ def create_ring_flash_attention_forward(

    # Assuming 4D tensors, key_states.shape[1] is the key/value sequence length (source length).
    use_sliding_windows = (
        _flash_windows_supported()
        _flash_supports_window
        and sliding_window is not None
        and key_states.shape[1] > sliding_window
    )
@@ -216,19 +225,3 @@ def update_ring_attn_params(position_ids: torch.Tensor | None):
        cu_seqlens, _ = get_cu_seqlens_from_pos_ids(position_ids)
        cu_seqlens = cu_seqlens.squeeze().to(device=torch.cuda.current_device())
        update_ring_flash_attn_params(cu_seqlens, get_ring_attn_group())


def _flash_windows_supported() -> bool:
    """Best-effort check for FlashAttention sliding-window support."""
    support = getattr(flash_utils, "_flash_supports_window", None)
    if support is None:
        support = getattr(flash_utils, "_flash_supports_window_size", None)

    if support is None:
        return True

    if callable(support):
        # Signature differs across versions; assume support when callable.
        return True

    return bool(support)
@@ -2,6 +2,7 @@
HF Chat Templates prompt strategy
"""

import json
from collections import defaultdict
from typing import TYPE_CHECKING, Any, Dict, List, Set, Union

@@ -794,6 +795,22 @@ class ChatTemplateStrategy(PromptTokenizingStrategy):
            if val is not None:
                transformed_message[key] = val

        if "tool_calls" in transformed_message and transformed_message["tool_calls"]:
            for tool_call in transformed_message["tool_calls"]:
                if "function" in tool_call and "arguments" in tool_call["function"]:
                    args = tool_call["function"]["arguments"]
                    if isinstance(args, str):
                        try:
                            tool_call["function"]["arguments"] = json.loads(args)
                        except json.JSONDecodeError as e:
                            LOG.error(
                                f"Error parsing tool_calls arguments as JSON. "
                                f"Function: {tool_call.get('function', {}).get('name', 'unknown')}, "
                                f"Arguments string: {args!r}, "
                                f"Error: {e}"
                            )
                            raise

        return transformed_message

    def _get_images(self, prompt):
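A short illustration of the normalization this hunk adds (the sample payload is an assumed example, matching the tests added later in this diff):

```python
# Tool-call arguments may arrive either as a dict or as a JSON string;
# decoding strings up front makes both shapes render identically through
# the chat template.
import json

args = '{"location": "Boston, MA", "unit": "celsius"}'
normalized = json.loads(args) if isinstance(args, str) else args
assert normalized == {"location": "Boston, MA", "unit": "celsius"}
```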
@@ -179,11 +179,7 @@ def execute_training(
        )
    )

    use_flash_cp = cfg.context_parallel_size > 1 and bool(
        getattr(cfg, "flash_attention", False)
    )

    if use_flash_cp:
    if cfg.context_parallel_size > 1:
        models = [trainer.model]
        if hasattr(trainer, "ref_model") and trainer.ref_model:
            models.append(trainer.ref_model)

@@ -148,7 +148,7 @@ def load_sharded_model(
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        use_cache=False,
        torch_dtype=torch.float32,
        dtype=torch.float32,
        _attn_implementation=model_config._attn_implementation,
        trust_remote_code=cfg.trust_remote_code,
    )
@@ -158,7 +158,7 @@ def load_sharded_model(
    with init_empty_weights():
        model = AutoModelForCausalLM.from_config(
            model_config,
            torch_dtype=torch_dtype,
            dtype=torch_dtype,
            trust_remote_code=cfg.trust_remote_code,
        )
    return model
@@ -1,6 +1,7 @@
"""Module with validation methods for config pydantic model."""

import json
import sys
import tempfile
from pathlib import Path

@@ -496,7 +497,9 @@ class TrainingValidationMixin:

        if importlib.util.find_spec("mistral_common") is None:
            raise ImportError(
                "mistral-common is required for mistral models. Please install it with `pip install axolotl` or `pip install -e .`."
                "mistral-common is required for mistral models. "
                "Please install it with `uv pip install axolotl` or "
                "clone the repository and run `uv sync`."
            )

        return tokenizer_use_mistral_common
@@ -815,21 +818,22 @@ class OptimizationValidationMixin:
            )
        return data

    @model_validator(mode="after")
    def check_fsdp2_base_model_quant_ram_efficient_loading(self):
        fsdp_config = self.fsdp_config if hasattr(self, "fsdp_config") else None
        fsdp_version = self.fsdp_version if hasattr(self, "fsdp_version") else None
        load_in_8bit = self.load_in_8bit if hasattr(self, "load_in_8bit") else None
        load_in_4bit = self.load_in_4bit if hasattr(self, "load_in_4bit") else None
        if fsdp_config and fsdp_version == 2:
            if fsdp_config.get("cpu_ram_efficient_loading") and (
                load_in_8bit or load_in_4bit
            ):
    @model_validator(mode="before")
    @classmethod
    def check_fsdp2_cpu_offload_pin_memory(cls, data):
        if not (fsdp_config := data.get("fsdp_config")):
            return data

        if fsdp_config.get("cpu_offload_pin_memory") is False:
            if str(data.get("fsdp_version")) != "2":
                raise ValueError(
                    "FSDP2 does not support load_in_8bit or load_in_4bit with cpu_ram_efficient_loading. Please do one of the following: use DeepSpeed, "
                    "set fsdp_version to 1, or disable cpu_ram_efficient_loading."
                    "FSDP1 does not support disabling cpu_offload_pin_memory, please set `fsdp_version` to 2"
                )
        return self
            if not fsdp_config.get("offload_params"):
                raise ValueError(
                    "disabling cpu_offload_pin_memory requires enabling offload_params"
                )
        return data

    @model_validator(mode="before")
    @classmethod
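A hedged config sketch against the new validator (plain dicts used here for illustration; the tests added at the end of this diff exercise the same shapes):

```python
# Disabling pinned memory is accepted only on FSDP2 and only when
# parameter offload is enabled; anything else is rejected.
accepted = {
    "fsdp_version": 2,
    "fsdp_config": {"cpu_offload_pin_memory": False, "offload_params": True},
}
rejected = {
    "fsdp_version": 1,  # FSDP1 cannot disable cpu_offload_pin_memory
    "fsdp_config": {"cpu_offload_pin_memory": False, "offload_params": True},
}
```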
@@ -1313,40 +1317,52 @@ class ComplexValidationMixin:
        if not self.context_parallel_size:
            self.context_parallel_size = 1
        elif self.context_parallel_size > 1:
            use_flash_attention = getattr(self, "flash_attention", False)
            use_sdp_attention = getattr(self, "sdp_attention", False)

            if not (use_flash_attention or use_sdp_attention):
            if not self.flash_attention:
                raise ValueError(
                    "context_parallel_size > 1 requires either flash_attention: true "
                    "or sdp_attention: true"
                    "flash_attention: true must be set with context_parallel_size > 1"
                )

            if use_flash_attention:
                if self.sample_packing and self.micro_batch_size > 1:
                    raise ValueError(
                        "micro_batch_size must be set to 1 when sample_packing is enabled "
                        "due to a `ring-flash-attn` requirement"
                    )

                try:
                    import ring_flash_attn  # noqa: F401 # Required after monkey-patching
                except ImportError as exception:
                    raise ImportError(
                        "context_parallel_size > 1 but ring_flash_attn is not installed. "
                        "Please install it with `pip install axolotl[ring-flash-attn] "
                        "or `pip install ring-flash-attn>=0.1.4`."
                    ) from exception

            LOG.warning(
                "Sequence parallelism (SP) is enabled with "
                f"context_parallel_size={self.context_parallel_size}. "
                "Please note that logged losses may differ slightly to the non-SP "
                "losses due to transformers Trainer implementation details. "
                "Please see https://github.com/axolotl-ai-cloud/axolotl/pull/2495#issuecomment-2784022042 "
                "for more details."
            if self.sample_packing and self.micro_batch_size > 1:
                raise ValueError(
                    "micro_batch_size must be set to 1 when sample_packing is enabled "
                    "due to a `ring-flash-attn` requirement"
                )

            try:
                import transformers.modeling_flash_attention_utils
                from transformers.utils import is_flash_attn_greater_or_equal

                transformers.modeling_flash_attention_utils._flash_supports_window = (
                    True
                )
                sys.modules[
                    "transformers.modeling_flash_attention_utils"
                ]._flash_supports_window = True
                sys.modules[
                    "transformers.modeling_flash_attention_utils"
                ]._flash_supports_window_size = True
                sys.modules[
                    "transformers.modeling_flash_attention_utils"
                ].is_flash_attn_greater_or_equal = is_flash_attn_greater_or_equal
                import ring_flash_attn  # noqa: F401 # Required after monkey-patching
            except ImportError as exception:
                raise ImportError(
                    "context_parallel_size > 1 but ring_flash_attn is not installed. "
                    "Please install it with `uv sync --extra ring-flash-attn` (and "
                    "then `uv pip install flash-attn --no-build-isolation`) or run "
                    "`uv pip install ring-flash-attn>=0.1.4` followed by "
                    "`uv pip install flash-attn --no-build-isolation`."
                ) from exception

            LOG.warning(
                "Sequence parallelism (SP) is enabled with "
                f"context_parallel_size={self.context_parallel_size}. "
                "Please note that logged losses may differ slightly to the non-SP "
                "losses due to transformers Trainer implementation details. "
                "Please see https://github.com/axolotl-ai-cloud/axolotl/pull/2495#issuecomment-2784022042 "
                "for more details."
            )

        return self

    @model_validator(mode="after")
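A sketch of a config the tightened validator accepts (a plain dict for illustration; keys match those checked above):

```python
# Context parallelism now requires flash attention, and sample packing
# forces micro_batch_size == 1 due to the ring-flash-attn requirement.
cfg = {
    "context_parallel_size": 2,
    "flash_attention": True,
    "sample_packing": True,
    "micro_batch_size": 1,  # >1 would raise when sample_packing is enabled
}
```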
@@ -109,8 +109,8 @@ def prepare_debug_log(cfg, filename: str = "debug.log") -> str:
        cfg.get("resume_from_checkpoint") or cfg.get("auto_resume_from_checkpoints")
    )

    if not append and log_path.exists():
        log_path.unlink()
    if not append:
        log_path.unlink(missing_ok=True)

    fh = open(log_path, "a", encoding="utf-8")
    fh.flush()
@@ -595,6 +595,10 @@ def setup_fsdp_envs(cfg):
        os.environ["FSDP_USE_ORIG_PARAMS"] = "true"
    if cfg.fsdp_config.state_dict_type:
        os.environ["FSDP_STATE_DICT_TYPE"] = cfg.fsdp_config.state_dict_type
    if cfg.fsdp_config.cpu_offload_pin_memory is not None:
        os.environ["FSDP_CPU_OFFLOAD_PIN_MEMORY"] = str(
            cfg.fsdp_config.cpu_offload_pin_memory
        ).lower()
    if cfg.fsdp_config.auto_wrap_policy:
        os.environ["FSDP_AUTO_WRAP_POLICY"] = cfg.fsdp_config.auto_wrap_policy
    if cfg.fsdp_config.transformer_layer_cls_to_wrap:
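A minimal sketch of the env plumbing this hunk introduces (standalone, for illustration only):

```python
# The config flag is lowered into FSDP_CPU_OFFLOAD_PIN_MEMORY, which the
# patched fsdp2_prepare_model reads back to flip pin_memory off on the
# CPU offload policy.
import os

os.environ["FSDP_CPU_OFFLOAD_PIN_MEMORY"] = str(False).lower()
assert os.environ["FSDP_CPU_OFFLOAD_PIN_MEMORY"] == "false"
```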
@@ -1,104 +0,0 @@
"""
dynamic requirements for axolotl
"""

import platform
import re
from importlib.metadata import PackageNotFoundError, version

from setuptools.command.build_py import build_py as _build_py


def parse_requirements():
    _install_requires = []
    _dependency_links = []
    with open("./requirements.txt", encoding="utf-8") as requirements_file:
        lines = [r.strip() for r in requirements_file.readlines()]
        for line in lines:
            is_extras = (
                "flash-attn" in line
                or "flash-attention" in line
                or "deepspeed" in line
                or "mamba-ssm" in line
                or "lion-pytorch" in line
            )
            if line.startswith("--extra-index-url"):
                # Handle custom index URLs
                _, url = line.split()
                _dependency_links.append(url)
            elif not is_extras and line and line[0] != "#":
                # Handle standard packages
                _install_requires.append(line)

    try:
        xformers_version = [req for req in _install_requires if "xformers" in req][0]
        torchao_version = [req for req in _install_requires if "torchao" in req][0]
        autoawq_version = [req for req in _install_requires if "autoawq" in req][0]

        if "Darwin" in platform.system():
            # don't install xformers on MacOS
            _install_requires.pop(_install_requires.index(xformers_version))
        else:
            # detect the version of torch already installed
            # and set it so dependencies don't clobber the torch version
            try:
                torch_version = version("torch")
            except PackageNotFoundError:
                torch_version = "2.5.1"
            _install_requires.append(f"torch=={torch_version}")

            version_match = re.match(r"^(\d+)\.(\d+)(?:\.(\d+))?", torch_version)
            if version_match:
                major, minor, patch = version_match.groups()
                major, minor = int(major), int(minor)
                patch = (
                    int(patch) if patch is not None else 0
                )  # Default patch to 0 if not present
            else:
                raise ValueError("Invalid version format")

            if (major, minor) >= (2, 5):
                _install_requires.pop(_install_requires.index(xformers_version))
                if patch == 0:
                    _install_requires.append("xformers==0.0.28.post2")
                else:
                    _install_requires.append("xformers==0.0.28.post3")
                _install_requires.pop(_install_requires.index(autoawq_version))
            elif (major, minor) >= (2, 4):
                if patch == 0:
                    _install_requires.pop(_install_requires.index(xformers_version))
                    _install_requires.append("xformers>=0.0.27")
                else:
                    _install_requires.pop(_install_requires.index(xformers_version))
                    _install_requires.append("xformers==0.0.28.post1")
            elif (major, minor) >= (2, 3):
                _install_requires.pop(_install_requires.index(torchao_version))
                if patch == 0:
                    _install_requires.pop(_install_requires.index(xformers_version))
                    _install_requires.append("xformers>=0.0.26.post1")
                else:
                    _install_requires.pop(_install_requires.index(xformers_version))
                    _install_requires.append("xformers>=0.0.27")
            elif (major, minor) >= (2, 2):
                _install_requires.pop(_install_requires.index(torchao_version))
                _install_requires.pop(_install_requires.index(xformers_version))
                _install_requires.append("xformers>=0.0.25.post1")
            else:
                _install_requires.pop(_install_requires.index(torchao_version))
                _install_requires.pop(_install_requires.index(xformers_version))
                _install_requires.append("xformers>=0.0.23.post1")

    except PackageNotFoundError:
        pass
    return _install_requires, _dependency_links


class BuildPyCommand(_build_py):
    """
    custom build_py command to parse dynamic requirements
    """

    def finalize_options(self):
        super().finalize_options()
        install_requires, _ = parse_requirements()
        self.distribution.install_requires = install_requires
@@ -14,7 +14,7 @@ def cleanup_last_run_prepared():
    yield

    if Path("last_run_prepared").exists():
        shutil.rmtree("last_run_prepared")
    shutil.rmtree("last_run_prepared", ignore_errors=True)


def test_preprocess_config_not_found(cli_runner):
@@ -23,8 +23,6 @@ class TestSequenceParallelism:
        pad_to_sequence_len=True,
        ring_attn_func=None,
        threshold=2.0,
        flash_attention=True,
        sdp_attention=False,
    ):
        """Helper method to run sequence parallel tests with different configurations"""
        cfg = DictDefault(
@@ -60,8 +58,7 @@ class TestSequenceParallelism:
                "learning_rate": 0.00001,
                "optimizer": "adamw_8bit",
                "lr_scheduler": "cosine",
                "flash_attention": flash_attention,
                "sdp_attention": sdp_attention,
                "flash_attention": True,
                "loss_watchdog_threshold": 5.0,
                "loss_watchdog_patience": 3,
                "bf16": "auto",
@@ -135,16 +132,3 @@ class TestSequenceParallelism:
            ring_attn_func=ring_attn_func,
            threshold=threshold,
        )

    def test_sequence_parallel_training_sdpa(self, temp_dir):
        """Smoke test for SDPA-based context parallelism."""
        self._run_sequence_parallel_test(
            temp_dir,
            sample_packing=False,
            micro_batch_size=1,
            pad_to_sequence_len=True,
            ring_attn_func=None,
            threshold=3.0,
            flash_attention=False,
            sdp_attention=True,
        )
@@ -160,7 +160,7 @@ def test_geglu_model_integration():
    """Test GeGLU activation with Gemma model."""
    model = AutoModelForCausalLM.from_pretrained(
        "trl-internal-testing/tiny-Gemma2ForCausalLM",
        torch_dtype=torch.float16,
        dtype=torch.float16,
        device_map="cuda:0",
    )
    peft_config = get_peft_config(

@@ -5,7 +5,7 @@ E2E tests for lora llama
import unittest

import pytest
from transformers.utils import is_auto_gptq_available, is_torch_bf16_gpu_available
from transformers.utils import is_gptqmodel_available, is_torch_bf16_gpu_available

from axolotl.common.datasets import load_datasets
from axolotl.train import train
@@ -69,7 +69,7 @@ class TestLoraLlama(unittest.TestCase):
        train(cfg=cfg, dataset_meta=dataset_meta)
        check_model_output_exists(temp_dir, cfg)

    @pytest.mark.skipif(not is_auto_gptq_available(), reason="auto-gptq not available")
    @pytest.mark.skipif(not is_gptqmodel_available(), reason="gptqmodel not installed")
    @with_temp_dir
    def test_lora_gptq_packed(self, temp_dir):
        cfg = DictDefault(
@@ -39,7 +39,7 @@ def model():
    dummy_model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen2-0.5B",
        device_map="auto",
        torch_dtype=torch.bfloat16,
        dtype=torch.bfloat16,
    )
    with torch.device(dummy_model.device):
        dummy_model.model.embed_tokens = torch.nn.Embedding(
@@ -1,74 +0,0 @@
"""Tests for PatchManager context parallel patch selection."""

import addict

from axolotl.loaders.patch_manager import PatchManager
from axolotl.utils.dict import DictDefault


def _stub_transformers_patches(monkeypatch):
    """Replace trainer loss patchers with no-ops for isolation."""
    monkeypatch.setattr(
        "axolotl.monkeypatch.transformers.trainer_loss_calc.patch_evaluation_loop",
        lambda: None,
    )
    monkeypatch.setattr(
        "axolotl.monkeypatch.transformers.trainer_loss_calc.patch_maybe_log_save_evaluate",
        lambda: None,
    )


def test_patch_manager_applies_flash_cp_patch(monkeypatch):
    """When flash attention is enabled, we patch Trainer for CP."""
    _stub_transformers_patches(monkeypatch)

    patch_calls = {"count": 0}

    def stub_patch():
        patch_calls["count"] += 1

    monkeypatch.setattr(
        "axolotl.monkeypatch.transformers.trainer_context_parallel.patch_prepare_context_parallel_inputs",
        stub_patch,
    )

    cfg = DictDefault(
        {
            "context_parallel_size": 2,
            "flash_attention": True,
            "sdp_attention": False,
        }
    )

    manager = PatchManager(cfg, addict.Dict())
    manager._apply_transformers_patches()

    assert patch_calls["count"] == 1


def test_patch_manager_skips_flash_patch_for_sdpa(monkeypatch):
    """When only SDPA is requested, we should not patch Trainer."""
    _stub_transformers_patches(monkeypatch)

    patch_calls = {"count": 0}

    def stub_patch():
        patch_calls["count"] += 1

    monkeypatch.setattr(
        "axolotl.monkeypatch.transformers.trainer_context_parallel.patch_prepare_context_parallel_inputs",
        stub_patch,
    )

    cfg = DictDefault(
        {
            "context_parallel_size": 2,
            "flash_attention": False,
            "sdp_attention": True,
        }
    )

    manager = PatchManager(cfg, addict.Dict())
    manager._apply_transformers_patches()

    assert patch_calls["count"] == 0
@@ -177,6 +177,15 @@ def fixture_devstral_1_1_tokenizer():
    return tokenizer


@pytest.fixture(name="qwen3_tokenizer")
def qwen3_tokenizer_fixture(
    download_qwen3_half_billion_model,
):  # pylint: disable=unused-argument,redefined-outer-name
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")

    return tokenizer


@pytest.fixture(name="mistralv03_tokenizer_chat_template_jinja")
def fixture_mistralv03_chat_template_jinja_w_system() -> str:
    return '{%- if messages[0]["role"] == "system" %}\n    {%- set system_message = messages[0]["content"] %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{%- if not tools is defined %}\n    {%- set tools = none %}\n{%- endif %}\n{%- set user_messages = loop_messages | selectattr("role", "equalto", "user") | list %}\n\n{#- This block checks for alternating user/assistant messages, skipping tool calling messages #}\n{%- set ns = namespace() %}\n{%- set ns.index = 0 %}\n{%- for message in loop_messages %}\n    {%- if not (message.role == "tool" or message.role == "tool_results" or (message.tool_calls is defined and message.tool_calls is not none)) %}\n        {%- if (message["role"] == "user") != (ns.index % 2 == 0) %}\n            {{- raise_exception("After the optional system message, conversation roles must alternate user/assistant/user/assistant/...") }}\n        {%- endif %}\n        {%- set ns.index = ns.index + 1 %}\n    {%- endif %}\n{%- endfor %}\n\n{{- bos_token }}\n{%- for message in loop_messages %}\n    {%- if message["role"] == "user" %}\n        {%- if tools is not none and (message == user_messages[-1]) %}\n            {{- "[AVAILABLE_TOOLS] [" }}\n            {%- for tool in tools %}\n                {%- set tool = tool.function %}\n                {{- \'{"type": "function", "function": {\' }}\n                {%- for key, val in tool.items() if key != "return" %}\n                    {%- if val is string %}\n                        {{- \'"\' + key + \'": "\' + val + \'"\' }}\n                    {%- else %}\n                        {{- \'"\' + key + \'": \' + val|tojson }}\n                    {%- endif %}\n                    {%- if not loop.last %}\n                        {{- ", " }}\n                    {%- endif %}\n                {%- endfor %}\n                {{- "}}" }}\n                {%- if not loop.last %}\n                    {{- ", " }}\n                {%- else %}\n                    {{- "]" }}\n                {%- endif %}\n            {%- endfor %}\n            {{- "[/AVAILABLE_TOOLS]" }}\n        {%- endif %}\n        {%- if loop.first and system_message is defined %}\n            {{- "[INST] " + system_message + "\\n\\n" + message["content"] + "[/INST]" }}\n        {%- else %}\n            {{- "[INST] " + message["content"] + "[/INST]" }}\n        {%- endif %}\n    {%- elif message.tool_calls is defined and message.tool_calls is not none %}\n        {{- "[TOOL_CALLS] [" }}\n        {%- for tool_call in message.tool_calls %}\n            {%- set out = tool_call.function|tojson %}\n            {{- out[:-1] }}\n            {%- if not tool_call.id is defined or tool_call.id|length != 9 %}\n                {{- raise_exception("Tool call IDs should be alphanumeric strings with length 9!") }}\n            {%- endif %}\n            {{- \', "id": "\' + tool_call.id + \'"}\' }}\n            {%- if not loop.last %}\n                {{- ", " }}\n            {%- else %}\n                {{- "]" + eos_token }}\n            {%- endif %}\n        {%- endfor %}\n    {%- elif message["role"] == "assistant" %}\n        {{- " " + message["content"]|trim + eos_token}}\n    {%- elif message["role"] == "tool_results" or message["role"] == "tool" %}\n        {%- if message.content is defined and message.content.content is defined %}\n            {%- set content = message.content.content %}\n        {%- else %}\n            {%- set content = message.content %}\n        {%- endif %}\n        {{- \'[TOOL_RESULTS] {"content": \' + content|string + ", " }}\n        {%- if not message.tool_call_id is defined or message.tool_call_id|length != 9 %}\n            {{- raise_exception("Tool call IDs should be alphanumeric strings with length 9!") }}\n        {%- endif %}\n        {{- \'"call_id": "\' + message.tool_call_id + \'"}[/TOOL_RESULTS]\' }}\n    {%- else %}\n        {{- raise_exception("Only user and assistant roles are supported, with the exception of an initial optional system message!") }}\n    {%- endif %}\n{%- endfor %}\n'
@@ -6,7 +6,6 @@ import json

import pytest
from datasets import Dataset
from transformers import AutoTokenizer

from axolotl.prompt_strategies.chat_template import StrategyLoader
from axolotl.utils.dict import DictDefault
@@ -23,15 +22,6 @@ def fixture_messages_w_tools():
    return Dataset.from_list(rows)


@pytest.fixture(name="qwen3_tokenizer")
def qwen3_tokenizer_fixture(
    download_qwen3_half_billion_model,
):
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")

    return tokenizer


@pytest.fixture(name="qwen3_prompt_strategy")
def qwen3_chat_template_strategy(qwen3_tokenizer):
    cfg = DictDefault(
@@ -4,7 +4,6 @@ Tests for splitting reasoning/thinking from content into separate field
|
||||
|
||||
import pytest
|
||||
from datasets import Dataset
|
||||
from transformers import AutoTokenizer
|
||||
|
||||
from axolotl.prompt_strategies.chat_template import (
|
||||
load,
|
||||
@@ -56,15 +55,6 @@ def messages_w_reasoning_fixture():
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="qwen3_tokenizer")
|
||||
def qwen3_tokenizer_fixture(
|
||||
download_qwen3_half_billion_model,
|
||||
):
|
||||
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")
|
||||
|
||||
return tokenizer
|
||||
|
||||
|
||||
class TestSplitThinking:
|
||||
"""
|
||||
test class to make sure datasets with reasoning content conforms to the chat_template strategy
|
||||
|
||||
@@ -0,0 +1,214 @@
"""
Tests for handling json tool content
"""

import json

import pytest
from datasets import Dataset

from axolotl.prompt_strategies.chat_template import (
    load,
)
from axolotl.utils.dict import DictDefault


@pytest.fixture(name="qwen3_instruct_prompt_strategy")
def qwen3_instruct_chat_template_strategy(qwen3_tokenizer):
    strategy = load(
        qwen3_tokenizer,
        DictDefault(
            {
                "train_on_inputs": False,
                "sequence_len": 512,
            }
        ),
        DictDefault(
            {
                "chat_template": "qwen3",
                "message_field_role": "role",
                "message_field_content": "content",
                "message_property_mappings": {
                    "role": "role",
                    "content": "content",
                },
                "roles": {
                    "user": ["user"],
                    "assistant": ["assistant"],
                    "system": ["system"],
                },
                "field_messages": "messages",
            }
        ),
    )
    return strategy


class TestQwen3IdenticalConversationArgs:
    """
    Test Qwen3 tools is identical between JSON and dict
    """

    @pytest.fixture(name="conversation_dict_args_dataset")
    def fixture_conversation_dict_args_dataset(self):
        """
        Provides a dataset with conversation where arguments is a dict.
        """
        user_content = "What is the weather in Boston?"
        function_name = "get_current_weather"
        arguments_dict = {"location": "Boston, MA", "unit": "celsius"}

        data = [
            {
                "messages": [
                    {"role": "user", "content": user_content},
                    {
                        "role": "assistant",
                        "content": "",
                        "tool_calls": [
                            {
                                "function": {
                                    "name": function_name,
                                    "arguments": arguments_dict,  # dict format
                                }
                            }
                        ],
                    },
                ],
            }
        ]
        return Dataset.from_list(data)

    @pytest.fixture(name="conversation_str_args_dataset")
    def fixture_conversation_str_args_dataset(self):
        """
        Provides a dataset with conversation where arguments is a JSON string.
        """
        user_content = "What is the weather in Boston?"
        function_name = "get_current_weather"
        arguments_dict = {"location": "Boston, MA", "unit": "celsius"}
        arguments_str = json.dumps(arguments_dict)

        data = [
            {
                "messages": [
                    {"role": "user", "content": user_content},
                    {
                        "role": "assistant",
                        "content": "",
                        "tool_calls": [
                            {
                                "function": {
                                    "name": function_name,
                                    "arguments": arguments_str,  # str format
                                }
                            }
                        ],
                    },
                ],
            }
        ]
        return Dataset.from_list(data)

    @pytest.fixture(name="conversation_mixed_time_types_dataset")
    def fixture_conversation_mixed_time_types_dataset(self):
        """
        Provides a dataset where 'time' field has different types in different tool calls.
        """
        data = [
            {
                "messages": [
                    {
                        "role": "user",
                        "content": "Get weather information at different times",
                    },
                    {
                        "role": "assistant",
                        "content": "",
                        "tool_calls": [
                            {
                                "function": {
                                    "name": "func1",
                                    "arguments": json.dumps(
                                        {"time": "2025-08-01"}
                                    ),  # string type
                                }
                            },
                            {
                                "function": {
                                    "name": "func2",
                                    "arguments": json.dumps(
                                        {"time": 1690876800}
                                    ),  # number type
                                }
                            },
                        ],
                    },
                ],
            }
        ]
        return Dataset.from_list(data)

    def test_dict_and_str_args_produce_identical_output(
        self,
        conversation_dict_args_dataset,
        conversation_str_args_dataset,
        qwen3_instruct_prompt_strategy,
        qwen3_tokenizer,
    ):
        """
        Tests that after tokenization and decoding, the outputs for both
        dict and string `arguments` are exactly the same.
        """
        processed_dict_args = conversation_dict_args_dataset.map(
            qwen3_instruct_prompt_strategy.tokenize_prompt,
            batched=True,
            remove_columns=["messages"],
        )

        processed_str_args = conversation_str_args_dataset.map(
            qwen3_instruct_prompt_strategy.tokenize_prompt,
            batched=True,
            remove_columns=["messages"],
        )

        decoded_prompt_from_dict = qwen3_tokenizer.decode(
            processed_dict_args[0]["input_ids"]
        )

        decoded_prompt_from_str = qwen3_tokenizer.decode(
            processed_str_args[0]["input_ids"]
        )

        assert decoded_prompt_from_dict == decoded_prompt_from_str, (
            f"Dict format output:\n{decoded_prompt_from_dict}\n"
            f"String format output:\n{decoded_prompt_from_str}"
        )

        assert (
            processed_dict_args[0]["input_ids"] == processed_str_args[0]["input_ids"]
        ), "The tokenized input_ids should be identical for dict and str arguments"

    def test_str_args_with_mixed_time_types_no_error(
        self,
        conversation_mixed_time_types_dataset,
        qwen3_instruct_prompt_strategy,
        qwen3_tokenizer,
    ):
        """
        Tests that when 'time' field has different types (string vs number)
        in different tool calls, str format arguments don't cause errors.
        """
        processed = conversation_mixed_time_types_dataset.map(
            qwen3_instruct_prompt_strategy.tokenize_prompt,
            batched=True,
            remove_columns=["messages"],
        )

        assert len(processed) == 1
        assert "input_ids" in processed[0]
        assert len(processed[0]["input_ids"]) > 0

        decoded = qwen3_tokenizer.decode(processed[0]["input_ids"])
        assert "2025-08-01" in decoded, "String time value should be present"
        assert "1690876800" in decoded, "Number time value should be present"
@@ -1,111 +0,0 @@
"""Unit tests for choosing the correct context parallel implementation."""

from types import SimpleNamespace

from axolotl.train import execute_training
from axolotl.utils.dict import DictDefault


class DummyTrainer:
    """Minimal trainer stub to exercise execute_training."""

    def __init__(self):
        self.model = object()
        self.ref_model = None
        self.accelerator = SimpleNamespace(torch_device_mesh=None)
        self.train_called = False

    def train(self, resume_from_checkpoint=None):  # pylint: disable=unused-argument
        self.train_called = True


class DummyPluginManager:
    """Minimal plugin manager stub."""

    @staticmethod
    def post_train(cfg, model):  # pylint: disable=unused-argument
        return None


class DummyContext:
    """Test context manager that records entries/exits."""

    def __init__(self, recorder, **kwargs):
        recorder.append({"kwargs": kwargs})
        self.recorder = recorder

    def __enter__(self):
        self.recorder[-1]["entered"] = True
        return self

    def __exit__(self, exc_type, exc, tb):  # pylint: disable=unused-argument
        self.recorder[-1]["exited"] = True
        return False


def _base_cfg(**overrides):
    base = {
        "context_parallel_size": 2,
        "gradient_accumulation_steps": 1,
        "ring_attn_func": None,
        "heads_k_stride": None,
        "rl": None,
        "flash_optimum": False,
    }
    base.update(overrides)
    return DictDefault(base)


def test_execute_training_uses_ring_when_flash(monkeypatch):
    """FlashAttention CP should engage the custom ring context manager."""
    recorder: list[dict] = []

    monkeypatch.setattr(
        "axolotl.train.SequenceParallelContextManager",
        lambda **kwargs: DummyContext(recorder, **kwargs),
    )
    monkeypatch.setattr(
        "axolotl.train.PluginManager.get_instance",
        lambda: DummyPluginManager(),
    )

    cfg = _base_cfg(flash_attention=True, sdp_attention=False)
    trainer = DummyTrainer()

    execute_training(cfg, trainer, resume_from_checkpoint=None)

    assert trainer.train_called
    assert len(recorder) == 1
    assert recorder[0]["kwargs"]["context_parallel_size"] == 2
    assert recorder[0].get("entered") is True
    assert recorder[0].get("exited") is True


def test_execute_training_uses_transformers_cp_for_sdpa(monkeypatch):
    """SDPA CP should bypass the ring context manager."""
    invoked = {"count": 0}

    class NoOpContext:
        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):  # pylint: disable=unused-argument
            return False

    monkeypatch.setattr(
        "axolotl.train.SequenceParallelContextManager",
        lambda **kwargs: invoked.__setitem__("count", invoked["count"] + 1)
        or NoOpContext(),
    )
    monkeypatch.setattr(
        "axolotl.train.PluginManager.get_instance",
        lambda: DummyPluginManager(),
    )

    cfg = _base_cfg(flash_attention=False, sdp_attention=True)
    trainer = DummyTrainer()

    execute_training(cfg, trainer, resume_from_checkpoint=None)

    assert trainer.train_called
    assert invoked["count"] == 0
@@ -61,12 +61,50 @@ class TestFSDPValidation:
            },
            fsdp_version=2,
        )
        validated_cfg = validate_config(cfg)
        assert validated_cfg.fsdp_version == 2
        assert validated_cfg.fsdp_config.cpu_ram_efficient_loading is True

    def test_fsdp2_cpu_offload_pin_memory_requires_offload_params(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            fsdp_config={
                "cpu_offload_pin_memory": False,
                "offload_params": False,
            },
            fsdp_version=2,
        )
        with pytest.raises(
            ValueError,
            match="FSDP2 does not support load_in_8bit or load_in_4bit with cpu_ram_efficient_loading.",
            match="disabling cpu_offload_pin_memory requires enabling offload_params",
        ):
            validate_config(cfg)

    def test_fsdp1_cpu_offload_pin_memory_not_supported(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            fsdp_config={
                "cpu_offload_pin_memory": False,
                "offload_params": True,
            },
            fsdp_version=1,
        )
        with pytest.raises(
            ValueError,
            match="FSDP1 does not support disabling cpu_offload_pin_memory, please set `fsdp_version` to 2",
        ):
            validate_config(cfg)

    def test_fsdp2_cpu_offload_pin_memory_w_offload_params(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            fsdp_config={
                "cpu_offload_pin_memory": False,
                "offload_params": True,
            },
            fsdp_version=2,
        )
        validated_cfg = validate_config(cfg)
        assert validated_cfg.fsdp_config.cpu_offload_pin_memory is False
        assert validated_cfg.fsdp_config.offload_params is True

    def test_fsdp_prefixes_removed(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            fsdp_config={