Compare commits: llmcompres...attention_ (71 commits)

Commit SHAs (71):

ef883b6960, d0c4930dd5, 6ee7cb30fa, ba47adc24b, 0d71b0aa5f, 63aaccf85b,
ff0fe767c8, 8e4158cc0b, cd84325253, 0b140fef83, e4cfebe995, a6cac5dd32,
b71c0e3447, ddaebf8309, 679743087a, f720b6e72d, a980618fd0, 54960d4de0,
ed922796b7, 3dd9c3bf3f, 0ba7d362fa, e4f73bc98e, bcb59c70e2, 6a3e6f8c53,
fee3c13bb5, 996fc124e5, e963990ad7, c3f2b1c5c2, 6ba5c0ed2c, 24ff5f53f8,
5e949eaa07, 89ca14d9a0, 8446b4ad28, fc79606b6d, baeb00231b, 2413688b08,
5bb1f3da56, a21b9cc472, 41a1ec0c95, ecac731922, 742fef4200, a39caf8824,
07e4f2e25b, c7d07de6b4, 6565ae85d8, 80b4edb4a7, fedbcc0254, 8175896ada,
14d670dbf0, 2d77165dc0, 63b17e3109, 1178a15ede, c513487d1a, dda95e6c40,
7099343c56, 5000cb3fe7, 170cdb5be9, 5d182a1056, 40f4ea23ab, f1df73a798,
8b33ae1c4f, dc4da4a7e2, f9c7c3bb72, caf5cb63ea, 5dba5c82a8, e3c9d541a7,
9eba0ad118, 53dbf97d85, 2c2563bc34, 5cb3398460, ae1c7ace63
.github/workflows/base.yml (vendored, 6 changed lines)

```diff
@@ -22,12 +22,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: "124"
-            cuda_version: 12.4.1
-            cudnn_version: ""
-            python_version: "3.11"
-            pytorch: 2.4.1
-            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
           - cuda: "124"
             cuda_version: 12.4.1
             cudnn_version: ""
```
.github/workflows/main.yml (vendored, 15 changed lines)

```diff
@@ -15,16 +15,11 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
             pytorch: 2.5.1
-            axolotl_extras: vllm
+            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
@@ -35,7 +30,7 @@ jobs:
             cuda_version: 12.6.3
             python_version: "3.11"
             pytorch: 2.7.0
-            axolotl_extras: vllm
+            axolotl_extras:
     runs-on: axolotl-gpu-runner
     steps:
       - name: Checkout
@@ -67,6 +62,7 @@ jobs:
             CUDA=${{ matrix.cuda }}
             PYTORCH_VERSION=${{ matrix.pytorch }}
             AXOLOTL_ARGS=${{ matrix.axolotl_args }}
+            AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}
           file: ./docker/Dockerfile
           push: ${{ github.event_name != 'pull_request' }}
           tags: |
@@ -82,11 +78,6 @@ jobs:
     strategy:
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
```
.github/workflows/multi-gpu-e2e.yml (vendored, 11 changed lines)

```diff
@@ -8,6 +8,8 @@ on:
       - 'setup.py'
       - 'pyproject.toml'
       - '.github/workflows/multi-gpu-e2e.yml'
+      - 'src/axolotl/core/trainers/mixins/sequence_parallel.py'
+      - 'src/axolotl/utils/distributed.py'
   workflow_dispatch:
   schedule:
     - cron: '0 0 * * 1,4'  # Runs at 00:00 UTC every monday & thursday
@@ -31,18 +33,11 @@ jobs:
             axolotl_extras: vllm
             num_gpus: 2
             nightly_build: "true"
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:  # no vllm support for 2.4.1
-            num_gpus: 2
-            nightly_build: "true"
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
             pytorch: 2.5.1
-            axolotl_extras: vllm
+            axolotl_extras:
             num_gpus: 2
             nightly_build: "true"
           - cuda: 126
```
.github/workflows/nightlies.yml (vendored, 10 changed lines)

```diff
@@ -12,11 +12,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
@@ -70,11 +65,6 @@ jobs:
     strategy:
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
```
.github/workflows/preview-docs.yml (vendored, new file, 61 lines)

```yaml
name: Preview

on:
  workflow_dispatch:
  pull_request:
    types: [opened, synchronize, reopened]

    # Run the workflow only when one of these files changes
    paths:
      - '**/*.md'   # any Markdown file
      - '**/*.qmd'  # any Quarto file
      - '_quarto.yaml'

permissions:
  checks: write
  contents: write
  deployments: write
  issues: write
  discussions: write
  pages: write
  pull-requests: write
  statuses: write

jobs:
  preview:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository
        uses: actions/checkout@v4

      - name: Set up Quarto
        uses: quarto-dev/quarto-actions/setup@v2

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install dependencies
        run: |
          python3 -m pip install jupyter quartodoc
          python3 -m pip install -e . --no-deps

      - name: Build autodoc
        run: quartodoc build

      - name: Quarto render
        run: quarto render

      - name: Netlify Publish
        uses: nwtgck/actions-netlify@v3.0
        with:
          publish-dir: './_site'
          enable-pull-request-comment: true
          enable-github-deployment: true
          github-token: ${{ secrets.GITHUB_TOKEN }}
          deploy-message: "Deployed On Netlify"
          github-deployment-environment: 'preview'
          github-deployment-description: 'Preview Deployment'
        env:
          NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }}
          NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }}
```
.github/workflows/tests-nightly.yml (vendored, 9 changed lines)

```diff
@@ -26,7 +26,7 @@ jobs:
       max-parallel: 2
       matrix:
         python_version: ["3.11"]
-        pytorch_version: ["2.4.1", "2.5.1", "2.6.0"]
+        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
     timeout-minutes: 20
 
     steps:
@@ -106,13 +106,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            num_gpus: 1
-            axolotl_extras:
-            nightly_build: "true"
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
```
.github/workflows/tests.yml (vendored, 127 changed lines)

```diff
@@ -27,6 +27,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
+env:
+  TRANSFORMERS_IS_CI: "yes"
+
 jobs:
   pre-commit:
     name: pre-commit
@@ -41,15 +44,101 @@ jobs:
         env:
           SKIP: no-commit-to-branch
 
-  pytest:
-    name: PyTest
+  preload-cache:
+    name: Preload HF cache
     runs-on: ubuntu-latest
     strategy:
       fail-fast: false
-      max-parallel: 2
       matrix:
         python_version: ["3.11"]
-        pytorch_version: ["2.4.1", "2.5.1", "2.6.0", "2.7.0"]
+        pytorch_version: ["2.6.0"]
+    timeout-minutes: 20
+
+    env:
+      AXOLOTL_IS_CI_CACHE_PRELOAD: "1"
+
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v4
+
+      - name: Restore HF cache
+        id: hf-cache-restore
+        uses: actions/cache/restore@v4
+        with:
+          path: |
+            /home/runner/.cache/huggingface/hub/datasets--*
+            /home/runner/.cache/huggingface/hub/models--*
+          key: ${{ runner.os }}-hf-hub-cache-v2
+
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python_version }}
+          cache: 'pip'  # caching pip dependencies
+
+      - name: upgrade pip
+        run: |
+          pip3 install --upgrade pip
+          pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
+
+      - name: Install PyTorch
+        run: |
+          pip3 install torch==${{ matrix.pytorch_version }}
+
+      - name: Install dependencies
+        run: |
+          pip3 show torch
+          pip3 install --no-build-isolation -U -e .
+          python scripts/unsloth_install.py | sh
+          python scripts/cutcrossentropy_install.py | sh
+          pip3 install -r requirements-dev.txt -r requirements-tests.txt
+
+      - name: Make sure PyTorch version wasn't clobbered
+        run: |
+          python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"
+
+      - name: Ensure axolotl CLI was installed
+        run: |
+          axolotl --help
+
+      - name: Pre-Download dataset fixture
+        run: |
+          huggingface-cli download --repo-type=dataset axolotl-ai-internal/axolotl-oss-dataset-fixtures
+
+      - name: Run tests
+        run: |
+          pytest -v tests/conftest.py
+
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v5
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ./coverage.xml
+          flags: unittests,pytorch-${{ matrix.pytorch_version }}
+          fail_ci_if_error: false
+
+      - name: cleanup pip cache
+        run: |
+          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
+
+      - name: Save HF cache
+        id: hf-cache
+        uses: actions/cache/save@v4
+        with:
+          path: |
+            /home/runner/.cache/huggingface/hub/datasets--*
+            /home/runner/.cache/huggingface/hub/models--*
+          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
+
+  pytest:
+    name: PyTest
+    runs-on: ubuntu-latest
+    needs: [preload-cache]
+    strategy:
+      fail-fast: false
+      matrix:
+        python_version: ["3.11"]
+        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
     timeout-minutes: 20
 
     steps:
@@ -118,24 +207,15 @@ jobs:
         run: |
           find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
 
-      - name: Save HF cache
-        id: hf-cache
-        uses: actions/cache/save@v4
-        with:
-          path: |
-            /home/runner/.cache/huggingface/hub/datasets--*
-            /home/runner/.cache/huggingface/hub/models--*
-          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
-
   pytest-sdist:
     name: PyTest from Source Dist
     runs-on: ubuntu-latest
+    needs: [preload-cache]
     strategy:
       fail-fast: false
-      max-parallel: 1
       matrix:
         python_version: ["3.11"]
-        pytorch_version: ["2.4.1", "2.5.1", "2.6.0"]
+        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
     timeout-minutes: 20
 
     steps:
@@ -196,15 +276,6 @@ jobs:
         run: |
           find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
 
-      - name: Save HF cache
-        id: hf-cache
-        uses: actions/cache/save@v4
-        with:
-          path: |
-            /home/runner/.cache/huggingface/hub/datasets--*
-            /home/runner/.cache/huggingface/hub/models--*
-          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
-
   docker-e2e-tests-1st:
     if: ${{ ! contains(github.event.commits[0].message, '[skip e2e]') && github.repository_owner == 'axolotl-ai-cloud' }}
     # this job needs to be run on self-hosted GPU runners...
@@ -258,6 +329,12 @@ jobs:
       fail-fast: false
       matrix:
         include:
+          - cuda: 124
+            cuda_version: 12.4.1
+            python_version: "3.11"
+            pytorch: 2.6.0
+            num_gpus: 1
+            axolotl_extras: llmcompressor
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
@@ -269,7 +346,7 @@ jobs:
             python_version: "3.11"
             pytorch: 2.5.1
             num_gpus: 1
-            axolotl_extras: vllm
+            axolotl_extras:
           - cuda: 126
             cuda_version: 12.6.3
             python_version: "3.11"
```
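The new preload-cache job warms the same Hugging Face hub cache that the pytest and pytest-sdist jobs later restore. As a minimal illustration (not part of the diff), the workflow's `huggingface-cli download` step could equivalently be expressed through the huggingface_hub Python API, assuming huggingface_hub is installed:

```python
# Sketch: populate ~/.cache/huggingface/hub the same way the workflow's
# `huggingface-cli download` step does, via the huggingface_hub API.
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="axolotl-ai-internal/axolotl-oss-dataset-fixtures",
    repo_type="dataset",  # lands under hub/datasets--*, matching the cache paths above
)
```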
.runpod/.gitignore (vendored, new file, 161 lines)

```text
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
#   For a library or package, you might want to ignore these files since the code is
#   intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# poetry
#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
#   This is especially recommended for binary packages to ensure reproducibility, and is more
#   commonly ignored for libraries.
#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
#   in version control.
#   https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
#  and can be added to the global gitignore or merged into this file.  For a more nuclear
#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

pod/scripts/config.yaml
```
.runpod/Dockerfile (new file, 18 lines)

```dockerfile
FROM axolotlai/axolotl-cloud:main-py3.11-cu124-2.6.0

COPY .runpod/requirements.txt /requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip \
    python3 -m pip install --upgrade pip && \
    python3 -m pip install --upgrade -r /requirements.txt

# Environment settings
ARG BASE_VOLUME="/runpod-volume"
ENV BASE_VOLUME=$BASE_VOLUME
ENV HF_DATASETS_CACHE="${BASE_VOLUME}/huggingface-cache/datasets"
ENV HUGGINGFACE_HUB_CACHE="${BASE_VOLUME}/huggingface-cache/hub"
ENV TRANSFORMERS_CACHE="${BASE_VOLUME}/huggingface-cache/hub"

COPY .runpod/src /src

WORKDIR /src
CMD ["python3", "/src/handler.py"]
```
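The CMD above launches `/src/handler.py`, which is copied from `.runpod/src` but is not itself shown in this diff. For orientation, a hypothetical minimal skeleton of a handler of that shape follows; the `handler` body is an assumption, and only `runpod.serverless.start` is the documented runpod-python entrypoint:

```python
# Hypothetical skeleton of a /src/handler.py entrypoint; the real handler
# shipped in .runpod/src is not part of this diff.
import runpod


def handler(job):
    # job["input"] carries the request body documented in .runpod/README.md
    args = job["input"].get("args", {})
    # ... kick off the axolotl training run here ...
    return {"status": "ok", "base_model": args.get("base_model")}


runpod.serverless.start({"handler": handler})
```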
.runpod/README.md (new file, 335 lines)

<h1>LLM Post-Training: full fine-tune, LoRA, QLoRA, etc. for Llama/Mistral/Gemma and more</h1>

# Configuration Options

This document outlines all available configuration options for training models. The configuration is provided as a JSON request.

## Usage

Provide the options as a JSON request body:

```json
{
  "input": {
    "user_id": "user",
    "model_id": "model-name",
    "run_id": "run-id",
    "credentials": {
      "wandb_api_key": "", // add your Weights & Biases key. TODO: you will be able to set this in environment variables.
      "hf_token": ""       // add your HF token. TODO: you will be able to set this in environment variables.
    },
    "args": {
      "base_model": "NousResearch/Llama-3.2-1B"
      // ... other options
    }
  }
}
```

## Configuration Options

### Model Configuration

| Option | Description | Default |
|---|---|---|
| `base_model` | Path to the base model (local or HuggingFace) | Required |
| `base_model_config` | Configuration path for the base model | Same as base_model |
| `revision_of_model` | Specific model revision from HuggingFace hub | Latest |
| `tokenizer_config` | Custom tokenizer configuration path | Optional |
| `model_type` | Type of model to load | AutoModelForCausalLM |
| `tokenizer_type` | Type of tokenizer to use | AutoTokenizer |
| `hub_model_id` | Repository ID where the model will be pushed on Hugging Face Hub (format: username/repo-name) | Optional |

### Model Family Identification

| Option | Default | Description |
|---|---|---|
| `is_falcon_derived_model` | `false` | Whether model is Falcon-based |
| `is_llama_derived_model` | `false` | Whether model is LLaMA-based |
| `is_qwen_derived_model` | `false` | Whether model is Qwen-based |
| `is_mistral_derived_model` | `false` | Whether model is Mistral-based |

### Model Configuration Overrides

| Option | Default | Description |
|---|---|---|
| `overrides_of_model_config.rope_scaling.type` | `"linear"` | RoPE scaling type (linear/dynamic) |
| `overrides_of_model_config.rope_scaling.factor` | `1.0` | RoPE scaling factor |

### Model Loading Options

| Option | Description | Default |
|---|---|---|
| `load_in_8bit` | Load model in 8-bit precision | false |
| `load_in_4bit` | Load model in 4-bit precision | false |
| `bf16` | Use bfloat16 precision | false |
| `fp16` | Use float16 precision | false |
| `tf32` | Use TensorFloat-32 precision | false |

### Memory and Device Settings

| Option | Default | Description |
|---|---|---|
| `gpu_memory_limit` | `"20GiB"` | GPU memory limit |
| `lora_on_cpu` | `false` | Load LoRA on CPU |
| `device_map` | `"auto"` | Device mapping strategy |
| `max_memory` | `null` | Max memory per device |

### Training Hyperparameters

| Option | Default | Description |
|---|---|---|
| `gradient_accumulation_steps` | `1` | Gradient accumulation steps |
| `micro_batch_size` | `2` | Batch size per GPU |
| `eval_batch_size` | `null` | Evaluation batch size |
| `num_epochs` | `4` | Number of training epochs |
| `warmup_steps` | `100` | Warmup steps |
| `warmup_ratio` | `0.05` | Warmup ratio |
| `learning_rate` | `0.00003` | Learning rate |
| `lr_quadratic_warmup` | `false` | Quadratic warmup |
| `logging_steps` | `null` | Logging frequency |
| `eval_steps` | `null` | Evaluation frequency |
| `evals_per_epoch` | `null` | Evaluations per epoch |
| `save_strategy` | `"epoch"` | Checkpoint saving strategy |
| `save_steps` | `null` | Saving frequency |
| `saves_per_epoch` | `null` | Saves per epoch |
| `save_total_limit` | `null` | Maximum checkpoints to keep |
| `max_steps` | `null` | Maximum training steps |

### Dataset Configuration

```yaml
datasets:
  - path: vicgalle/alpaca-gpt4  # HuggingFace dataset; TODO: you will be able to add a local path.
    type: alpaca                # Format type (alpaca, gpteacher, oasst, etc.)
    ds_type: json               # Dataset type
    data_files: path/to/data    # Source data files
    train_on_split: train       # Dataset split to use
```

### Chat Template Settings

| Option | Default | Description |
|---|---|---|
| `chat_template` | `"tokenizer_default"` | Chat template type |
| `chat_template_jinja` | `null` | Custom Jinja template |
| `default_system_message` | `"You are a helpful assistant."` | Default system message |

### Dataset Processing

| Option | Default | Description |
|---|---|---|
| `dataset_prepared_path` | `"data/last_run_prepared"` | Path for prepared dataset |
| `push_dataset_to_hub` | `""` | Push dataset to HF hub |
| `dataset_processes` | `4` | Number of preprocessing processes |
| `dataset_keep_in_memory` | `false` | Keep dataset in memory |
| `shuffle_merged_datasets` | `true` | Shuffle merged datasets |
| `dataset_exact_deduplication` | `true` | Deduplicate datasets |

### LoRA Configuration

| Option | Default | Description |
|---|---|---|
| `adapter` | `"lora"` | Adapter type (lora/qlora) |
| `lora_model_dir` | `""` | Directory with pretrained LoRA |
| `lora_r` | `8` | LoRA attention dimension |
| `lora_alpha` | `16` | LoRA alpha parameter |
| `lora_dropout` | `0.05` | LoRA dropout |
| `lora_target_modules` | `["q_proj", "v_proj"]` | Modules to apply LoRA to |
| `lora_target_linear` | `false` | Target all linear modules |
| `peft_layers_to_transform` | `[]` | Layers to transform |
| `lora_modules_to_save` | `[]` | Modules to save |
| `lora_fan_in_fan_out` | `false` | Fan in/out structure |

### Optimization Settings

| Option | Default | Description |
|---|---|---|
| `train_on_inputs` | `false` | Train on input prompts |
| `group_by_length` | `false` | Group by sequence length |
| `gradient_checkpointing` | `false` | Use gradient checkpointing |
| `early_stopping_patience` | `3` | Early stopping patience |

### Learning Rate Scheduling

| Option | Default | Description |
|---|---|---|
| `lr_scheduler` | `"cosine"` | Scheduler type |
| `lr_scheduler_kwargs` | `{}` | Scheduler parameters |
| `cosine_min_lr_ratio` | `null` | Minimum LR ratio |
| `cosine_constant_lr_ratio` | `null` | Constant LR ratio |
| `lr_div_factor` | `null` | LR division factor |

### Optimizer Settings

| Option | Default | Description |
|---|---|---|
| `optimizer` | `"adamw_hf"` | Optimizer choice |
| `optim_args` | `{}` | Optimizer arguments |
| `optim_target_modules` | `[]` | Target modules |
| `weight_decay` | `null` | Weight decay |
| `adam_beta1` | `null` | Adam beta1 |
| `adam_beta2` | `null` | Adam beta2 |
| `adam_epsilon` | `null` | Adam epsilon |
| `max_grad_norm` | `null` | Gradient clipping |

### Attention Implementations

| Option | Default | Description |
|---|---|---|
| `flash_optimum` | `false` | Use BetterTransformer |
| `xformers_attention` | `false` | Use xFormers |
| `flash_attention` | `false` | Use Flash Attention |
| `flash_attn_cross_entropy` | `false` | Flash Attention cross entropy |
| `flash_attn_rms_norm` | `false` | Flash Attention RMS norm |
| `flash_attn_fuse_qkv` | `false` | Fuse QKV operations |
| `flash_attn_fuse_mlp` | `false` | Fuse MLP operations |
| `sdp_attention` | `false` | Use scaled dot product attention |
| `s2_attention` | `false` | Use shifted sparse attention |

### Tokenizer Modifications

| Option | Default | Description |
|---|---|---|
| `special_tokens` | - | Special tokens to add/modify |
| `tokens` | `[]` | Additional tokens |

### Distributed Training

| Option | Default | Description |
|---|---|---|
| `fsdp` | `null` | FSDP configuration |
| `fsdp_config` | `null` | FSDP config options |
| `deepspeed` | `null` | Deepspeed config path |
| `ddp_timeout` | `null` | DDP timeout |
| `ddp_bucket_cap_mb` | `null` | DDP bucket capacity |
| `ddp_broadcast_buffers` | `null` | DDP broadcast buffers |

<details>
<summary><h3>Example Configuration Request</h3></summary>

Here's a complete example for fine-tuning a LLaMA model using LoRA:

```json
{
  "input": {
    "user_id": "user",
    "model_id": "llama-test",
    "run_id": "test-run",
    "credentials": {
      "wandb_api_key": "",
      "hf_token": ""
    },
    "args": {
      "base_model": "NousResearch/Llama-3.2-1B",
      "load_in_8bit": false,
      "load_in_4bit": false,
      "strict": false,
      "datasets": [
        {
          "path": "teknium/GPT4-LLM-Cleaned",
          "type": "alpaca"
        }
      ],
      "dataset_prepared_path": "last_run_prepared",
      "val_set_size": 0.1,
      "output_dir": "./outputs/lora-out",
      "adapter": "lora",
      "sequence_len": 2048,
      "sample_packing": true,
      "eval_sample_packing": true,
      "pad_to_sequence_len": true,
      "lora_r": 16,
      "lora_alpha": 32,
      "lora_dropout": 0.05,
      "lora_target_modules": [
        "gate_proj",
        "down_proj",
        "up_proj",
        "q_proj",
        "v_proj",
        "k_proj",
        "o_proj"
      ],
      "gradient_accumulation_steps": 2,
      "micro_batch_size": 2,
      "num_epochs": 1,
      "optimizer": "adamw_8bit",
      "lr_scheduler": "cosine",
      "learning_rate": 0.0002,
      "train_on_inputs": false,
      "group_by_length": false,
      "bf16": "auto",
      "tf32": false,
      "gradient_checkpointing": true,
      "logging_steps": 1,
      "flash_attention": true,
      "loss_watchdog_threshold": 5,
      "loss_watchdog_patience": 3,
      "warmup_steps": 10,
      "evals_per_epoch": 4,
      "saves_per_epoch": 1,
      "weight_decay": 0,
      "hub_model_id": "runpod/llama-fr-lora",
      "wandb_name": "test-run-1",
      "wandb_project": "test-run-1",
      "wandb_entity": "axo-test",
      "special_tokens": {
        "pad_token": "<|end_of_text|>"
      }
    }
  }
}
```

</details>

### Advanced Features

#### Wandb Integration

- `wandb_project`: Project name for Weights & Biases
- `wandb_entity`: Team name in W&B
- `wandb_watch`: Monitor model with W&B
- `wandb_name`: Name of the W&B run
- `wandb_run_id`: ID for the W&B run

#### Performance Optimization

- `sample_packing`: Enable efficient sequence packing
- `eval_sample_packing`: Use sequence packing during evaluation
- `torch_compile`: Enable PyTorch 2.0 compilation
- `flash_attention`: Use the Flash Attention implementation
- `xformers_attention`: Use the xFormers attention implementation

### Available Optimizers

The following optimizers are supported:

- `adamw_hf`: HuggingFace's AdamW implementation
- `adamw_torch`: PyTorch's AdamW
- `adamw_torch_fused`: Fused AdamW implementation
- `adamw_torch_xla`: XLA-optimized AdamW
- `adamw_apex_fused`: NVIDIA Apex fused AdamW
- `adafactor`: Adafactor optimizer
- `adamw_anyprecision`: Anyprecision AdamW
- `adamw_bnb_8bit`: 8-bit AdamW from bitsandbytes
- `lion_8bit`: 8-bit Lion optimizer
- `lion_32bit`: 32-bit Lion optimizer
- `sgd`: Stochastic Gradient Descent
- `adagrad`: Adagrad optimizer

## Notes

- Set `load_in_8bit: true` or `load_in_4bit: true` for memory-efficient training
- Enable `flash_attention: true` for faster training on modern GPUs
- Use `gradient_checkpointing: true` to reduce memory usage
- Adjust `micro_batch_size` and `gradient_accumulation_steps` based on your GPU memory

For more detailed information, please refer to the [documentation](https://axolotl-ai-cloud.github.io/axolotl/docs/config.html).

### Errors

- If you run into issues with Flash Attention 2, delete your worker and restart it.
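To show how this JSON request is actually submitted, here is a sketch of posting it to a deployed endpoint through RunPod's serverless REST API. `ENDPOINT_ID` and `API_KEY` are placeholders, and the payload is trimmed down from the full example above:

```python
# Sketch: POST the training request to a RunPod serverless endpoint.
import requests

ENDPOINT_ID = "your-endpoint-id"  # placeholder
API_KEY = "your-runpod-api-key"   # placeholder

payload = {
    "input": {
        "user_id": "user",
        "model_id": "llama-test",
        "run_id": "test-run",
        "credentials": {"wandb_api_key": "", "hf_token": ""},
        "args": {"base_model": "NousResearch/Llama-3.2-1B", "num_epochs": 1},
    }
}

resp = requests.post(
    f"https://api.runpod.ai/v2/{ENDPOINT_ID}/run",
    json=payload,
    headers={"Authorization": f"Bearer {API_KEY}"},
    timeout=30,
)
print(resp.json())
```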
.runpod/hub.json (new file, 93 lines)

```json
{
  "title": "Axolotl Fine-Tuning",
  "description": "Serverless fine-tuning of open-source LLMs with Axolotl. Supports LoRA, QLoRA, DPO, and more using Hugging Face models and datasets.",
  "type": "serverless",
  "category": "language",
  "iconUrl": "https://avatars.githubusercontent.com/u/167502477",
  "config": {
    "runsOn": "GPU",
    "containerDiskInGb": 200,
    "gpuCount": 1,
    "allowedCudaVersions": ["12.8", "12.7", "12.6", "12.5", "12.4"],
    "presets": [],
    "env": [
      {
        "key": "TOKENIZER",
        "input": {
          "name": "Tokenizer",
          "type": "string",
          "description": "Name or path of the Hugging Face tokenizer to use.",
          "default": "",
          "advanced": true
        }
      },
      {
        "key": "MAX_NUM_SEQS",
        "input": {
          "name": "Max Num Seqs",
          "type": "number",
          "description": "Maximum number of sequences per iteration.",
          "default": 256,
          "advanced": true
        }
      },
      {
        "key": "DISABLE_LOG_STATS",
        "input": {
          "name": "Disable Log Stats",
          "type": "boolean",
          "description": "Disable logging statistics.",
          "default": false,
          "trueValue": "true",
          "falseValue": "false"
        }
      },
      {
        "key": "LOAD_FORMAT",
        "input": {
          "name": "Load Format",
          "type": "string",
          "description": "The format of the model weights to load.",
          "default": "auto",
          "options": [
            { "label": "auto", "value": "auto" },
            { "label": "pt", "value": "pt" },
            { "label": "safetensors", "value": "safetensors" },
            { "label": "npcache", "value": "npcache" },
            { "label": "dummy", "value": "dummy" },
            { "label": "tensorizer", "value": "tensorizer" },
            { "label": "bitsandbytes", "value": "bitsandbytes" }
          ],
          "advanced": true
        }
      }
    ]
  }
}
```
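hub.json declares the environment variables RunPod exposes to the container. A sketch of how a handler might read them, with defaults mirroring the schema above; the parsing code itself is illustrative and not from the repo:

```python
# Sketch: read the env vars declared in .runpod/hub.json, using the schema's defaults.
import os

tokenizer = os.environ.get("TOKENIZER", "")
max_num_seqs = int(os.environ.get("MAX_NUM_SEQS", "256"))
disable_log_stats = os.environ.get("DISABLE_LOG_STATS", "false") == "true"
load_format = os.environ.get("LOAD_FORMAT", "auto")  # auto|pt|safetensors|npcache|dummy|tensorizer|bitsandbytes
```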
.runpod/requirements.txt (new file, 7 lines)

```text
# Required Python packages get listed here, one per line.
# Recommended to lock the version number to avoid unexpected changes.

# You can also install packages from a git repository, e.g.:
# git+https://github.com/runpod/runpod-python.git
# To learn more, see https://pip.pypa.io/en/stable/reference/requirements-file-format/
runpod~=1.7.0
```
577
.runpod/src/config/config.yaml
Normal file
577
.runpod/src/config/config.yaml
Normal file
@@ -0,0 +1,577 @@
|
|||||||
|
# # This is the huggingface model that contains *.pt, *.safetensors, or *.bin files
|
||||||
|
# # This can also be a relative path to a model on disk
|
||||||
|
# base_model: ./llama-7b-hf
|
||||||
|
# # You can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)
|
||||||
|
# base_model_ignore_patterns:
|
||||||
|
# # If the base_model repo on hf hub doesn't include configuration .json files,
|
||||||
|
# # You can set that here, or leave this empty to default to base_model
|
||||||
|
# base_model_config: ./llama-7b-hf
|
||||||
|
# # You can specify to choose a specific model revision from huggingface hub
|
||||||
|
# model_revision:
|
||||||
|
# # Optional tokenizer configuration override in case you want to use a different tokenizer
|
||||||
|
# # than the one defined in the base model
|
||||||
|
# tokenizer_config:
|
||||||
|
# # If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too
|
||||||
|
# model_type: AutoModelForCausalLM
|
||||||
|
# # Corresponding tokenizer for the model AutoTokenizer is a good choice
|
||||||
|
# tokenizer_type: AutoTokenizer
|
||||||
|
# # Trust remote code for untrusted source
|
||||||
|
# trust_remote_code:
|
||||||
|
# # use_fast option for tokenizer loading from_pretrained, default to True
|
||||||
|
# tokenizer_use_fast:
|
||||||
|
# # Whether to use the legacy tokenizer setting, defaults to True
|
||||||
|
# tokenizer_legacy:
|
||||||
|
# # Resize the model embeddings when new tokens are added to multiples of 32
|
||||||
|
# # This is reported to improve training speed on some models
|
||||||
|
# resize_token_embeddings_to_32x:
|
||||||
|
|
||||||
|
# # Used to identify which the model is based on
|
||||||
|
# is_falcon_derived_model:
|
||||||
|
# is_llama_derived_model:
|
||||||
|
# # Please note that if you set this to true, `padding_side` will be set to "left" by default
|
||||||
|
# is_mistral_derived_model:
|
||||||
|
# is_qwen_derived_model:
|
||||||
|
|
||||||
|
# # optional overrides to the base model configuration
|
||||||
|
# model_config:
|
||||||
|
# # RoPE Scaling https://github.com/huggingface/transformers/pull/24653
|
||||||
|
# rope_scaling:
|
||||||
|
# type: # linear | dynamic
|
||||||
|
# factor: # float
|
||||||
|
|
||||||
|
|
||||||
|
# # Whether you are training a 4-bit GPTQ quantized model
|
||||||
|
# gptq: true
|
||||||
|
# gptq_groupsize: 128 # group size
|
||||||
|
# gptq_model_v1: false # v1 or v2
|
||||||
|
|
||||||
|
# # This will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer
|
||||||
|
# load_in_8bit: true
|
||||||
|
# # Use bitsandbytes 4 bit
|
||||||
|
# load_in_4bit:
|
||||||
|
|
||||||
|
# # Use CUDA bf16
|
||||||
|
# bf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere
|
||||||
|
# # Use CUDA fp16
|
||||||
|
# fp16: true
|
||||||
|
# # Use CUDA tf32
|
||||||
|
# tf32: true # require >=ampere
|
||||||
|
|
||||||
|
# # No AMP (automatic mixed precision)
|
||||||
|
# bfloat16: true # require >=ampere
|
||||||
|
# float16: true
|
||||||
|
|
||||||
|
# # A list of one or more datasets to finetune the model with
|
||||||
|
# datasets:
|
||||||
|
# # HuggingFace dataset repo | s3://,gs:// path | "json" for local dataset, make sure to fill data_files
|
||||||
|
# - path: vicgalle/alpaca-gpt4
|
||||||
|
# # The type of prompt to use for training. [alpaca, sharegpt, gpteacher, oasst, reflection]
|
||||||
|
# type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>
|
||||||
|
# ds_type: # Optional[str] (json|arrow|parquet|text|csv) defines the datatype when path is a file
|
||||||
|
# data_files: # Optional[str] path to source data files
|
||||||
|
# shards: # Optional[int] number of shards to split data into
|
||||||
|
# name: # Optional[str] name of dataset configuration to load
|
||||||
|
# train_on_split: train # Optional[str] name of dataset split to load from
|
||||||
|
|
||||||
|
# # Optional[str] fastchat conversation type, only used with type: sharegpt
|
||||||
|
# conversation: # Options (see Conversation 'name'): https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
|
||||||
|
# field_human: # Optional[str]. Human key to use for conversation.
|
||||||
|
# field_model: # Optional[str]. Assistant key to use for conversation.
|
||||||
|
|
||||||
|
# # Custom user prompt
|
||||||
|
# - path: repo
|
||||||
|
# type:
|
||||||
|
# # The below are defaults. only set what's needed.
|
||||||
|
# system_prompt: ""
|
||||||
|
# system_format: "{system}"
|
||||||
|
# field_system: system
|
||||||
|
# field_instruction: instruction
|
||||||
|
# field_input: input
|
||||||
|
# field_output: output
|
||||||
|
|
||||||
|
# # Customizable to be single line or multi-line
|
||||||
|
# # 'format' can include {input}
|
||||||
|
# format: |-
|
||||||
|
# User: {instruction} {input}
|
||||||
|
# Assistant:
|
||||||
|
# # 'no_input_format' cannot include {input}
|
||||||
|
# no_input_format: "{instruction} "
|
||||||
|
|
||||||
|
# # For `completion` datsets only, uses the provided field instead of `text` column
|
||||||
|
# field:
|
||||||
|
|
||||||
|
# # Axolotl attempts to save the dataset as an arrow after packing the data together so
|
||||||
|
# # subsequent training attempts load faster, relative path
|
||||||
|
# dataset_prepared_path: data/last_run_prepared
|
||||||
|
# # Push prepared dataset to hub
|
||||||
|
# push_dataset_to_hub: # repo path
|
||||||
|
# # The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`
|
||||||
|
# # if not set.
|
||||||
|
# dataset_processes: # defaults to os.cpu_count() if not set
|
||||||
|
# # push checkpoints to hub
|
||||||
|
# hub_model_id: # repo path to push finetuned model
|
||||||
|
# # how to push checkpoints to hub
|
||||||
|
# # https://huggingface.co/docs/transformers/v4.31.0/en/main_classes/trainer#transformers.TrainingArguments.hub_strategy
|
||||||
|
# hub_strategy:
|
||||||
|
# # Whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets
|
||||||
|
# # Required to be true when used in combination with `push_dataset_to_hub`
|
||||||
|
# hf_use_auth_token: # boolean
|
||||||
|
# # How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc. 0 for no eval.
|
||||||
|
# val_set_size: 0.04
|
||||||
|
# # Num shards for whole dataset
|
||||||
|
# dataset_shard_num:
|
||||||
|
# # Index of shard to use for whole dataset
|
||||||
|
# dataset_shard_idx:
|
||||||
|
|
||||||
|
# # The maximum length of an input to train with, this should typically be less than 2048
|
||||||
|
# # as most models have a token/context limit of 2048
|
||||||
|
# sequence_len: 2048
|
||||||
|
# # Pad inputs so each step uses constant sized buffers
|
||||||
|
# # This will reduce memory fragmentation and may prevent OOMs, by re-using memory more efficiently
|
||||||
|
# pad_to_sequence_len:
|
||||||
|
# # Max sequence length to concatenate training samples together up to
|
||||||
|
# # Inspired by StackLLaMA. see https://huggingface.co/blog/stackllama#supervised-fine-tuning
|
||||||
|
# # FutureWarning: This will soon be DEPRECATED
|
||||||
|
# max_packed_sequence_len: 1024
|
||||||
|
# # Use efficient multi-packing with block diagonal attention and per sequence position_ids. Recommend set to 'true'
|
||||||
|
# sample_packing:
|
||||||
|
# # Set to 'false' if getting errors during eval with sample_packing on.
|
||||||
|
# eval_sample_packing:
|
||||||
|
# # You can set these packing optimizations AFTER starting a training at least once.
|
||||||
|
# # The trainer will provide recommended values for these values.
|
||||||
|
# sample_packing_eff_est:
|
||||||
|
# total_num_tokens:
|
||||||
|
|
||||||
|
# # If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model
|
||||||
|
# adapter: lora
|
||||||
|
# # If you already have a lora model trained that you want to load, put that here.
|
||||||
|
# # This means after training, if you want to test the model, you should set this to the value of `lora_out_dir`.
|
||||||
|
# lora_model_dir:
|
||||||
|
|
||||||
|
# # LoRA hyperparameters
|
||||||
|
# # For more details about the following options, see:
|
||||||
|
# # https://www.anyscale.com/blog/fine-tuning-llms-lora-or-full-parameter-an-in-depth-analysis-with-llama-2
|
||||||
|
# lora_r: 8
|
||||||
|
# lora_alpha: 16
|
||||||
|
# lora_dropout: 0.05
|
||||||
|
# lora_target_modules:
|
||||||
|
# - q_proj
|
||||||
|
# - v_proj
|
||||||
|
# # - k_proj
|
||||||
|
# # - o_proj
|
||||||
|
# # - gate_proj
|
||||||
|
# # - down_proj
|
||||||
|
# # - up_proj
|
||||||
|
# lora_target_linear: # If true, will target all linear layers
|
||||||
|
|
||||||
|
# # If you added new tokens to the tokenizer, you may need to save some LoRA modules because they need to know the new tokens.
|
||||||
|
# # For LLaMA and Mistral, you need to save `embed_tokens` and `lm_head`. It may vary for other models.
|
||||||
|
# # `embed_tokens` converts tokens to embeddings, and `lm_head` converts embeddings to token probabilities.
|
||||||
|
# # https://github.com/huggingface/peft/issues/334#issuecomment-1561727994
|
||||||
|
# lora_modules_to_save:
|
||||||
|
# # - embed_tokens
|
||||||
|
# # - lm_head
|
||||||
|
|
||||||
|
# # Once you complete training, the model will be saved to the following directory.
|
||||||
|
# # If you merge the adapter to the base model, a subdirectory `merged` will be created under this directory.
|
||||||
|
# # Make sure `lora_model_dir` points to this directory if you want to use the trained model.
|
||||||
|
# lora_out_dir:
|
||||||
|
# lora_fan_in_fan_out: false
|
||||||
|
|
||||||
|
# # ReLoRA configuration
|
||||||
|
# # Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed
|
||||||
|
# relora_steps: # Number of steps per ReLoRA restart
|
||||||
|
# relora_warmup_steps: # Number of per-restart warmup steps
|
||||||
|
# relora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings
|
||||||
|
|
||||||
|
# # wandb configuration if you're using it
|
||||||
|
# wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb
|
||||||
|
# wandb_project: # Your wandb project name
|
||||||
|
# wandb_entity: # A wandb Team name if using a Team
|
||||||
|
# wandb_watch:
|
||||||
|
# wandb_run_id: # Set the name of your wandb run
|
||||||
|
# wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training
|
||||||
|
|
||||||
|
# # Where to save the full-finetuned model to
|
||||||
|
# output_dir: ./completed-model
|
||||||
|
|
||||||
|
# # Whether to use torch.compile and which backend to use
|
||||||
|
# torch_compile: # bool
|
||||||
|
# torch_compile_backend: # Optional[str]
|
||||||
|
|
||||||
|
# # Training hyperparameters
|
||||||
|
|
||||||
|
# # If greater than 1, backpropagation will be skipped and the gradients will be accumulated for the given number of steps.
|
||||||
|
# gradient_accumulation_steps: 1
|
||||||
|
# # The number of samples to include in each batch. This is the number of samples sent to each GPU.
|
||||||
|
# micro_batch_size: 2
|
||||||
|
# eval_batch_size:
|
||||||
|
# num_epochs: 4
|
||||||
|
# warmup_steps: 100 # cannot use with warmup_ratio
|
||||||
|
# warmup_ratio: 0.05 # cannot use with warmup_steps
|
||||||
|
# learning_rate: 0.00003
|
||||||
|
# lr_quadratic_warmup:
|
||||||
|
# logging_steps:
|
||||||
|
# save_strategy: # Set to `no` to skip checkpoint saves
|
||||||
|
# save_steps: # Leave empty to save at each epoch
|
||||||
|
# eval_steps: # Leave empty to eval at each epoch, integers for every N steps. decimal for fraction of total steps
|
||||||
|
# save_total_limit: # Checkpoints saved at a time
|
||||||
|
# # Maximum number of iterations to train for. It precedes num_epochs which means that
|
||||||
|
# # if both are set, num_epochs will not be guaranteed.
|
||||||
|
# # e.g., when 1 epoch is 1000 steps => `num_epochs: 2` and `max_steps: 100` will train for 100 steps
|
||||||
|
# max_steps:
# eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0
# eval_table_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128

# # Save model as safetensors (requires the safetensors package)
# save_safetensors:

# # Whether to mask out or include the human's prompt from the training labels
# train_on_inputs: false

# # Group similarly sized data to minimize padding.
# # May be slower to start, as it must download and sort the entire dataset.
# # Note that training loss may have an oscillating pattern with this enabled.
# group_by_length: false

# # Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing
# gradient_checkpointing: false

# # Stop training after this many evaluation losses have increased in a row
# # https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback
# early_stopping_patience: 3

# # Specify a scheduler and kwargs to use with the optimizer
# lr_scheduler: # 'one_cycle' | 'log_sweep' | empty for cosine
# lr_scheduler_kwargs:

# # For one_cycle optim
# lr_div_factor: # Learning rate div factor

# # For log_sweep optim
# log_sweep_min_lr:
# log_sweep_max_lr:

# # Specify optimizer
# # Valid values are driven by the Transformers OptimizerNames class, see:
# # https://github.com/huggingface/transformers/blob/95b374952dc27d8511541d6f5a4e22c9ec11fb24/src/transformers/training_args.py#L134
# #
# # Note that not all optimizers may be available in your environment, e.g. 'adamw_anyprecision' is part of
# # torchdistx, 'adamw_bnb_8bit' is part of bnb.optim.Adam8bit, etc. When in doubt, it is recommended to start with the optimizer used
# # in the examples/ for your model and fine-tuning use case.
# #
# # Valid values for 'optimizer' include:
# # - adamw_hf
# # - adamw_torch
# # - adamw_torch_fused
# # - adamw_torch_xla
# # - adamw_apex_fused
# # - adafactor
# # - adamw_anyprecision
# # - sgd
# # - adagrad
# # - adamw_bnb_8bit
# # - lion_8bit
# # - lion_32bit
# # - paged_adamw_32bit
# # - paged_adamw_8bit
# # - paged_lion_32bit
# # - paged_lion_8bit
# optimizer:
# # Specify weight decay
# weight_decay:
# # adamw hyperparams
# adam_beta1:
# adam_beta2:
# adam_epsilon:
# # Gradient clipping max norm
# max_grad_norm:

# # Augmentation techniques
# # NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings
# # currently only supported on Llama and Mistral
# noisy_embedding_alpha:

# # Whether to use BetterTransformers
# flash_optimum:
# # Whether to use xformers attention patch https://github.com/facebookresearch/xformers:
# xformers_attention:
# # Whether to use flash attention patch https://github.com/Dao-AILab/flash-attention:
# flash_attention:
# flash_attn_cross_entropy: # Whether to use flash-attention cross entropy implementation - advanced use only
# flash_attn_rms_norm: # Whether to use flash-attention rms norm implementation - advanced use only
# flash_attn_fuse_qkv: # Whether to fuse QKV into a single operation
# flash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation
# # Whether to use scaled-dot-product attention
# # https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
# sdp_attention:
# # Landmark attention (only llama)
# landmark_attention:
# # xpos RoPE see https://github.com/kaiokendev/cutoff-len-is-context-len/blob/main/util/xpos_rope_llama_monkey_patch.py
# # LLaMA only
# xpos_rope:

# # Resume from a specific checkpoint dir
# resume_from_checkpoint:
# # If resume_from_checkpoint isn't set and you simply want training to resume where it left off.
# # Be careful with this being turned on between different models.
# auto_resume_from_checkpoints: false

# # Don't mess with this, it's here for accelerate and torchrun
# local_rank:

# # Add or change special tokens.
# # If you add tokens here, you don't need to add them to the `tokens` list.
# special_tokens:
# # bos_token: "<s>"
# # eos_token: "</s>"
# # unk_token: "<unk>"

# # Add extra tokens.
# tokens:

# # FSDP
# fsdp:
# fsdp_config:

# # Deepspeed config path, e.g. deepspeed/zero3.json
# deepspeed:

# # Advanced DDP Arguments
# ddp_timeout:
# ddp_bucket_cap_mb:
# ddp_broadcast_buffers:

# # Path to torch distx for optim 'adamw_anyprecision'
# torchdistx_path:

# # Set to an HF dataset for type: 'completion' to stream data instead of pre-tokenizing
# pretraining_dataset:

# # Debug mode
# debug:

# # Seed
# seed:

# # Allow overwriting the YAML config from the CLI
# strict:

base_model: ${BASE_MODEL}
base_model_ignore_patterns: ${BASE_MODEL_IGNORE_PATTERNS}
base_model_config: ${BASE_MODEL_CONFIG}
revision_of_model: ${REVISION_OF_MODEL}
tokenizer_config: ${TOKENIZER_CONFIG}
model_type: ${MODEL_TYPE}
tokenizer_type: ${TOKENIZER_TYPE}
trust_remote_code: ${TRUST_REMOTE_CODE}
tokenizer_use_fast: ${TOKENIZER_USE_FAST}
tokenizer_legacy: ${TOKENIZER_LEGACY}
resize_token_embeddings_to_32x: ${RESIZE_TOKEN_EMBEDDINGS_TO_32X}

is_falcon_derived_model: ${IS_FALCON_DERIVED_MODEL}
is_llama_derived_model: ${IS_LLAMA_DERIVED_MODEL}
is_qwen_derived_model: ${IS_QWEN_DERIVED_MODEL}
is_mistral_derived_model: ${IS_MISTRAL_DERIVED_MODEL}

overrides_of_model_config:
  rope_scaling:
    type: ${ROPE_SCALING_TYPE}
    factor: ${ROPE_SCALING_FACTOR}

bnb_config_kwargs:
  llm_int8_has_fp16_weight: ${BNB_LLM_INT8_HAS_FP16_WEIGHT}
  bnb_4bit_quant_type: ${BNB_4BIT_QUANT_TYPE}
  bnb_4bit_use_double_quant: ${BNB_4BIT_USE_DOUBLE_QUANT}

gptq: ${GPTQ}
load_in_8bit: ${LOAD_IN_8BIT}
load_in_4bit: ${LOAD_IN_4BIT}
bf16: ${BF16}
fp16: ${FP16}
tf32: ${TF32}
bfloat16: ${BFLOAT16}
float16: ${FLOAT16}

gpu_memory_limit: ${GPU_MEMORY_LIMIT}
lora_on_cpu: ${LORA_ON_CPU}

datasets:
  - path: ${DATASET_PATH}
    type: ${DATASET_TYPE}
    ds_type: ${DATASET_DS_TYPE}
    data_files: ${DATASET_DATA_FILES}
    shards: ${DATASET_SHARDS}
    name: ${DATASET_NAME}
    train_on_split: ${DATASET_TRAIN_ON_SPLIT}
    revision: ${DATASET_REVISION}
    trust_remote_code: ${DATASET_TRUST_REMOTE_CODE}

rl: ${RL}
dpo_use_weighting: ${DPO_USE_WEIGHTING}

chat_template: ${CHAT_TEMPLATE}
chat_template_jinja: ${CHAT_TEMPLATE_JINJA}
default_system_message: ${DEFAULT_SYSTEM_MESSAGE}
dataset_prepared_path: ${DATASET_PREPARED_PATH}
push_dataset_to_hub: ${PUSH_DATASET_TO_HUB}
dataset_processes: ${DATASET_PROCESSES}
dataset_keep_in_memory: ${DATASET_KEEP_IN_MEMORY}
hub_model_id: ${HUB_MODEL_ID}
hub_strategy: ${HUB_STRATEGY}
hf_use_auth_token: ${HF_USE_AUTH_TOKEN}
val_set_size: ${VAL_SET_SIZE}
dataset_shard_num: ${DATASET_SHARD_NUM}
dataset_shard_idx: ${DATASET_SHARD_IDX}

sequence_len: ${SEQUENCE_LEN}
pad_to_sequence_len: ${PAD_TO_SEQUENCE_LEN}
sample_packing: ${SAMPLE_PACKING}
eval_sample_packing: ${EVAL_SAMPLE_PACKING}
sample_packing_eff_est: ${SAMPLE_PACKING_EFF_EST}
total_num_tokens: ${TOTAL_NUM_TOKENS}
sample_packing_group_size: ${SAMPLE_PACKING_GROUP_SIZE}
sample_packing_bin_size: ${SAMPLE_PACKING_BIN_SIZE}

batch_flattening: ${BATCH_FLATTENING}
device_map: ${DEVICE_MAP}
max_memory: ${MAX_MEMORY}

adapter: ${ADAPTER}
lora_model_dir: ${LORA_MODEL_DIR}

lora_r: ${LORA_R}
lora_alpha: ${LORA_ALPHA}
lora_dropout: ${LORA_DROPOUT}
lora_target_modules:
  - ${LORA_TARGET_MODULES}
lora_target_linear: ${LORA_TARGET_LINEAR}
peft_layers_to_transform: ${PEFT_LAYERS_TO_TRANSFORM}
lora_modules_to_save: ${LORA_MODULES_TO_SAVE}
lora_fan_in_fan_out: ${LORA_FAN_IN_FAN_OUT}

loraplus_lr_ratio: ${LORAPLUS_LR_RATIO}
loraplus_lr_embedding: ${LORAPLUS_LR_EMBEDDING}

peft:
  loftq_config:
    loftq_bits: ${LOFTQ_BITS}

relora_steps: ${RELORA_STEPS}
relora_warmup_steps: ${RELORA_WARMUP_STEPS}
relora_anneal_steps: ${RELORA_ANNEAL_STEPS}
relora_prune_ratio: ${RELORA_PRUNE_RATIO}
relora_cpu_offload: ${RELORA_CPU_OFFLOAD}

wandb_mode: ${WANDB_MODE}
wandb_project: ${WANDB_PROJECT}
wandb_entity: ${WANDB_ENTITY}
wandb_watch: ${WANDB_WATCH}
wandb_name: ${WANDB_NAME}
wandb_run_id: ${WANDB_RUN_ID}
wandb_log_model: ${WANDB_LOG_MODEL}

mlflow_tracking_uri: ${MLFLOW_TRACKING_URI}
mlflow_experiment_name: ${MLFLOW_EXPERIMENT_NAME}
mlflow_run_name: ${MLFLOW_RUN_NAME}
hf_mlflow_log_artifacts: ${HF_MLFLOW_LOG_ARTIFACTS}

use_comet: ${USE_COMET}
comet_api_key: ${COMET_API_KEY}
comet_workspace: ${COMET_WORKSPACE}
comet_project_name: ${COMET_PROJECT_NAME}
comet_experiment_key: ${COMET_EXPERIMENT_KEY}
comet_mode: ${COMET_MODE}
comet_online: ${COMET_ONLINE}
comet_experiment_config: ${COMET_EXPERIMENT_CONFIG}

output_dir: ${OUTPUT_DIR}

torch_compile: ${TORCH_COMPILE}
torch_compile_backend: ${TORCH_COMPILE_BACKEND}

gradient_accumulation_steps: ${GRADIENT_ACCUMULATION_STEPS}
micro_batch_size: ${MICRO_BATCH_SIZE}
eval_batch_size: ${EVAL_BATCH_SIZE}
num_epochs: ${NUM_EPOCHS}
warmup_steps: ${WARMUP_STEPS}
warmup_ratio: ${WARMUP_RATIO}
learning_rate: ${LEARNING_RATE}
lr_quadratic_warmup: ${LR_QUADRATIC_WARMUP}
logging_steps: ${LOGGING_STEPS}
eval_steps: ${EVAL_STEPS}
evals_per_epoch: ${EVALS_PER_EPOCH}
save_strategy: ${SAVE_STRATEGY}
save_steps: ${SAVE_STEPS}
saves_per_epoch: ${SAVES_PER_EPOCH}
save_total_limit: ${SAVE_TOTAL_LIMIT}
max_steps: ${MAX_STEPS}

eval_table_size: ${EVAL_TABLE_SIZE}
eval_max_new_tokens: ${EVAL_MAX_NEW_TOKENS}
eval_causal_lm_metrics: ${EVAL_CAUSAL_LM_METRICS}

profiler_steps: ${PROFILER_STEPS}
loss_watchdog_threshold: ${LOSS_WATCHDOG_THRESHOLD}
loss_watchdog_patience: ${LOSS_WATCHDOG_PATIENCE}

save_safetensors: ${SAVE_SAFETENSORS}
train_on_inputs: ${TRAIN_ON_INPUTS}
group_by_length: ${GROUP_BY_LENGTH}
gradient_checkpointing: ${GRADIENT_CHECKPOINTING}
early_stopping_patience: ${EARLY_STOPPING_PATIENCE}

lr_scheduler: ${LR_SCHEDULER}
lr_scheduler_kwargs: ${LR_SCHEDULER_KWARGS}
cosine_min_lr_ratio: ${COSINE_MIN_LR_RATIO}
cosine_constant_lr_ratio: ${COSINE_CONSTANT_LR_RATIO}
lr_div_factor: ${LR_DIV_FACTOR}

optimizer: ${OPTIMIZER}
optim_args: ${OPTIM_ARGS}
optim_target_modules: ${OPTIM_TARGET_MODULES}
weight_decay: ${WEIGHT_DECAY}
adam_beta1: ${ADAM_BETA1}
adam_beta2: ${ADAM_BETA2}
adam_epsilon: ${ADAM_EPSILON}
max_grad_norm: ${MAX_GRAD_NORM}

neftune_noise_alpha: ${NEFTUNE_NOISE_ALPHA}

flash_optimum: ${FLASH_OPTIMUM}
xformers_attention: ${XFORMERS_ATTENTION}
flash_attention: ${FLASH_ATTENTION}
flash_attn_cross_entropy: ${FLASH_ATTN_CROSS_ENTROPY}
flash_attn_rms_norm: ${FLASH_ATTN_RMS_NORM}
flash_attn_fuse_qkv: ${FLASH_ATTN_FUSE_QKV}
flash_attn_fuse_mlp: ${FLASH_ATTN_FUSE_MLP}
sdp_attention: ${SDP_ATTENTION}
s2_attention: ${S2_ATTENTION}
resume_from_checkpoint: ${RESUME_FROM_CHECKPOINT}
auto_resume_from_checkpoints: ${AUTO_RESUME_FROM_CHECKPOINTS}

local_rank: ${LOCAL_RANK}

special_tokens:
  bos_token: ${SPECIAL_TOKEN_BOS}
  eos_token: ${SPECIAL_TOKEN_EOS}
  unk_token: ${SPECIAL_TOKEN_UNK}
  pad_token: ${SPECIAL_TOKEN_PAD}

tokens: ${TOKENS}

fsdp: ${FSDP}
fsdp_config: ${FSDP_CONFIG}
deepspeed: ${DEEPSPEED}

ddp_timeout: ${DDP_TIMEOUT}
ddp_bucket_cap_mb: ${DDP_BUCKET_CAP_MB}
ddp_broadcast_buffers: ${DDP_BROADCAST_BUFFERS}

torchdistx_path: ${TORCHDISTX_PATH}
pretraining_dataset: ${PRETRAINING_DATASET}
debug: ${DEBUG}
seed: ${SEED}
strict: ${STRICT}
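This template leaves every value as a `${...}` placeholder; the substitution step itself isn't shown in this diff, though the launcher presumably exports matching environment variables (see `utils.py` below) and renders them into the template. A minimal sketch of that rendering step, assuming a template file like the one above (the `render_config` helper and file paths are illustrative, not from the repo):

```python
import os
import re

# Matches ${VAR_NAME} placeholders like those in the template above.
_PLACEHOLDER = re.compile(r"\$\{([A-Za-z0-9_]+)\}")


def render_config(template_path: str, output_path: str) -> None:
    """Substitute ${VAR} placeholders with environment variable values.

    Unset variables become empty strings, so the corresponding YAML keys
    stay null and fall back to axolotl's defaults.
    """
    with open(template_path, "r", encoding="utf-8") as fin:
        text = fin.read()
    rendered = _PLACEHOLDER.sub(lambda m: os.environ.get(m.group(1), ""), text)
    with open(output_path, "w", encoding="utf-8") as fout:
        fout.write(rendered)
```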
.runpod/src/handler.py (new file, 64 lines)
@@ -0,0 +1,64 @@
"""
Runpod serverless entrypoint handler
"""

import os

import runpod
import yaml
from huggingface_hub._login import login
from train import train
from utils import get_output_dir

BASE_VOLUME = os.environ.get("BASE_VOLUME", "/runpod-volume")
if not os.path.exists(BASE_VOLUME):
    os.makedirs(BASE_VOLUME)

logger = runpod.RunPodLogger()


async def handler(job):
    runpod_job_id = job["id"]
    inputs = job["input"]
    run_id = inputs.get("run_id", "default_run_id")
    args = inputs.get("args", {})

    # Set output directory
    output_dir = os.path.join(BASE_VOLUME, get_output_dir(run_id))
    args["output_dir"] = output_dir

    # First save args to a temporary config file
    config_path = "/workspace/test_config.yaml"

    # Add run_name and job_id to args before saving
    args["run_name"] = run_id
    args["runpod_job_id"] = runpod_job_id

    yaml_data = yaml.dump(args, default_flow_style=False)
    with open(config_path, "w", encoding="utf-8") as file:
        file.write(yaml_data)

    # Handle credentials
    credentials = inputs.get("credentials", {})

    if "wandb_api_key" in credentials:
        os.environ["WANDB_API_KEY"] = credentials["wandb_api_key"]
    if "hf_token" in credentials:
        os.environ["HF_TOKEN"] = credentials["hf_token"]

    if os.environ.get("HF_TOKEN"):
        login(token=os.environ["HF_TOKEN"])
    else:
        logger.info("No HF_TOKEN provided. Skipping login.")

    logger.info("Starting Training.")
    async for result in train(config_path):  # Pass the config path instead of args
        logger.info(result)
    logger.info("Training Complete.")

    # Cleanup: use pop() so a missing key doesn't raise KeyError when no
    # credentials were provided.
    os.environ.pop("WANDB_API_KEY", None)
    os.environ.pop("HF_TOKEN", None)


runpod.serverless.start({"handler": handler, "return_aggregate_stream": True})
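Before deploying, it can be worth sanity-checking that a sample payload carries the fields `handler()` actually reads (`run_id`, `args`, `credentials`). A small illustrative check against the `test_input.json` added below (this harness is not part of the diff):

```python
import json

# Load the sample payload and report the fields handler() consumes.
with open("test_input.json", "r", encoding="utf-8") as f:
    job = json.load(f)

inputs = job.get("input", {})
print("run_id:", inputs.get("run_id", "default_run_id"))  # handler's fallback
print("args keys:", sorted(inputs.get("args", {})))
print("credentials provided:", sorted(k for k, v in inputs.get("credentials", {}).items() if v))
```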
.runpod/src/test_input.json (new file, 61 lines)
@@ -0,0 +1,61 @@
{
  "input": {
    "user_id": "user",
    "model_id": "llama-test",
    "run_id": "llama-test",
    "credentials": {
      "wandb_api_key": "",
      "hf_token": ""
    },
    "args": {
      "base_model": "NousResearch/Meta-Llama-3-8B",
      "model_type": "LlamaForCausalLM",
      "tokenizer_type": "AutoTokenizer",
      "load_in_8bit": true,
      "load_in_4bit": false,
      "strict": false,
      "datasets": [
        {
          "path": "mhenrichsen/alpaca_2k_test",
          "type": "alpaca"
        }
      ],
      "val_set_size": 0.05,
      "output_dir": "./outputs/lora-out",
      "sequence_len": 4096,
      "sample_packing": true,
      "eval_sample_packing": false,
      "pad_to_sequence_len": true,
      "adapter": "lora",
      "lora_r": 32,
      "lora_alpha": 16,
      "lora_dropout": 0.05,
      "lora_target_linear": true,
      "lora_modules_to_save": [
        "embed_tokens",
        "lm_head"
      ],
      "gradient_accumulation_steps": 4,
      "micro_batch_size": 2,
      "num_epochs": 1,
      "optimizer": "adamw_bnb_8bit",
      "lr_scheduler": "cosine",
      "learning_rate": 0.0002,
      "train_on_inputs": false,
      "group_by_length": false,
      "bf16": "auto",
      "tf32": false,
      "gradient_checkpointing": true,
      "logging_steps": 1,
      "flash_attention": true,
      "warmup_steps": 1,
      "evals_per_epoch": 1,
      "eval_max_new_tokens": 128,
      "saves_per_epoch": 1,
      "weight_decay": 0.0,
      "special_tokens": {
        "pad_token": "<|end_of_text|>"
      }
    }
  }
}
.runpod/src/train.py (new file, 45 lines)
@@ -0,0 +1,45 @@
"""
Runpod train entrypoint
"""

import asyncio


async def train(config_path: str, gpu_id: str = "0", preprocess: bool = True):
    """
    Run preprocessing (if enabled) and training with the given config file

    :param config_path: Path to the YAML config file
    :param gpu_id: GPU ID to use (default: "0")
    :param preprocess: Whether to run preprocessing (default: True)
    """
    # First check if preprocessing is needed
    if preprocess:
        # Preprocess command
        preprocess_cmd = (
            f"CUDA_VISIBLE_DEVICES={gpu_id} axolotl preprocess {config_path}"
        )
        process = await asyncio.create_subprocess_shell(
            preprocess_cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT,
        )

        if process.stdout is not None:
            async for line in process.stdout:
                yield f"Preprocessing: {line.decode().strip()}"
        await process.wait()
        yield "Preprocessing completed."
    else:
        yield "Skipping preprocessing step."

    # Training command
    train_cmd = f"axolotl train {config_path}"
    process = await asyncio.create_subprocess_shell(
        train_cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT
    )

    if process.stdout is not None:
        async for line in process.stdout:
            yield f"Training: {line.decode().strip()}"
    await process.wait()
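Because `train` is an async generator that yields log lines as the subprocesses emit them, a standalone caller has to drive it from an event loop, e.g. (the `config.yaml` path here is illustrative):

```python
import asyncio

from train import train


async def main() -> None:
    # Stream preprocessing and training log lines as they are produced.
    async for line in train("config.yaml", gpu_id="0", preprocess=True):
        print(line)


asyncio.run(main())
```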
.runpod/src/utils.py (new file, 89 lines)
@@ -0,0 +1,89 @@
"""
Runpod launcher utils
"""

import os

import yaml


def get_output_dir(run_id):
    path = f"fine-tuning/{run_id}"
    return path


def make_valid_config(input_args):
    """
    Creates and saves updated config file, returns the path to the new config

    :param input_args: dict of input args
    :return: str, path to the updated config file
    """
    # Load default config
    with open("config/config.yaml", "r", encoding="utf-8") as fin:
        all_args = yaml.safe_load(fin)

    if not input_args:
        print("No args provided, using defaults")
    else:
        all_args.update(input_args)

    # Create updated config path
    updated_config_path = "config/updated_config.yaml"

    # Save updated config to new file
    with open(updated_config_path, "w", encoding="utf-8") as f:
        yaml.dump(all_args, f)

    return updated_config_path


def set_config_env_vars(args: dict):
    """
    Convert API arguments into environment variables.
    Handles nested dictionaries, lists, and special values.

    Args:
        args (dict): The arguments dictionary from the API request
    """

    def process_value(value):
        """Convert Python values to string format for environment variables"""
        if value is None:
            return ""
        if isinstance(value, bool):
            return str(value).lower()
        # Lists, dicts, and everything else fall back to str()
        return str(value)

    def set_env_vars(data, prefix=""):
        """Recursively set environment variables from nested dictionary"""
        for key, value in data.items():
            env_key = prefix + key.upper()

            # Handle special cases
            if isinstance(value, dict):
                # For nested dictionaries (like special_tokens)
                set_env_vars(value, f"{env_key}_")
            elif isinstance(value, list):
                # Handle list of dictionaries (like datasets)
                if value and isinstance(value[0], dict):
                    for i, item in enumerate(value):
                        set_env_vars(item, f"{env_key}_{i}_")
                else:
                    # For simple lists (like lora_target_modules)
                    os.environ[env_key] = process_value(value)
            else:
                # Handle all other cases
                os.environ[env_key] = process_value(value)

    # Clear any existing related environment variables
    # This prevents old values from persisting
    for key in list(os.environ.keys()):
        if key.startswith(
            ("BASE_MODEL", "MODEL_TYPE", "TOKENIZER_TYPE", "DATASET", "LORA_", "WANDB_")
        ):
            del os.environ[key]

    # Set new environment variables
    set_env_vars(args)
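To make the flattening concrete, here is how `set_config_env_vars` maps nested request args onto environment variables (input values borrowed from the test payloads in this diff; note the helper's own key scheme, e.g. `DATASETS_0_PATH`):

```python
import os

from utils import set_config_env_vars

set_config_env_vars(
    {
        "base_model": "HuggingFaceTB/SmolLM2-135M",
        "load_in_4bit": True,
        "special_tokens": {"pad_token": "<|endoftext|>"},
        "datasets": [{"path": "mhenrichsen/alpaca_2k_test", "type": "alpaca"}],
    }
)

print(os.environ["BASE_MODEL"])                # HuggingFaceTB/SmolLM2-135M
print(os.environ["LOAD_IN_4BIT"])              # true (booleans are lowercased)
print(os.environ["SPECIAL_TOKENS_PAD_TOKEN"])  # <|endoftext|>
print(os.environ["DATASETS_0_PATH"])           # mhenrichsen/alpaca_2k_test
```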
.runpod/test-input.json (new file, 86 lines)
@@ -0,0 +1,86 @@
{
  "input": {
    "name": "quick_smoke_test_sft",
    "user_id": "user",
    "model_id": "llama-test",
    "run_id": "llama-test",
    "credentials": {
      "wandb_api_key": "",
      "hf_token": ""
    },
    "args": {
      "base_model": "HuggingFaceTB/SmolLM2-135M",
      "model_type": "AutoModelForCausalLM",
      "tokenizer_type": "AutoTokenizer",
      "load_in_4bit": true,
      "strict": false,
      "datasets": [
        {
          "path": "mhenrichsen/alpaca_2k_test",
          "type": "alpaca",
          "split": "train[:10%]"
        }
      ],
      "val_set_size": 0.02,
      "output_dir": "./outputs/lora-out",
      "sequence_len": 4096,
      "sample_packing": true,
      "eval_sample_packing": false,
      "pad_to_sequence_len": true,
      "adapter": "qlora",
      "lora_r": 32,
      "lora_alpha": 64,
      "lora_dropout": 0.05,
      "lora_target_linear": true,
      "lora_modules_to_save": [
        "embed_tokens",
        "lm_head"
      ],
      "gradient_accumulation_steps": 2,
      "micro_batch_size": 1,
      "num_epochs": 1,
      "optimizer": "adamw_torch_fused",
      "lr_scheduler": "cosine",
      "learning_rate": 0.0002,
      "train_on_inputs": false,
      "group_by_length": false,
      "bf16": "auto",
      "tf32": true,
      "gradient_checkpointing": true,
      "logging_steps": 1,
      "flash_attention": true,
      "warmup_steps": 1,
      "evals_per_epoch": 1,
      "eval_max_new_tokens": 128,
      "saves_per_epoch": 1,
      "weight_decay": 0.0,
      "special_tokens": {
        "pad_token": "<|endoftext|>"
      },
      "max_steps": 20
    },
    "timeout": 100000
  },
  "config": {
    "gpuTypeId": "NVIDIA GeForce RTX 4090",
    "gpuCount": 1,
    "containerDiskInGb": 200,
    "env": [
      {
        "key": "TOKENIZER",
        "value": ""
      },
      {
        "key": "DISABLE_LOG_STATS",
        "value": "true"
      }
    ],
    "allowedCudaVersions": [
      "12.8",
      "12.7",
      "12.6",
      "12.5",
      "12.4"
    ]
  }
}
.runpod/tests.json (new file, 90 lines)
@@ -0,0 +1,90 @@
{
  "tests": [
    {
      "name": "quick_smoke_test_sft",
      "input": {
        "user_id": "user",
        "model_id": "llama-test",
        "run_id": "llama-test",
        "credentials": {
          "wandb_api_key": "",
          "hf_token": ""
        },
        "args": {
          "base_model": "HuggingFaceTB/SmolLM2-135M",
          "model_type": "AutoModelForCausalLM",
          "tokenizer_type": "AutoTokenizer",
          "load_in_4bit": true,
          "strict": false,
          "datasets": [
            {
              "path": "mhenrichsen/alpaca_2k_test",
              "type": "alpaca",
              "split": "train[:10%]"
            }
          ],
          "val_set_size": 0.02,
          "output_dir": "./outputs/lora-out",
          "sequence_len": 4096,
          "sample_packing": true,
          "eval_sample_packing": false,
          "pad_to_sequence_len": true,
          "adapter": "qlora",
          "lora_r": 32,
          "lora_alpha": 64,
          "lora_dropout": 0.05,
          "lora_target_linear": true,
          "lora_modules_to_save": [
            "embed_tokens",
            "lm_head"
          ],
          "gradient_accumulation_steps": 2,
          "micro_batch_size": 1,
          "num_epochs": 1,
          "optimizer": "adamw_torch_fused",
          "lr_scheduler": "cosine",
          "learning_rate": 0.0002,
          "train_on_inputs": false,
          "group_by_length": false,
          "bf16": "auto",
          "tf32": true,
          "gradient_checkpointing": true,
          "logging_steps": 1,
          "flash_attention": true,
          "warmup_steps": 1,
          "evals_per_epoch": 1,
          "eval_max_new_tokens": 128,
          "saves_per_epoch": 1,
          "weight_decay": 0.0,
          "special_tokens": {
            "pad_token": "<|endoftext|>"
          },
          "max_steps": 20
        }
      },
      "timeout": 100000
    }
  ],
  "config": {
    "gpuTypeId": "NVIDIA GeForce RTX 4090",
    "gpuCount": 1,
    "containerDiskInGb": 200,
    "env": [
      {
        "key": "TOKENIZER",
        "value": ""
      },
      {
        "key": "DISABLE_LOG_STATS",
        "value": "true"
      }
    ],
    "allowedCudaVersions": [
      "12.8",
      "12.7",
      "12.6",
      "12.5",
      "12.4"
    ]
  }
}
@@ -52,4 +52,4 @@ pytest -v --durations=10 \
   --cov-append \
   --cov-report=xml:e2e-coverage.xml

-codecov upload-process -t $CODECOV_TOKEN -f e2e-coverage.xml -F e2e,pytorch-${PYTORCH_VERSION}
+codecov upload-process -t $CODECOV_TOKEN -f e2e-coverage.xml -F e2e,pytorch-${PYTORCH_VERSION} || true
@@ -20,4 +20,4 @@ pytest -v --durations=10 -n1 /workspace/axolotl/tests/e2e/multigpu/patched/ \
   --cov-report=xml:multigpu-coverage.xml

 # Upload coverage to Codecov
-codecov upload-process -t $CODECOV_TOKEN -f multigpu-coverage.xml -F multigpu,docker-tests,pytorch-${PYTORCH_VERSION}
+codecov upload-process -t "${CODECOV_TOKEN}" -f multigpu-coverage.xml -F multigpu,docker-tests,pytorch-${PYTORCH_VERSION} || true
@@ -1,5 +1,7 @@
 codecov:
   require_ci_to_pass: yes
+  notify:
+    wait_for_ci: true

 coverage:
   precision: 2
@@ -32,6 +32,8 @@ tokenizer_legacy:
 resize_token_embeddings_to_32x:
 # Optional[bool] Whether to shrink the embeddings to len(tokenizer). By default, we won't shrink.
 shrink_embeddings:
+# Optional[bool] Don't upcast the embeddings to float32 when using PEFT. Useful for low-VRAM GPUs
+embeddings_skip_upcast:
 # Whether to load the model with randomly initialized weights. Useful for
 # pre-training a model from scratch or debugging purposes.
 random_init_weights:
@@ -73,11 +75,12 @@ load_in_8bit: true
 load_in_4bit:

 # Use CUDA bf16
-bf16: true # bool or 'full' for `bf16_full_eval`. require >=ampere
+bf16: true # bool or 'full' for `bf16_full_eval`, or 'auto' for automatic detection. require >=ampere
 # Use CUDA fp16
 fp16: true
 # Use CUDA tf32
 tf32: true # require >=ampere
+# Note: if bf16 is set to 'auto', and fp16 is set to true, we will prefer the explicit fp16 setting

 # No AMP (automatic mixed precision)
 bfloat16: true # require >=ampere
@@ -154,6 +157,10 @@ datasets:
     # Key containing the messages (default: "messages")
     field_messages: messages

+    # Key containing the system message (default: "system")
+    # If the system message is not present in the dataset sample, it will be loaded from this field.
+    field_system: system
+
     # Mapping of properties from the input dataset to the chat template.
     # (default: message_property_mappings={'role':'role', 'content':'content'})
     # If a property exists in the template but not in this mapping, the system will attempt
@@ -180,10 +187,14 @@ datasets:
     # adding a system turn with empty content.
     drop_system_message:

+    # Optional[bool]. (for Qwen3 template only) Whether to split the assistant content based on a reasoning trace inside delimited tags
+    # See example at `docs/dataset-formats/conversation.qmd`
+    split_thinking:
+
     # IMPORTANT: The following fields determine which parts of the conversation to train on.
     # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train
     # See examples at `docs/dataset-formats/conversation.qmd`
-    # Note: If the below 4 fields are set to empty, defaults to training only on the last message.
+    # Note: If the below 5 fields are empty, defaults to training only on the last message.

     # Optional[List[str]]. Roles to train on. The tokens from these roles will be considered for the loss.
     roles_to_train: ["assistant"] # default
@@ -192,7 +203,13 @@ datasets:
     # - turn (default): train on the EOS token at the end of each trainable turn
     # - last: train on the last EOS token in the conversation
     # TIP: Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.
-    train_on_eos: last
+    train_on_eos: turn
+    # Optional[str]. Which EOT (End-of-Turn) tokens to train on in the conversation. Possible values are:
+    # - all: train on all EOT tokens
+    # - turn: train on the EOT token at the end of each trainable turn
+    # - last: train on the last EOT token in the conversation
+    # If not specified, defaults to the value of train_on_eos for backward compatibility.
+    train_on_eot:
     # The key in the message turn that indicates via boolean whether tokens of a turn should be considered for training. Useful to selectively train on certain turns besides the `roles_to_train`.
     message_field_training: training
     # The key in the message turn that contains the training details. Useful to selectively train on certain tokens in a turn.
@@ -275,8 +292,17 @@ process_reward_model:
 chat_template: tokenizer_default
 # custom jinja template for chat template. This will be only used if chat_template is set to `jinja` or `null` (in which case chat_template is automatically set to `jinja`). Default is null.
 chat_template_jinja: null
-# Changes the default system message. Currently only supports chatml.
-default_system_message: You are a helpful assistant. Please give a long and detailed answer.
+# Optional[List[str]]. Custom EOT (End-of-Turn) tokens to mask/unmask during training.
+# These tokens mark the boundaries between conversation turns.
+# For example: ["/INST", "</s>", "[/SYSTEM_PROMPT]"]
+# If not specified, defaults to just the model's eos_token.
+# This is useful for templates that use multiple delimiter tokens.
+eot_tokens:
+# - "</s>"
+# - "[/INST]"
+# - "[/SYSTEM_PROMPT]"
+# Changes the default system message
+default_system_message: You are a helpful assistant. Please give a long and detailed answer. # Currently only supports chatml.
 # Axolotl attempts to save the dataset as an arrow after packing the data together so
 # subsequent training attempts load faster, relative path
 dataset_prepared_path: data/last_run_prepared
@@ -524,7 +550,7 @@ gradient_checkpointing: false
 early_stopping_patience: 3

 # Specify a scheduler and kwargs to use with the optimizer
-lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine
+lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | 'linear' | 'cosine_with_restarts' | 'polynomial' | 'constant' | 'constant_with_warmup' | 'inverse_sqrt' | 'reduce_lr_on_plateau' | 'cosine_with_min_lr' | 'warmup_stable_decay' | empty for cosine
 lr_scheduler_kwargs:
 cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr
 cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)
@@ -661,8 +687,10 @@ special_tokens:
   # unk_token: "<unk>"
   # pad_token: "[PAD]"

-# Add extra tokens.
+# Optional[list[str]]. Add extra tokens to the tokenizer.
 tokens:
+# - "<|startoftext|>"
+# - "<|endoftext|>"

 # Mapping token_id to new_token_string to override reserved added_tokens in the tokenizer.
 # Only works for tokens that are not part of the base vocab (aka are added_tokens).
@@ -4,18 +4,6 @@ description: Conversation format for supervised fine-tuning.
 order: 3
 ---

-## sharegpt
-
-::: {.callout-important}
-ShareGPT is deprecated!. Please see [chat_template](#chat_template) section below.
-:::
-
-## pygmalion
-
-```{.json filename="data.jsonl"}
-{"conversations": [{"role": "...", "value": "..."}]}
-```
-
 ## chat_template

 Chat Template strategy uses a jinja2 template that converts a list of messages into a prompt. Support using tokenizer's template, a supported template, or custom jinja2.
@@ -64,7 +52,7 @@ We recommend checking the below examples for other usecases.

 ### Examples

-1. Using the default chat template in the tokenizer_config.json on OpenAI messages format, training on only last message.
+1. (Legacy) Using the default chat template in the tokenizer_config.json on OpenAI messages format, training on only last message.

 ```yaml
 datasets:
@@ -109,10 +97,55 @@ datasets:
 ```

 ::: {.callout-important}
-Please make sure that your `tokenizer.eos_token` is same as EOS/EOT token in template. Otherwise, set `eos_token` under `special_tokens`.
+Please make sure that your `tokenizer.eos_token` is same as EOS (End-of-Sequence) token in template. Otherwise, set `eos_token` under `special_tokens: `.
 :::

-5. (Advanced) Using fine-grained control over tokens and turns to train in a conversation
+5. If you are using a template that has a different EOT (End-of-Turn) token from the EOS token, or multiple EOT tokens (like Mistral V7 Tekken), set the `eot_tokens: ` config. The handling of EOT tokens follows `train_on_eos: `, which defaults to turn.
+
+```yaml
+eot_tokens:
+  - "[/INST]"
+  # - "[/SYSTEM_PROMPT]"
+
+datasets:
+  - path: ...
+    type: chat_template
+
+    # optional
+    train_on_eot: turn  # defaults read from train_on_eos (which defaults to turn)
+```
+
+::: {.callout-tip}
+See [config documentation](../config.qmd) for detailed explanations of "turn", "last", and "all" options for training on tokens.
+:::
+
+::: {.callout-note}
+Using `eot_tokens` requires each token that exists in `chat_template` to be a single token in the tokenizer. Otherwise, the tokenizer will split the token and cause unexpected behavior.
+
+You can add those tokens as new tokens under `tokens: ` or (recommended) override unused added_tokens via `added_tokens_overrides: `. See [config](../config.qmd) for more details.
+:::
+
+6. Continuing from the previous example, if you want to train on the EOT token of every trainable turn but only the last EOS token, set `train_on_eos: last`.
+
+```yaml
+eot_tokens:
+  - "[/INST]"
+  # ...
+
+datasets:
+  - path: ...
+    type: chat_template
+
+    train_on_eos: last
+    train_on_eot: turn
+```
+
+::: {.callout-tip}
+If the EOS token only appears at the end of a prompt, `train_on_eos: last` is equivalent to `train_on_eos: turn`. Therefore, you can generally leave them at their defaults and omit them.
+:::
+
+7. (Advanced) Using fine-grained control over tokens and turns to train in a conversation

 For a data sample that looks like:
@@ -162,3 +195,43 @@ datasets:
 ::: {.callout-tip}
 It is not necessary to set both `message_field_training` and `message_field_training_detail` at once.
 :::
+
+8. (For Qwen3 template only) Enable reasoning split, where the reasoning is split from the content and passed as a separate field into the template.
+
+```yaml
+datasets:
+  - path: ...
+    type: chat_template
+    chat_template: qwen3
+    split_thinking: true
+```
+
+For example, the content can look like:
+
+```json
+{
+    "content": "<think>Some thinking outputs</think>Output after thinking."
+}
+```
+
+After the split, it will look like:
+
+```json
+{
+    "reasoning_content": "Some thinking outputs",
+    "content": "Output after thinking..."
+}
+```
+
+
+## sharegpt
+
+::: {.callout-important}
+ShareGPT is deprecated! Please see the [chat_template](#chat_template) section.
+:::
+
+## pygmalion
+
+```{.json filename="data.jsonl"}
+{"conversations": [{"role": "...", "value": "..."}]}
+```
@@ -28,6 +28,8 @@ main-base-py{python_version}-cu{cuda_version}-{pytorch_version}

 Tags examples:

+- `main-base-py3.11-cu128-2.7.0`
+- `main-base-py3.11-cu126-2.7.0`
 - `main-base-py3.11-cu124-2.6.0`
 - `main-base-py3.11-cu124-2.5.1`
 - `main-base-py3.11-cu124-2.4.1`
@@ -50,7 +52,7 @@ Link: [Docker Hub](https://hub.docker.com/r/axolotlai/axolotl)
 # on push to main
 main-py{python_version}-cu{cuda_version}-{pytorch_version}

-# latest main (currently torch 2.5.1, python 3.11, cuda 12.4)
+# latest main (currently torch 2.6.0, python 3.11, cuda 12.4)
 main-latest

 # nightly build
@@ -68,6 +70,7 @@ There may be some extra tags appended to the image, like `-vllm` which installs

 Tags examples:

+- `main-py3.11-cu126-2.7.0`
 - `main-py3.11-cu124-2.6.0`
 - `main-py3.11-cu124-2.5.1`
 - `main-py3.11-cu124-2.4.1`
docs/faq.qmd (34 changed lines)
@@ -73,10 +73,40 @@ description: Frequently asked questions

 > A: This is likely an empty turn.

-**Q: The EOS/EOT token is incorrectly being masked or not being masked.**
+**Q: The EOS token is incorrectly being masked or not being masked / `EOS token __ not found in chat template`.**

-> A: This is because of the mismatch between `tokenizer.eos_token` and EOS/EOT token in template. Please make sure to set `eos_token` under `special_tokens` to the same EOS/EOT token as in template.
+> A: There can be two reasons:
+
+> 1. There is a mismatch between `tokenizer.eos_token` and the EOS token in the template. Please make sure to set `eos_token: ` under `special_tokens: ` to the same EOS token as in the template.
+
+> 2. The EOS token is not in the template. Please check if your template is correct. As an example, the `phi_35` template does not use its dedicated EOS token `<|endoftext|>` at the end.

 **Q: "`chat_template` choice is `tokenizer_default` but tokenizer's `chat_template` is null. Please add a `chat_template` in tokenizer config"**

 > A: This is because the tokenizer does not have a chat template. Please add a chat template in the tokenizer config. See [chat_template](dataset-formats/conversation.qmd#chat-template) for more details.
+
+**Q: The EOT token(s) are incorrectly being masked or not being masked / `EOT token __ not found in chat template`.**
+
+> A: There can be two reasons:
+
+> 1. The EOT token is different from the EOS token and was not specified under `eot_tokens: `. Please set `eot_tokens: ` to the same EOT token(s) as in the template.
+
+> 2. There is more than one EOT token per turn in the template. Please raise an issue with examples, as we recognize this as an edge case.
+
+**Q: `EOT token encoding failed. Please check if the token is valid and can be encoded.`**
+
+> A: There could be an issue with the tokenizer or unicode encoding. Please raise an issue with examples of the EOT token and tokenizer causing the problem.
+
+**Q: `EOT token __ is encoded as multiple tokens.`**
+
+> A: This is because the EOT token is encoded as multiple tokens, which can cause unexpected behavior. Please add it under `tokens: ` or (recommended) override unused added_tokens via `added_tokens_overrides: `.
+
+**Q: `Conflict between train_on_eos and train_on_eot. eos_token is in eot_tokens and train_on_eos != train_on_eot`**
+
+> A: This happens when the EOS token is included in `eot_tokens: ` while `train_on_eos: ` and `train_on_eot: ` disagree, so one setting would override the other. Please ensure that `train_on_eos: ` and `train_on_eot: ` are the same, or remove the EOS token from `eot_tokens: `.
+
+**Q: If `eot_tokens: ` is not provided, what happens?**
+
+> A: If `eot_tokens: ` is not provided, the default behavior is the same as before: EOS tokens used to delimit turns are masked/unmasked depending on whether the turn is trainable.
+
+> Internally, `eot_tokens` defaults to `tokenizer.eos_token` and `train_on_eot` defaults to `train_on_eos` (which itself defaults to `turn`). This transition helps clarify the naming and behavior of EOT/EOS tokens.
@@ -164,7 +164,7 @@ Here is an example of a multi-modal dataset:
   {
     "role": "user",
     "content": [
-      {"type": "image", "image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"},
+      {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"},
       {"type": "text", "text": "Describe this image in detail."}
     ]
   },
@@ -502,9 +502,7 @@ The input format is a simple JSON input with customizable fields based on the ab
 Check out our [GRPO cookbook](https://github.com/axolotl-ai-cloud/axolotl-cookbook/tree/main/grpo#training-an-r1-style-large-language-model-using-grpo).
 :::

-If you have multiple GPUs available, we recommend using `vLLM` with the `GRPOTrainer` to significantly speed up trajectory generation during training.
-First, launch a `vLLM` server using `trl vllm-serve` - you may use a config file or CLI overrides to configure your vLLM server. In this example, we're
-using 4 GPUs - 2 for training, and 2 for vLLM:
+In the latest GRPO implementation, `vLLM` is used to significantly speed up trajectory generation during training. In this example, we're using 4 GPUs - 2 for training, and 2 for vLLM:

 ::: {.callout-important}
 Make sure you've installed the correct version of vLLM by including it as an extra when installing axolotl, e.g. `pip install axolotl[vllm]`.
@@ -539,6 +537,10 @@ Your `vLLM` instance will now attempt to spin up, and it's time to kick off training:
 CUDA_VISIBLE_DEVICES=0,1 axolotl train grpo.yaml --num-processes 2
 ```

+::: {.callout-note}
+Due to TRL's implementation with vLLM, the vLLM instance must use the last N GPUs instead of the first N GPUs. This is why in the example above, we use `CUDA_VISIBLE_DEVICES=2,3` for the vLLM instance.
+:::
+
 #### Reward functions

 GRPO uses custom reward functions and transformations. Please have them ready locally.
@@ -59,9 +59,7 @@ gradient_checkpointing: false
|
|||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
|
|
||||||
flash_attention: true
|
attention: flash
|
||||||
sdp_attention:
|
|
||||||
flash_optimum:
|
|
||||||
|
|
||||||
gptq_groupsize:
|
gptq_groupsize:
|
||||||
gptq_model_v1:
|
gptq_model_v1:
|
||||||
|
|||||||
@@ -39,8 +39,7 @@ tf32: true
|
|||||||
gradient_checkpointing: true
|
gradient_checkpointing: true
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
xformers_attention: true
|
attention: xformers
|
||||||
flash_attention:
|
|
||||||
gptq_groupsize:
|
gptq_groupsize:
|
||||||
gptq_model_v1:
|
gptq_model_v1:
|
||||||
warmup_steps: 10
|
warmup_steps: 10
|
||||||
|
|||||||
@@ -45,7 +45,8 @@ tf32: false
|
|||||||
gradient_checkpointing: true
|
gradient_checkpointing: true
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
flash_attention: true
|
attention: flash
|
||||||
|
|
||||||
|
|
||||||
warmup_steps: 10
|
warmup_steps: 10
|
||||||
evals_per_epoch: 4
|
evals_per_epoch: 4
|
||||||
|
|||||||
@@ -46,7 +46,8 @@ tf32: false
|
|||||||
gradient_checkpointing: true
|
gradient_checkpointing: true
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
flash_attention: true
|
attention: flash
|
||||||
|
|
||||||
|
|
||||||
warmup_steps: 10
|
warmup_steps: 10
|
||||||
evals_per_epoch: 4
|
evals_per_epoch: 4
|
||||||
|
|||||||
@@ -45,7 +45,8 @@ tf32: false
|
|||||||
gradient_checkpointing: true
|
gradient_checkpointing: true
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
flash_attention: true
|
attention: flash
|
||||||
|
|
||||||
|
|
||||||
warmup_steps: 10
|
warmup_steps: 10
|
||||||
evals_per_epoch: 4
|
evals_per_epoch: 4
|
||||||
|
|||||||
@@ -46,7 +46,8 @@ tf32: false
|
|||||||
gradient_checkpointing: true
|
gradient_checkpointing: true
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
flash_attention: true
|
attention: flash
|
||||||
|
|
||||||
|
|
||||||
warmup_steps: 10
|
warmup_steps: 10
|
||||||
evals_per_epoch: 4
|
evals_per_epoch: 4
|
||||||
|
|||||||
@@ -45,7 +45,8 @@ tf32: false
|
|||||||
gradient_checkpointing: true
|
gradient_checkpointing: true
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
flash_attention: true
|
attention: flash
|
||||||
|
|
||||||
|
|
||||||
warmup_steps: 10
|
warmup_steps: 10
|
||||||
evals_per_epoch: 4
|
evals_per_epoch: 4
|
||||||
|
|||||||
@@ -46,7 +46,8 @@ tf32: false
|
|||||||
gradient_checkpointing: true
|
gradient_checkpointing: true
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
flash_attention: true
|
attention: flash
|
||||||
|
|
||||||
|
|
||||||
warmup_steps: 10
|
warmup_steps: 10
|
||||||
evals_per_epoch: 4
|
evals_per_epoch: 4
|
||||||
|
|||||||
@@ -49,7 +49,8 @@ tf32: true
|
|||||||
gradient_checkpointing: true
|
gradient_checkpointing: true
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
flash_attention: true
|
attention: flash
|
||||||
|
|
||||||
|
|
||||||
warmup_ratio: 0.1
|
warmup_ratio: 0.1
|
||||||
evals_per_epoch:
|
evals_per_epoch:
|
||||||
|
|||||||
@@ -112,9 +112,7 @@
|
|||||||
"early_stopping_patience:\n",
|
"early_stopping_patience:\n",
|
||||||
"resume_from_checkpoint:\n",
|
"resume_from_checkpoint:\n",
|
||||||
"logging_steps: 1\n",
|
"logging_steps: 1\n",
|
||||||
"xformers_attention:\n",
|
"attention: sdpa\n",
|
||||||
"flash_attention: false\n",
|
|
||||||
"sdp_attention: true\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"warmup_steps: 1\n",
|
"warmup_steps: 1\n",
|
||||||
"max_steps: 25\n",
|
"max_steps: 25\n",
|
||||||
|
|||||||
@@ -52,7 +52,8 @@ gradient_checkpointing_kwargs:
|
|||||||
use_reentrant: false
|
use_reentrant: false
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
flash_attention: true
|
attention: flash
|
||||||
|
|
||||||
|
|
||||||
warmup_steps: 10
|
warmup_steps: 10
|
||||||
evals_per_epoch:
|
evals_per_epoch:
|
||||||
|
|||||||
@@ -55,7 +55,8 @@ gradient_checkpointing_kwargs:
|
|||||||
use_reentrant: false
|
use_reentrant: false
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
flash_attention: true
|
attention: flash
|
||||||
|
|
||||||
|
|
||||||
warmup_steps: 10
|
warmup_steps: 10
|
||||||
evals_per_epoch:
|
evals_per_epoch:
|
||||||
|
|||||||
@@ -39,7 +39,8 @@ gradient_checkpointing_kwargs:
|
|||||||
use_reentrant: false
|
use_reentrant: false
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
flash_attention: true
|
attention: flash
|
||||||
|
|
||||||
|
|
||||||
warmup_steps: 10
|
warmup_steps: 10
|
||||||
evals_per_epoch:
|
evals_per_epoch:
|
||||||
|
|||||||
@@ -35,7 +35,8 @@ gradient_checkpointing_kwargs:
|
|||||||
use_reentrant: false
|
use_reentrant: false
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
flash_attention: true
|
attention: flash
|
||||||
|
|
||||||
|
|
||||||
warmup_steps: 100
|
warmup_steps: 100
|
||||||
evals_per_epoch: 2
|
evals_per_epoch: 2
|
||||||
|
|||||||
@@ -59,7 +59,8 @@ gradient_checkpointing_kwargs:
|
|||||||
use_reentrant: false
|
use_reentrant: false
|
||||||
resume_from_checkpoint:
|
resume_from_checkpoint:
|
||||||
logging_steps: 1
|
logging_steps: 1
|
||||||
flash_attention: true
|
attention: flash
|
||||||
|
|
||||||
|
|
||||||
warmup_steps: 100
|
warmup_steps: 100
|
||||||
evals_per_epoch: 2
|
evals_per_epoch: 2
|
||||||
|
|||||||
@@ -43,8 +43,7 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-xformers_attention: true
-flash_attention:
+attention: xformers
gptq_groupsize:
gptq_model_v1:
warmup_steps: 40

@@ -73,8 +73,7 @@ early_stopping_patience: 3
resume_from_checkpoint:
auto_resume_from_checkpoints: true
logging_steps: 1
-xformers_attention: true
-flash_attention:
+attention: xformers
gptq_groupsize:
gptq_model_v1:
warmup_steps: 10

@@ -40,8 +40,7 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-xformers_attention: true
-flash_attention:
+attention: xformers
gptq_groupsize:
gptq_model_v1:
warmup_steps: 40

@@ -47,7 +47,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_ratio: 0.1
evals_per_epoch: 4

@@ -53,7 +53,8 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_ratio: 0.1
evals_per_epoch:

@@ -43,7 +43,8 @@ gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_ratio: 0.1
evals_per_epoch:

@@ -57,7 +57,8 @@ gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_ratio: 0.1
evals_per_epoch:

@@ -51,8 +51,7 @@ gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
logging_steps: 1
-flash_attention: true
-eager_attention:
+attention: flash

warmup_ratio: 0.1
evals_per_epoch: 1

@@ -53,8 +53,7 @@ gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
logging_steps: 1
-flash_attention: true
-eager_attention:
+attention: flash

warmup_ratio: 0.1
evals_per_epoch: 1

@@ -36,8 +36,7 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-xformers_attention: true
-flash_attention:
+attention: xformers
gptq_groupsize:
gptq_model_v1:
warmup_steps: 10

@@ -47,7 +47,8 @@ gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch:

@@ -46,7 +46,8 @@ gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch:

@@ -45,7 +45,8 @@ gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: true
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 1

@@ -37,8 +37,7 @@ bf16: auto
tf32: true
resume_from_checkpoint:
logging_steps: 5
-xformers_attention: true
-flash_attention:
+attention: xformers
gptq_groupsize:
gptq_model_v1:
warmup_steps: 20

@@ -42,7 +42,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+
flash_attn_cross_entropy: false
flash_attn_rms_norm: true
flash_attn_fuse_qkv: false

@@ -53,9 +53,7 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention:
-sdp_attention:
-flash_optimum:
+attention: flash
warmup_steps: 100
evals_per_epoch: 4
saves_per_epoch: 1

@@ -46,7 +46,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+
flash_attn_cross_entropy: false
flash_attn_rms_norm: true
flash_attn_fuse_qkv: false

@@ -45,7 +45,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -45,7 +45,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -48,7 +48,8 @@ gradient_checkpointing_kwargs:
  use_reentrant: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -46,7 +46,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -48,7 +48,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -50,8 +50,7 @@ tf32: true

gradient_checkpointing: true
logging_steps: 1
-flash_attention: true
-eager_attention:
+attention: flash

warmup_ratio: 0.1
evals_per_epoch: 1

@@ -49,7 +49,8 @@ gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 100
evals_per_epoch: 2

@@ -34,7 +34,8 @@ gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 100
evals_per_epoch: 2

@@ -61,7 +61,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -56,7 +56,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -77,7 +77,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -53,7 +53,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -54,7 +54,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

@@ -48,7 +48,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

@@ -55,7 +55,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -48,7 +48,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

@@ -49,7 +49,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -53,7 +53,8 @@ gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 20
evals_per_epoch: 4

@@ -51,7 +51,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

@@ -39,7 +39,8 @@ gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: true
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -48,7 +48,8 @@ gradient_checkpointing_kwargs:
  use_reentrant: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -46,7 +46,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -10,7 +10,6 @@ plugins:
liger_glu_activation: true
liger_rms_norm: true
liger_layer_norm: true
-cut_cross_entropy: true

llama4_linearized_experts: true # needed with custom linearized experts model
load_in_4bit: true

@@ -46,8 +46,7 @@ tf32: true

gradient_checkpointing: true
logging_steps: 1
-flash_attention: true
-eager_attention:
+attention: flash

warmup_ratio: 0.1
evals_per_epoch: 1

@@ -39,7 +39,7 @@ tf32: true
gradient_checkpointing: false
resume_from_checkpoint:
logging_steps: 1
-flash_attention:
+attention: eager

warmup_steps: 10
evals_per_epoch: 4

@@ -42,7 +42,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

save_total_limit: 1
save_steps:

@@ -36,7 +36,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

warmup_steps: 10
evals_per_epoch: 4

@@ -53,8 +53,7 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: false
-sdp_attention: true
+attention: sdpa

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

@@ -54,7 +54,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

@@ -71,7 +71,7 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: false
+attention: eager

warmup_steps: 10
evals_per_epoch: 4

@@ -51,7 +51,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

@@ -59,7 +59,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

@@ -48,9 +48,7 @@ tf32: true

gradient_checkpointing: true
logging_steps: 1
-flash_attention: false # PixtralVisionModel does not support Flash Attention 2.0 yet.
-eager_attention:
-
+attention: eager # PixtralVisionModel does not support Flash Attention 2.0 yet.
warmup_ratio: 0.1
evals_per_epoch: 1
saves_per_epoch: 1

@@ -49,7 +49,8 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

@@ -51,7 +51,8 @@ tf32: true
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

@@ -69,7 +69,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

@@ -40,7 +40,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

save_total_limit: 1
save_steps:

@@ -54,7 +54,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+

loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

@@ -39,7 +39,7 @@ bf16: auto
tf32: true
resume_from_checkpoint:
logging_steps: 5
-flash_attention:
+attention: eager
gptq_groupsize:
gptq_model_v1:
warmup_steps: 20

@@ -39,7 +39,8 @@ tf32: false
gradient_checkpointing: true
resume_from_checkpoint:
logging_steps: 1
-flash_attention: true
+attention: flash
+
gptq_groupsize:
gptq_model_v1:
warmup_steps: 20

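Taken together, the hunks above migrate the example configs from one boolean flag per attention backend (flash_attention, xformers_attention, sdp_attention, eager_attention, flash_optimum) to a single attention selector. A minimal before/after sketch of the convention as it appears in these diffs; whether the key accepts values beyond the four observed here (flash, xformers, sdpa, eager) is an assumption left open:

# old style: one flag per backend (removed on this branch)
flash_attention: true
xformers_attention:
sdp_attention:

# new style: a single backend selector
attention: flash   # values seen in the hunks above: flash, xformers, sdpa, eager
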
Some files were not shown because too many files have changed in this diff.