Compare commits

4 Commits: transforme... → version-de...

| Author | SHA1 | Date |
| --- | --- | --- |
|  | 68fc0eeab3 |  |
|  | 729221e9bb |  |
|  | 5d0d76e4f4 |  |
|  | 3f9555822e |  |
.github/workflows/base.yml (32 lines changed)

````diff
@@ -51,14 +51,6 @@ jobs:
           torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
           dockerfile: "Dockerfile-base"
           platforms: "linux/amd64,linux/arm64"
-        - cuda: "129"
-          cuda_version: 12.9.1
-          cudnn_version: ""
-          python_version: "3.12"
-          pytorch: 2.9.1
-          torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
-          dockerfile: "Dockerfile-base"
-          platforms: "linux/amd64,linux/arm64"
         - cuda: "130"
           cuda_version: 13.0.0
           cudnn_version: ""
@@ -67,14 +59,6 @@ jobs:
           torch_cuda_arch_list: "9.0+PTX"
           dockerfile: "Dockerfile-base"
           platforms: "linux/amd64,linux/arm64"
-        - cuda: "130"
-          cuda_version: 13.0.0
-          cudnn_version: ""
-          python_version: "3.12"
-          pytorch: 2.9.1
-          torch_cuda_arch_list: "9.0+PTX"
-          dockerfile: "Dockerfile-base"
-          platforms: "linux/amd64,linux/arm64"
         # - cuda: "128"
         #   cuda_version: 12.8.1
         #   cudnn_version: ""
@@ -157,14 +141,6 @@ jobs:
           torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
           dockerfile: "Dockerfile-uv-base"
           platforms: "linux/amd64,linux/arm64"
-        - cuda: "129"
-          cuda_version: 12.9.1
-          cudnn_version: ""
-          python_version: "3.12"
-          pytorch: 2.9.1
-          torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
-          dockerfile: "Dockerfile-uv-base"
-          platforms: "linux/amd64,linux/arm64"
         - cuda: "130"
           cuda_version: 13.0.0
           cudnn_version: ""
@@ -173,14 +149,6 @@ jobs:
           torch_cuda_arch_list: "9.0+PTX"
           dockerfile: "Dockerfile-uv-base"
           platforms: "linux/amd64,linux/arm64"
-        - cuda: "130"
-          cuda_version: 13.0.0
-          cudnn_version: ""
-          python_version: "3.12"
-          pytorch: 2.9.1
-          torch_cuda_arch_list: "9.0+PTX"
-          dockerfile: "Dockerfile-uv-base"
-          platforms: "linux/amd64,linux/arm64"
     steps:
       - name: Checkout
         uses: actions/checkout@v4
````
.github/workflows/main.yml (12 lines changed)

````diff
@@ -34,12 +34,6 @@ jobs:
           axolotl_extras:
           platforms: "linux/amd64,linux/arm64"
           is_latest: true
-        - cuda: 129
-          cuda_version: 12.9.1
-          python_version: "3.12"
-          pytorch: 2.9.1
-          axolotl_extras:
-          platforms: "linux/amd64,linux/arm64"
         - cuda: 130
           cuda_version: 13.0.0
           python_version: "3.11"
@@ -112,12 +106,6 @@ jobs:
           axolotl_extras:
           is_latest: true
           platforms: "linux/amd64,linux/arm64"
-        - cuda: 129
-          cuda_version: 12.9.1
-          python_version: "3.12"
-          pytorch: 2.9.1
-          axolotl_extras:
-          platforms: "linux/amd64,linux/arm64"
         - cuda: 130
           cuda_version: 13.0.0
           python_version: "3.11"
````
.github/workflows/multi-gpu-e2e.yml (14 lines changed)

````diff
@@ -35,19 +35,14 @@ jobs:
           pytorch: 2.8.0
           axolotl_extras: fbgemm-gpu
           num_gpus: 2
+          nightly_build: "true"
         - cuda: 128
           cuda_version: 12.8.1
           python_version: "3.11"
           pytorch: 2.9.1
-          axolotl_extras: "fbgemm-gpu"
+          axolotl_extras: fbgemm-gpu
           num_gpus: 2
-        - cuda: 129
-          cuda_version: 12.9.1
-          python_version: "3.12"
-          pytorch: 2.9.1
-          axolotl_extras: "fbgemm-gpu"
-          num_gpus: 2
-          dockerfile: "Dockerfile-uv.jinja"
+          nightly_build: "true"
         - cuda: 130
           cuda_version: 13.0.0
           python_version: "3.11"
@@ -55,6 +50,7 @@ jobs:
           axolotl_extras:
           # axolotl_extras: fbgemm-gpu
           num_gpus: 2
+          nightly_build: "true"
     runs-on: [self-hosted, modal]
     timeout-minutes: 120
     steps:
@@ -76,8 +72,8 @@ jobs:
           echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
           echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
           echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
+          echo "NIGHTLY_BUILD=${{ matrix.nightly_build }}" >> $GITHUB_ENV
           echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
-          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile.jinja'}}" >> $GITHUB_ENV
       - name: Run tests job on Modal
         run: |
           modal run -m cicd.multigpu
````
.github/workflows/pypi.yml (2 lines changed)

````diff
@@ -40,7 +40,7 @@ jobs:
 
       - name: Install dependencies
         run: |
-          pip3 install wheel packaging==26.0
+          pip3 install wheel packaging==23.2
           pip3 install --no-build-isolation -e .
           pip3 install -r requirements-dev.txt -r requirements-tests.txt
 
````
.github/workflows/tests-nightly.yml (2 lines changed)

````diff
@@ -48,7 +48,7 @@ jobs:
       - name: upgrade pip
         run: |
           pip3 install --upgrade pip
-          pip3 install --upgrade packaging==26.0 setuptools==75.8.0 wheel
+          pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
 
       - name: Install PyTorch
         run: |
````
.github/workflows/tests.yml (46 lines changed)

````diff
@@ -54,13 +54,8 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python_version: ["3.11", "3.12"]
+        python_version: ["3.11"]
         pytorch_version: ["2.8.0", "2.9.0", "2.9.1"]
-        exclude:
-          - python_version: "3.12"
-            pytorch_version: "2.8.0"
-          - python_version: "3.12"
-            pytorch_version: "2.9.0"
     timeout-minutes: 20
 
     steps:
@@ -87,7 +82,7 @@ jobs:
       - name: upgrade pip
         run: |
           pip3 install --upgrade pip
-          pip3 install --upgrade packaging==26.0 setuptools==75.8.0 wheel
+          pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
 
       - name: Install PyTorch
         run: |
@@ -115,10 +110,10 @@ jobs:
 
       - name: Pre-Download dataset fixture
         run: |
-          hf download --repo-type=dataset axolotl-ai-internal/axolotl-oss-dataset-fixtures
+          huggingface-cli download --repo-type=dataset axolotl-ai-internal/axolotl-oss-dataset-fixtures
 
       - name: Show HF cache
-        run: hf cache ls
+        run: hf cache scan
 
       - name: Run tests
         run: |
@@ -132,7 +127,7 @@ jobs:
           pytest -v --durations=10 tests/cli/ --cov=axolotl --cov-append --cov-report=xml
 
       - name: Show HF cache
-        run: hf cache ls
+        run: hf cache scan
 
       - name: Upload coverage to Codecov
         uses: codecov/codecov-action@v5
@@ -149,13 +144,8 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python_version: ["3.11", "3.12"]
+        python_version: ["3.11"]
         pytorch_version: ["2.8.0", "2.9.0", "2.9.1"]
-        exclude:
-          - python_version: "3.12"
-            pytorch_version: "2.8.0"
-          - python_version: "3.12"
-            pytorch_version: "2.9.0"
     timeout-minutes: 20
 
     steps:
@@ -182,7 +172,7 @@ jobs:
      - name: upgrade pip
        run: |
          pip3 install --upgrade pip
-         pip3 install --upgrade packaging==26.0 setuptools==75.8.0 setuptools_scm build wheel psutil
+         pip3 install --upgrade packaging==23.2 setuptools==75.8.0 setuptools_scm build wheel psutil
 
      - name: Install PyTorch
        run: |
@@ -210,7 +200,7 @@ jobs:
           axolotl --help
 
       - name: Show HF cache
-        run: hf cache ls
+        run: hf cache scan
 
       - name: Run tests
         run: |
@@ -219,10 +209,10 @@ jobs:
           pytest -v --durations=10 tests/cli/
 
       - name: Show HF cache
-        run: hf cache ls
+        run: hf cache scan
 
   gate-skip-e2e:
-    needs: [pre-commit]
+    needs: [pre-commit, pytest, pytest-sdist]
     runs-on: ubuntu-latest
     outputs:
       skip: ${{ steps.compute.outputs.skip }}
@@ -258,16 +248,16 @@ jobs:
     # this job needs to be run on self-hosted GPU runners...
     runs-on: [self-hosted, modal]
     timeout-minutes: 120
-    needs: [pre-commit, pytest]
+    needs: [pre-commit, pytest, pytest-sdist, gate-skip-e2e]
 
     strategy:
       fail-fast: false
       matrix:
         include:
-          - cuda: 129
-            cuda_version: 12.9.1
-            python_version: "3.12"
-            pytorch: 2.9.1
+          - cuda: 128
+            cuda_version: 12.8.1
+            python_version: "3.11"
+            pytorch: 2.8.0
             num_gpus: 1
             axolotl_extras:
             dockerfile: "Dockerfile-uv.jinja"
@@ -369,9 +359,9 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: 129
-            cuda_version: 12.9.1
-            python_version: "3.12"
+          - cuda: 128
+            cuda_version: 12.8.1
+            python_version: "3.11"
             pytorch: 2.9.1
             num_gpus: 1
             axolotl_extras:
````
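The removed `exclude` entries above matter because an Actions matrix expands to the full cross-product of its axes. A quick way to sanity-check the job list the left-hand matrix produced (a Python sketch; values taken from the hunks above):

```python
from itertools import product

python_versions = ["3.11", "3.12"]
pytorch_versions = ["2.8.0", "2.9.0", "2.9.1"]
excluded = {("3.12", "2.8.0"), ("3.12", "2.9.0")}

# Cross-product minus exclusions: the four jobs the old matrix produced.
jobs = [c for c in product(python_versions, pytorch_versions) if c not in excluded]
print(jobs)
# [('3.11', '2.8.0'), ('3.11', '2.9.0'), ('3.11', '2.9.1'), ('3.12', '2.9.1')]
```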
````diff
@@ -123,7 +123,7 @@ datasets:
 | --------------------------------- | -------------------------- | ----------------------------------- |
 | `dataset_prepared_path`           | `"data/last_run_prepared"` | Path for prepared dataset           |
 | `push_dataset_to_hub`             | `""`                       | Push dataset to HF hub              |
-| `dataset_num_proc`                | `4`                        | Number of preprocessing processes   |
+| `dataset_processes`               | `4`                        | Number of preprocessing processes   |
 | `dataset_keep_in_memory`          | `false`                    | Keep dataset in memory              |
 | `shuffle_merged_datasets`         | `true`                     | Shuffle merged datasets             |
 | `shuffle_before_merging_datasets` | `false`                    | Shuffle each dataset before merging |
````
````diff
@@ -39,6 +39,7 @@
 # type: # linear | dynamic
 # factor: # float
 
+
 # # Whether you are training a 4-bit GPTQ quantized model
 # gptq: true
 # gptq_groupsize: 128 # group size
@@ -106,7 +107,7 @@
 # push_dataset_to_hub: # repo path
 # # The maximum number of processes to use while preprocessing your input dataset. This defaults to `os.cpu_count()`
 # # if not set.
-# dataset_num_proc: # defaults to os.cpu_count() if not set
+# dataset_processes: # defaults to os.cpu_count() if not set
 # # push checkpoints to hub
 # hub_model_id: # repo path to push finetuned model
 # # how to push checkpoints to hub
@@ -223,6 +224,9 @@
 # eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0
 # eval_table_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128
 
+# # Save model as safetensors (require safetensors package)
+# save_safetensors:
+
 # # Whether to mask out or include the human's prompt from the training labels
 # train_on_inputs: false
 # # Group similarly sized data to minimize padding.
@@ -348,6 +352,8 @@
 # # Allow overwrite yml config using from cli
 # strict:
 
+
+
 base_model: ${BASE_MODEL}
 base_model_ignore_patterns: ${BASE_MODEL_IGNORE_PATTERNS}
 base_model_config: ${BASE_MODEL_CONFIG}
@@ -406,7 +412,7 @@ chat_template_jinja: ${CHAT_TEMPLATE_JINJA}
 default_system_message: ${DEFAULT_SYSTEM_MESSAGE}
 dataset_prepared_path: ${DATASET_PREPARED_PATH}
 push_dataset_to_hub: ${PUSH_DATASET_TO_HUB}
-dataset_num_proc: ${DATASET_NUM_PROC}
+dataset_processes: ${DATASET_PROCESSES}
 dataset_keep_in_memory: ${DATASET_KEEP_IN_MEMORY}
 hub_model_id: ${HUB_MODEL_ID}
 hub_strategy: ${HUB_STRATEGY}
@@ -506,6 +512,7 @@ profiler_steps: ${PROFILER_STEPS}
 loss_watchdog_threshold: ${LOSS_WATCHDOG_THRESHOLD}
 loss_watchdog_patience: ${LOSS_WATCHDOG_PATIENCE}
 
+save_safetensors: ${SAVE_SAFETENSORS}
 train_on_inputs: ${TRAIN_ON_INPUTS}
 group_by_length: ${GROUP_BY_LENGTH}
 gradient_checkpointing: ${GRADIENT_CHECKPOINTING}
````
|
|||||||
#### Using pip
|
#### Using pip
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip3 install -U packaging==26.0 setuptools==75.8.0 wheel ninja
|
pip3 install -U packaging==23.2 setuptools==75.8.0 wheel ninja
|
||||||
pip3 install --no-build-isolation axolotl[flash-attn,deepspeed]
|
pip3 install --no-build-isolation axolotl[flash-attn,deepspeed]
|
||||||
|
|
||||||
# Download example axolotl configs, deepspeed configs
|
# Download example axolotl configs, deepspeed configs
|
||||||
|
|||||||
````diff
@@ -251,6 +251,7 @@ website:
         - docs/models/olmo3.qmd
         - docs/models/trinity.qmd
         - docs/models/arcee.qmd
+        - docs/models/mistral.qmd
       - section: "Ministral3"
         contents:
           - docs/models/ministral3.qmd
@@ -265,7 +266,6 @@ website:
         - docs/models/mistral-small.qmd
         - docs/models/voxtral.qmd
         - docs/models/devstral.qmd
-        - docs/models/mistral.qmd
         - docs/models/llama-4.qmd
         - docs/models/llama-2.qmd
         - docs/models/qwen3-next.qmd
@@ -320,7 +320,6 @@ website:
       - docs/multipack.qmd
       - docs/mixed_precision.qmd
       - docs/optimizers.qmd
-      - docs/attention.qmd
 
       - section: "Advanced Features"
         contents:
````
````diff
@@ -31,7 +31,7 @@ RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
         sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt; \
     fi
 
-RUN uv pip install packaging==26.0 setuptools==75.8.0
+RUN uv pip install packaging==23.2 setuptools==75.8.0
 RUN uv pip install torchvision
 RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
         uv pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
````
````diff
@@ -32,7 +32,7 @@ RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
         sed -i 's#^datasets.*#datasets @ git+https://github.com/huggingface/datasets.git@main#' requirements.txt; \
     fi
 
-RUN pip install packaging==26.0 setuptools==75.8.0 psutil
+RUN pip install packaging==23.2 setuptools==75.8.0 psutil
 RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
         pip install --no-build-isolation -e .[deepspeed,flash-attn,ring-flash-attn,optimizers,ray,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
     else \
````
````diff
@@ -17,8 +17,7 @@ template_loader = jinja2.FileSystemLoader(searchpath=cicd_path)
 template_env = jinja2.Environment(
     loader=template_loader, autoescape=select_autoescape()
 )
-dockerfile = os.environ.get("E2E_DOCKERFILE", "Dockerfile.jinja")
-df_template = template_env.get_template(dockerfile)
+df_template = template_env.get_template("Dockerfile.jinja")
 
 df_args = {
     "AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", ""),
@@ -28,11 +27,8 @@ df_args = {
     "CUDA": os.environ.get("CUDA", "126"),
     "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
     "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
-    "NIGHTLY_BUILD": os.environ.get("NIGHTLY_BUILD", ""),
     "CODECOV_TOKEN": os.environ.get("CODECOV_TOKEN", ""),
     "HF_HOME": "/workspace/data/huggingface-cache/hub",
-    "PYTHONUNBUFFERED": os.environ.get("PYTHONUNBUFFERED", "1"),
-    "DEEPSPEED_LOG_LEVEL": os.environ.get("DEEPSPEED_LOG_LEVEL", "WARNING"),
 }
 
 dockerfile_contents = df_template.render(**df_args)
````
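For context, the left-hand side of the hunks above selects the Jinja Dockerfile template from the `E2E_DOCKERFILE` environment variable (exported by the multi-gpu workflow earlier), while the right-hand side pins `Dockerfile.jinja`. A minimal sketch of the env-driven pattern, assuming `cicd_path` points at the directory holding the templates:

```python
import os

import jinja2
from jinja2 import select_autoescape

cicd_path = "cicd"  # assumed: directory containing the Dockerfile templates

template_env = jinja2.Environment(
    loader=jinja2.FileSystemLoader(searchpath=cicd_path),
    autoescape=select_autoescape(),
)

# Pick the template from the environment, falling back to the default.
dockerfile = os.environ.get("E2E_DOCKERFILE", "Dockerfile.jinja")
df_template = template_env.get_template(dockerfile)

df_args = {"AXOLOTL_EXTRAS": os.environ.get("AXOLOTL_EXTRAS", "")}
print(df_template.render(**df_args))
```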
````diff
@@ -2,7 +2,7 @@
 set -e
 
 # Only run two tests at a time to avoid OOM on GPU (with coverage collection)
-pytest -v --durations=10 -n2 --maxfail=3 \
+pytest -v --durations=10 -n2 --maxfail=4 \
     --ignore=/workspace/axolotl/tests/e2e/multigpu/solo/ \
     --ignore=/workspace/axolotl/tests/e2e/multigpu/patched/ \
     /workspace/axolotl/tests/e2e/multigpu/ \
````
````diff
@@ -43,7 +43,7 @@ ENV PATH="/root/miniconda3/envs/py${PYTHON_VERSION}/bin:${PATH}"
 
 WORKDIR /workspace
 
-RUN python3 -m pip install --upgrade pip && pip3 install -U packaging==26.0 setuptools==75.8.0 wheel psutil && \
+RUN python3 -m pip install --upgrade pip && pip3 install -U packaging==23.2 setuptools==75.8.0 wheel psutil && \
     python3 -m pip install --no-cache-dir -U torch==${PYTORCH_VERSION}+cu${CUDA} torchvision --extra-index-url https://download.pytorch.org/whl/cu$CUDA && \
     python3 -m pip cache purge
 
````
````diff
@@ -30,7 +30,7 @@ ENV PATH="/root/miniconda3/envs/py${PYTHON_VERSION}/bin:${PATH}"
 
 WORKDIR /workspace
 
-RUN python3 -m pip install --upgrade pip && pip3 install -U packaging==26.0 setuptools==75.8.0 wheel && \
+RUN python3 -m pip install --upgrade pip && pip3 install -U packaging==23.2 setuptools==75.8.0 wheel && \
     python3 -m pip install --no-cache-dir -U torch --extra-index-url https://download.pytorch.org/whl/nightly/cu$CUDA && \
     python3 -m pip install --no-cache-dir "causal_conv1d @ git+https://github.com/Dao-AILab/causal-conv1d.git@main" && \
     python3 -m pip install --no-cache-dir "mamba_ssm @ git+https://github.com/state-spaces/mamba.git@main" && \
````
````diff
@@ -86,7 +86,7 @@ export HF_DATASETS_OFFLINE=1
 Download a base model using the Hugging Face CLI:
 
 ```bash
-hf download meta-llama/Meta-Llama-3.1-8B --local-dir ~/hfdata/llama3.1-8B
+huggingface-cli download meta-llama/Meta-Llama-3.1-8B --local-dir ~/hfdata/llama3.1-8B
 ```
 
 ### 10. Create Axolotl Configuration
````
````diff
@@ -1,140 +0,0 @@
----
-title: Attention
-description: Supported attention modules in Axolotl
----
-
-## SDP Attention
-
-This is the default built-in attention in PyTorch.
-
-```yaml
-sdp_attention: true
-```
-
-For more details: [PyTorch docs](https://docs.pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
-
-## Flash Attention 2
-
-Uses efficient kernels to compute attention.
-
-```yaml
-flash_attention: true
-```
-
-For more details: [Flash Attention](https://github.com/Dao-AILab/flash-attention/)
-
-### Nvidia
-
-Requirements: Ampere, Ada, or Hopper GPUs
-
-Note: For Turing GPUs or lower, please use other attention methods.
-
-```bash
-pip install flash-attn --no-build-isolation
-```
-
-::: {.callout-tip}
-
-If you get `undefined symbol` while training, ensure you installed PyTorch prior to Axolotl. Alternatively, try reinstall or downgrade a version.
-
-:::
-
-#### Flash Attention 3
-
-Requirements: Hopper only and CUDA 12.8 (recommended)
-
-```bash
-git clone https://github.com/Dao-AILab/flash-attention.git
-cd flash-attention/hopper
-
-python setup.py install
-```
-
-### AMD
-
-Requirements: ROCm 6.0 and above.
-
-See [Flash Attention AMD docs](https://github.com/Dao-AILab/flash-attention/tree/main?tab=readme-ov-file#amd-rocm-support).
-
-## Flex Attention
-
-A flexible PyTorch API for attention used in combination with `torch.compile`.
-
-```yaml
-flex_attention: true
-
-# recommended
-torch_compile: true
-```
-
-::: {.callout-note}
-
-We recommend using latest stable version of PyTorch for best performance.
-
-:::
-
-For more details: [PyTorch docs](https://pytorch.org/blog/flexattention/)
-
-## SageAttention
-
-Attention kernels with QK Int8 and PV FP16 accumulator.
-
-```yaml
-sage_attention: true
-```
-
-Requirements: Ampere, Ada, or Hopper GPUs
-
-```bash
-pip install sageattention==2.2.0 --no-build-isolation
-```
-
-::: {.callout-warning}
-
-Only LoRA/QLoRA recommended at the moment. We found loss drop to 0 for full finetuning. See [GitHub Issue](https://github.com/thu-ml/SageAttention/issues/198).
-
-:::
-
-For more details: [Sage Attention](https://github.com/thu-ml/SageAttention)
-
-::: {.callout-note}
-
-We do not support SageAttention 3 at the moment. If you are interested on adding this or improving SageAttention implementation, please make an Issue.
-
-:::
-
-
-## xFormers
-
-```yaml
-xformers_attention: true
-```
-
-::: {.callout-tip}
-
-We recommend using with Turing GPUs or below (such as on Colab).
-
-:::
-
-For more details: [xFormers](https://github.com/facebookresearch/xformers)
-
-## Shifted Sparse Attention
-
-::: {.callout-warning}
-
-We plan to deprecate this! If you use this feature, we recommend switching to methods above.
-
-:::
-
-Requirements: LLaMA model architecture
-
-```yaml
-flash_attention: true
-s2_attention: true
-```
-
-::: {.callout-tip}
-
-No sample packing support!
-
-:::
````
````diff
@@ -210,8 +210,6 @@ axolotl lm-eval config.yml
 Configuration options:
 
 ```yaml
-lm_eval_model: # model to evaluate (local or hf path)
-
 # List of tasks to evaluate
 lm_eval_tasks:
   - arc_challenge
@@ -220,7 +218,7 @@ lm_eval_batch_size: # Batch size for evaluation
 output_dir: # Directory to save evaluation results
 ```
 
-See [LM Eval Harness integration docs](https://docs.axolotl.ai/docs/custom_integrations.html#language-model-evaluation-harness-lm-eval) for full configuration details.
+See [LM Eval Harness](https://github.com/EleutherAI/lm-evaluation-harness) for more details.
 
 ### delinearize-llama4
 
````
````diff
@@ -165,7 +165,7 @@ We recommend using WSL2 (Windows Subsystem for Linux) or Docker.
    ```
 4. (Optional) Login to Hugging Face:
    ```{.bash}
-   hf auth login
+   huggingface-cli login
    ```
 
 ## Troubleshooting {#sec-troubleshooting}
````
````diff
@@ -89,10 +89,6 @@ lora_o_kernel: true
 Currently, LoRA kernels are not supported for RLHF training, only SFT.
 :::
 
-::: {.callout-warning}
-LoRA kernels do not support remote modeling code.
-:::
-
 ## Requirements
 
 - One or more NVIDIA or AMD GPUs (in order to use the Triton kernels)
````
````diff
@@ -19,7 +19,6 @@ format:
 - [Gemma-3n](#sec-gemma-3n)
 - [Qwen2-VL](#sec-qwen2-vl)
 - [Qwen2.5-VL](#sec-qwen25-vl)
-- [GLM-4.6V](#sec-glm-4-6v)
 - [SmolVLM2](#sec-smolvlm2)
 - [LFM2-VL](#sec-lfm2-vl)
 - [Intern-VL](#sec-intern-vl)
@@ -184,18 +183,6 @@ base_model: Qwen/Qwen3-VL-4B-Instruct
 chat_template: qwen2_vl # same as qwen2-vl
 ```
 
-### GLM-4.6V {#sec-glm-4-6v}
-
-Both GLM-4.6V (106B MoE) and GLM-4.6V-Flash (9B) are supported.
-
-```yaml
-# GLM-4.6V (106B MoE version)
-base_model: zai-org/GLM-4.6V
-
-# OR GLM-4.6V-Flash (9B version)
-base_model: zai-org/GLM-4.6V-Flash
-```
-
 ### SmolVLM2 {#sec-smolvlm2}
 
 ::: {.callout-tip}
````
````diff
@@ -17,7 +17,6 @@ feedback. Various methods include, but not limited to:
 - [Kahneman-Tversky Optimization (KTO)](#kto)
 - [Odds Ratio Preference Optimization (ORPO)](#orpo)
 - [Group Relative Policy Optimization (GRPO)](#grpo)
-- [Group Reward-Decoupled Policy Optimization (GDPO)](#gdpo)
 
 
 ## RLHF using Axolotl
@@ -721,102 +720,6 @@ trl:
 
 For more information, see [GRPO docs](https://huggingface.co/docs/trl/v0.17.0/en/grpo_trainer#loss-types).
 
-### GDPO
-
-GDPO (Group Reward-Decoupled Policy Optimization) extends GRPO for multi-reward training. It addresses the **reward advantage collapse** problem by normalizing each reward function independently before combining them.
-
-::: {.callout-tip}
-Use GDPO when training with multiple reward functions. For single reward, GRPO and GDPO produce equivalent results.
-:::
-
-Paper: [https://arxiv.org/pdf/2501.05242](https://arxiv.org/pdf/2501.05242)
-
-GDPO uses TRL's native `multi_objective_aggregation` parameter under the hood. When you set `rl: gdpo`, axolotl automatically configures TRL to use `normalize_then_sum` aggregation.
-
-```yaml
-base_model: Qwen/Qwen2.5-1.5B-Instruct
-
-vllm:
-  host: 0.0.0.0
-  port: 8000
-  tensor_parallel_size: 2
-  gpu_memory_utilization: 0.85
-
-rl: gdpo
-
-trl:
-  beta: 0.001
-  max_completion_length: 256
-  use_vllm: true
-  num_generations: 4
-  reward_funcs:
-    - rewards.format_reward
-    - rewards.correctness_reward
-  reward_weights: [1.0, 2.0]
-
-datasets:
-  - path: openai/gsm8k
-    name: main
-    type: rewards.oai_gsm8k_transform
-```
-
-You can also use GRPO with explicit aggregation control:
-
-```yaml
-rl: grpo
-trl:
-  multi_objective_aggregation: normalize_then_sum # GDPO behavior
-  # or: sum_then_normalize # Default GRPO behavior
-```
-
-#### GDPO vs GRPO
-
-| Aspect | GRPO | GDPO |
-|--------|------|------|
-| **Aggregation** | `sum_then_normalize` | `normalize_then_sum` |
-| **Multi-reward** | May collapse advantages | Preserves reward signals |
-| **Single reward** | Standard behavior | Equivalent to GRPO |
-
-#### Why GDPO?
-
-When using multiple rewards with GRPO, different reward combinations can produce identical advantages:
-
-```
-# Example: format + correctness rewards
-[format=0, correct=3] → sum=3
-[format=1, correct=2] → sum=3 ← GRPO sees these as equal!
-[format=2, correct=1] → sum=3
-[format=3, correct=0] → sum=3
-```
-
-GDPO normalizes each reward independently, preserving their relative differences.
-
-#### Reward Functions
-
-GDPO uses the same reward function format as GRPO:
-
-```python
-# rewards.py
-def format_reward(completions, **kwargs) -> list[float]:
-    return [1.0 if len(c) > 10 else 0.0 for c in completions]
-
-def correctness_reward(completions, answers, **kwargs) -> list[float]:
-    rewards = []
-    for completion, answer in zip(completions, answers):
-        # Your scoring logic here
-        rewards.append(score)
-    return rewards
-```
-
-#### Sequence Parallelism
-
-GDPO supports sequence parallelism for long-context training:
-
-```yaml
-rl: gdpo
-context_parallel_size: 2
-```
-
 ### SimPO
 
 SimPO uses [CPOTrainer](https://huggingface.co/docs/trl/main/en/cpo_trainer) but with alternative loss function.
````
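The two aggregation orders named in the removed GDPO section differ only in where the group normalization happens. A minimal numeric sketch, assuming per-group standardization as the normalization step (an assumption; TRL's exact formula may differ):

```python
import numpy as np

# Two reward functions on very different scales for one generation group.
format_r = np.array([1.0, 0.0, 1.0, 0.0])       # binary format reward
correct_r = np.array([10.0, 90.0, 30.0, 70.0])  # large-scale correctness reward

def standardize(x: np.ndarray) -> np.ndarray:
    return (x - x.mean()) / (x.std() + 1e-8)

# sum_then_normalize (GRPO default): the large-scale reward dominates the advantage.
print(standardize(format_r + correct_r))

# normalize_then_sum (GDPO): each reward contributes on a comparable scale.
print(standardize(format_r) + standardize(correct_r))
```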
````diff
@@ -15,7 +15,7 @@ This guide shows how to fine-tune it with Axolotl with multi-turn conversations
 git clone https://github.com/axolotl-ai-cloud/axolotl.git
 cd axolotl
 
-pip3 install packaging==26.0 setuptools==75.8.0 wheel ninja
+pip3 install packaging==23.2 setuptools==75.8.0 wheel ninja
 pip3 install --no-build-isolation -e '.[flash-attn]'
 
 # Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
````
````diff
@@ -17,7 +17,7 @@ Thanks to the team at Arcee.ai for using Axolotl in supervised fine-tuning the A
 git clone https://github.com/axolotl-ai-cloud/axolotl.git
 cd axolotl
 
-pip3 install packaging==26.0 setuptools==75.8.0 wheel ninja
+pip3 install packaging==23.2 setuptools==75.8.0 wheel ninja
 pip3 install --no-build-isolation -e '.[flash-attn]'
 
 # Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
````
````diff
@@ -40,7 +40,7 @@
     "%%capture\n",
     "# This step can take ~5-10 minutes to install dependencies\n",
     "!pip install --no-build-isolation axolotl[flash-attn]>=0.9.1\n",
-    "!pip install \"cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@0d4ce4b\""
+    "!pip install \"cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@318b7e2\""
    ]
   },
   {
````
````diff
@@ -16,7 +16,7 @@ Thanks to the team at MistralAI for giving us early access to prepare for this r
 
 ```bash
 # Ensure you have Pytorch installed (Pytorch 2.6.0 min)
-pip3 install packaging==26.0 setuptools==75.8.0 wheel ninja
+pip3 install packaging==23.2 setuptools==75.8.0 wheel ninja
 pip3 install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'
 ```
 
````
````diff
@@ -1,77 +0,0 @@
-base_model: google/gemma-3-1b-it
-
-model_type: Gemma3ForCausalLM
-cls_model_config: Gemma3TextConfig
-
-# gemma3 doesn't seem to play nice with ddp
-ddp_find_unused_parameters: true
-
-chat_template: gemma3
-eot_tokens:
-  - <end_of_turn>
-
-load_in_8bit: false
-load_in_4bit: false
-strict: false
-
-datasets:
-  - path: cgato/SlimOrcaDedupCleaned
-    type: chat_template
-    field_messages: conversations
-    message_property_mappings:
-      role: from
-      content: value
-
-dataset_prepared_path:
-val_set_size: 0
-output_dir: ./outputs/eaft-gemma-3-1b
-
-use_eaft: true
-eaft_alpha: 1.0
-eaft_k: 20
-
-sequence_len: 1024
-sample_packing: false
-
-adapter:
-lora_model_dir:
-
-wandb_project:
-wandb_entity:
-wandb_watch:
-wandb_name:
-wandb_log_model:
-
-gradient_accumulation_steps: 4
-micro_batch_size: 1
-eval_batch_size: 1
-max_steps: 1000
-evaluation_strategy: "no"
-optimizer: adamw_torch_fused
-lr_scheduler: cosine
-learning_rate: 5e-5
-
-train_on_inputs: false
-group_by_length: false
-bf16: auto
-fp16:
-tf32: true
-
-gradient_checkpointing: true
-gradient_checkpointing_kwargs:
-  use_reentrant: false
-
-early_stopping_patience:
-resume_from_checkpoint:
-local_rank:
-logging_steps: 1
-xformers_attention:
-flash_attention: true
-
-warmup_ratio: 0.1
-weight_decay: 0.0
-debug:
-deepspeed:
-fsdp:
-fsdp_config:
-special_tokens:
````
````diff
@@ -10,7 +10,7 @@ Gemma-3n is a family of multimodal models from Google found on [HuggingFace](htt
 
 ```bash
 # Ensure you have Pytorch installed (Pytorch 2.6.0 min)
-pip3 install packaging==26.0 setuptools==75.8.0 wheel ninja
+pip3 install packaging==23.2 setuptools==75.8.0 wheel ninja
 pip3 install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'
 ```
 
````
````diff
@@ -1,44 +0,0 @@
-# Finetune GLM-4.6V with Axolotl
-
-GLM-4.6V is a family of vision-language models from ZhipuAI found on [HuggingFace](https://huggingface.co/zai-org/GLM-4.6V). This guide shows how to fine-tune it with Axolotl for vision-language tasks.
-
-
-
-## Getting started
-
-1. Install Axolotl from source following the [installation guide](https://docs.axolotl.ai/docs/installation.html#sec-edge-build).
-
-2. Install [Cut Cross Entropy](https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy) to reduce training VRAM usage.
-
-
-3. Run the fine-tuning:
-
-glm-4-6v-flash(9B)
-```bash
-axolotl train examples/glm46v/glm-4-6v-flash-qlora.yaml
-```
-
-Let us know how it goes. Happy finetuning! 🚀
-
-## Tips
-
-- Vision datasets should follow the format described in the [multimodal docs](https://docs.axolotl.ai/docs/multimodal.html#dataset-format)
-- You can run a **full finetuning** by removing the `adapter: qlora` and `load_in_4bit: true` from the config.
-- Read more on how to load your own dataset in the [dataset loading docs](https://docs.axolotl.ai/docs/dataset_loading.html).
-
-## Supported Models
-
-- **GLM-4.6V**: Full vision-language model (`zai-org/GLM-4.6V`)
-- **GLM-4.6V-Flash**: Faster variant (`zai-org/GLM-4.6V-Flash`)
-
-## Optimization Guides
-
-Please check the [Optimizations doc](https://docs.axolotl.ai/docs/optimizations.html).
-
-## Related Resources
-
-- [ZhipuAI GLM-4.6V](https://huggingface.co/zai-org/GLM-4.6V)
-- [Axolotl Docs](https://docs.axolotl.ai)
-- [Axolotl Website](https://axolotl.ai)
-- [Axolotl GitHub](https://github.com/axolotl-ai-cloud/axolotl)
-- [Axolotl Discord](https://discord.gg/7m9sfhzaf3)
````
````diff
@@ -1,53 +0,0 @@
-base_model: zai-org/GLM-4.6V-Flash
-trust_remote_code: true
-
-processor_type: AutoProcessor
-load_in_4bit: true
-
-# these 3 lines are needed for now to handle vision chat templates w images
-skip_prepare_dataset: true
-remove_unused_columns: false
-sample_packing: false
-ddp_find_unused_parameters: true
-
-output_dir: ./outputs/glm-4-6v-flash-qlora
-datasets:
-  - path: HuggingFaceH4/llava-instruct-mix-vsft
-    type: chat_template
-    split: train[:1%]
-
-adapter: qlora
-lora_r: 16
-lora_alpha: 32
-lora_dropout: 0.05
-lora_target_modules:
-  - gate_proj
-  - down_proj
-  - up_proj
-  - q_proj
-  - v_proj
-  - k_proj
-  - o_proj
-
-sequence_len: 2048
-
-gradient_accumulation_steps: 4
-micro_batch_size: 1
-num_epochs: 1
-optimizer: adamw_8bit
-lr_scheduler: cosine
-learning_rate: 0.0002
-
-bf16: auto
-tf32: false
-
-gradient_checkpointing: true
-gradient_checkpointing_kwargs:
-  use_reentrant: false
-logging_steps: 1
-sdp_attention: true
-
-warmup_ratio: 0.1
-evals_per_epoch: 0
-saves_per_epoch: 1
-weight_decay: 0.0
````
````diff
@@ -1,50 +0,0 @@
-base_model: zai-org/GLM-4.6V-Flash
-trust_remote_code: true
-
-processor_type: AutoProcessor
-load_in_4bit: true
-
-# these 3 lines are needed for now to handle vision chat templates w images
-skip_prepare_dataset: true
-remove_unused_columns: false
-sample_packing: false
-
-output_dir: ./outputs/glm-4-6v-flash-qlora
-datasets:
-  - path: HuggingFaceH4/llava-instruct-mix-vsft
-    type: chat_template
-    split: train[:1%]
-
-adapter: qlora
-lora_r: 16
-lora_alpha: 32
-lora_dropout: 0.05
-lora_target_modules:
-  - gate_proj
-  - down_proj
-  - up_proj
-  - q_proj
-  - v_proj
-  - k_proj
-  - o_proj
-
-sequence_len: 2048
-
-gradient_accumulation_steps: 4
-micro_batch_size: 1
-num_epochs: 1
-optimizer: adamw_8bit
-lr_scheduler: cosine
-learning_rate: 0.0002
-
-bf16: auto
-tf32: false
-
-gradient_checkpointing: true
-logging_steps: 1
-sdp_attention: true
-
-warmup_ratio: 0.1
-evals_per_epoch: 0
-saves_per_epoch: 1
-weight_decay: 0.0
````
````diff
@@ -14,7 +14,7 @@ This guide shows how to fine-tune it with Axolotl with multi-turn conversations
 
 ```bash
 # Ensure you have Pytorch installed (Pytorch 2.6.0 min)
-pip3 install packaging==26.0 setuptools==75.8.0 wheel ninja
+pip3 install packaging==23.2 setuptools==75.8.0 wheel ninja
 pip3 install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'
 ```
 
````
````diff
@@ -15,7 +15,7 @@ This guide shows how to fine-tune it with Axolotl with multi-turn conversations
 git clone https://github.com/axolotl-ai-cloud/axolotl.git
 cd axolotl
 
-pip3 install packaging==26.0 setuptools==75.8.0 wheel ninja
+pip3 install packaging==23.2 setuptools==75.8.0 wheel ninja
 pip3 install --no-build-isolation -e '.[flash-attn]'
 
 # Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
````
````diff
@@ -13,7 +13,7 @@ Tencent released a family of opensource models called HunYuan with varying param
 git clone https://github.com/axolotl-ai-cloud/axolotl.git
 cd axolotl
 
-pip3 install packaging==26.0 setuptools==75.8.0 wheel ninja
+pip3 install packaging==23.2 setuptools==75.8.0 wheel ninja
 pip3 install --no-build-isolation -e '.[flash-attn]'
 
 # Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
````
````diff
@@ -19,6 +19,7 @@ datasets:
 dataset_prepared_path: last_run_prepared
 val_set_size: 0.0
 output_dir: jamba-large-fsdp-qlora-ft
+save_safetensors: true
 adapter: qlora
 sequence_len: 2048
 sample_packing: true
````
````diff
@@ -1,68 +0,0 @@
-base_model: meta-llama/Llama-3.2-1B-Instruct
-
-chat_template: llama3
-
-rl: gdpo
-
-trl:
-  beta: 0.001
-  max_completion_length: 128
-  num_generations: 2
-  temperature: 0.7
-  top_p: 0.95
-
-  use_vllm: false
-
-
-  multi_objective_aggregation: normalize_then_sum
-
-  reward_funcs:
-    - rwd.format_reward
-    - rwd.correctness_reward
-  reward_weights: [1.0, 2.0]
-
-  log_completions: true
-  num_completions_to_print: 3
-  scale_rewards: true
-
-datasets:
-  - path: openai/gsm8k
-    name: main
-    split: train[:1000]
-    type: rwd.gsm8k_transform
-
-val_set_size: 0.0
-output_dir: ./outputs/llama3-gdpo-out
-
-sequence_len: 512
-sample_packing: false
-pad_to_sequence_len: false
-
-gradient_accumulation_steps: 8
-micro_batch_size: 1
-num_epochs: 1
-max_steps: 100
-
-optimizer: adamw_torch_fused
-lr_scheduler: cosine
-learning_rate: 5e-5
-weight_decay: 0.01
-warmup_steps: 10
-
-bf16: auto
-tf32: true
-
-gradient_checkpointing: true
-gradient_checkpointing_kwargs:
-  use_reentrant: false
-
-flash_attention: true
-logging_steps: 1
-save_steps: 50
-save_safetensors: true
-
-special_tokens:
-  pad_token: "<|end_of_text|>"
-
-
-seed: 42
````
````diff
@@ -12,6 +12,7 @@ datasets:
 dataset_prepared_path: last_run_prepared
 val_set_size: 0.0
 output_dir: ./outputs/out/qlora-llama3_1-405b
+save_safetensors: true
 
 adapter: qlora
 
````
````diff
@@ -14,7 +14,7 @@ Thanks to the team at MistralAI for giving us early access to prepare for these
 
 ```bash
 # Ensure you have Pytorch installed (Pytorch 2.7.0 min)
-pip3 install packaging==26.0 setuptools==75.8.0 wheel ninja
+pip3 install packaging==23.2 setuptools==75.8.0 wheel ninja
 pip3 install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'
 ```
 
````
````diff
@@ -47,5 +47,6 @@ saves_per_epoch: 1
 weight_decay: 0.0
 special_tokens:
 tokens:
+save_safetensors: False
 
 # save_first_step: true # uncomment this to validate checkpoint saving works with your config
````
````diff
@@ -15,7 +15,7 @@ This guide shows how to fine-tune it with Axolotl with multi-turn conversations
 git clone https://github.com/axolotl-ai-cloud/axolotl.git
 cd axolotl
 
-pip3 install packaging==26.0 setuptools==75.8.0 wheel ninja
+pip3 install packaging==23.2 setuptools==75.8.0 wheel ninja
 pip3 install --no-build-isolation -e '.[flash-attn]'
 
 # Install CCE https://docs.axolotl.ai/docs/custom_integrations.html#cut-cross-entropy
````
````diff
@@ -12,7 +12,7 @@ Thanks to the team at MistralAI for giving us early access to prepare for this r
 
 ```bash
 # Ensure you have Pytorch installed (Pytorch 2.6.0 min)
-pip3 install packaging==26.0 setuptools==75.8.0 wheel ninja
+pip3 install packaging==23.2 setuptools==75.8.0 wheel ninja
 pip3 install --no-build-isolation 'axolotl[flash-attn]>=0.12.0'
 ```
 
````
````diff
@@ -1,5 +1,5 @@
 [build-system]
-requires = ["setuptools>=64", "wheel", "setuptools_scm>=8", "packaging==26.0"]
+requires = ["setuptools>=64", "wheel", "setuptools_scm>=8", "packaging==23.2"]
 build-backend = "setuptools.build_meta"
 
 [project]
@@ -60,6 +60,3 @@ indent-style = "space"
 skip-magic-trailing-comma = false
 line-ending = "auto"
 docstring-code-format = false
-
-[tool.uv.extra-build-dependencies]
-axolotl = ["huggingface_hub"]
````
@@ -2,24 +2,24 @@
 
 # START section of dependencies that don't install on Darwin/MacOS
 bitsandbytes==0.49.1
-triton>=3.4.0
+triton>=3.0.0
 mamba-ssm==1.2.0.post1
 xformers>=0.0.23.post1
-liger-kernel==0.7.0
+liger-kernel==0.6.4
 # END section
 
-packaging==26.0
-huggingface_hub>=1.1.7
+packaging==23.2
+huggingface_hub>=0.36.0
 peft>=0.18.1
 tokenizers>=0.22.1
-transformers @ git+https://github.com/winglian/transformers.git@refactor-inner-training-loop-reorder-only
+transformers==4.57.6
 accelerate==1.12.0
 datasets==4.5.0
 deepspeed>=0.18.3
-trl==0.28.0
+trl==0.25.1
 hf_xet==1.2.0
 kernels==0.11.5
 
 trackio>=0.13.0
 typing-extensions>=4.15.0
 
@@ -63,7 +63,7 @@ langdetect==1.0.9
 immutabledict==4.2.0
 antlr4-python3-runtime==4.13.2
 
-torchao==0.16.0
+torchao==0.13.0
 openenv-core==0.1.0
 schedulefree==1.4.1
 
@@ -72,4 +72,4 @@ axolotl-contribs-mit==0.0.6
 # telemetry
 posthog==6.7.11
 
-mistral-common==1.8.8
+mistral-common==1.8.6
@@ -29,5 +29,5 @@ UV_PREFIX = "uv " if USE_UV else ""
 
 print(
     UNINSTALL_PREFIX
-    + f'{UV_PREFIX}pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@0d4ce4b"'
+    + f'{UV_PREFIX}pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@318b7e2"'
 )
5
setup.py
@@ -78,11 +78,6 @@ def parse_requirements(extras_require_map):
         extras_require_map["vllm"] = ["vllm==0.11.1"]
         if not install_xformers:
             _install_requires.pop(_install_requires.index(xformers_version))
-        extras_require_map["vllm"] = ["vllm==0.13.0"]
-        if patch == 0:
-            extras_require_map["vllm"] = ["vllm==0.13.0"]
-        else:
-            extras_require_map["vllm"] = ["vllm==0.14.0"]
     elif (major, minor) >= (2, 8):
         extras_require_map.pop("fbgemm-gpu")
         extras_require_map["fbgemm-gpu"] = ["fbgemm-gpu-genai==1.3.0"]
@@ -5,6 +5,6 @@ import os
 from axolotl.logging_config import configure_logging
 
 os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
-os.environ.setdefault("HF_XET_HIGH_PERFORMANCE", "1")
+os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")
 
 configure_logging()
@@ -44,7 +44,7 @@ def check_user_token() -> bool:
         return bool(user_info)
     except LocalTokenNotFoundError:
         LOG.warning(
-            "Error verifying HuggingFace token. Remember to log in using `hf auth login` and get your access token from https://huggingface.co/settings/tokens if you want to use gated models or datasets."
+            "Error verifying HuggingFace token. Remember to log in using `huggingface-cli login` and get your access token from https://huggingface.co/settings/tokens if you want to use gated models or datasets."
         )
         return False
     except HTTPError:
@@ -24,6 +24,7 @@ def do_merge_lora(*, cfg: DictDefault) -> None:
         cfg: Dictionary mapping `axolotl` config keys to values.
     """
     model, tokenizer, processor = load_model_and_tokenizer(cfg=cfg)
+    safe_serialization = cfg.save_safetensors is True
 
     LOG.info("Running merge of LoRA with base model...")
     model = model.merge_and_unload(progressbar=True)
@@ -41,6 +42,7 @@ def do_merge_lora(*, cfg: DictDefault) -> None:
     LOG.info(f"Saving merged model to: {str(Path(cfg.output_dir) / 'merged')}...")
     model.save_pretrained(
         str(Path(cfg.output_dir) / "merged"),
+        safe_serialization=safe_serialization,
         progressbar=True,
     )
     tokenizer.save_pretrained(
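The two hunks above thread a `safe_serialization` flag from the axolotl config into `save_pretrained`. The strict `is True` check matters because an unset config key is `None`; a minimal sketch of the coercion, with a plain dict standing in for axolotl's `DictDefault`:

```python
# Sketch of the coercion used above; `cfg` is any dict-like config.
def resolve_safe_serialization(cfg: dict) -> bool:
    # Only an explicit `save_safetensors: true` selects safetensors output;
    # None and False both fall back to pickle-based .bin files.
    return cfg.get("save_safetensors") is True

assert resolve_safe_serialization({"save_safetensors": True}) is True
assert resolve_safe_serialization({"save_safetensors": None}) is False
assert resolve_safe_serialization({}) is False
```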
@@ -14,6 +14,8 @@ from accelerate import PartialState
 from accelerate.utils import (
     SAFE_WEIGHTS_INDEX_NAME,
     SAFE_WEIGHTS_NAME,
+    WEIGHTS_INDEX_NAME,
+    WEIGHTS_NAME,
     is_torch_version,
 )
 from huggingface_hub import split_torch_state_dict_into_shards
@@ -38,15 +40,17 @@ class BFloat16CastPlanner(_EmptyStateDictLoadPlanner):
 def _distributed_checkpoint_to_merged_weights(
     checkpoint_dir: Union[str, Path],
     save_path: str,
+    safe_serialization: bool = False,
     max_shard_size: str = "5GB",
 ) -> Path:
     """
     Passthrough to `torch.distributed.checkpoint.format_utils.dcp_to_torch_save`. Will
-    save under `save_path` as `model.safetensors`.
+    save under `save_path` as either `model.safetensors` or `pytorch_model.bin`.
 
     Args:
         checkpoint_dir: Directory where distributed checkpoint is saved.
         save_path: Path to save model to.
+        safe_serialization: Whether to save in safetensors format.
         max_shard_size: Max size of model shards to save.
 
     Returns:
@@ -72,7 +76,11 @@ def _distributed_checkpoint_to_merged_weights(
         if isinstance(value, torch.Tensor) and value.dtype != torch.bfloat16:
             state_dict[key] = value.to(torch.bfloat16)
 
-    filename_pattern = SAFE_WEIGHTS_NAME.replace(".safetensors", "{suffix}.safetensors")
+    weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
+
+    filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(
+        ".safetensors", "{suffix}.safetensors"
+    )
     state_dict_split = split_torch_state_dict_into_shards(
         state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size
     )
@@ -90,12 +98,19 @@ def _distributed_checkpoint_to_merged_weights(
 
     for shard_file, tensors in filename_to_tensors:
         shard = {tensor: state_dict[tensor] for tensor in tensors}
 
+        if safe_serialization:
             safe_save_file(
                 shard, os.path.join(save_path_, shard_file), metadata={"format": "pt"}
             )
+        else:
+            torch.save(shard, os.path.join(save_path_, shard_file))
+
     if index is not None:
-        save_index_file = os.path.join(save_path_, SAFE_WEIGHTS_INDEX_NAME)
+        save_index_file = (
+            SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME
+        )
+        save_index_file = os.path.join(save_path_, save_index_file)
         # Save the index as well
         with open(save_index_file, "w", encoding="utf-8") as fout:
             content = json.dumps(index, indent=2, sort_keys=True) + "\n"
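The shard-writing loop above now branches on `safe_serialization`. Condensed to its essence, assuming a plain state-dict shard (names here are illustrative, not from the diff):

```python
import os

import torch
from safetensors.torch import save_file as safe_save_file


def save_shard(shard: dict, save_dir: str, shard_file: str, safe_serialization: bool) -> None:
    """Write one state-dict shard in the format selected by the flag."""
    path = os.path.join(save_dir, shard_file)
    if safe_serialization:
        # safetensors stores a small metadata header alongside the tensors
        safe_save_file(shard, path, metadata={"format": "pt"})
    else:
        # pickle-based fallback matching the pytorch_model*.bin naming
        torch.save(shard, path)
```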
@@ -108,11 +123,13 @@ def _distributed_checkpoint_to_merged_weights(
 def merge_fsdp_weights(
     checkpoint_dir: str,
     output_path: str,
+    safe_serialization: bool = False,
     remove_checkpoint_dir: bool = False,
 ):
     """
     Merge the weights from sharded FSDP model checkpoints into a single combined checkpoint. Should be used if
-    `SHARDED_STATE_DICT` was used for the model. Weights will be saved to `{output_path}/model.safetensors`.
+    `SHARDED_STATE_DICT` was used for the model. Weights will be saved to `{output_path}/model.safetensors` if
+    `safe_serialization` else `pytorch_model.bin`.
 
     Note: this is a CPU-bound process.
 
@@ -121,6 +138,8 @@ def merge_fsdp_weights(
             The directory containing the FSDP checkpoints (can be either the model or optimizer).
         output_path (`str`):
             The path to save the merged checkpoint.
+        safe_serialization (`bool`, *optional*, defaults to `True`):
+            Whether to save the merged weights with safetensors (recommended).
         remove_checkpoint_dir (`bool`, *optional*, defaults to `False`):
             Whether to remove the checkpoint directory after merging.
 
@@ -158,7 +177,7 @@ def merge_fsdp_weights(
     if state.is_main_process:
         LOG.info(f"Merging FSDP weights from {checkpoint_dir_}")
         save_path = _distributed_checkpoint_to_merged_weights(
-            checkpoint_dir_, output_path
+            checkpoint_dir_, output_path, safe_serialization
         )
         LOG.info(f"Successfully merged FSDP weights and saved to {save_path}")
         if remove_checkpoint_dir:
@@ -191,6 +210,7 @@ def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs):
     merge_fsdp_weights(
         checkpoint_dir=str(fsdp_dir),
         output_path=output_path,
+        safe_serialization=True,
    )
     state = PartialState()
     state.wait_for_everyone()
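With the change above, the CLI path always merges to safetensors. A hedged sketch of calling the helper directly; the import path and checkpoint paths are assumptions for illustration, not taken from the diff:

```python
from axolotl.cli.merge_sharded_fsdp_weights import merge_fsdp_weights  # assumed module path

merge_fsdp_weights(
    checkpoint_dir="./outputs/out/checkpoint-500/pytorch_model_fsdp_0",  # placeholder
    output_path="./outputs/out/merged",
    safe_serialization=True,  # True writes model.safetensors; False writes pytorch_model.bin
    remove_checkpoint_dir=False,
)
```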
@@ -102,10 +102,12 @@ def do_quantize(
     LOG.info(f"Saving quantized model to: {str(Path(output_dir) / 'quantized')}.")
     model.save_pretrained(
         str(Path(output_dir) / "quantized"),
+        safe_serialization=False,
         progressbar=True,
     )
     tokenizer.save_pretrained(
         str(Path(output_dir) / "quantized"),
+        safe_serialization=False,
         progressbar=True,
         save_jinja_files=cfg.tokenizer_save_jinja_files,
     )
@@ -119,7 +121,7 @@ def do_quantize(
         hub_model_id.rstrip("-")
         + f"-{quantization_config_to_str[type(quantization_config)]}"
     )
-    model.push_to_hub(hub_model_id)
+    model.push_to_hub(hub_model_id, safe_serialization=False)
     tokenizer.push_to_hub(hub_model_id)
     if processor:
         processor.push_to_hub(hub_model_id)
@@ -216,7 +216,7 @@ class TrainerBuilderBase(abc.ABC):
     def _configure_warmup_and_logging(
         self, total_num_steps: int, training_args_kwargs: dict
     ):
-        warmup_steps: int | float = 0
+        warmup_steps = 0
         warmup_ratio = 0.0
         if self.cfg.warmup_steps is not None:
             warmup_steps = self.cfg.warmup_steps
@@ -230,10 +230,6 @@ class TrainerBuilderBase(abc.ABC):
         else:
             warmup_ratio = 0.03
 
-        # transformers v5
-        if warmup_ratio > 0.0 and warmup_steps == 0:
-            warmup_steps = warmup_ratio
-
         if warmup_steps == 1:
             warmup_steps = 2
 
@@ -246,6 +242,7 @@ class TrainerBuilderBase(abc.ABC):
             else max(min(int(0.005 * total_num_steps), 10), 1)
         )
 
+        training_args_kwargs["warmup_ratio"] = warmup_ratio
         training_args_kwargs["warmup_steps"] = warmup_steps
 
     def _configure_precision_settings(self, training_args_kwargs: dict):
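The deleted warmup block appears to be a transformers-v5 accommodation, where a fractional `warmup_steps` is read as a ratio. After this change both values are passed through separately, which matches the v4 `TrainingArguments` semantics where an explicit step count takes precedence. A sketch of that resolution, roughly mirroring `TrainingArguments.get_warmup_steps`:

```python
import math


def resolve_warmup_steps(num_training_steps: int, warmup_steps: int, warmup_ratio: float) -> int:
    # v4-style precedence: a positive step count wins; otherwise the ratio
    # is scaled by the total schedule length.
    if warmup_steps > 0:
        return warmup_steps
    return math.ceil(num_training_steps * warmup_ratio)


print(resolve_warmup_steps(1000, 0, 0.03))    # 30
print(resolve_warmup_steps(1000, 100, 0.03))  # 100
```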
@@ -409,9 +406,6 @@ class TrainerBuilderBase(abc.ABC):
         if self.cfg.hub_strategy:
             training_args_kwargs["hub_strategy"] = self.cfg.hub_strategy
 
-        if self.cfg.hub_revision:
-            training_args_kwargs["hub_revision"] = self.cfg.hub_revision
-
     def _configure_save_and_eval_strategy(self, training_args_kwargs: dict):
         # save_strategy and save_steps
         if self.cfg.save_steps:
@@ -536,7 +530,9 @@ class TrainerBuilderBase(abc.ABC):
             "loraplus_lr_ratio",
             "loraplus_lr_embedding",
             "output_dir",
+            "save_safetensors",
             "save_only_model",
+            "include_tokens_per_second",
             "weight_decay",
             "seed",
             "dion_momentum",
@@ -549,7 +545,6 @@ class TrainerBuilderBase(abc.ABC):
 
         arg_map = {
             "dion_learning_rate": "dion_lr",
-            "include_num_input_tokens_seen": "include_tokens_per_second",
         }
         for kwarg, cfg_arg in arg_map.items():
             if hasattr(self.cfg, cfg_arg) and getattr(self.cfg, cfg_arg) is not None:
@@ -246,8 +246,7 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             ddp_find_unused_parameters
         )
 
-        if self.cfg.group_by_length:
-            training_arguments_kwargs["train_sampling_strategy"] = "group_by_length"
+        training_arguments_kwargs["group_by_length"] = self.cfg.group_by_length
         training_arguments_kwargs["curriculum_sampling"] = self.cfg.curriculum_sampling
 
         training_arguments_kwargs["sample_packing"] = bool(self.cfg.sample_packing)
@@ -374,18 +373,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             # https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html
             data_collator_kwargs["pad_to_multiple_of"] = multiple
 
-        if self.cfg.use_eaft:
-            from functools import partial
-
-            from axolotl.monkeypatch.loss.eaft import eaft_loss
-
-            configured_eaft_loss = partial(
-                eaft_loss,
-                alpha=self.cfg.eaft_alpha if self.cfg.eaft_alpha is not None else 1.0,
-                k=self.cfg.eaft_k if self.cfg.eaft_k is not None else 20,
-            )
-            trainer_kwargs["compute_loss_func"] = configured_eaft_loss
-
         trainer_cls = self._get_trainer_cls()
 
         trainer_kwargs, trainer_cls = self.hook_pre_create_trainer(
@@ -450,9 +437,7 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             or self.cfg.micro_batch_size > 1
         ):
             return DataCollatorForSeq2Seq(self.tokenizer, **kwargs)
-        if not (self.cfg.sample_packing and self.cfg.pretrain_multipack_attn) or (
-            self.cfg.micro_batch_size == 1 and is_eval is False
-        ):
+        if not (self.cfg.sample_packing and self.cfg.pretrain_multipack_attn):
             return None
 
         if self.cfg.model_config_type == "mamba":
@@ -11,6 +11,7 @@ from axolotl.core.trainers import (
 )
 from axolotl.core.trainers.dpo import DPOStrategy
 from axolotl.core.trainers.dpo.args import AxolotlDPOConfig
+from axolotl.core.trainers.grpo import GRPOStrategy
 from axolotl.integrations.base import PluginManager
 from axolotl.loaders.utils import ensure_dtype
 from axolotl.utils.callbacks.qat import QATCallback
@@ -51,13 +52,12 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
         trainer_cls = None
         trainer_cls_args = [self.model]
 
-        if self.cfg.rl in {RLType.GRPO, RLType.GDPO}:
-            from axolotl.core.trainers.grpo import GRPOStrategy
+        if self.cfg.rl is RLType.GRPO:
 
             trainer_cls = GRPOStrategy.get_trainer_class(
                 sequence_parallel=self.cfg.context_parallel_size > 1
             )
             trainer_cls_args.extend(GRPOStrategy.set_trainer_args(self.cfg))
 
             trainer_kwargs.update(GRPOStrategy.set_trainer_kwargs(self.cfg))
 
         elif self.cfg.rl in [RLType.DPO, RLType.IPO]:
@@ -134,17 +134,19 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
             if self.cfg.cpo_alpha is not None:
                 training_args_kwargs["cpo_alpha"] = self.cfg.cpo_alpha
 
-            blocklist_args_kwargs.append("max_prompt_length")
+            # Handle when max_prompt_length == max_length from defaults
+            # CPOTrainer requires strictly less than
+            if (
+                training_args_kwargs["max_prompt_length"]
+                == training_args_kwargs["max_length"]
+            ):
+                training_args_kwargs["max_prompt_length"] -= 1
 
         elif self.cfg.rl is RLType.ORPO:
             training_args_cls = AxolotlORPOConfig
 
-            blocklist_args_kwargs.append("max_prompt_length")
-
         elif self.cfg.rl is RLType.KTO:
             training_args_cls = AxolotlKTOConfig
-            # KTOConfig in TRL >= 0.27.0 no longer accepts max_prompt_length
-            blocklist_args_kwargs.append("max_prompt_length")
 
             training_args_kwargs["desirable_weight"] = (
                 self.cfg.kto_desirable_weight or 1.0
@@ -153,16 +155,10 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
                 self.cfg.kto_undesirable_weight or 1.0
             )
 
-        elif self.cfg.rl in {RLType.GRPO, RLType.GDPO}:
-            from axolotl.core.trainers.grpo import GRPOStrategy
-
+        elif self.cfg.rl is RLType.GRPO:
             training_args_cls = GRPOStrategy.get_training_args_class()
             training_args_kwargs.update(GRPOStrategy.set_training_args_kwargs(self.cfg))
             blocklist_args_kwargs = GRPOStrategy.get_blocklist_args_kwargs()
-            if self.cfg.rl is RLType.GDPO:
-                training_args_kwargs.setdefault(
-                    "multi_objective_aggregation", "normalize_then_sum"
-                )
 
         elif self.cfg.rl in [RLType.DPO, RLType.IPO]:
             training_args_cls = AxolotlDPOConfig
@@ -25,7 +25,7 @@ from torch.utils.data import (
 from transformers import PreTrainedModel, Trainer
 from transformers.trainer import TRAINING_ARGS_NAME
 from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, has_length, seed_worker
-from transformers.utils import SAFE_WEIGHTS_NAME, is_peft_available
+from transformers.utils import SAFE_WEIGHTS_NAME, WEIGHTS_NAME, is_peft_available
 from trl.trainer.utils import pad_to_length
 from typing_extensions import override
 
@@ -719,13 +719,6 @@ class AxolotlTrainer(
         output_dir = output_dir if output_dir is not None else self.args.output_dir
         os.makedirs(output_dir, exist_ok=True)
         LOG.info(f"Saving model checkpoint to {output_dir}")
-        if state_dict is None:
-            state_dict = self.accelerator.get_state_dict(self.model)
-        if state_dict is not None:
-            state_dict = {
-                k: v.clone() if isinstance(v, torch.Tensor) else v
-                for k, v in state_dict.items()
-            }
         supported_classes = (
             (PreTrainedModel,)
             if not is_peft_available()
@@ -745,20 +738,25 @@ class AxolotlTrainer(
             ).save_pretrained(
                 output_dir,
                 state_dict=state_dict,
+                safe_serialization=self.args.save_safetensors,
             )
             else:
                 LOG.info(
                     "Trainer.model is not a `PreTrainedModel`, only saving its state dict."
                 )
+                if self.args.save_safetensors:
                     safetensors.torch.save_file(
                         state_dict,
                         os.path.join(output_dir, SAFE_WEIGHTS_NAME),
                         metadata={"format": "pt"},
                     )
+                else:
+                    torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
         else:
             self.model.save_pretrained(
                 output_dir,
                 state_dict=state_dict,
+                safe_serialization=self.args.save_safetensors,
                 is_main_process=self.accelerator.is_main_process,
             )
 
@@ -57,18 +57,16 @@ class AxolotlDPOTrainer(
     def tokenize_row(
         features,
         processing_class,
-        max_prompt_length: int | None = None,
-        max_completion_length: int | None = None,
-        add_special_tokens: bool = True,
-        is_chat: bool = False,
+        max_prompt_length,
+        max_completion_length,
+        add_special_tokens,
     ) -> Dict:
         res = DPOTrainer.tokenize_row(
             features,
             processing_class,
-            max_prompt_length=max_prompt_length,
-            max_completion_length=max_completion_length,
-            add_special_tokens=add_special_tokens,
-            is_chat=is_chat,
+            max_prompt_length,
+            max_completion_length,
+            add_special_tokens,
         )
         # fix when the tokenizer doesn't have a bos_token_id, e.g. Qwen
         if processing_class.bos_token is None and res["prompt_input_ids"][0] is None:
@@ -126,10 +126,8 @@ class GRPOStrategy:
         if trl.use_liger_loss is not None:
             grpo_args_kwargs["use_liger_loss"] = trl.use_liger_loss
 
-        if trl.multi_objective_aggregation is not None:
-            grpo_args_kwargs["multi_objective_aggregation"] = (
-                trl.multi_objective_aggregation
-            )
+        if trl.rollout_func:
+            grpo_args_kwargs["rollout_func"] = cls.get_rollout_func(trl.rollout_func)
 
         return grpo_args_kwargs
 
@@ -151,8 +149,6 @@ class GRPOStrategy:
             trainer_kwargs["reward_processing_classes"] = (
                 cfg.trl.reward_processing_classes
             )
-        if cfg.trl and cfg.trl.rollout_func:
-            trainer_kwargs["rollout_func"] = cls.get_rollout_func(cfg.trl.rollout_func)
 
         return trainer_kwargs
 
@@ -163,12 +159,7 @@ class GRPOStrategy:
 
     @classmethod
     def get_blocklist_args_kwargs(cls) -> list[str]:
-        return [
-            "dataset_num_proc",
-            "max_length",
-            "include_tokens_per_second",
-            "max_prompt_length",
-        ]
+        return ["dataset_num_proc", "max_length", "include_tokens_per_second"]
 
     @classmethod
     def get_reward_func(cls, reward_func_fqn: str) -> RewardFunc:
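`get_blocklist_args_kwargs` names TrainingArguments keys the GRPO config class does not accept. The builder presumably filters its kwargs against this list along these lines (an illustrative sketch; the filtering code itself is outside these hunks):

```python
def filter_blocked_kwargs(kwargs: dict, blocklist: list[str]) -> dict:
    # Drop keys the target TRL config constructor would reject.
    return {key: value for key, value in kwargs.items() if key not in blocklist}


blocklist = ["dataset_num_proc", "max_length", "include_tokens_per_second"]
print(filter_blocked_kwargs({"max_length": 2048, "learning_rate": 2e-5}, blocklist))
# {'learning_rate': 2e-05}
```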
@@ -104,7 +104,7 @@ class OptimizerMixin(Trainer):
 
         return optimizer_grouped_parameters
 
-    def create_optimizer(self, model=None):
+    def create_optimizer(self):
         if (
             self.args.loraplus_lr_ratio is None
             and self.args.embedding_lr_scale is None
@@ -112,9 +112,9 @@ class OptimizerMixin(Trainer):
             and self.args.lr_groups is None
             and self.optimizer_cls_and_kwargs is None
         ):
-            return super().create_optimizer(model=model)
+            return super().create_optimizer()
 
-        opt_model = self.model if model is None else model
+        opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
 
         if (
             not self.optimizer
@@ -1,10 +1,12 @@
 """Module for TRL RL trainers"""
 
-from trl import RewardTrainer
-from trl.experimental.cpo import CPOTrainer
-from trl.experimental.kto import KTOTrainer
-from trl.experimental.orpo import ORPOTrainer
-from trl.experimental.prm import PRMTrainer
+from trl import (
+    CPOTrainer,
+    KTOTrainer,
+    ORPOTrainer,
+    PRMTrainer,
+    RewardTrainer,
+)
 
 from axolotl.core.trainers.mixins import DistributedParallelMixin, RngLoaderMixin
 from axolotl.core.trainers.mixins.optimizer import OptimizerInitMixin, OptimizerMixin
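The import rewrites in this file and the next track TRL relocating the CPO/KTO/ORPO/PRM trainers and configs under `trl.experimental` in newer releases, while older releases export them from the top-level package. Code that must tolerate both layouts can guard the import; a sketch, not code from this branch:

```python
try:
    # newer TRL: relocated under trl.experimental
    from trl.experimental.cpo import CPOConfig, CPOTrainer
except ImportError:
    # older TRL (e.g. the 0.25.x pin in this branch): top-level exports
    from trl import CPOConfig, CPOTrainer
```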
@@ -8,11 +8,7 @@ from dataclasses import dataclass, field
 from typing import Optional, Type
 
 from transformers import TrainingArguments
-from trl import RewardConfig
-from trl.experimental.cpo import CPOConfig
-from trl.experimental.kto import KTOConfig
-from trl.experimental.orpo import ORPOConfig
-from trl.experimental.prm import PRMConfig
+from trl import CPOConfig, KTOConfig, ORPOConfig, PRMConfig, RewardConfig
 
 from axolotl.integrations.config import merge_training_args
 
@@ -19,7 +19,7 @@ python scripts/cutcrossentropy_install.py | sh
 
 - If you are installing from pip
 ```bash
-pip3 uninstall -y cut-cross-entropy && pip3 install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@0d4ce4b"
+pip3 uninstall -y cut-cross-entropy && pip3 install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@318b7e2"
 ```
 
 ## Usage
@@ -36,7 +36,6 @@ plugins:
 - cohere
 - cohere2
 - deepseek_v3
-- exaone4
 - gemma
 - gemma2
 - gemma3
@@ -46,16 +45,13 @@ plugins:
 - glm
 - glm4
 - glm4_moe
-- glm4_moe_lite
-- glm46v
 - glm4v
 - glm4v_moe
-- glm_image
 - gpt_oss
 - granite
 - granitemoe
-- granitemoehybrid
 - granitemoeshared
+- granitemoehybrid
 - hunyuan_v1_dense
 - hunyuan_v1_moe
 - internvl
@@ -80,17 +76,16 @@ plugins:
 - phi3
 - phi4_multimodal
 - qwen2
-- qwen2_moe
 - qwen2_vl
+- qwen2_moe
 - qwen2_5_vl
 - qwen3
 - qwen3_moe
-- qwen3_next
 - qwen3_vl
 - qwen3_vl_moe
-- seed_oss
+- qwen3_next
 - smollm3
-- step3p5
+- seed_oss
 - voxtral
 
 ## Citation
@@ -35,7 +35,7 @@ LOG = get_logger(__name__)
 
 _CCE_INSTALL_MESSAGE = (
     "Please install Axolotl's fork of cut_cross_entropy with transformers support using "
-    '`pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@0d4ce4b"`'
+    '`pip install "cut-cross-entropy[transformers] @ git+https://github.com/axolotl-ai-cloud/ml-cross-entropy.git@318b7e2"`'
 )
 
 
@@ -104,7 +104,7 @@ class CutCrossEntropyPlugin(BasePlugin):
 
     def patch_llama_like(
         self,
-        model_type_to_patch: str,
+        model_type: str,
     ) -> None:
         """
         Generic patch for model architectures with causal lm similar to llama
@@ -112,10 +112,7 @@ class CutCrossEntropyPlugin(BasePlugin):
         from cut_cross_entropy.transformers.patch import PATCH_FNS
 
         def patch_generic(
-            maybe_model,
-            patch_options,
-            remote_model_id: str | None,
-            model_type: str,
+            maybe_model, patch_options, model_type: str, remote_model_id: str | None
         ):
             import cut_cross_entropy.transformers.llama
             from cut_cross_entropy.transformers.llama import cce_forward
@@ -139,13 +136,11 @@ class CutCrossEntropyPlugin(BasePlugin):
                 f"Error: {str(e)}"
             ) from e
 
-        if model_type_to_patch not in PATCH_FNS:
+        if model_type not in PATCH_FNS:
             LOG.warning_once(
-                "Setting up generic cce patch for model type: %s", model_type_to_patch
+                "Setting up generic cce patch for model type: %s", model_type
             )
             LOG.warning_once(
-                f"Generic Cut Cross Entropy + {model_type_to_patch} support is experimental and may not work as expected."
-            )
-            PATCH_FNS[model_type_to_patch] = partial(
-                patch_generic, model_type=model_type_to_patch
-            )
+                f"Generic Cut Cross Entropy + {model_type} support is experimental and may not work as expected."
+            )
+            PATCH_FNS[model_type] = partial(patch_generic, model_type=model_type)
@@ -1,44 +0,0 @@
-# Kernels Integration
-
-MoE (Mixture of Experts) kernels speed up training for MoE layers and reduce VRAM costs. In transformers v5, `batched_mm` and `grouped_mm` were integrated as built-in options via the `experts_implementation` config kwarg:
-
-```python
-class ExpertsInterface(GeneralInterface):
-    _global_mapping = {
-        "batched_mm": batched_mm_experts_forward,
-        "grouped_mm": grouped_mm_experts_forward,
-    }
-```
-
-In our custom integration, we add support for **ScatterMoE**, which is even more efficient and faster than `grouped_mm`.
-
-## Usage
-
-Add the following to your axolotl YAML config:
-
-```yaml
-plugins:
-  - axolotl.integrations.kernels.KernelsPlugin
-
-use_kernels: true
-use_scattermoe: true
-```
-
-**Important:** Setting `experts_implementation` is incompatible with `use_scattermoe`.
-
-## How It Works
-
-The `KernelsPlugin` runs before model loading and:
-
-1. Registers the ScatterMoE kernel from the [`axolotl-ai-co/scattermoe`](https://huggingface.co/axolotl-ai-co/scattermoe) Hub repo.
-2. Patches the model's `SparseMoeBlock` forward method with the optimized ScatterMoE implementation.
-
-This works for any MoE model in transformers that uses a `SparseMoeBlock` class (Mixtral, Qwen2-MoE, OLMoE, etc.).
-
-## Limitations
-
-ScatterMoE uses softmax -> topk routing, so results may differ from the baseline for some model architectures (GPT-OSS, GLM_MOE_DSA).
-
-## Note on MegaBlocks
-
-We tested [MegaBlocks](https://huggingface.co/kernels-community/megablocks) but were unable to ensure numerical accuracy, so we did not integrate it. It was also incompatible with many newer model architectures in transformers.
@@ -1,7 +0,0 @@
-from .args import KernelsArgs
-from .plugin import KernelsPlugin
-
-__all__ = [
-    "KernelsArgs",
-    "KernelsPlugin",
-]
@@ -1,35 +0,0 @@
-from pydantic import BaseModel, model_validator
-
-from axolotl.utils.logging import get_logger
-
-LOG = get_logger(__name__)
-
-
-class KernelsArgs(BaseModel):
-    use_scattermoe: bool | None = True
-
-    @model_validator(mode="before")
-    @classmethod
-    def check_use_kernels(cls, data):
-        if data.get("use_kernels") is not True:
-            LOG.warning(
-                "`use_kernels` must be set to True to use this. Automatically setting it to True."
-            )
-            data["use_kernels"] = True
-
-        return data
-
-    @model_validator(mode="before")
-    @classmethod
-    def check_experts_implementation(cls, data):
-        experts_implementation = data.get("experts_implementation")
-        if experts_implementation is None:
-            # transformers may default to batched_mm when unset
-            data["experts_implementation"] = "eager"
-        elif experts_implementation != "eager":
-            LOG.warning(
-                "`experts_implementation` must be set to 'eager' to use this. Automatically setting it to 'eager'."
-            )
-            data["experts_implementation"] = "eager"
-
-        return data
@@ -1,61 +0,0 @@
-from kernels import (
-    LayerRepository,
-    Mode,
-    register_kernel_mapping,
-    replace_kernel_forward_from_hub,
-)
-
-from axolotl.integrations.base import BasePlugin
-from axolotl.utils.callbacks.models import get_causal_lm_model_cls_prefix
-
-
-class KernelsPlugin(BasePlugin):
-    def get_input_args(self):
-        return "axolotl.integrations.kernels.KernelsArgs"
-
-    def pre_model_load(self, cfg):
-        if cfg.use_scattermoe:
-            self._register_kernels()
-            self._kernelize_model(cfg.model_config_type)
-
-    def _register_kernels(self):
-        register_kernel_mapping(
-            {
-                "HFScatterMoEParallelExperts": {
-                    "cuda": {
-                        Mode.TRAINING: LayerRepository(
-                            repo_id="axolotl-ai-co/scattermoe",
-                            layer_name="HFScatterMoEGatedMLP",
-                        ),
-                        Mode.INFERENCE: LayerRepository(
-                            repo_id="axolotl-ai-co/scattermoe",
-                            layer_name="HFScatterMoEGatedMLP",
-                        ),
-                    },
-                }
-            }
-        )
-
-    def _kernelize_model(self, model_type: str):
-        if model_type == "olmoe":
-            from transformers.models.olmoe.modeling_olmoe import OlmoeSparseMoeBlock
-
-            replace_kernel_forward_from_hub(
-                OlmoeSparseMoeBlock, "HFScatterMoEParallelExperts"
-            )
-        else:
-            try:
-                model_moe_cls = get_model_moe_block(model_type)
-                replace_kernel_forward_from_hub(
-                    model_moe_cls, "HFScatterMoEParallelExperts"
-                )
-            except Exception as err:
-                raise ValueError(f"Unsupported model type: {model_type}") from err
-
-
-def get_model_moe_block(model_type: str):
-    module_path = f"transformers.models.{model_type}.modeling_{model_type}"
-    model_cls_prefix, _ = get_causal_lm_model_cls_prefix(model_type)
-    module = __import__(module_path, fromlist=[f"{model_cls_prefix}SparseMoeBlock"])
-    model_cls = getattr(module, f"{model_cls_prefix}SparseMoeBlock")
-    return model_cls
@@ -12,6 +12,7 @@ def save_compressed_model(
     model: PreTrainedModel,
     output_dir: Union[str, bytes],
     trainer: Trainer,
+    safe_serialization: bool = False,
     save_compressed: bool = False,
 ) -> None:
     """
@@ -21,6 +22,7 @@ def save_compressed_model(
         model (PreTrainedModel): The model to be saved.
         output_dir (str or bytes): Path where the model files will be written.
         trainer (Trainer): Hugging Face Trainer for process synchronization.
+        safe_serialization (bool): Use safe serialization if True.
         save_compressed (bool): Write compressed tensors if True.
     """
     trainer.accelerator.wait_for_everyone()
@@ -32,6 +34,7 @@ def save_compressed_model(
     modify_save_pretrained(model)
     model.save_pretrained(
         output_dir,
+        safe_serialization=safe_serialization,
         save_compressed=save_compressed,
         skip_sparsity_compression_stats=not save_compressed,
    )
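A hedged usage sketch for the extended signature above; the import path is an assumption rather than something shown in the diff, and `model`/`trainer` come from an existing training run:

```python
# Illustrative only -- the module path below is assumed, not from this diff.
from axolotl.utils.llmcompressor import save_compressed_model  # hypothetical path

save_compressed_model(
    model=model,                # a loaded PreTrainedModel
    output_dir="./outputs/quantized",
    trainer=trainer,            # used for accelerator synchronization
    safe_serialization=False,   # compressed checkpoints default to .bin here
    save_compressed=True,
)
```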
@@ -6,12 +6,6 @@ See https://github.com/EleutherAI/lm-evaluation-harness
 
 ## Usage
 
-There are two ways to use the LM Eval integration:
-
-### 1. Post-Training Evaluation
-
-When training with the plugin enabled, evaluation runs automatically after training completes:
-
 ```yaml
 plugins:
   - axolotl.integrations.lm_eval.LMEvalPlugin
@@ -22,50 +16,9 @@ lm_eval_tasks:
   - arc_easy
 
 lm_eval_batch_size: # Batch size for evaluation
-# Directory to save evaluation results.
-# The final model is loaded from this directory
-# unless specified otherwise (see below)
-output_dir:
+output_dir: # Directory to save evaluation results
 ```
 
-Run training as usual:
-```bash
-axolotl train config.yml
-```
-
-### 2. Standalone CLI Evaluation
-
-Evaluate any model directly without training:
-
-```yaml
-lm_eval_model: meta-llama/Llama-2-7b-hf
-
-plugins:
-  - axolotl.integrations.lm_eval.LMEvalPlugin
-
-lm_eval_tasks:
-  - gsm8k
-  - hellaswag
-  - arc_easy
-
-lm_eval_batch_size: 8
-output_dir: ./outputs
-```
-
-Run evaluation:
-```bash
-axolotl lm-eval config.yml
-```
-
-## Model Selection Priority
-
-The model to evaluate is selected in the following priority order:
-
-1. **`lm_eval_model`** - Explicit model path or HuggingFace repo (highest priority)
-2. **`hub_model_id`** - Trained model pushed to HuggingFace Hub
-3. **`output_dir`** - Local checkpoint directory containing trained model weights
-
 ## Citation
 
 ```bib
@@ -5,7 +5,7 @@ Module for the Plugin for LM Eval Harness
 import subprocess  # nosec
 
 from axolotl.integrations.base import BasePlugin
-from axolotl.integrations.lm_eval.cli import build_lm_eval_command, get_model_path
+from axolotl.integrations.lm_eval.cli import build_lm_eval_command
 
 from .args import LMEvalArgs as LMEvalArgs
 
@@ -29,7 +29,7 @@ class LMEvalPlugin(BasePlugin):
             wandb_project=cfg.wandb_project,
             wandb_entity=cfg.wandb_entity,
             wandb_name=cfg.wandb_name,
-            model=get_model_path(cfg),
+            model=cfg.lm_eval_model or cfg.hub_model_id,
         ):
             subprocess.run(  # nosec
                 lm_eval_args,
@@ -13,21 +13,6 @@ import yaml
 from axolotl.utils.dict import DictDefault
 
 
-def get_model_path(cfg: DictDefault) -> str | None:
-    """
-    Determine which model path to use for evaluation.
-
-    Priority order (highest to lowest):
-    1. lm_eval_model - Explicit model path override
-    2. hub_model_id - Model pushed to HuggingFace Hub
-    3. None - Falls back to output_dir in build_lm_eval_command
-
-    Returns:
-        Model path string or None to use output_dir fallback
-    """
-    return cfg.lm_eval_model or cfg.hub_model_id or None
-
-
 def build_lm_eval_command(
     tasks: list[str],
     bfloat16=True,
@@ -123,7 +108,7 @@ def lm_eval(config: str, cloud: Optional[str] = None):
         wandb_project=cfg.wandb_project,
         wandb_entity=cfg.wandb_entity,
         wandb_name=cfg.wandb_name,
-        model=get_model_path(cfg),
+        model=cfg.lm_eval_model or cfg.hub_model_id,
         revision=cfg.revision,
         apply_chat_template=cfg.apply_chat_template,
         fewshot_as_multiturn=cfg.fewshot_as_multiturn,
@@ -26,6 +26,7 @@ from torch.distributed import DeviceMesh
 from transformers import (
     AutoModelForCausalLM,
     AutoModelForImageTextToText,
+    AutoModelForVision2Seq,
     AwqConfig,
     BitsAndBytesConfig,
     GPTQConfig,
@@ -225,7 +226,6 @@ class ModelLoader:
         ):
             self.model = self.model.merge_and_unload()
 
-        self._configure_experts_implementation()
         self._apply_activation_checkpointing()
         self._resize_token_embeddings()
         self._adjust_model_config()
@@ -233,10 +233,6 @@ class ModelLoader:
         self._configure_qat()
         log_gpu_memory_usage(LOG, "Memory usage after model load", 0)
 
-    def _configure_experts_implementation(self):
-        if self.cfg.experts_implementation is not None:
-            self.model.set_experts_implementation(self.cfg.experts_implementation)
-
     def _apply_activation_checkpointing(self):
         if self.cfg.activation_offloading is True:
             from axolotl.core.trainers.mixins.activation_checkpointing import (
@@ -338,12 +334,7 @@ class ModelLoader:
             # LlamaRMSNorm layers are in fp32 after kbit_training or full finetune, so
             # we need to convert them back to fp16/bf16 for flash-attn compatibility.
             (
-                (
-                    needs_fa2_dtype
-                    or self.cfg.flash_attention
-                    or self.cfg.flex_attention
-                    or self.cfg.sage_attention
-                )
+                (needs_fa2_dtype or self.cfg.flash_attention or self.cfg.flex_attention)
                 and not self.is_qlora_and_fsdp_enabled
             )
             or (
@@ -443,7 +434,7 @@ class ModelLoader:
         """
         if self.cfg.is_multimodal:
             self.auto_model_loader = MULTIMODAL_AUTO_MODEL_MAPPING.get(
-                self.model_config.model_type, AutoModelForImageTextToText
+                self.model_config.model_type, AutoModelForVision2Seq
             )
             if isinstance(self.auto_model_loader, str):
                 self.auto_model_loader = AutoModelForImageTextToText
@@ -485,7 +476,6 @@ class ModelLoader:
         max_memory = None
 
         self.model_kwargs["torch_dtype"] = self.cfg.torch_dtype
-        self.model_kwargs["dtype"] = self.cfg.torch_dtype
 
         is_ds_zero3 = is_deepspeed_zero3_enabled()
 
@@ -617,10 +607,6 @@ class ModelLoader:
         elif self.cfg.sdp_attention:
             self.model_kwargs["attn_implementation"] = "sdpa"
             self.model_config._attn_implementation = "sdpa"
-        elif self.cfg.sage_attention:
-            # sets FA2 attention to re-use same internal handling like masking
-            self.model_kwargs["attn_implementation"] = "flash_attention_2"
-            self.model_config._attn_implementation = "flash_attention_2"
         elif self.cfg.eager_attention:
             self.model_kwargs["attn_implementation"] = "eager"
             self.model_config._attn_implementation = "eager"
@@ -684,7 +670,7 @@ class ModelLoader:
         Uses the selected loader when provided; otherwise falls back to the auto loader.
         """
         loader = model_loader_class or self.auto_model_loader
-        if loader in [AutoModelForCausalLM, AutoModelForImageTextToText]:
+        if loader in [AutoModelForCausalLM, AutoModelForVision2Seq]:
             model = loader.from_config(
                 config=self.model_config,
                 trust_remote_code=self.cfg.trust_remote_code or False,
@@ -802,7 +788,6 @@ class ModelLoader:
         # Use auto model loader (handles gptq and default cases)
         model_loader_class = self.auto_model_loader
 
-        self.model_kwargs["dtype"] = self.model_kwargs["torch_dtype"]
         if self.cfg.reinit_weights:
             self.model = self._load_model_from_config(model_loader_class)
         else:
@@ -10,7 +10,6 @@ from functools import cached_property
 import addict
 import transformers
 from transformers import PretrainedConfig, PreTrainedModel
-from transformers.modeling_flash_attention_utils import is_flash_attn_available

 from axolotl.integrations.base import PluginManager
 from axolotl.monkeypatch.multipack import (
@@ -97,7 +96,6 @@ class PatchManager:
         # self._apply_flex_attention_patches()
         self._apply_flash_attention_patches()
         self._apply_chunked_cross_entropy_patch()
-        self._apply_sageattn_patches()
         self._apply_fsdp_patches()
         self._apply_adapter_patches()
         self._apply_model_specific_patches()
@@ -203,13 +201,6 @@ class PatchManager:
            flex_attn_compile_kwargs = self.cfg.flex_attn_compile_kwargs or {}
            patch_flex_wrapper(**flex_attn_compile_kwargs)

-    def _apply_sageattn_patches(self):
-        """Apply patches for SageAttention."""
-        if self.cfg.sage_attention:
-            from axolotl.monkeypatch.attention.sage_attn import patch_sageattn
-
-            patch_sageattn()
-
     def _apply_model_specific_patches(self):
         """Apply patches specific to model architectures."""
         if (
@@ -229,6 +220,13 @@ class PatchManager:

            patch_qwen3_next_modeling_packing()

+        if self.cfg.model_config_type == "mistral3" and self.cfg.processor_type:
+            from axolotl.monkeypatch.models.mistral3.mistral_common_tokenizer import (
+                apply_mistral_tokenizer_image_patch,
+            )
+
+            apply_mistral_tokenizer_image_patch()
+
         if self.cfg.model_config_type == "kimi_linear":
             from axolotl.monkeypatch.models.kimi_linear.patch_kimi_linear import (
                 patch_kimi_model,
@@ -501,7 +499,6 @@ class PatchManager:
            and not self.cfg.trust_remote_code
            and not self.cfg.gptq
            and self.cfg.flash_attention
-           and is_flash_attn_available()
            and not self.inference
        ):
            # TODO(MengqingCao): split these patches separately
@@ -31,7 +31,7 @@ def load_processor(cfg: DictDefault, tokenizer: PreTrainedTokenizerBase):

        from axolotl.utils.mistral import HFMistralTokenizer

-       tokenization_mistral_common.MistralCommonBackend = HFMistralTokenizer
+       tokenization_mistral_common.MistralCommonTokenizer = HFMistralTokenizer

        _patch_mistralcommontokenizer()

@@ -111,6 +111,7 @@ class MambaLMHeadModel(nn.Module, GenerationMixin):
         self,
         save_directory: Union[str, os.PathLike],
         state_dict: Optional[dict] = None,
+        safe_serialization: Optional[bool] = None,
     ):
         if state_dict is None:
             state_dict = self.state_dict()
@@ -1,211 +0,0 @@
-"""
-Monkeypatch for SageAttention for use with transformers.
-
-https://github.com/thu-ml/SageAttention/
-"""
-
-import torch
-from transformers.integrations.sdpa_attention import repeat_kv
-
-from axolotl.utils.logging import get_logger
-
-LOG = get_logger(__name__)
-
-sageattn = None  # pylint: disable=invalid-name
-sageattn_varlen = None  # pylint: disable=invalid-name
-
-
-def _is_sageattn_available():
-    """Determine if SageAttention is available"""
-    try:
-        import sageattention  # noqa: F401 # pylint: disable=unused-import
-
-        return True
-    except ImportError:
-        return False
-
-
-if _is_sageattn_available():
-    # import sageattn here if available
-    from sageattention import sageattn, sageattn_varlen
-
-
-def _check_sageattn_imported():
-    """Check if SageAttention is imported. Raises an ImportError if not."""
-    if sageattn is None:
-        raise ImportError(
-            "SageAttention is not installed. Please install it from source: "
-            "`pip install git+https://github.com/thu-ml/SageAttention.git@1718ddc06dbc694bcf3c6b49ac28c1921aa2d8bd`"
-        )
-
-
-def sage_attention_forward(
-    module: torch.nn.Module,
-    query: torch.Tensor,
-    key: torch.Tensor,
-    value: torch.Tensor,
-    attention_mask: torch.Tensor | None = None,
-    dropout: float = 0.0,
-    scaling: float | None = None,
-    is_causal: bool | None = None,
-    **kwargs,
-) -> tuple[torch.Tensor, None]:
-    """
-    Forward pass for SageAttention compatible with transformers attention interfaces.
-
-    https://github.com/thu-ml/SageAttention/
-    """
-    _check_sageattn_imported()
-
-    if kwargs.get("output_attentions", False) or kwargs.get("head_mask") is not None:
-        raise NotImplementedError(
-            "SageAttention does not support `output_attentions=True` or `head_mask`."
-        )
-
-    # The base sageattn API does not support dropout.
-    if dropout > 0.0:
-        raise NotImplementedError("SageAttention does not support dropout.")
-
-    # Handle Grouped-Query Attention (GQA) and Multi-Query Attention (MQA)
-    if hasattr(module, "num_key_value_groups"):
-        key = repeat_kv(key, module.num_key_value_groups)
-        value = repeat_kv(value, module.num_key_value_groups)
-
-    # Calculate is_causal following transformers
-    assert is_causal is not False, "is_causal must be True or None"
-    is_causal = True
-
-    position_ids = kwargs.get("position_ids", None)
-    query_length = query.shape[2]
-
-    cu_seqlens_q = kwargs.get("cu_seqlens_q", None)
-    cu_seqlens_k = kwargs.get("cu_seqlens_k", None)
-    max_length_q = kwargs.get("max_length_q", None)
-    max_length_k = kwargs.get("max_length_k", None)
-
-    # Sample packing uses position_ids, so we check for it first
-    if position_ids is not None and (
-        max_length_q is not None
-        or (query_length != 1 and not (torch.diff(position_ids, dim=-1) >= 0).all())
-    ):
-        # transpose inputs to NHD layout for use with FA2 utils
-        query = query.transpose(1, 2)
-        key = key.transpose(1, 2)
-        value = value.transpose(1, 2)
-
-        batch_size = query.size(0)
-
-        from transformers.modeling_flash_attention_utils import (
-            prepare_fa2_from_position_ids,
-        )
-
-        if cu_seqlens_q is None or cu_seqlens_k is None:
-            query, key, value, indices_q, cu_seq_lens, max_seq_lens = (
-                prepare_fa2_from_position_ids(query, key, value, position_ids)
-            )
-
-            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
-            max_length_q, max_length_k = max_seq_lens
-
-        else:
-            query = query.reshape(-1, query.size(-2), query.size(-1))
-            key = key.reshape(-1, key.size(-2), key.size(-1))
-            value = value.reshape(-1, value.size(-2), value.size(-1))
-
-        attn_output_unpad = sageattn_varlen(
-            q=query,
-            k=key,
-            v=value,
-            cu_seqlens_q=cu_seqlens_q,
-            cu_seqlens_k=cu_seqlens_k,
-            max_seqlen_q=max_length_q,
-            max_seqlen_k=max_length_k,
-            is_causal=is_causal,
-            sm_scale=scaling,
-            smooth_k=False,  # reduces loss 0 / nan grad norms
-            tensor_layout="NHD",
-        )
-
-        attn_output = attn_output_unpad.view(
-            batch_size, -1, attn_output_unpad.size(-2), attn_output_unpad.size(-1)
-        )
-
-    elif attention_mask is not None:
-        # NOTE: When used without `pad_to_sequence_len`, the loss becomes unstable after a few steps.
-
-        assert attention_mask.ndim == 2, "Attention mask must be 2D"
-
-        from transformers.modeling_flash_attention_utils import (
-            _upad_input,
-        )
-
-        # transpose inputs to NHD layout for use with FA2 utils
-        query = query.transpose(1, 2)
-        key = key.transpose(1, 2)
-        value = value.transpose(1, 2)
-
-        batch_size = query.shape[0]
-
-        query, key, value, indices_q, cu_seq_lens, max_seq_lens = _upad_input(
-            query, key, value, attention_mask, query_length
-        )
-        cu_seqlens_q, cu_seqlens_k = cu_seq_lens
-        max_seqlen_q, max_seqlen_k = max_seq_lens
-
-        attn_output_unpad = sageattn_varlen(
-            q=query,
-            k=key,
-            v=value,
-            cu_seqlens_q=cu_seqlens_q,
-            cu_seqlens_k=cu_seqlens_k,
-            max_seqlen_q=max_seqlen_q,
-            max_seqlen_k=max_seqlen_k,
-            is_causal=is_causal,
-            sm_scale=scaling,
-            tensor_layout="NHD",
-        )
-
-        from flash_attn.bert_padding import pad_input
-
-        attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
-    else:
-        # Use standard sageattn
-        # The input layout for transformers models is (batch_size, num_heads, seq_len, head_dim),
-        # which corresponds to SageAttention's "HND" layout.
-        attn_output = sageattn(
-            q=query,
-            k=key,
-            v=value,
-            tensor_layout="HND",
-            is_causal=is_causal,
-            sm_scale=scaling,
-        )
-
-    # SageAttention with "HND" returns (batch, heads, seq_len, head_dim)
-    # Transformers expects (batch, seq_len, heads, head_dim) for the output
-    # So we need to transpose dimensions 1 and 2
-    attn_output = attn_output.transpose(1, 2).contiguous()
-
-    return attn_output, None
-
-
-def patch_sageattn():
-    """Patch SageAttention for use with transformers."""
-    _check_sageattn_imported()
-
-    from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
-
-    # Replace flash attention with sage attention
-    ALL_ATTENTION_FUNCTIONS.register("flash_attention_2", sage_attention_forward)
-
-    # Note: New method after transformers refactor to use ALL_MASK_ATTENTION_FUNCTIONS
-    # Register sage_attention with the global attention interface
-    # ALL_ATTENTION_FUNCTIONS.register("sage_attention", sage_attention_forward)
-
-    # from transformers.masking_utils import ALL_MASK_ATTENTION_FUNCTIONS, flash_attention_mask
-    # ALL_MASK_ATTENTION_FUNCTIONS.register("sage_attention", flash_attention_mask)
-
-    LOG.info("SageAttention patched successfully")
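
For context, the deleted module's entry point patch_sageattn() worked by re-registering transformers' "flash_attention_2" entry so any model configured for flash attention silently routed through SageAttention. A minimal sketch of that registration pattern (the `sdpa_fallback_forward` name is hypothetical; any callable matching the transformers attention interface can be registered the same way):

    import torch
    from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS

    def sdpa_fallback_forward(module, query, key, value, attention_mask=None, **kwargs):
        # Illustrative stand-in: same (batch, heads, seq, head_dim) inputs and
        # (attn_output, attn_weights) return contract as the deleted forward.
        out = torch.nn.functional.scaled_dot_product_attention(
            query, key, value, is_causal=True
        )
        return out.transpose(1, 2).contiguous(), None

    # Models loaded with attn_implementation="flash_attention_2" now call this instead.
    ALL_ATTENTION_FUNCTIONS.register("flash_attention_2", sdpa_fallback_forward)
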
@@ -59,12 +59,7 @@ class CPU_Offloaded_Gradient_Checkpointer(torch.autograd.Function):
        hidden_states = hidden_states.to("cuda", non_blocking=True).detach()
        hidden_states.requires_grad = True
        with torch.enable_grad():
-           output = ctx.forward_function(hidden_states, *ctx.args)
-       # Newer HF models (e.g. Qwen3MoE) using GradientCheckpointingLayer
-       # return a plain tensor, not a tuple. Older models return tuples
-       # like (hidden_states, present_kv, ...). Unwrap if needed.
-       if isinstance(output, (tuple, list)):
-           (output,) = output
+           (output,) = ctx.forward_function(hidden_states, *ctx.args)
        torch.autograd.backward(output, dY)
        return (
            None,
@@ -169,8 +169,7 @@ def get_attention_cls_from_config(cfg: DictDefault) -> Type[nn.Module]:
        return attention_cls
    except (ImportError, AttributeError) as e:
        raise ValueError(
-           f"Axolotl could not import attention class for model_type: {model_type}. "
-           "Please raise an Issue and turn off lora kernels to continue training. "
+           f"Could not import attention class for model_type: {model_type}. "
            f"Error: {str(e)}"
        ) from e

@@ -1,51 +0,0 @@
-"""
-eaft (entropy-aware focal training) loss implementation
-weights examples by entropy approximation from top-k logits
-
-Reference: https://github.com/ymxyll/LlamaFactory-EAFT/blob/e2ce19e8efcc226450ee8f2b81dfe4e69f1f945d/src/llamafactory/train/trainer_utils.py
-"""
-
-import torch
-import torch.nn.functional as F
-
-
-def eaft_loss(outputs, labels, num_items_in_batch=None, alpha=1.0, k=20):
-    """
-    compute eaft loss with entropy weighting
-
-    args:
-        outputs: model outputs containing logits
-        labels: target labels for computing loss
-        num_items_in_batch: for sample packing support
-        alpha: exponent for entropy weighting (default 1.0)
-        k: number of top logits for entropy approximation (default 20)
-    """
-    logits = outputs.logits
-
-    shift_logits = logits[..., :-1, :].contiguous()
-    shift_labels = labels[..., 1:].contiguous()
-
-    vocab_size = shift_logits.size(-1)
-    shift_logits_view = shift_logits.view(-1, vocab_size)
-    shift_labels_view = shift_labels.view(-1)
-
-    mask = shift_labels_view != -100
-
-    with torch.no_grad():
-        top_k_logits, _ = torch.topk(
-            shift_logits_view[mask].float(), k=min(k, vocab_size), dim=-1
-        )
-        top_k_probs = F.softmax(top_k_logits, dim=-1)
-        entropy = -(top_k_probs * torch.log(top_k_probs + 1e-10)).sum(dim=-1)
-        weights = torch.pow(entropy, alpha)
-
-    loss_fct = torch.nn.CrossEntropyLoss(reduction="none")
-    per_token_loss = loss_fct(shift_logits_view[mask], shift_labels_view[mask])
-    weighted_loss = per_token_loss * weights
-
-    if num_items_in_batch is not None:
-        loss = weighted_loss.sum() / num_items_in_batch
-    else:
-        loss = weighted_loss.mean()
-
-    return loss
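
For the record, the deleted eaft_loss up-weights tokens where the model's top-k predictive entropy is high. With the deleted definition above pasted into scope, a toy invocation runs as-is (shapes and values here are purely illustrative; `SimpleNamespace` stands in for a model output object):

    from types import SimpleNamespace
    import torch

    batch, seq_len, vocab = 2, 8, 32
    outputs = SimpleNamespace(logits=torch.randn(batch, seq_len, vocab))
    labels = torch.randint(0, vocab, (batch, seq_len))
    labels[:, :3] = -100  # prompt tokens masked out of the loss

    # uncertain (high-entropy) positions contribute more than confident ones
    loss = eaft_loss(outputs, labels, alpha=1.0, k=20)
    print(loss.item())
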
@@ -1,5 +1,5 @@
 """
-Monkeypatch to fix inefficient tensor conversion in MistralCommonBackend.apply_chat_template
+Monkeypatch to fix inefficient tensor conversion in MistralCommonTokenizer.apply_chat_template
 """

 import importlib
@@ -12,11 +12,11 @@ LOG = get_logger(__name__)


 def apply_mistral_tokenizer_image_patch():
-    """Apply patch to MistralCommonBackend.apply_chat_template to fix image tensor conversion."""
-    from transformers.tokenization_mistral_common import MistralCommonBackend
+    """Apply patch to MistralCommonTokenizer.apply_chat_template to fix image tensor conversion."""
+    from transformers.tokenization_mistral_common import MistralCommonTokenizer

     # Get original source
-    original_source = inspect.getsource(MistralCommonBackend.apply_chat_template)
+    original_source = inspect.getsource(MistralCommonTokenizer.apply_chat_template)
     original_source, _ = detab_code(original_source)

     # Define the replacement
@@ -41,7 +41,7 @@ def apply_mistral_tokenizer_image_patch():
     )

     # Load necessary imports from the module
-    module_name = MistralCommonBackend.__module__
+    module_name = MistralCommonTokenizer.__module__
     module = importlib.import_module(module_name)

     # Detect what needs to be imported
@@ -79,7 +79,7 @@ def apply_mistral_tokenizer_image_patch():
        exec(patched_source, globals())  # nosec B102

        # Replace the method
-       MistralCommonBackend.apply_chat_template = patched_apply_chat_template
-       LOG.info("Successfully applied MistralCommonBackend tensor conversion patch")
+       MistralCommonTokenizer.apply_chat_template = patched_apply_chat_template
+       LOG.info("Successfully applied MistralCommonTokenizer tensor conversion patch")
    else:
-       LOG.warning("Could not find target code for MistralCommonBackend patching")
+       LOG.warning("Could not find target code for MistralCommonTokenizer patching")
@@ -155,6 +155,7 @@ class ReLoRACallback(TrainerCallback):
                    f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}",
                    "adapter",
                ),
+               safe_serialization=True,
            )
            with torch.no_grad():
                merge_and_save(
@@ -213,7 +214,7 @@ class ReLoRACallback(TrainerCallback):

            self.last_full_model = checkpoint_folder
        else:
-           model.model.save_pretrained(checkpoint_folder)
+           model.model.save_pretrained(checkpoint_folder, safe_serialization=True)

        return control

@@ -52,15 +52,9 @@ def patch_prepare_context_parallel_inputs() -> None:
        if item in patched_source:
            items_to_import.append(item)

-   # Use a separate namespace to capture the exec'd function
-   namespace = {}
-   exec(f"from {module_name} import ({', '.join(items_to_import)})", namespace)
-   exec(patched_source, namespace)
+   exec(f"from {module_name} import ({', '.join(items_to_import)})", globals())
+   exec(patched_source, globals())

-   # Explicitly get the function from the namespace
-   axolotl_prepare_context_parallel_inputs = namespace[
-       "axolotl_prepare_context_parallel_inputs"
-   ]
    Trainer._original_prepare_context_parallel_inputs = (
        Trainer._prepare_context_parallel_inputs
    )
@@ -28,12 +28,8 @@ PATCHED_EVAL_CODE = {
    "array": 'metrics[f"{metric_key_prefix}_loss"] = np.nanmean(all_losses).item()',
 }

-ORIGINAL_MAYBE_CODE = (
-    "tr_loss_scalar = nested_gather(tr_loss, self.args.parallel_mode).mean().item()"
-)
-PATCHED_MAYBE_CODE = (
-    "tr_loss_scalar = nested_gather(tr_loss, self.args.parallel_mode).nanmean().item()"
-)
+ORIGINAL_MAYBE_CODE = "tr_loss_scalar = self._nested_gather(tr_loss).mean().item()"
+PATCHED_MAYBE_CODE = "tr_loss_scalar = self._nested_gather(tr_loss).nanmean().item()"


 def check_evaluation_loop_is_patchable() -> bool:
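
Both sides of this hunk keep the nanmean() substitution (only the gather call changes with the transformers version); the patch exists so a single rank logging a NaN loss, e.g. from an all-masked shard, doesn't poison the aggregated scalar. A quick illustration:

    import torch

    losses = torch.tensor([2.0, float("nan"), 4.0])
    print(losses.mean().item())     # nan -- one bad rank ruins the log line
    print(losses.nanmean().item())  # 3.0 -- NaN entries are simply skipped
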
@@ -14,6 +14,7 @@ from transformers.models.voxtral import VoxtralProcessor

 from axolotl.utils.dict import remove_none_values
 from axolotl.utils.logging import get_logger
+from axolotl.utils.mistral.mistral3_processor import Mistral3Processor

 LOG = get_logger(__name__)

@@ -429,7 +430,7 @@ class Mistral3ProcessingStrategy(ProcessingStrategy):

     def __init__(
         self,
-        processor,
+        processor: Mistral3Processor,
         chat_template: Optional[str] = None,
         image_size: int | tuple[int, int] | None = None,
         image_resize_algorithm: Resampling | None = None,
@@ -485,58 +486,6 @@ class InternVLProcessingStrategy(ProcessingStrategy):
         return labels


-class Glm4vProcessingStrategy(ProcessingStrategy):
-    """Processing Strategy class for GLM4V and GLM4V-MoE vision models."""
-
-    def __init__(
-        self,
-        processor: ProcessorMixin,
-        chat_template: Optional[str] = None,
-        image_size: int | tuple[int, int] | None = None,
-        image_resize_algorithm: Resampling | None = None,
-    ):
-        super().__init__(processor, chat_template, image_size, image_resize_algorithm)
-
-        self.tokenizer = getattr(processor, "tokenizer", processor)
-
-        self.image_token = "<|image|>"  # nosec
-        self.begin_image_token = "<|begin_of_image|>"  # nosec
-        self.end_image_token = "<|end_of_image|>"  # nosec
-        self.video_token = "<|video|>"  # nosec
-        self.begin_video_token = "<|begin_of_video|>"  # nosec
-        self.end_video_token = "<|end_of_video|>"  # nosec
-
-        self.image_token_id = self.tokenizer.convert_tokens_to_ids(self.image_token)
-        self.begin_image_token_id = self.tokenizer.convert_tokens_to_ids(
-            self.begin_image_token
-        )
-        self.end_image_token_id = self.tokenizer.convert_tokens_to_ids(
-            self.end_image_token
-        )
-        self.video_token_id = self.tokenizer.convert_tokens_to_ids(self.video_token)
-        self.begin_video_token_id = self.tokenizer.convert_tokens_to_ids(
-            self.begin_video_token
-        )
-        self.end_video_token_id = self.tokenizer.convert_tokens_to_ids(
-            self.end_video_token
-        )
-
-    def process_labels(self, input_ids):
-        labels = input_ids.clone()
-
-        labels[labels == self.tokenizer.pad_token_id] = -100
-
-        labels[labels == self.image_token_id] = -100
-        labels[labels == self.begin_image_token_id] = -100
-        labels[labels == self.end_image_token_id] = -100
-
-        labels[labels == self.video_token_id] = -100
-        labels[labels == self.begin_video_token_id] = -100
-        labels[labels == self.end_video_token_id] = -100
-
-        return labels
-
-
 def get_processing_strategy(
     processor: ProcessorMixin,
     chat_template,
@@ -544,8 +493,6 @@ def get_processing_strategy(
     image_size: int | tuple[int, int] | None = None,
     image_resize_algorithm: Resampling | None = None,
 ):
-    from axolotl.utils.mistral.mistral3_processor import Mistral3Processor
-
     processing_kwargs = {
         "processor": processor,
         "chat_template": chat_template,
@@ -553,10 +500,10 @@ def get_processing_strategy(
         "image_resize_algorithm": image_resize_algorithm,
     }

-    if chat_template_type in [None, "tokenizer_default"]:
-        tokenizer = getattr(processor, "tokenizer", processor)
-        if hasattr(tokenizer, "chat_template"):
-            processing_kwargs["chat_template"] = tokenizer.chat_template
+    if chat_template_type in [None, "tokenizer_default"] and hasattr(
+        processor.tokenizer, "chat_template"
+    ):
+        processing_kwargs["chat_template"] = processor.tokenizer.chat_template

     if chat_template_type == "qwen2_vl":
         return Qwen2VLProcessingStrategy(
@@ -585,15 +532,6 @@ def get_processing_strategy(
        return Mistral3ProcessingStrategy(
            **processing_kwargs,
        )
-    try:
-        from transformers.models.glm46v.processing_glm46v import Glm46VProcessor
-
-        if isinstance(processor, Glm46VProcessor):
-            return Glm4vProcessingStrategy(
-                **processing_kwargs,
-            )
-    except ImportError:
-        pass

    if isinstance(processor, InternVLProcessor):
        return InternVLProcessingStrategy(
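
The deleted strategy's process_labels follows the usual VLM recipe: clone input_ids and set pad plus image/video marker tokens to -100 so cross entropy ignores them. A toy version of that masking (the token ids here are made up for illustration):

    import torch

    input_ids = torch.tensor([[11, 901, 42, 43, 902, 7]])
    special_ids = (901, 902)  # hypothetical begin/end image marker ids

    labels = input_ids.clone()
    for tok_id in special_ids:
        labels[labels == tok_id] = -100  # -100 is ignored by CrossEntropyLoss
    print(labels)  # tensor([[  11, -100,   42,   43, -100,    7]])
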
@@ -150,8 +150,6 @@ class ChatTemplatePrompter(Prompter):

        return self.tokenizer.apply_chat_template(
            conversation,
-           tokenize=True,
-           return_dict=False,
            **chat_template_kwargs,
        )

@@ -153,27 +153,13 @@ class TelemetryCallback(TrainerCallback):
        self.last_report_step = step

    def _extract_last_metrics(self, state: TrainerState) -> dict:
-       """Extract last loss, learning_rate, grad_norm, and token metrics from log history."""
+       """Extract last loss, learning_rate, and grad_norm from log history."""
        if not state.log_history:
-           return {
-               "loss": 0,
-               "ppl": 0,
-               "learning_rate": 0,
-               "grad_norm": 0,
-               "tokens/total": 0,
-               "tokens/trainable": 0,
-               "tokens/train_per_sec_per_gpu": 0,
-           }
+           return {"loss": 0, "learning_rate": 0, "grad_norm": 0}

        last_log = state.log_history[-1]
        return {
            "loss": last_log.get("loss", 0),
-           "ppl": last_log.get("ppl", 0),
            "learning_rate": last_log.get("learning_rate", 0),
            "grad_norm": last_log.get("grad_norm", 0),
-           "tokens/total": last_log.get("tokens/total", 0),
-           "tokens/trainable": last_log.get("tokens/trainable", 0),
-           "tokens/train_per_sec_per_gpu": last_log.get(
-               "tokens/train_per_sec_per_gpu", 0
-           ),
        }
@@ -155,10 +155,6 @@ def send_errors(func: Callable) -> Callable:
                },
            )

-           LOG.error(
-               f"Error captured in telemetry. Run ID: {telemetry_manager.run_id}"
-           )
-
            raise

    return wrapper
@@ -5,6 +5,7 @@ import importlib
 import logging
 import os
 import platform
+import time
 import uuid
 from pathlib import Path
 from typing import Any
@@ -19,6 +20,21 @@ LOG = logging.getLogger(__name__)
 POSTHOG_HOST = "https://app.posthog.com"
 POSTHOG_WRITE_KEY = "phc_1kUR0o04oJKKTTeSsIz2Mfm5mpiVsQEf2WOlzljMD7y"

+OPT_OUT_WARNING_SLEEP_SECONDS = 10
+OPT_OUT_WARNING = (
+    "\nTelemetry is now enabled by default to help improve Axolotl. "
+    "If you'd like to disable it, set AXOLOTL_DO_NOT_TRACK=1 in your environment.\n\n"
+    "Telemetry data helps us understand:\n"
+    "- Which features are most used\n"
+    "- What hardware configurations to prioritize\n"
+    "- Where users encounter errors\n\n"
+    "Personally identifiable information (PII) is not collected.\n\n"
+    "To remove this warning, explicitly set AXOLOTL_DO_NOT_TRACK=0 (enable telemetry) "
+    "or AXOLOTL_DO_NOT_TRACK=1 (disable telemetry).\n\n"
+    "For details, see: https://docs.axolotl.ai/docs/telemetry.html\n\n"
+    f"Sleeping for {OPT_OUT_WARNING_SLEEP_SECONDS}s..."
+)
+
 WHITELIST_PATH = str(Path(__file__).parent / "whitelist.yaml")

 # NOTE: Need to keep these up to date with any config schema changes
@@ -30,8 +46,8 @@ FIELDS_TO_REDACT = {
    "resume_from_checkpoint",
    "hub_model_id",
 }
-PREFIXES_TO_REDACT = {"wandb_", "comet_", "mlflow_", "gradio_", "trackio_", "swanlab_"}
-PATH_INDICATORS = {"path", "dir", "data_files"}
+PREFIXES_TO_REDACT = {"wandb_", "comet_", "mlflow_", "gradio_"}
+PATH_INDICATORS = {"path", "dir"}

 # pylint: disable=duplicate-code
 RELEVANT_PACKAGES = {
@@ -167,6 +183,11 @@ class TelemetryManager:
            "false",
            "true",
        ):
+           # Print opt-out info message for main process only
+           if is_main_process():
+               LOG.warning(OPT_OUT_WARNING)
+               time.sleep(OPT_OUT_WARNING_SLEEP_SECONDS)
+
            return True

        # Only rank 0 will send telemetry
@@ -31,10 +31,3 @@ organizations:
   - "mistral-community"
   - "llava-hf"
   - "ByteDance-Seed"
-  - "ACE-Step"
-  - "openbmb"
-  - "MiniMaxAI"
-  - "stepfun-ai"
-  - "internlm"
-  - "katanemo"
-  - "XiaomiMiMo"
@@ -135,13 +135,16 @@ def setup_reference_model(
     return model_ref


-def setup_signal_handler(cfg: DictDefault, model: PreTrainedModel):
+def setup_signal_handler(
+    cfg: DictDefault, model: PreTrainedModel, safe_serialization: bool
+):
     """
     Set up signal handler for graceful termination.

     Args:
         cfg: Dictionary mapping `axolotl` config keys to values.
         model: The model to save on termination
+        safe_serialization: Whether to use safe serialization when saving
     """
     # ray workers don't have access to this signal
     if cfg.local_rank == 0 and not cfg.use_ray:
@@ -149,7 +152,9 @@ def setup_signal_handler(cfg: DictDefault, model: PreTrainedModel):
        def terminate_handler(_, __, model_weakref):
            if model_weakref() is not None:
                _model = model_weakref()
-               _model.save_pretrained(cfg.output_dir)
+               _model.save_pretrained(
+                   cfg.output_dir, safe_serialization=safe_serialization
+               )

            cleanup_distributed()
            sys.exit(0)
@@ -214,6 +219,7 @@ def save_trained_model(
     cfg: DictDefault,
     trainer: Any,
     model: PreTrainedModel,
+    safe_serialization: bool,
 ):
     """
     Save the trained model according to configuration and training setup.
@@ -222,6 +228,7 @@ def save_trained_model(
         cfg: Dictionary mapping `axolotl` config keys to values.
         trainer: The trainer object.
         model: The trained model to save.
+        safe_serialization: Whether to use safe serialization.
     """
     LOG.info(f"Training completed! Saving trained model to {cfg.output_dir}.")

@@ -276,6 +283,7 @@ def save_trained_model(
            merge_fsdp_weights(
                checkpoint_dir=str(fsdp_dir),
                output_path=merged_path,
+               safe_serialization=True,
            )
            trainer.accelerator.wait_for_everyone()
            if trainer.accelerator.is_main_process:
@@ -322,9 +330,11 @@ def save_trained_model(
        pass
    elif cfg.local_rank == 0:
        if cfg.rl and cfg.adapter and not cfg.rl_adapter_ref_model:
-           trainer.model.save_pretrained(cfg.output_dir)
+           trainer.model.save_pretrained(
+               cfg.output_dir, safe_serialization=safe_serialization
+           )

-       model.save_pretrained(cfg.output_dir)
+       model.save_pretrained(cfg.output_dir, safe_serialization=safe_serialization)

    if hasattr(cfg, "llmcompressor") and cfg.llmcompressor:
        # TODO: add integration support so this can be implemented completely within the plugin
@@ -334,6 +344,7 @@ def save_trained_model(
            model=model,
            output_dir=cfg.output_dir,
            trainer=trainer,
+           safe_serialization=safe_serialization,
            save_compressed=cfg.llmcompressor.save_compressed,
        )

@@ -438,6 +449,7 @@ def handle_untrained_tokens_fix(
     model: PreTrainedModel,
     tokenizer: PreTrainedTokenizer,
     train_dataset: Dataset,
+    safe_serialization: bool,
 ):
     """
     Apply fixes for untrained tokens if configured.
@@ -447,6 +459,7 @@ def handle_untrained_tokens_fix(
         model: The model to apply fixes to.
         tokenizer: The tokenizer for token identification.
         train_dataset: The training dataset to use.
+        safe_serialization: Whether to use safe serialization when saving.
     """
     if not cfg.fix_untrained_tokens:
         return
@@ -470,7 +483,9 @@ def handle_untrained_tokens_fix(
    fix_untrained_tokens(model, tokenizer, train_dataset, **fix_kwargs)

    if cfg.local_rank == 0:
-       model.save_pretrained(str(Path(cfg.output_dir)))
+       model.save_pretrained(
+           str(Path(cfg.output_dir)), safe_serialization=safe_serialization
+       )


def setup_model_and_trainer(
@@ -567,12 +582,15 @@ def train(
    ) = setup_model_and_trainer(cfg, dataset_meta)

    # Handle untrained tokens if configured
+   safe_serialization = cfg.save_safetensors is True
    train_dataset = dataset_meta.train_dataset
-   handle_untrained_tokens_fix(cfg, model, tokenizer, train_dataset)
+   handle_untrained_tokens_fix(
+       cfg, model, tokenizer, train_dataset, safe_serialization
+   )

    # Additional setup
    save_initial_configs(cfg, tokenizer, model, peft_config, processor)
-   setup_signal_handler(cfg, model)
+   setup_signal_handler(cfg, model, safe_serialization)
    setup_model_card(cfg)

    # Execute the training
@@ -584,7 +602,7 @@ def train(
    torch.cuda.empty_cache()

    # Save the trained model and cleanup
-   save_trained_model(cfg, trainer, model)
+   save_trained_model(cfg, trainer, model, safe_serialization)
    tokenizer.save_pretrained(
        str(Path(cfg.output_dir)), save_jinja_files=cfg.tokenizer_save_jinja_files
    )
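
The common thread in the hunks above: the flag is derived once from the config in train() (safe_serialization = cfg.save_safetensors is True) and threaded to every checkpoint write, instead of each call site deciding on its own. Outside axolotl, the keyword maps straight onto the transformers API; a rough sketch (the tiny model id is only an illustrative choice):

    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
    model.save_pretrained("out-safetensors", safe_serialization=True)   # writes model.safetensors
    model.save_pretrained("out-torch-bin", safe_serialization=False)    # writes pytorch_model.bin
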
@@ -7,10 +7,6 @@ from torch import Tensor
 from tqdm import tqdm
 from transformers.modeling_outputs import CausalLMOutput
 from transformers.modeling_utils import PreTrainedModel
-
-try:
-    from transformers.tokenization_python import PreTrainedTokenizer
-except ImportError:
-    from transformers.tokenization_utils import PreTrainedTokenizer
+from transformers.tokenization_utils import PreTrainedTokenizer

 from axolotl.utils.distributed import is_main_process
@@ -78,19 +78,12 @@ class TokensPerSecondCallback(TrainerCallback):
        **kwargs,
    ):  # pylint: disable=unused-argument
        tokens = getattr(state, "tokens", None)
-       if not (tokens and "trainable_tokens" in tokens):
-           return
-       step_time = time.perf_counter() - self.start_time
-       if step_time <= 0:
-           return
-       num_tokens = tokens["trainable_tokens"].clone() / self.non_data_parallel_size
-       if torch.distributed.is_initialized():
-           dp_size = max(
-               1, torch.distributed.get_world_size() // self.non_data_parallel_size
-           )
-           num_tokens = num_tokens / dp_size
-       state.last_tokens_per_second = num_tokens / step_time
+       if tokens and "trainable_tokens" in tokens:
+           step_time = time.perf_counter() - self.start_time
+           num_tokens_per_device = tokens["trainable_tokens"].clone()
+           # non data parallel groups have duplicated tokens, so we avoid double-counting
+           num_tokens_per_device = num_tokens_per_device / self.non_data_parallel_size
+           state.last_tokens_per_second = num_tokens_per_device / step_time

    def on_log(
        self,
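
A quick sanity check of the formula in the rewritten branch: tokens counted for the step are divided by the non-data-parallel degree (they are duplicated across, say, tensor-parallel ranks) and by the wall-clock step time. With illustrative numbers:

    trainable_tokens = 8192       # tokens["trainable_tokens"] for the step
    non_data_parallel_size = 2    # e.g. TP=2 duplicates each token's count
    step_time = 0.5               # seconds since the step began

    tokens_per_sec_per_device = trainable_tokens / non_data_parallel_size / step_time
    print(tokens_per_sec_per_device)  # 8192.0
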
@@ -218,9 +218,6 @@ class SequenceParallelContextManager:
        self.original_seq_len = 0
        self.pad_len = 0

-       # Track local valid token count for eval loss correction across CP ranks
-       self._local_valid_tokens: torch.Tensor | None = None
-
        # Create a partially applied version of the apply_sequence_parallelism function
        self.apply_sequence_parallelism = functools.partial(
            apply_sequence_parallelism,
@@ -273,18 +270,6 @@ class SequenceParallelContextManager:
                self.apply_sequence_parallelism(updated_kwargs)
            )

-           # Track local valid tokens for eval loss correction
-           if "labels" in updated_kwargs and not self.models[0].training:
-               self._local_valid_tokens = (
-                   (updated_kwargs["labels"] != -100).sum().float()
-               )
-               # Strip num_items_in_batch during eval so the model uses
-               # reduction='mean', allowing the post-hook weighted all-reduce
-               # formula (loss * local_valid) to correctly recover the loss sum
-               updated_kwargs.pop("num_items_in_batch", None)
-           else:
-               self._local_valid_tokens = None
-
            return remaining_args, updated_kwargs

        # Forward post-hook to gather outputs
@@ -302,44 +287,6 @@ class SequenceParallelContextManager:

            return output

-       # Post-hook to correct eval loss via weighted all-reduce across CP ranks
-       def eval_loss_correction_post_hook(_, __, output: ModelOutput) -> ModelOutput:
-           if self._local_valid_tokens is None:
-               return output
-           if not hasattr(output, "loss") or output.loss is None:
-               return output
-
-           local_valid = self._local_valid_tokens.to(output.loss.device)
-           loss = output.loss.detach().clone()
-
-           # Handle rank with zero valid tokens (loss is NaN)
-           if local_valid.item() == 0:
-               weighted_loss = torch.zeros(1, device=loss.device, dtype=loss.dtype)
-           else:
-               weighted_loss = loss * local_valid
-
-           total_valid = local_valid.clone()
-           dist.all_reduce(
-               weighted_loss,
-               op=dist.ReduceOp.SUM,
-               group=self.process_group,
-           )
-           dist.all_reduce(
-               total_valid,
-               op=dist.ReduceOp.SUM,
-               group=self.process_group,
-           )
-
-           if total_valid.item() > 0:
-               output["loss"] = (weighted_loss / total_valid).squeeze()
-           else:
-               output["loss"] = torch.tensor(
-                   float("nan"), device=loss.device, dtype=loss.dtype
-               )
-
-           self._local_valid_tokens = None
-           return output
-
        # Register hooks
        for model in self.models:
            self.hook_handles.append(
@@ -351,10 +298,6 @@ class SequenceParallelContextManager:
            self.hook_handles.append(
                model.register_forward_hook(sequence_parallel_post_hook)
            )
-           # Always register eval loss correction hook
-           self.hook_handles.append(
-               model.register_forward_hook(eval_loss_correction_post_hook)
-           )

    def _gather_outputs(self, output: CausalLMOutputWithPast) -> CausalLMOutputWithPast:
        """Gather sharded outputs from all ranks and reconstruct the full tensor."""
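
What the removed hook computed, stripped of the dist.all_reduce machinery: a valid-token-weighted mean of the per-rank mean losses, which recovers the mean over all tokens when shards carry unequal numbers of valid tokens. A single-process sketch of the same arithmetic:

    import torch

    rank_losses = torch.tensor([2.0, 3.0])    # per-CP-rank mean eval loss
    rank_valid = torch.tensor([100.0, 50.0])  # valid (non -100) tokens per rank

    # the hook all-reduced loss*valid and valid, then took the ratio:
    global_loss = (rank_losses * rank_valid).sum() / rank_valid.sum()
    print(global_loss.item())  # 2.333... == the mean over all 150 tokens
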
@@ -173,7 +173,7 @@ def _drop_long_sequences(

        return (len_prompt + len_completion) <= sequence_len

-   if rl in {RLType.GRPO, RLType.GDPO}:
+   if rl is RLType.GRPO:
        return True

    raise ValueError("Unknown RL type")
@@ -2,19 +2,11 @@

 import os

-from axolotl.utils.logging import get_logger
-
-LOG = get_logger(__name__)
-
-
 def get_default_process_count():
     if axolotl_dataset_num_proc := os.environ.get("AXOLOTL_DATASET_NUM_PROC"):
         return int(axolotl_dataset_num_proc)
     if axolotl_dataset_processes := os.environ.get("AXOLOTL_DATASET_PROCESSES"):
-        LOG.warning(
-            "AXOLOTL_DATASET_PROCESSES and `dataset_processes` are deprecated and will be "
-            "removed in a future version. Please use `dataset_num_proc` instead."
-        )
         return int(axolotl_dataset_processes)
     if runpod_cpu_count := os.environ.get("RUNPOD_CPU_COUNT"):
         return int(runpod_cpu_count)
@@ -7,11 +7,11 @@ import numpy as np
 from mistral_common.protocol.instruct.validator import ValidationMode
 from mistral_common.tokens.tokenizers.utils import download_tokenizer_from_hf_hub
 from torch import Tensor
-from transformers.tokenization_mistral_common import MistralCommonBackend
+from transformers.tokenization_mistral_common import MistralCommonTokenizer
 from transformers.tokenization_utils_base import VERY_LARGE_INTEGER


-class HFMistralTokenizer(MistralCommonBackend):
+class HFMistralTokenizer(MistralCommonTokenizer):
     """
     Wraps mistral_common.tokens.tokenizers.mistral.MistralTokenizer
     and exposes HuggingFace API for special tokens.
@@ -37,19 +37,11 @@ class HFMistralTokenizer(MistralCommonBackend):
     def name_or_path(self) -> str:
         return self._name_or_path

-    @name_or_path.setter
-    def name_or_path(self, name_or_path: str) -> None:
-        self._name_or_path = name_or_path
-
     @property
     def chat_template(self) -> str | None:
         """Chat template is not supported. Dummy method to satisfy HuggingFace API."""
         return "[This is a dummy chat template]"

-    @chat_template.setter
-    def chat_template(self, chat_template: str | None) -> None:
-        pass
-
     def _set_mode(self, mode: ValidationMode):
         """Set the mode of the MistralRequestValidator.

@@ -86,15 +78,15 @@ class HFMistralTokenizer(MistralCommonBackend):
         add_generation_prompt: bool = False,
         **kwargs,
     ) -> str | list[int]:
-        """Patched fn to handle setting test mode, remove chat_template and add_generation_prompt kwarg"""
+        """Patched fn to handle setting serving mode, continue_final_message, remove chat_template and add_generation_prompt kwarg"""

         # pop unnecessary kwarg for mistral
         kwargs.pop("real_last_index", None)
-        kwargs.pop("add_special_tokens", None)

         try:
             if add_generation_prompt:
-                self._set_mode(ValidationMode.test)
+                self._set_mode(ValidationMode.serving)
+                kwargs["continue_final_message"] = True

             out = super().apply_chat_template(conversation, **kwargs)

@@ -141,7 +133,7 @@ class HFMistralTokenizer(MistralCommonBackend):
        r"""
        Patched fn to pass `name_or_path` and remove extra kwargs.

-       Instantiate a `MistralCommonBackend` from a predefined
+       Instantiate a `MistralCommonTokenizer` from a predefined
        tokenizer.

        Args:
@@ -150,7 +142,7 @@ class HFMistralTokenizer(MistralCommonBackend):

                - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
                - A path to a *directory* containing the tokenizer config, for instance saved
-                 using the [`MistralCommonBackend.tokenization_mistral_common.save_pretrained`] method, e.g.,
+                 using the [`MistralCommonTokenizer.tokenization_mistral_common.save_pretrained`] method, e.g.,
                  `./my_model_directory/`.
            mode (`ValidationMode`, *optional*, defaults to `ValidationMode.test`):
                Validation mode for the `MistralTokenizer` tokenizer.
@@ -162,7 +154,7 @@ class HFMistralTokenizer(MistralCommonBackend):
                exist.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
-               when running `hf auth login` (stored in `~/.huggingface`).
+               when running `huggingface-cli login` (stored in `~/.huggingface`).
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only rely on local files and not to attempt to download any files.
            revision (`str`, *optional*, defaults to `"main"`):
@@ -187,12 +179,12 @@ class HFMistralTokenizer(MistralCommonBackend):
                Whether or not the model should cleanup the spaces that were added when splitting the input text during the
                tokenization process.
            kwargs (additional keyword arguments, *optional*):
-               Not supported by `MistralCommonBackend.from_pretrained`.
+               Not supported by `MistralCommonTokenizer.from_pretrained`.
                Will raise an error if used.
        """
        if init_inputs:
            raise ValueError(
-               "`init_inputs` are not supported by `MistralCommonBackend.from_pretrained`."
+               "`init_inputs` are not supported by `MistralCommonTokenizer.from_pretrained`."
            )

        # Delete trust_remote_code as it does nothing
@@ -204,7 +196,7 @@ class HFMistralTokenizer(MistralCommonBackend):
        # Handle kwargs and AutoTokenizer case
        if kwargs and not kwargs.keys() == {"_from_auto"}:
            raise ValueError(
-               f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonBackend.from_pretrained`."
+               f"Kwargs {list(kwargs.keys())} are not supported by `MistralCommonTokenizer.from_pretrained`."
            )

        if not os.path.isfile(pretrained_model_name_or_path):
@@ -446,16 +446,7 @@ class AxolotlInputConfig(
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
unfrozen_parameters: list[str] | None = Field(
|
unfrozen_parameters: list[str] | None = None
|
||||||
default=None,
|
|
||||||
json_schema_extra={
|
|
||||||
"description": "List of regex patterns for parameter names to keep unfrozen. "
|
|
||||||
"All other parameters will be frozen via requires_grad=False. "
|
|
||||||
"Note: range-based patterns (e.g. embed_tokens.weight$[:32000]) use gradient "
|
|
||||||
"zeroing rather than a true freeze, so weight decay will still apply to the "
|
|
||||||
"frozen portion and optimizer states are allocated for the full parameter."
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
sequence_len: int = Field(
|
sequence_len: int = Field(
|
||||||
default=512,
|
default=512,
|
||||||
@@ -618,12 +609,6 @@ class AxolotlInputConfig(
|
|||||||
default=None,
|
default=None,
|
||||||
json_schema_extra={"description": "Whether to use bettertransformers"},
|
json_schema_extra={"description": "Whether to use bettertransformers"},
|
||||||
)
|
)
|
||||||
sage_attention: bool | None = Field(
|
|
||||||
default=None,
|
|
||||||
json_schema_extra={
|
|
||||||
"description": "Whether to use SageAttention https://github.com/thu-ml/SageAttention"
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
eager_attention: bool | None = None
|
eager_attention: bool | None = None
|
||||||
|
|
||||||
@@ -634,13 +619,6 @@ class AxolotlInputConfig(
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
experts_implementation: str | None = Field(
|
|
||||||
default=None,
|
|
||||||
json_schema_extra={
|
|
||||||
"description": "Which experts implementation to use for MoE models,"
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
scaling_softmax: bool | None = Field(
|
scaling_softmax: bool | None = Field(
|
||||||
default=None,
|
default=None,
|
||||||
json_schema_extra={
|
json_schema_extra={
|
||||||
@@ -698,24 +676,6 @@ class AxolotlInputConfig(
|
|||||||
"description": "Number of chunks to use for chunked cross entropy loss"
|
"description": "Number of chunks to use for chunked cross entropy loss"
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
use_eaft: bool | None = Field(
|
|
||||||
default=None,
|
|
||||||
json_schema_extra={
|
|
||||||
"description": "Enable Entropy-Aware Focal Training loss (EAFT)"
|
|
||||||
},
|
|
||||||
)
|
|
||||||
eaft_alpha: float | None = Field(
|
|
||||||
default=1.0,
|
|
||||||
json_schema_extra={
|
|
||||||
"description": "Exponent for entropy weighting in EAFT (default: 1.0)"
|
|
||||||
},
|
|
||||||
)
|
|
||||||
eaft_k: int | None = Field(
|
|
||||||
default=20,
|
|
||||||
json_schema_extra={
|
|
||||||
"description": "Number of top logits for entropy approximation (default: 20)"
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
tiled_mlp: bool | None = Field(
|
tiled_mlp: bool | None = Field(
|
||||||
default=None,
|
default=None,
|
||||||
@@ -1135,27 +1095,6 @@ class AxolotlInputConfig(
         )
         return data
 
-    @model_validator(mode="before")
-    @classmethod
-    def check_sageattn_wo_sample_packing(cls, data):
-        if (not data.get("sample_packing", False)) and data.get("sage_attention"):
-            if not data.get("pad_to_sequence_len", False):
-                LOG.warning(
-                    "We recommend turning on `pad_to_sequence_len` for SageAttention without packing."
-                    "This is because there has been signs that the loss explodes after a few steps."
-                )
-        return data
-
-    @model_validator(mode="before")
-    @classmethod
-    def check_sageattn_fft(cls, data):
-        if (not data.get("adapter", False)) and data.get("sage_attention"):
-            LOG.warning(
-                "We found loss to drop to 0 with SageAttention full finetuning."
-                "Please observe the loss, otherwise switch to LoRA/QLoRA or another attention method."
-            )
-        return data
-
 
 class AxolotlConfigWCapabilities(AxolotlInputConfig):
     """Wrapper to valdiate GPU capabilities with the configured options"""
@@ -1212,21 +1151,6 @@ class AxolotlConfigWCapabilities(AxolotlInputConfig):
 
         return data
 
-    @model_validator(mode="before")
-    @classmethod
-    def check_compute_capability_w_sageattn(cls, data):
-        if (
-            data.get("sage_attention")
-            and data.get("capabilities")
-            and data.get("capabilities").get("compute_capability")
-            not in ["sm_80", "sm_86", "sm_89", "sm_90", "sm_120"]
-        ):
-            raise ValueError(
-                "SageAttention supports compute capability between sm_80 and sm_120. "
-                "Please use a different attention implementation."
-            )
-        return data
-
     @model_validator(mode="before")
     @classmethod
     def check_multigpu_unsloth(cls, data):
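The removed `check_compute_capability_w_sageattn` validator gated SageAttention on the GPU's compute capability. A rough runtime equivalent of that guard, assuming a CUDA device is present (the allowed set is copied verbatim from the removed check):

import torch


def sage_attention_supported() -> bool:
    # Mirrors the removed validator: SageAttention was allowed only on
    # sm_80 / sm_86 / sm_89 / sm_90 / sm_120 devices.
    major, minor = torch.cuda.get_device_capability()
    return f"sm_{major}{minor}" in {"sm_80", "sm_86", "sm_89", "sm_90", "sm_120"}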
@@ -1280,10 +1204,6 @@ class AxolotlConfigWCapabilities(AxolotlInputConfig):
         ):
             return data
 
-        # Skip if trust_remote_code is enabled, as lora kernels are not compatible
-        if data.get("trust_remote_code"):
-            return data
-
         # Skip if dropout is not 0, as auto enabling it would just disable it during runtime patch checks
         if data.get("lora_dropout") != 0:
             return data
@@ -26,7 +26,6 @@ class RLType(str, Enum):
     """RL trainer type configuration subset"""
 
     DPO = "dpo"
-    GDPO = "gdpo"
     GRPO = "grpo"
     IPO = "ipo"
     ORPO = "orpo"
@@ -4,7 +4,7 @@ FSDP Configuration Schema
 
 from typing import Literal
 
-from pydantic import AliasChoices, BaseModel, Field
+from pydantic import BaseModel, Field
 
 
 class FSDPConfig(BaseModel):
@@ -12,11 +12,6 @@ class FSDPConfig(BaseModel):
     FSDP Configuration Schema
     """
 
-    fsdp_version: int | None = Field(
-        validation_alias=AliasChoices("fsdp_version", "version"),
-        default=None,
-        json_schema_extra={"description": "FSDP version"},
-    )
     activation_checkpointing: bool | None = Field(
         default=None,
         description="Enable activation checkpointing to reduce memory usage during forward passes",
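Dropping `AliasChoices` above means the `version:` spelling of `fsdp_version` is no longer accepted. For reference, this is the pydantic v2 aliasing mechanism the removed field relied on (hypothetical model name):

from pydantic import AliasChoices, BaseModel, Field


class FSDPVersioned(BaseModel):
    # With AliasChoices, either input key populates the field.
    fsdp_version: int | None = Field(
        default=None,
        validation_alias=AliasChoices("fsdp_version", "version"),
    )


# Both of these validate to fsdp_version=2:
# FSDPVersioned.model_validate({"fsdp_version": 2})
# FSDPVersioned.model_validate({"version": 2})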
@@ -120,31 +120,13 @@ class ModelOutputConfig(BaseModel):
         default=None,
         json_schema_extra={"description": "how to push checkpoints to hub"},
     )
-    hub_revision: str | None = Field(
-        default=None,
-        json_schema_extra={
-            "description": "branch/revision to push to on hub (default: main)"
-        },
-    )
     save_safetensors: bool | None = Field(
         default=True,
         json_schema_extra={
-            "description": "Whether to save the model using safetensors format. Defaults to True."
+            "description": "Save model as safetensors (require safetensors package). Default True"
         },
     )
-
-    @field_validator("save_safetensors")
-    @classmethod
-    def validate_save_safetensors(cls, v):
-        if v is False:
-            raise ValueError(
-                "save_safetensors=False is not supported in Transformers V5. "
-                "Transformers V5 always uses safetensors format for model serialization. "
-                "This field is deprecated and will be removed in a future version."
-            )
-        # Allow None and True, will default to True if None
-        return True if v is None else v
 
 
 class SpecialTokensConfig(BaseModel):
     """Special tokens configuration subset"""
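The removed `validate_save_safetensors` validator coerced None to True and rejected False outright; with it gone, `save_safetensors: false` presumably validates again. Its behavior, reproduced as a self-contained pydantic example (hypothetical model name):

from pydantic import BaseModel, field_validator


class OutputConfig(BaseModel):
    save_safetensors: bool | None = True

    @field_validator("save_safetensors")
    @classmethod
    def _force_safetensors(cls, v):
        # Same logic as the removed validator: None -> True, False -> error.
        if v is False:
            raise ValueError("save_safetensors=False is not supported in Transformers V5.")
        return True if v is None else v


# OutputConfig(save_safetensors=None).save_safetensors  -> True
# OutputConfig(save_safetensors=False)                   -> raises ValidationError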
Some files were not shown because too many files have changed in this diff.