Compare commits: v0.9.0...activation (33 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 7610a02881 |  |
|  | b0cd54bcb9 |  |
|  | 54960d4de0 |  |
|  | ed922796b7 |  |
|  | 3dd9c3bf3f |  |
|  | 0ba7d362fa |  |
|  | e4f73bc98e |  |
|  | bcb59c70e2 |  |
|  | 6a3e6f8c53 |  |
|  | fee3c13bb5 |  |
|  | 996fc124e5 |  |
|  | e963990ad7 |  |
|  | c3f2b1c5c2 |  |
|  | 6ba5c0ed2c |  |
|  | 24ff5f53f8 |  |
|  | 5e949eaa07 |  |
|  | 89ca14d9a0 |  |
|  | 8446b4ad28 |  |
|  | fc79606b6d |  |
|  | baeb00231b |  |
|  | 2413688b08 |  |
|  | 5bb1f3da56 |  |
|  | a21b9cc472 |  |
|  | 41a1ec0c95 |  |
|  | ecac731922 |  |
|  | 742fef4200 |  |
|  | a39caf8824 |  |
|  | 07e4f2e25b |  |
|  | c7d07de6b4 |  |
|  | 6565ae85d8 |  |
|  | 80b4edb4a7 |  |
|  | fedbcc0254 |  |
|  | 8175896ada |  |
.github/workflows/base.yml (6 changes; vendored)

@@ -22,12 +22,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: "124"
-            cuda_version: 12.4.1
-            cudnn_version: ""
-            python_version: "3.11"
-            pytorch: 2.4.1
-            torch_cuda_arch_list: "7.0 7.5 8.0 8.6 8.7 8.9 9.0+PTX"
           - cuda: "124"
             cuda_version: 12.4.1
             cudnn_version: ""
.github/workflows/main.yml (13 changes; vendored)

@@ -15,11 +15,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
@@ -35,7 +30,7 @@ jobs:
             cuda_version: 12.6.3
             python_version: "3.11"
             pytorch: 2.7.0
-            axolotl_extras: vllm
+            axolotl_extras:
     runs-on: axolotl-gpu-runner
     steps:
       - name: Checkout
@@ -67,6 +62,7 @@ jobs:
             CUDA=${{ matrix.cuda }}
             PYTORCH_VERSION=${{ matrix.pytorch }}
             AXOLOTL_ARGS=${{ matrix.axolotl_args }}
+            AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}
           file: ./docker/Dockerfile
           push: ${{ github.event_name != 'pull_request' }}
          tags: |
@@ -82,11 +78,6 @@ jobs:
     strategy:
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
.github/workflows/multi-gpu-e2e.yml (8 changes; vendored)

@@ -9,6 +9,7 @@ on:
       - 'pyproject.toml'
       - '.github/workflows/multi-gpu-e2e.yml'
       - 'src/axolotl/core/trainers/mixins/sequence_parallel.py'
+      - 'src/axolotl/utils/distributed.py'
   workflow_dispatch:
   schedule:
     - cron: '0 0 * * 1,4'  # Runs at 00:00 UTC every monday & thursday
@@ -32,13 +33,6 @@ jobs:
             axolotl_extras: vllm
             num_gpus: 2
             nightly_build: "true"
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras: # no vllm support for 2.4.1
-            num_gpus: 2
-            nightly_build: "true"
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
.github/workflows/nightlies.yml (10 changes; vendored)

@@ -12,11 +12,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
@@ -70,11 +65,6 @@ jobs:
     strategy:
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            axolotl_extras:
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
.github/workflows/preview-docs.yml (6 changes; vendored)

@@ -4,6 +4,12 @@ on:
   pull_request:
     types: [opened, synchronize, reopened]
 
+    # Run the workflow only when one of these files changes
+    paths:
+      - '**/*.md'   # any Markdown file
+      - '**/*.qmd'  # any Quarto file
+      - '_quarto.yaml'
+
 permissions:
   checks: write
   contents: write
.github/workflows/tests-nightly.yml (9 changes; vendored)

@@ -26,7 +26,7 @@ jobs:
       max-parallel: 2
       matrix:
         python_version: ["3.11"]
-        pytorch_version: ["2.4.1", "2.5.1", "2.6.0"]
+        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
     timeout-minutes: 20
 
     steps:
@@ -106,13 +106,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - cuda: 124
-            cuda_version: 12.4.1
-            python_version: "3.11"
-            pytorch: 2.4.1
-            num_gpus: 1
-            axolotl_extras:
-            nightly_build: "true"
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
.github/workflows/tests.yml (13 changes; vendored)

@@ -27,6 +27,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
+env:
+  TRANSFORMERS_IS_CI: "yes"
+
 jobs:
   pre-commit:
     name: pre-commit
@@ -49,7 +52,7 @@ jobs:
       max-parallel: 2
       matrix:
         python_version: ["3.11"]
-        pytorch_version: ["2.4.1", "2.5.1", "2.6.0", "2.7.0"]
+        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
     timeout-minutes: 20
 
     steps:
@@ -135,7 +138,7 @@ jobs:
       max-parallel: 1
       matrix:
         python_version: ["3.11"]
-        pytorch_version: ["2.4.1", "2.5.1", "2.6.0"]
+        pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
     timeout-minutes: 20
 
     steps:
@@ -258,6 +261,12 @@ jobs:
       fail-fast: false
       matrix:
         include:
+          - cuda: 124
+            cuda_version: 12.4.1
+            python_version: "3.11"
+            pytorch: 2.6.0
+            num_gpus: 1
+            axolotl_extras: llmcompressor
           - cuda: 124
             cuda_version: 12.4.1
             python_version: "3.11"
.runpod/test-input.json (new file, 86 lines)

{
  "input": {
    "name": "quick_smoke_test_sft",
    "user_id": "user",
    "model_id": "llama-test",
    "run_id": "llama-test",
    "credentials": {
      "wandb_api_key": "",
      "hf_token": ""
    },
    "args": {
      "base_model": "HuggingFaceTB/SmolLM2-135M",
      "model_type": "AutoModelForCausalLM",
      "tokenizer_type": "AutoTokenizer",
      "load_in_4bit": true,
      "strict": false,
      "datasets": [
        {
          "path": "mhenrichsen/alpaca_2k_test",
          "type": "alpaca",
          "split": "train[:10%]"
        }
      ],
      "val_set_size": 0.02,
      "output_dir": "./outputs/lora-out",
      "sequence_len": 4096,
      "sample_packing": true,
      "eval_sample_packing": false,
      "pad_to_sequence_len": true,
      "adapter": "qlora",
      "lora_r": 32,
      "lora_alpha": 64,
      "lora_dropout": 0.05,
      "lora_target_linear": true,
      "lora_modules_to_save": [
        "embed_tokens",
        "lm_head"
      ],
      "gradient_accumulation_steps": 2,
      "micro_batch_size": 1,
      "num_epochs": 1,
      "optimizer": "adamw_torch_fused",
      "lr_scheduler": "cosine",
      "learning_rate": 0.0002,
      "train_on_inputs": false,
      "group_by_length": false,
      "bf16": "auto",
      "tf32": true,
      "gradient_checkpointing": true,
      "logging_steps": 1,
      "flash_attention": true,
      "warmup_steps": 1,
      "evals_per_epoch": 1,
      "eval_max_new_tokens": 128,
      "saves_per_epoch": 1,
      "weight_decay": 0.0,
      "special_tokens": {
        "pad_token": "<|endoftext|>"
      },
      "max_steps": 20
    },
    "timeout": 100000
  },
  "config": {
    "gpuTypeId": "NVIDIA GeForce RTX 4090",
    "gpuCount": 1,
    "containerDiskInGb": 200,
    "env": [
      {
        "key": "TOKENIZER",
        "value": ""
      },
      {
        "key": "DISABLE_LOG_STATS",
        "value": "true"
      }
    ],
    "allowedCudaVersions": [
      "12.8",
      "12.7",
      "12.6",
      "12.5",
      "12.4"
    ]
  }
}
@@ -1,65 +1,70 @@
 {
-  "input": {
-    "name": "quick_smoke_test_sft",
-    "user_id": "user",
-    "model_id": "llama-test",
-    "run_id": "llama-test",
-    "credentials": {
-      "wandb_api_key": "",
-      "hf_token": ""
-    },
-    "args": {
-      "base_model": "HuggingFaceTB/SmolLM2-135M",
-      "model_type": "AutoModelForCausalLM",
-      "tokenizer_type": "AutoTokenizer",
-      "load_in_8bit": true,
-      "load_in_4bit": false,
-      "strict": false,
-      "datasets": [
-        {
-          "path": "mhenrichsen/alpaca_2k_test",
-          "type": "alpaca"
-        }
-      ],
-      "val_set_size": 0.05,
-      "output_dir": "./outputs/lora-out",
-      "sequence_len": 4096,
-      "sample_packing": true,
-      "eval_sample_packing": false,
-      "pad_to_sequence_len": true,
-      "adapter": "lora",
-      "lora_r": 32,
-      "lora_alpha": 64,
-      "lora_dropout": 0.05,
-      "lora_target_linear": true,
-      "lora_modules_to_save": [
-        "embed_tokens",
-        "lm_head"
-      ],
-      "gradient_accumulation_steps": 4,
-      "micro_batch_size": 2,
-      "num_epochs": 1,
-      "optimizer": "adamw_torch_fused",
-      "lr_scheduler": "cosine",
-      "learning_rate": 0.0002,
-      "train_on_inputs": false,
-      "group_by_length": false,
-      "bf16": "auto",
-      "tf32": true,
-      "gradient_checkpointing": true,
-      "logging_steps": 1,
-      "flash_attention": true,
-      "warmup_steps": 1,
-      "evals_per_epoch": 1,
-      "eval_max_new_tokens": 128,
-      "saves_per_epoch": 1,
-      "weight_decay": 0.0,
-      "special_tokens": {
-        "pad_token": "<|endoftext|>"
-      }
-    },
-    "timeout": 100000
-  },
+  "tests": [
+    {
+      "name": "quick_smoke_test_sft",
+      "input": {
+        "user_id": "user",
+        "model_id": "llama-test",
+        "run_id": "llama-test",
+        "credentials": {
+          "wandb_api_key": "",
+          "hf_token": ""
+        },
+        "args": {
+          "base_model": "HuggingFaceTB/SmolLM2-135M",
+          "model_type": "AutoModelForCausalLM",
+          "tokenizer_type": "AutoTokenizer",
+          "load_in_4bit": true,
+          "strict": false,
+          "datasets": [
+            {
+              "path": "mhenrichsen/alpaca_2k_test",
+              "type": "alpaca",
+              "split": "train[:10%]"
+            }
+          ],
+          "val_set_size": 0.02,
+          "output_dir": "./outputs/lora-out",
+          "sequence_len": 4096,
+          "sample_packing": true,
+          "eval_sample_packing": false,
+          "pad_to_sequence_len": true,
+          "adapter": "qlora",
+          "lora_r": 32,
+          "lora_alpha": 64,
+          "lora_dropout": 0.05,
+          "lora_target_linear": true,
+          "lora_modules_to_save": [
+            "embed_tokens",
+            "lm_head"
+          ],
+          "gradient_accumulation_steps": 2,
+          "micro_batch_size": 1,
+          "num_epochs": 1,
+          "optimizer": "adamw_torch_fused",
+          "lr_scheduler": "cosine",
+          "learning_rate": 0.0002,
+          "train_on_inputs": false,
+          "group_by_length": false,
+          "bf16": "auto",
+          "tf32": true,
+          "gradient_checkpointing": true,
+          "logging_steps": 1,
+          "flash_attention": true,
+          "warmup_steps": 1,
+          "evals_per_epoch": 1,
+          "eval_max_new_tokens": 128,
+          "saves_per_epoch": 1,
+          "weight_decay": 0.0,
+          "special_tokens": {
+            "pad_token": "<|endoftext|>"
+          },
+          "max_steps": 20
+        }
+      },
+      "timeout": 100000
+    }
+  ],
   "config": {
     "gpuTypeId": "NVIDIA GeForce RTX 4090",
     "gpuCount": 1,
@@ -184,6 +184,10 @@ datasets:
       # adding a system turn with empty content.
       drop_system_message:
 
+      # Optional[bool]. Whether to split the assistant turn based on a reasoning trace inside delimited tags
+      # defaults to False
+      split_thinking:
+
       # IMPORTANT: The following fields determine which parts of the conversation to train on.
       # Priority order: message_field_training > message_field_training_detail > train_on_inputs or role in roles_to_train
       # See examples at `docs/dataset-formats/conversation.qmd`
@@ -49,7 +49,8 @@ sections = [
     ("Knowledge Distillation (KD)", "kd"),
     ("Liger Kernels", "liger"),
     ("Language Model Evaluation Harness (LM Eval)", "lm_eval"),
-    ("Spectrum", "spectrum")
+    ("Spectrum", "spectrum"),
+    ("LLMCompressor", "llm_compressor")
 ]
 
 for section_name, folder_name in sections:
@@ -164,7 +164,7 @@ Here is an example of a multi-modal dataset:
   {
     "role": "user",
     "content": [
-      {"type": "image", "image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"},
+      {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"},
       {"type": "text", "text": "Describe this image in detail."}
     ]
   },
examples/llama-3/sparse-finetuning.yaml (new file, 77 lines)

base_model: neuralmagic/Sparse-Llama-3.1-8B-2of4

plugins:
  - axolotl.integrations.llm_compressor.LLMCompressorPlugin

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: tatsu-lab/alpaca
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.05
output_dir: ./outputs/out

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true
eval_sample_packing: false

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 8
micro_batch_size: 1
num_epochs: 1
optimizer: paged_adamw_8bit
lr_scheduler: cosine
learning_rate: 2e-5

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
early_stopping_patience:
resume_from_checkpoint:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 100
evals_per_epoch: 2
eval_table_size:
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
  pad_token: <|end_of_text|>

llmcompressor:
  recipe:
    finetuning_stage:
      finetuning_modifiers:
        ConstantPruningModifier:
          targets: [
            're:.*q_proj.weight',
            're:.*k_proj.weight',
            're:.*v_proj.weight',
            're:.*o_proj.weight',
            're:.*gate_proj.weight',
            're:.*up_proj.weight',
            're:.*down_proj.weight',
          ]
          start: 0
  save_compressed: true
@@ -18,7 +18,7 @@ accelerate==1.6.0
 datasets==3.5.0
 deepspeed>=0.15.4
 trl==0.17.0
-hf_xet==1.0.0
+hf_xet==1.1.0
 hqq==0.2.5
 
 optimum==1.16.2
setup.py (7 changes)

@@ -67,13 +67,13 @@ def parse_requirements(extras_require_map):
     if (major, minor) >= (2, 7):
         _install_requires.pop(_install_requires.index(xformers_version))
         # _install_requires.append("xformers==0.0.29.post3")  # xformers seems to be hard pinned to 2.6.0
-        extras_require_map["vllm"] = ["vllm==0.8.4"]
+        extras_require_map["vllm"] = ["vllm==0.8.5"]
     elif (major, minor) >= (2, 6):
         _install_requires.pop(_install_requires.index(xformers_version))
         _install_requires.append(
             "xformers==0.0.29.post2"
         )  # vllm needs post2 w torch 2.6
-        extras_require_map["vllm"] = ["vllm==0.8.4"]
+        extras_require_map["vllm"] = ["vllm==0.8.5"]
     elif (major, minor) >= (2, 5):
         _install_requires.pop(_install_requires.index(xformers_version))
         if patch == 0:
@@ -149,6 +149,9 @@ extras_require = {
     "vllm": [
         "vllm==0.7.2",
     ],
+    "llmcompressor": [
+        "llmcompressor==0.5.1",
+    ],
 }
 
 install_requires, dependency_links, extras_require_build = parse_requirements(
src/axolotl/__init__.py

@@ -4,4 +4,4 @@ import pkgutil
 
 __path__ = pkgutil.extend_path(__path__, __name__)  # Make this a namespace package
 
-__version__ = "0.9.0"
+__version__ = "0.10.0.dev0"
@@ -2,4 +2,7 @@
 
 import os
 
+from axolotl.logging_config import configure_logging
+
 os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+configure_logging()
@@ -8,9 +8,6 @@ from accelerate.commands.config import config_args
 from huggingface_hub import HfApi
 from huggingface_hub.utils import LocalTokenNotFoundError
 
-from axolotl.logging_config import configure_logging
-
-configure_logging()
 LOG = logging.getLogger(__name__)
src/axolotl/cli/config.py

@@ -5,6 +5,7 @@ import logging
 import os
 import tempfile
 from pathlib import Path
+from tempfile import NamedTemporaryFile
 from typing import Union
 from urllib.parse import urlparse
 
@@ -152,7 +153,15 @@ def prepare_plugins(cfg: DictDefault):
         plugin_manager.register(plugin_name)
 
 
-def load_cfg(config: Union[str, Path] = Path("examples/"), **kwargs) -> DictDefault:
+def plugin_set_cfg(cfg: DictDefault):
+    if cfg.get("plugins"):
+        plugin_manager = PluginManager.get_instance()
+        plugin_manager.cfg = cfg
+
+
+def load_cfg(
+    config: str | Path | DictDefault = Path("examples/"), **kwargs
+) -> DictDefault:
     """
     Loads the `axolotl` configuration stored at `config`, validates it, and performs
     various setup.
@@ -164,13 +173,24 @@ def load_cfg(config: Union[str, Path] = Path("examples/"), **kwargs) -> DictDefa
     Returns:
         `DictDefault` mapping configuration keys to values.
     """
-    config = check_remote_config(config)
-    if Path(config).is_dir():
-        config = choose_config(Path(config))
+    if isinstance(config, (str, Path)):
+        config = check_remote_config(config)
+        if Path(config).is_dir():
+            config = choose_config(Path(config))
 
-    # Load the config from the yaml file
-    with open(config, encoding="utf-8") as file:
-        cfg: DictDefault = DictDefault(yaml.safe_load(file))
+        # Load the config from the yaml file
+        with open(config, encoding="utf-8") as file:
+            cfg: DictDefault = DictDefault(yaml.safe_load(file))
+
+        cfg.axolotl_config_path = config
+    else:
+        cfg = config
+        with NamedTemporaryFile(
+            mode="w", delete=False, suffix=".yml", prefix="axolotl_config_"
+        ) as temp_file:
+            temp_file.write(yaml.dump(config.to_dict()))
+            temp_file.close()
+        cfg.axolotl_config_path = temp_file.name
 
     # If there are any options passed in the cli, if it is something that seems valid
     # from the yaml, then overwrite the value
@@ -184,8 +204,6 @@ def load_cfg(config: Union[str, Path] = Path("examples/"), **kwargs) -> DictDefa
         else:
             cfg[k] = kwargs[k]
 
-    cfg.axolotl_config_path = config
-
     try:
         device_props = torch.cuda.get_device_properties("cuda")
         gpu_version = "sm_" + str(device_props.major) + str(device_props.minor)
@@ -213,5 +231,6 @@ def load_cfg(config: Union[str, Path] = Path("examples/"), **kwargs) -> DictDefa
     setup_wandb_env_vars(cfg)
     setup_mlflow_env_vars(cfg)
     setup_comet_env_vars(cfg)
+    plugin_set_cfg(cfg)
 
     return cfg
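With this change, `load_cfg` accepts an in-memory `DictDefault` in addition to a path or URL; a dict config is dumped to a temporary YAML file so `cfg.axolotl_config_path` still points at a real file. A minimal sketch of the new programmatic entry point (the config keys below are illustrative, not part of this changeset):

```python
from axolotl.cli.config import load_cfg
from axolotl.utils.dict import DictDefault

# Build a config in memory instead of pointing at a YAML file on disk.
cfg = load_cfg(
    DictDefault(
        {
            "base_model": "HuggingFaceTB/SmolLM2-135M",
            "datasets": [{"path": "mhenrichsen/alpaca_2k_test", "type": "alpaca"}],
            "micro_batch_size": 1,
            "num_epochs": 1,
        }
    )
)
print(cfg.axolotl_config_path)  # e.g. a temp file named axolotl_config_*.yml
```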
src/axolotl/cli/evaluate.py

@@ -1,6 +1,7 @@
 """CLI to run evaluation on a model."""
 
 import logging
+import os
 from pathlib import Path
 from typing import Union
 
@@ -14,6 +15,7 @@ from axolotl.cli.checks import check_accelerate_default_config, check_user_token
 from axolotl.cli.config import load_cfg
 from axolotl.common.datasets import load_datasets, load_preference_datasets
 from axolotl.evaluate import evaluate
+from axolotl.utils import patch_optimized_env
 from axolotl.utils.dict import DictDefault
 
 LOG = logging.getLogger(__name__)
@@ -29,10 +31,14 @@ def do_evaluate(cfg: DictDefault, cli_args: TrainerCliArgs) -> None:
         cfg: Dictionary mapping `axolotl` config keys to values.
         cli_args: CLI arguments.
     """
+    # Enable expandable segments for cuda allocation to improve VRAM usage
+    patch_optimized_env()
+
     # pylint: disable=duplicate-code
     print_axolotl_text_art()
     check_accelerate_default_config()
-    check_user_token()
+    if int(os.getenv("LOCAL_RANK", "0")) == 0:
+        check_user_token()
 
     if cfg.rl:
         dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
@@ -28,9 +28,8 @@ from axolotl.cli.utils import (
     fetch_from_github,
     filter_none_kwargs,
 )
-from axolotl.cli.vllm_serve import do_vllm_serve
 from axolotl.integrations.lm_eval.cli import lm_eval
-from axolotl.utils import set_pytorch_cuda_alloc_conf
+from axolotl.utils import patch_optimized_env
 from axolotl.utils.schemas.config import AxolotlInputConfig
 
 
@@ -56,6 +55,8 @@ def preprocess(config: str, cloud: Optional[str] = None, **kwargs) -> None:
         kwargs: Additional keyword arguments which correspond to CLI args or `axolotl`
             config options.
     """
+    patch_optimized_env()
+
     if cloud:
         from axolotl.cli.cloud import do_cli_preprocess
 
@@ -101,7 +102,7 @@ def train(
             config options.
     """
     # Enable expandable segments for cuda allocation to improve VRAM usage
-    set_pytorch_cuda_alloc_conf()
+    patch_optimized_env()
 
     if "use_ray" in kwargs and kwargs["use_ray"]:
         accelerate = False
@@ -327,6 +328,8 @@ def fetch(directory: str, dest: Optional[str]) -> None:
 @add_options_from_dataclass(VllmServeCliArgs)
 @filter_none_kwargs
 def vllm_serve(config: str, **cli_args: VllmServeCliArgs):
+    from axolotl.cli.vllm_serve import do_vllm_serve
+
     do_vllm_serve(config, cli_args)
@@ -18,7 +18,7 @@ from axolotl.cli.config import load_cfg
 from axolotl.common.datasets import load_datasets, load_preference_datasets
 from axolotl.integrations.base import PluginManager
 from axolotl.train import train
-from axolotl.utils import set_pytorch_cuda_alloc_conf
+from axolotl.utils import patch_optimized_env
 from axolotl.utils.config import normalize_config, resolve_dtype
 from axolotl.utils.dict import DictDefault
 
@@ -36,7 +36,7 @@ def do_train(cfg: DictDefault, cli_args: TrainerCliArgs):
         cli_args: Training-specific CLI arguments.
     """
     # Enable expandable segments for cuda allocation to improve VRAM usage
-    set_pytorch_cuda_alloc_conf()
+    patch_optimized_env()
 
     print_axolotl_text_art()
     check_accelerate_default_config()
@@ -20,11 +20,9 @@ from transformers import (
     ProcessorMixin,
 )
 
-from axolotl.logging_config import configure_logging
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.models import load_model, load_processor, load_tokenizer
 
-configure_logging()
 LOG = logging.getLogger(__name__)
@@ -11,5 +11,6 @@ MOE_ARCH_BLOCK = {
     ],
     "mixtral": "MixtralSparseMoeBlock",
     "qwen2_moe": "Qwen2MoeSparseMoeBlock",
+    "qwen3_moe": "Qwen3MoeSparseMoeBlock",
     "deepseek_v2": "DeepseekV2MoE",
 }
src/axolotl/common/datasets.py

@@ -47,7 +47,7 @@ def sample_dataset(dataset: Dataset, num_samples: int) -> Dataset:
 def load_datasets(
     *,
     cfg: DictDefault,
-    cli_args: Union[PreprocessCliArgs, TrainerCliArgs],
+    cli_args: PreprocessCliArgs | TrainerCliArgs | None = None,
 ) -> TrainDatasetMeta:
     """
     Loads one or more training or evaluation datasets, calling
@@ -64,7 +64,8 @@ def load_datasets(
     tokenizer = load_tokenizer(cfg)
     processor = load_processor(cfg, tokenizer=tokenizer) if cfg.processor_type else None
     preprocess_iterable = (
-        hasattr(cli_args, "iterable")
+        cli_args
+        and hasattr(cli_args, "iterable")
         and cli_args.iterable is not None
         and cli_args.iterable
     )
@@ -76,7 +77,7 @@ def load_datasets(
         preprocess_iterable=preprocess_iterable,
     )
 
-    if (
+    if cli_args and (
         cli_args.debug
         or cfg.debug
         or cli_args.debug_text_only
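With `cli_args` now optional, `load_datasets` can be driven purely from a loaded config. A minimal sketch (the example config path is illustrative):

```python
from axolotl.cli.config import load_cfg
from axolotl.common.datasets import load_datasets

# Tokenize and pack the datasets referenced by a config without constructing
# a PreprocessCliArgs/TrainerCliArgs object.
cfg = load_cfg("examples/llama-3/sparse-finetuning.yaml")
dataset_meta = load_datasets(cfg=cfg)
print(dataset_meta.train_dataset, dataset_meta.total_num_steps)
```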
@@ -60,6 +60,7 @@ from axolotl.core.training_args import (
|
|||||||
from axolotl.integrations.base import PluginManager
|
from axolotl.integrations.base import PluginManager
|
||||||
from axolotl.monkeypatch.multipack import SUPPORTED_MULTIPACK_MODEL_TYPES
|
from axolotl.monkeypatch.multipack import SUPPORTED_MULTIPACK_MODEL_TYPES
|
||||||
from axolotl.monkeypatch.relora import ReLoRACallback
|
from axolotl.monkeypatch.relora import ReLoRACallback
|
||||||
|
from axolotl.monkeypatch.trainer.lr import patch_trainer_get_lr
|
||||||
from axolotl.processing_strategies import get_processing_strategy
|
from axolotl.processing_strategies import get_processing_strategy
|
||||||
from axolotl.utils import is_comet_available, is_mlflow_available
|
from axolotl.utils import is_comet_available, is_mlflow_available
|
||||||
from axolotl.utils.callbacks import (
|
from axolotl.utils.callbacks import (
|
||||||
@@ -114,6 +115,8 @@ class TrainerBuilderBase(abc.ABC):
|
|||||||
if hasattr(model, "add_model_tags"):
|
if hasattr(model, "add_model_tags"):
|
||||||
model.add_model_tags(["axolotl"])
|
model.add_model_tags(["axolotl"])
|
||||||
|
|
||||||
|
patch_trainer_get_lr()
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def model_ref(self):
|
def model_ref(self):
|
||||||
return self._model_ref
|
return self._model_ref
|
||||||
@@ -485,7 +488,7 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
|
|||||||
|
|
||||||
# these are all the "standard" kwargs that are def used
|
# these are all the "standard" kwargs that are def used
|
||||||
training_arguments_kwargs["max_steps"] = (
|
training_arguments_kwargs["max_steps"] = (
|
||||||
total_num_steps if self.cfg.max_steps else -1
|
self.cfg.max_steps if self.cfg.max_steps else -1
|
||||||
)
|
)
|
||||||
training_arguments_kwargs["max_seq_length"] = self.cfg.sequence_len
|
training_arguments_kwargs["max_seq_length"] = self.cfg.sequence_len
|
||||||
training_arguments_kwargs["per_device_train_batch_size"] = (
|
training_arguments_kwargs["per_device_train_batch_size"] = (
|
||||||
|
|||||||
src/axolotl/core/trainers/base.py

@@ -610,3 +610,15 @@ class AxolotlTrainer(
         output_dir = os.path.join(run_dir, checkpoint_folder)
         os.makedirs(output_dir, exist_ok=True)
         return super()._save_checkpoint(model, trial, **kwargs)
+
+    def compute_loss_context_manager(self):
+        from contextlib import ExitStack
+
+        from torchtune.training import OffloadActivations
+
+        stack = ExitStack()
+
+        stack.enter_context(super().compute_loss_context_manager())
+        stack.enter_context(OffloadActivations())
+
+        return stack
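The override above layers torchtune's `OffloadActivations` on top of the parent trainer's loss context by entering both through a single `ExitStack`, so they are torn down together in reverse order. A self-contained sketch of the same composition pattern (the context managers here are stand-ins, not axolotl code):

```python
from contextlib import ExitStack, contextmanager


@contextmanager
def parent_loss_context():
    # stand-in for super().compute_loss_context_manager() (e.g. autocast)
    print("enter parent context")
    yield
    print("exit parent context")


@contextmanager
def offload_activations():
    # stand-in for torchtune.training.OffloadActivations
    print("enter offload")
    yield
    print("exit offload")


def compute_loss_context_manager():
    # Enter several contexts up front and hand back one object; the caller's
    # single `with` statement then manages all of them.
    stack = ExitStack()
    stack.enter_context(parent_loss_context())
    stack.enter_context(offload_activations())
    return stack


with compute_loss_context_manager():
    pass  # forward pass / loss computation runs with both contexts active
```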
@@ -177,12 +177,8 @@ class AxolotlDPOTrainer(RngLoaderMixin, SchedulerMixin, DPOTrainer):
         # dpo trainer may incorrectly prepend the bos_token_id to the dpo outputs
         if res["chosen_input_ids"][0] == processing_class.bos_token_id:
             res["chosen_input_ids"] = res["chosen_input_ids"][1:]
-            res["chosen_labels"] = res["chosen_labels"][1:]
-            res["chosen_attention_mask"] = res["chosen_attention_mask"][1:]
         if res["rejected_input_ids"][0] == processing_class.bos_token_id:
             res["rejected_input_ids"] = res["rejected_input_ids"][1:]
-            res["rejected_labels"] = res["rejected_labels"][1:]
-            res["rejected_attention_mask"] = res["rejected_attention_mask"][1:]
 
         return res
@@ -63,6 +63,7 @@ class GRPOStrategy:
 
         grpo_args_kwargs["max_completion_length"] = trl.max_completion_length
         grpo_args_kwargs["log_completions"] = trl.log_completions
+        grpo_args_kwargs["num_completions_to_print"] = trl.num_completions_to_print
 
         if trl.reward_weights:
             grpo_args_kwargs["reward_weights"] = trl.reward_weights
@@ -70,6 +71,13 @@ class GRPOStrategy:
         if trl.scale_rewards is not None:
             grpo_args_kwargs["scale_rewards"] = trl.scale_rewards
 
+        if trl.loss_type is not None:
+            grpo_args_kwargs["loss_type"] = trl.loss_type
+        if trl.mask_truncated_completions is not None:
+            grpo_args_kwargs["mask_truncated_completions"] = (
+                trl.mask_truncated_completions
+            )
+
         if trl.temperature is not None:
             grpo_args_kwargs["temperature"] = trl.temperature
         if trl.top_p is not None:
@@ -85,6 +93,11 @@ class GRPOStrategy:
             grpo_args_kwargs["num_iterations"] = trl.num_iterations
         if trl.epsilon is not None:
             grpo_args_kwargs["epsilon"] = trl.epsilon
+        if trl.epsilon_high is not None:
+            grpo_args_kwargs["epsilon_high"] = trl.epsilon_high
+
+        if trl.use_liger_loss is not None:
+            grpo_args_kwargs["use_liger_loss"] = trl.use_liger_loss
 
         return grpo_args_kwargs
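The newly forwarded fields surface through the `trl` section of an axolotl config. A sketch of what that section could look like (field names come from the diff above; the specific values, and the reminder that a full config also needs `base_model`, `datasets`, etc., are illustrative assumptions):

```python
from axolotl.utils.dict import DictDefault

# Hypothetical GRPO settings exercising the newly plumbed-through options.
trl_section = DictDefault(
    {
        "trl": {
            "loss_type": "dr_grpo",              # forwarded when not None
            "mask_truncated_completions": True,  # forwarded when not None
            "epsilon_high": 0.28,                # upper clipping bound
            "use_liger_loss": True,              # forwarded when not None
            "num_completions_to_print": 2,       # now always forwarded
        }
    }
)
```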
@@ -3,9 +3,10 @@
 import logging
 
 import torch
-from torch.optim.lr_scheduler import OneCycleLR
+from torch.optim.lr_scheduler import LRScheduler, OneCycleLR
 from transformers.trainer import Trainer
 
+from axolotl.integrations.base import PluginManager
 from axolotl.utils.schedulers import (
     RexLR,
     get_cosine_schedule_with_min_lr,
@@ -25,9 +26,9 @@ class SchedulerMixin(Trainer):
 
     def create_scheduler(
         self, num_training_steps: int, optimizer: torch.optim.Optimizer = None
-    ):
+    ) -> LRScheduler:
         """
-        Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
+        Set up the scheduler. The optimizer of the trainer must have been set up either before this method is called or
         passed as an argument.
 
         Args:
@@ -47,7 +48,16 @@ class SchedulerMixin(Trainer):
         # fmt: off
         if self.lr_scheduler is None:  # type: ignore # pylint: disable=access-member-before-definition
             # fmt: on
-            if self.args.alternate_lr_scheduler_type == "one_cycle":
+            plugin_manager = PluginManager.get_instance()
+            lr_scheduler: LRScheduler | None = plugin_manager.create_lr_scheduler(
+                trainer=self,
+                optimizer=optimizer,
+                num_training_steps=num_training_steps
+            )
+            if lr_scheduler is not None:
+                LOG.info(f"Using plugin-created lr_scheduler: {lr_scheduler}")
+                self.lr_scheduler = lr_scheduler
+            elif self.args.alternate_lr_scheduler_type == "one_cycle":
                 num_warmup_steps = self.args.get_warmup_steps(num_training_steps)
                 pct_start = num_warmup_steps / num_training_steps
                 extra_lr_kwargs = {}
@@ -110,4 +120,4 @@ class SchedulerMixin(Trainer):
         if use_cosine_min_lr:
             LOG.warning("axolotl's cosine scheduler with min lr not used (e.g., because of deepspeed).")
 
-        return self.lr_scheduler
+        return self.lr_scheduler  # type: ignore
@@ -1,6 +1,7 @@
 """Module for ReLoRA trainer"""
 
 import torch
+from torch.optim.lr_scheduler import LRScheduler
 
 from axolotl.core.trainers.base import AxolotlTrainer
 from axolotl.monkeypatch.relora import ReLoRAScheduler
@@ -19,9 +20,11 @@ class ReLoRATrainer(AxolotlTrainer):
         self,
         num_training_steps: int,
         optimizer: torch.optim.Optimizer | None = None,
-    ):
+    ) -> LRScheduler:
         optimizer = self.optimizer if optimizer is None else optimizer
-        lr_scheduler = super().create_scheduler(num_training_steps, optimizer)
+        lr_scheduler: LRScheduler = super().create_scheduler(
+            num_training_steps, optimizer
+        )
 
         if self.args.relora_steps:
             warmup_steps = (
@@ -30,7 +33,7 @@ class ReLoRATrainer(AxolotlTrainer):
             anneal_steps = (
                 self.args.relora_anneal_steps if self.args.relora_anneal_steps else 1
             )
-            self.lr_scheduler = ReLoRAScheduler(
+            self.lr_scheduler = ReLoRAScheduler(  # type: ignore
                 optimizer,
                 lr_scheduler,
                 self.args.relora_steps,
@@ -38,6 +41,6 @@ class ReLoRATrainer(AxolotlTrainer):
                 warmup_steps,
             )
         else:
-            self.lr_scheduler = lr_scheduler
+            self.lr_scheduler = lr_scheduler  # type: ignore
 
-        return self.lr_scheduler
+        return self.lr_scheduler  # type: ignore
src/axolotl/evaluate.py

@@ -11,20 +11,19 @@ from accelerate.logging import get_logger
 from datasets import Dataset
 from transformers.trainer import Trainer
 
-from axolotl.logging_config import configure_logging
-from axolotl.train import TrainDatasetMeta
-from axolotl.utils import set_pytorch_cuda_alloc_conf
+from axolotl.train import (
+    TrainDatasetMeta,
+    setup_model_and_tokenizer,
+)
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.distributed import cleanup_distributed
-from axolotl.utils.models import load_model, load_processor, load_tokenizer
 from axolotl.utils.trainer import setup_trainer
 
 project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
 src_dir = os.path.join(project_root, "src")
 sys.path.insert(0, src_dir)
 
-configure_logging()
-LOG = get_logger("axolotl.evaluate")
+LOG = get_logger(__name__)
 
 
 def evaluate_dataset(
@@ -75,37 +74,22 @@ def evaluate(*, cfg: DictDefault, dataset_meta: TrainDatasetMeta) -> Dict[str, f
     Returns:
         Dictionary mapping metric names to their values.
     """
-    # pylint: disable=duplicate-code
-    # Enable expandable segments for cuda allocation to improve VRAM usage
-    set_pytorch_cuda_alloc_conf()
-
-    # Load tokenizer
-    LOG.debug(
-        f"loading tokenizer... {cfg.tokenizer_config or cfg.base_model_config}",
-        main_process_only=True,
-    )
-    tokenizer = load_tokenizer(cfg)
-
-    # Load processor for multimodal models if needed
-    processor = None
-    if cfg.is_multimodal:
-        processor = load_processor(cfg, tokenizer)
+    # Load tokenizer, processor and model
+    LOG.debug("loading model for evaluation...")
+    model, tokenizer, _, processor = setup_model_and_tokenizer(cfg)
 
     # Get datasets
+    # pylint: disable=duplicate-code
     train_dataset = dataset_meta.train_dataset
     eval_dataset = dataset_meta.eval_dataset
     total_num_steps = dataset_meta.total_num_steps
 
-    # Load model
-    LOG.debug("loading model for evaluation...")
-    model, _ = load_model(cfg, tokenizer, processor=processor)
-
     # Set up trainer
     trainer = setup_trainer(
-        cfg,
+        cfg=cfg,
         train_dataset=train_dataset,
         eval_dataset=eval_dataset,
-        model=(model, None, None),  # No need for model_ref or peft_config
+        model=model,
         tokenizer=tokenizer,
         processor=processor,
         total_num_steps=total_num_steps,
src/axolotl/integrations/base.py

@@ -24,6 +24,7 @@ import logging
 from typing import OrderedDict
 
 import torch
+from torch.optim.lr_scheduler import LRScheduler
 
 
 class BasePlugin:
@@ -41,7 +42,7 @@ class BasePlugin:
         post_lora_load(cfg, model): Performs actions after LoRA weights are loaded.
         post_model_load(cfg, model): Performs actions after the model is loaded, inclusive of any adapters.
         create_optimizer(cfg, trainer): Creates and returns an optimizer for training.
-        create_lr_scheduler(cfg, trainer, optimizer): Creates and returns a learning rate scheduler.
+        create_lr_scheduler(cfg, trainer, optimizer, num_training_steps): Creates and returns a learning rate scheduler.
         add_callbacks_pre_trainer(cfg, model): Adds callbacks to the trainer before training.
         add_callbacks_post_trainer(cfg, trainer): Adds callbacks to the trainer after training.
     """
@@ -146,8 +147,8 @@ class BasePlugin:
         """
 
     def create_lr_scheduler(
-        self, cfg, trainer, optimizer
-    ):  # pylint: disable=unused-argument
+        self, cfg, trainer, optimizer, num_training_steps
+    ) -> LRScheduler | None:  # pylint: disable=unused-argument
         """
         Creates and returns a learning rate scheduler.
 
@@ -155,9 +156,10 @@ class BasePlugin:
             cfg (dict): The configuration for the plugin.
             trainer (object): The trainer object for training.
             optimizer (object): The optimizer for training.
+            num_training_steps (int): Total number of training steps
 
         Returns:
-            object: The created learning rate scheduler.
+            object (LRScheduler): The created learning rate scheduler.
         """
 
     def add_callbacks_pre_trainer(self, cfg, model):  # pylint: disable=unused-argument
@@ -270,6 +272,7 @@ class PluginManager:
     plugins: OrderedDict[str, BasePlugin] = collections.OrderedDict()
 
     _instance = None
+    _cfg = None
 
     def __new__(cls):
         """
@@ -277,7 +280,9 @@ class PluginManager:
         """
         if cls._instance is None:
             cls._instance = super(PluginManager, cls).__new__(cls)
-            cls._instance.plugins = collections.OrderedDict()
+            cls._instance.plugins: OrderedDict[str, BasePlugin] = (
+                collections.OrderedDict()
+            )
         return cls._instance
 
     @staticmethod
@@ -290,6 +295,14 @@ class PluginManager:
             PluginManager()
         return PluginManager._instance  # type: ignore
 
+    @property
+    def cfg(self):
+        return self._cfg
+
+    @cfg.setter
+    def cfg(self, cfg):
+        self._cfg = cfg
+
     def register(self, plugin_name: str):
         """
         Registers a new plugin by its name.
@@ -409,29 +422,29 @@ class PluginManager:
                 return trainer_cls
         return None
 
-    def create_optimizer(self, cfg, trainer):
+    def create_optimizer(self, trainer):
         """
         Calls the create_optimizer method of all registered plugins and returns the first non-None optimizer.
 
         Parameters:
-            cfg (dict): The configuration for the plugins.
             trainer (object): The trainer object for training.
 
         Returns:
             object: The created optimizer, or None if none was found.
         """
         for plugin in self.plugins.values():
-            optimizer = plugin.create_optimizer(cfg, trainer)
+            optimizer = plugin.create_optimizer(self.cfg, trainer)
             if optimizer is not None:
                 return optimizer
         return None
 
-    def create_lr_scheduler(self, cfg, trainer, optimizer):
+    def create_lr_scheduler(
+        self, trainer, optimizer, num_training_steps
+    ) -> LRScheduler | None:
         """
         Calls the create_lr_scheduler method of all registered plugins and returns the first non-None scheduler.
 
         Parameters:
-            cfg (dict): The configuration for the plugins.
             trainer (object): The trainer object for training.
             optimizer (object): The optimizer for training.
 
@@ -439,7 +452,12 @@ class PluginManager:
             object: The created learning rate scheduler, or None if none was found.
         """
         for plugin in self.plugins.values():
-            scheduler = plugin.create_lr_scheduler(cfg, trainer, optimizer)
+            scheduler: LRScheduler | None = plugin.create_lr_scheduler(
+                self.cfg,
+                trainer=trainer,
+                optimizer=optimizer,
+                num_training_steps=num_training_steps,
+            )
             if scheduler is not None:
                 return scheduler
         return None
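Under the new hook signature, a plugin can hand the trainer a ready-made scheduler, and `SchedulerMixin` will use it before falling back to the built-in choices. A minimal sketch of such a plugin (the class name and scheduler choice are illustrative, not from this changeset):

```python
from torch.optim.lr_scheduler import LambdaLR, LRScheduler

from axolotl.integrations.base import BasePlugin


class ConstantLRPlugin(BasePlugin):
    """Hypothetical plugin that supplies a constant-factor scheduler."""

    def create_lr_scheduler(
        self, cfg, trainer, optimizer, num_training_steps
    ) -> LRScheduler | None:
        # Return None instead to defer to axolotl's built-in schedulers.
        return LambdaLR(optimizer, lr_lambda=lambda step: 1.0)
```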
@@ -25,7 +25,7 @@ import torch
 
 from axolotl.integrations.base import BasePlugin
 from axolotl.utils import get_pytorch_version
-from axolotl.utils.distributed import zero_only
+from axolotl.utils.distributed import is_main_process
 
 from .args import CutCrossEntropyArgs  # pylint: disable=unused-import. # noqa: F401
 
@@ -76,7 +76,7 @@ class CutCrossEntropyPlugin(BasePlugin):
             cce_patch,
         )
 
-        with zero_only():
+        if is_main_process(use_environ=True):
             LOG.info(
                 f"Applying Cut Cross Entropy to model type: {cfg.model_config_type}"
             )
@@ -37,6 +37,7 @@ class ChatTemplateStrategyWithKD(ChatTemplateStrategy):
         train_on_eos=None,
         train_on_eot=None,
         eot_tokens=None,
+        split_thinking: bool | None = False,
         logprobs_field="logprobs",
         gen_temperature=1.0,
         kd_temperature=1.0,
@@ -54,6 +55,7 @@ class ChatTemplateStrategyWithKD(ChatTemplateStrategy):
             train_on_eos=train_on_eos,
             train_on_eot=train_on_eot,
             eot_tokens=eot_tokens,
+            split_thinking=split_thinking,
         )
 
     @property
@@ -23,8 +23,8 @@ import logging
 import sys

 from axolotl.integrations.base import BasePlugin
+from axolotl.utils.distributed import is_main_process

-from ...utils.distributed import zero_only
 from .args import LigerArgs  # pylint: disable=unused-import. # noqa: F401
 from .utils import patch_with_compile_disable

@@ -85,7 +85,7 @@ class LigerPlugin(BasePlugin):
             kwargs["geglu"] = cfg.liger_glu_activation
         elif "swiglu" in liger_fn_sig.parameters:
             kwargs["swiglu"] = cfg.liger_glu_activation
-        with zero_only():
+        if is_main_process(use_environ=True):
             LOG.info(
                 f"Applying LIGER to {cfg.model_config_type} with kwargs: {kwargs}"
             )
@@ -151,6 +151,30 @@ class LigerPlugin(BasePlugin):
                 rms_norm=cfg.liger_rms_norm,
                 layer_norm=cfg.liger_layer_norm,
             )
+        elif cfg.model_config_type == "qwen3":
+            from axolotl.integrations.liger.models.qwen3 import (
+                apply_liger_kernel_to_qwen3,
+            )
+
+            apply_liger_kernel_to_qwen3(
+                cross_entropy=cfg.liger_cross_entropy,
+                fused_linear_cross_entropy=cfg.liger_fused_linear_cross_entropy,
+                glu_activation=cfg.liger_glu_activation,
+                rms_norm=cfg.liger_rms_norm,
+                layer_norm=cfg.liger_layer_norm,
+            )
+        elif cfg.model_config_type == "qwen3_moe":
+            from axolotl.integrations.liger.models.qwen3_moe import (
+                apply_liger_kernel_to_qwen3_moe,
+            )
+
+            apply_liger_kernel_to_qwen3_moe(
+                cross_entropy=cfg.liger_cross_entropy,
+                fused_linear_cross_entropy=cfg.liger_fused_linear_cross_entropy,
+                glu_activation=cfg.liger_glu_activation,
+                rms_norm=cfg.liger_rms_norm,
+                layer_norm=cfg.liger_layer_norm,
+            )
         else:
             logging.warning(
                 f"Unsupported model config type: {cfg.model_config_type}. Liger not applied."
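With the branches above in place, Qwen3 dense and MoE models can opt into the Liger kernels from the YAML config. A hedged sketch; the `liger_*` keys mirror the `cfg` attributes read above, while the plugin path and model name are illustrative:

```yaml
plugins:
  - axolotl.integrations.liger.LigerPlugin

base_model: Qwen/Qwen3-8B  # illustrative

liger_rms_norm: true
liger_glu_activation: true
liger_layer_norm: true
liger_fused_linear_cross_entropy: true
liger_cross_entropy: false  # the fused and plain variants cannot both be enabled
```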
160  src/axolotl/integrations/liger/models/qwen3.py  Normal file
@@ -0,0 +1,160 @@
+"""
+Liger FLCE for Qwen3. Based on transformers v4.51.3.
+"""
+
+import sys
+from typing import Optional, Tuple, Union
+
+import torch
+from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+from transformers.cache_utils import Cache
+from transformers.modeling_outputs import CausalLMOutputWithPast
+
+
+def lce_forward(
+    self,
+    input_ids: Optional[torch.LongTensor] = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_values: Optional[Cache] = None,
+    inputs_embeds: Optional[torch.FloatTensor] = None,
+    labels: Optional[torch.LongTensor] = None,
+    use_cache: Optional[bool] = None,
+    output_attentions: Optional[bool] = None,
+    output_hidden_states: Optional[bool] = None,
+    cache_position: Optional[torch.LongTensor] = None,
+    logits_to_keep: Union[int, torch.Tensor] = 0,
+    **kwargs,
+) -> Union[Tuple, CausalLMOutputWithPast]:
+    r"""
+    Args:
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+        logits_to_keep (`int` or `torch.Tensor`, *optional*):
+            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+            This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+    Returns:
+    """
+
+    # pylint: disable=duplicate-code
+    output_attentions = (
+        output_attentions
+        if output_attentions is not None
+        else self.config.output_attentions
+    )
+    output_hidden_states = (
+        output_hidden_states
+        if output_hidden_states is not None
+        else self.config.output_hidden_states
+    )
+
+    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+    outputs = self.model(
+        input_ids=input_ids,
+        attention_mask=attention_mask,
+        position_ids=position_ids,
+        past_key_values=past_key_values,
+        inputs_embeds=inputs_embeds,
+        use_cache=use_cache,
+        output_attentions=output_attentions,
+        output_hidden_states=output_hidden_states,
+        cache_position=cache_position,
+        **kwargs,
+    )
+
+    hidden_states = outputs[0]
+
+    logits = None
+    loss = None
+    # if in training mode, don't materialize logits
+    if self.training and (labels is not None):
+        loss = LigerForCausalLMLoss(
+            hidden_states=hidden_states,
+            lm_head_weight=self.lm_head.weight,
+            labels=labels,
+            hidden_size=self.config.hidden_size,
+            **kwargs,
+        )
+
+    else:  # if in inference mode materialize logits
+        slice_indices = (
+            slice(-logits_to_keep, None)
+            if isinstance(logits_to_keep, int)
+            else logits_to_keep
+        )
+        logits = self.lm_head(hidden_states[:, slice_indices, :])
+        if labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=labels,
+                vocab_size=self.config.vocab_size,
+                **kwargs,
+            )
+
+    return CausalLMOutputWithPast(
+        loss=loss,
+        logits=logits,
+        past_key_values=outputs.past_key_values,
+        hidden_states=outputs.hidden_states,
+        attentions=outputs.attentions,
+    )
+
+
+def apply_liger_kernel_to_qwen3(
+    cross_entropy: bool = False,
+    fused_linear_cross_entropy: bool = False,
+    rms_norm: bool = False,
+    glu_activation: bool = False,
+    layer_norm: bool = False,
+    **kwargs,  # pylint: disable=unused-argument
+) -> None:
+    # pylint: disable=duplicate-code
+    """
+    Apply Liger kernels to replace the original implementation in HuggingFace Qwen3 models.
+
+    Args:
+        cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is False.
+        fused_linear_cross_entropy (bool):
+            Whether to apply Liger's fused linear cross entropy loss. Default is False.
+            `cross_entropy` and `fused_linear_cross_entropy` cannot both be True.
+            If `fused_linear_cross_entropy` is True, the logits will not be materialized, which is more memory efficient.
+        rms_norm (bool): Whether to apply Liger's RMSNorm. Default is False.
+        glu_activation (bool): Whether to apply Liger's SwiGLU MLP. Default is False.
+        layer_norm (bool): Whether to apply Liger's LayerNorm. Default is False.
+    """
+
+    import transformers.models.qwen3.modeling_qwen3  # noqa: F401 # pylint: disable=unused-import
+    from liger_kernel.transformers.functional import liger_cross_entropy
+    from liger_kernel.transformers.layer_norm import LigerLayerNorm
+    from liger_kernel.transformers.rms_norm import LigerRMSNorm
+    from liger_kernel.transformers.swiglu import LigerSwiGLUMLP
+
+    assert not (
+        cross_entropy and fused_linear_cross_entropy
+    ), "cross_entropy and fused_linear_cross_entropy cannot both be True."
+
+    modeling_qwen3 = sys.modules["transformers.models.qwen3.modeling_qwen3"]
+
+    if rms_norm:
+        modeling_qwen3.Qwen3RMSNorm = LigerRMSNorm
+
+    if glu_activation:
+        modeling_qwen3.Qwen3MLP = LigerSwiGLUMLP
+
+    if layer_norm:
+        modeling_qwen3.nn.LayerNorm = LigerLayerNorm
+
+    if cross_entropy:
+        from transformers.loss.loss_utils import nn
+
+        nn.functional.cross_entropy = liger_cross_entropy
+
+    if fused_linear_cross_entropy:
+        modeling_qwen3.Qwen3ForCausalLM.forward = lce_forward
191  src/axolotl/integrations/liger/models/qwen3_moe.py  Normal file
@@ -0,0 +1,191 @@
+"""
+Liger FLCE for Qwen3 MoE. Based on transformers v4.51.3.
+"""
+
+import sys
+from copy import deepcopy
+from typing import List, Optional, Union
+
+import torch
+from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+from transformers.modeling_outputs import MoeCausalLMOutputWithPast
+from transformers.models.qwen3_moe.modeling_qwen3_moe import load_balancing_loss_func
+
+
+def lce_forward(
+    self,
+    input_ids: Optional[torch.LongTensor] = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_values: Optional[List[torch.FloatTensor]] = None,
+    inputs_embeds: Optional[torch.FloatTensor] = None,
+    labels: Optional[torch.LongTensor] = None,
+    use_cache: Optional[bool] = None,
+    output_attentions: Optional[bool] = None,
+    output_hidden_states: Optional[bool] = None,
+    output_router_logits: Optional[bool] = None,
+    cache_position: Optional[torch.LongTensor] = None,
+    logits_to_keep: Union[int, torch.Tensor] = 0,
+    **kwargs,
+) -> MoeCausalLMOutputWithPast:
+    r"""
+    Args:
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+        logits_to_keep (`int` or `torch.Tensor`, *optional*):
+            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+            This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+    Returns:
+    """
+
+    # pylint: disable=duplicate-code
+    output_attentions = (
+        output_attentions
+        if output_attentions is not None
+        else self.config.output_attentions
+    )
+    output_router_logits = (
+        output_router_logits
+        if output_router_logits is not None
+        else self.config.output_router_logits
+    )
+    output_hidden_states = (
+        output_hidden_states
+        if output_hidden_states is not None
+        else self.config.output_hidden_states
+    )
+
+    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+    outputs = self.model(
+        input_ids=input_ids,
+        attention_mask=attention_mask,
+        position_ids=position_ids,
+        past_key_values=past_key_values,
+        inputs_embeds=inputs_embeds,
+        use_cache=use_cache,
+        output_attentions=output_attentions,
+        output_hidden_states=output_hidden_states,
+        output_router_logits=output_router_logits,
+        cache_position=cache_position,
+        **kwargs,
+    )
+
+    hidden_states = outputs[0]
+
+    logits = None
+    loss = None
+    # if in training mode, don't materialize logits
+    if self.training and (labels is not None):
+        loss = LigerForCausalLMLoss(
+            hidden_states=hidden_states,
+            lm_head_weight=self.lm_head.weight,
+            labels=labels,
+            hidden_size=self.config.hidden_size,
+            **kwargs,
+        )
+
+    else:  # if in inference mode materialize logits
+        slice_indices = (
+            slice(-logits_to_keep, None)
+            if isinstance(logits_to_keep, int)
+            else logits_to_keep
+        )
+        logits = self.lm_head(hidden_states[:, slice_indices, :])
+        if labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=labels,
+                vocab_size=self.config.vocab_size,
+                **kwargs,
+            )
+
+    aux_loss = None
+    if output_router_logits:
+        aux_loss = load_balancing_loss_func(
+            outputs.router_logits,
+            self.num_experts,
+            self.num_experts_per_tok,
+            attention_mask,
+        )
+        if labels is not None:
+            loss += self.router_aux_loss_coef * aux_loss.to(
+                loss.device
+            )  # make sure to reside in the same device
+
+    return MoeCausalLMOutputWithPast(
+        loss=loss,
+        aux_loss=aux_loss,
+        logits=logits,
+        past_key_values=outputs.past_key_values,
+        hidden_states=outputs.hidden_states,
+        attentions=outputs.attentions,
+    )
+
+
+def apply_liger_kernel_to_qwen3_moe(
+    cross_entropy: bool = False,
+    fused_linear_cross_entropy: bool = False,
+    rms_norm: bool = False,
+    glu_activation: bool = False,
+    layer_norm: bool = False,
+    **kwargs,  # pylint: disable=unused-argument
+) -> None:
+    # pylint: disable=duplicate-code
+    """
+    Apply Liger kernels to replace the original implementation in HuggingFace Qwen3 MoE models.
+
+    Args:
+        cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is False.
+        fused_linear_cross_entropy (bool):
+            Whether to apply Liger's fused linear cross entropy loss. Default is False.
+            `cross_entropy` and `fused_linear_cross_entropy` cannot both be True.
+            If `fused_linear_cross_entropy` is True, the logits will not be materialized, which is more memory efficient.
+        rms_norm (bool): Whether to apply Liger's RMSNorm. Default is False.
+        glu_activation (bool): Whether to apply Liger's SwiGLU MLP. Default is False.
+        layer_norm (bool): Whether to apply Liger's LayerNorm. Default is False.
+    """
+
+    import transformers.models.qwen3_moe.modeling_qwen3_moe  # noqa: F401 # pylint: disable=unused-import
+    from liger_kernel.transformers.functional import liger_cross_entropy
+    from liger_kernel.transformers.layer_norm import LigerLayerNorm
+    from liger_kernel.transformers.rms_norm import LigerRMSNorm
+    from liger_kernel.transformers.swiglu import LigerSwiGLUMLP
+
+    assert not (
+        cross_entropy and fused_linear_cross_entropy
+    ), "cross_entropy and fused_linear_cross_entropy cannot both be True."
+
+    modeling_qwen3_moe = sys.modules["transformers.models.qwen3_moe.modeling_qwen3_moe"]
+
+    if rms_norm:
+        modeling_qwen3_moe.Qwen3MoeRMSNorm = LigerRMSNorm
+
+    if glu_activation:
+
+        def _liger_swiglu_mlp_wrapper(config, intermediate_size=None, **kwargs):
+            "Accepts intermediate_size to pass to LigerSwiGLUMLP"
+            # clone config to avoid modifying the original
+            config = deepcopy(config)
+            if intermediate_size:
+                setattr(config, "intermediate_size", intermediate_size)
+            return LigerSwiGLUMLP(config, **kwargs)
+
+        modeling_qwen3_moe.Qwen3MoeMLP = _liger_swiglu_mlp_wrapper
+
+    if layer_norm:
+        modeling_qwen3_moe.nn.LayerNorm = LigerLayerNorm
+
+    if cross_entropy:
+        from transformers.loss.loss_utils import nn
+
+        nn.functional.cross_entropy = liger_cross_entropy
+
+    if fused_linear_cross_entropy:
+        modeling_qwen3_moe.Qwen3MoeForCausalLM.forward = lce_forward
108  src/axolotl/integrations/llm_compressor/README.md  Normal file
@@ -0,0 +1,108 @@
+# LLMCompressor Integration
+
+Fine-tune sparsified models in Axolotl using Neural Magic's [LLMCompressor](https://github.com/vllm-project/llm-compressor).
+
+This integration enables fine-tuning of models sparsified using LLMCompressor within the Axolotl training framework. By combining LLMCompressor's model compression capabilities with Axolotl's distributed training pipelines, users can efficiently fine-tune sparse models at scale.
+
+It uses Axolotl’s plugin system to hook into the fine-tuning flows while maintaining sparsity throughout training.
+
+---
+
+## Requirements
+
+- Axolotl with `llmcompressor` extras:
+
+  ```bash
+  pip install "axolotl[llmcompressor]"
+  ```
+
+- Requires `llmcompressor >= 0.5.1`
+
+This will install all necessary dependencies to fine-tune sparsified models using the integration.
+
+---
+
+## Usage
+
+To enable sparse fine-tuning with this integration, include the plugin in your Axolotl config:
+
+```yaml
+plugins:
+  - axolotl.integrations.llm_compressor.LLMCompressorPlugin
+
+llmcompressor:
+  recipe:
+    finetuning_stage:
+      finetuning_modifiers:
+        ConstantPruningModifier:
+          targets: [
+            're:.*q_proj.weight',
+            're:.*k_proj.weight',
+            're:.*v_proj.weight',
+            're:.*o_proj.weight',
+            're:.*gate_proj.weight',
+            're:.*up_proj.weight',
+            're:.*down_proj.weight',
+          ]
+          start: 0
+  save_compressed: true
+# ... (other training arguments)
+```
+
+This plugin **does not apply pruning or sparsification itself** — it is intended for **fine-tuning models that have already been sparsified**.
+
+Pre-sparsified checkpoints can be:
+- Generated using [LLMCompressor](https://github.com/vllm-project/llm-compressor)
+- Downloaded from [Neural Magic's Hugging Face page](https://huggingface.co/neuralmagic)
+- Any custom LLM with compatible sparsity patterns that you've created yourself
+
+To learn more about writing and customizing LLMCompressor recipes, refer to the official documentation:
+[https://github.com/vllm-project/llm-compressor/blob/main/README.md](https://github.com/vllm-project/llm-compressor/blob/main/README.md)
+
+### Storage Optimization with save_compressed
+
+Setting `save_compressed: true` in your configuration enables saving models in a compressed format, which:
+- Reduces disk space usage by approximately 40%
+- Maintains compatibility with vLLM for accelerated inference
+- Maintains compatibility with llmcompressor for further optimization (example: quantization)
+
+This option is highly recommended when working with sparse models to maximize the benefits of model compression.
+
+### Example Config
+
+See [`examples/llama-3/sparse-finetuning.yaml`](examples/llama-3/sparse-finetuning.yaml) for a complete example.
+
+---
+
+## Inference with vLLM
+
+After fine-tuning your sparse model, you can leverage vLLM for efficient inference.
+You can also use LLMCompressor to apply additional quantization to your fine-tuned
+sparse model before inference for even greater performance benefits:
+
+```python
+from vllm import LLM, SamplingParams
+
+prompts = [
+    "Hello, my name is",
+    "The president of the United States is",
+    "The capital of France is",
+    "The future of AI is",
+]
+sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
+llm = LLM("path/to/your/sparse/model")
+outputs = llm.generate(prompts, sampling_params)
+
+for output in outputs:
+    prompt = output.prompt
+    generated_text = output.outputs[0].text
+    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
+```
+
+For more details on vLLM's capabilities and advanced configuration options, see the [official vLLM documentation](https://docs.vllm.ai/).
+
+## Learn More
+
+For details on available sparsity and quantization schemes, fine-tuning recipes, and usage examples, visit the official LLMCompressor repository:
+
+[https://github.com/vllm-project/llm-compressor](https://github.com/vllm-project/llm-compressor)
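As a follow-on to the vLLM section of the README, a hedged sketch of applying post-training quantization to the fine-tuned sparse checkpoint with LLMCompressor's one-shot flow (the import path, recipe, and paths are illustrative; consult the LLMCompressor docs for the current API):

```python
from llmcompressor import oneshot  # top-level import assumed for llmcompressor >= 0.5
from llmcompressor.modifiers.quantization import GPTQModifier

# Quantize the fine-tuned sparse model to W4A16 while preserving sparsity
oneshot(
    model="path/to/your/sparse/model",
    recipe=[GPTQModifier(targets="Linear", scheme="W4A16", ignore=["lm_head"])],
    output_dir="path/to/your/sparse-w4a16/model",
)
```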
5  src/axolotl/integrations/llm_compressor/__init__.py  Normal file
@@ -0,0 +1,5 @@
+"""Integration entry point for the LLMCompressor plugin."""
+
+from .plugin import LLMCompressorPlugin
+
+__all__ = ["LLMCompressorPlugin"]
40  src/axolotl/integrations/llm_compressor/args.py  Normal file
@@ -0,0 +1,40 @@
+"""
+LLMCompressor and Sparse Finetuning config models.
+"""
+
+from typing import Any
+
+from pydantic import BaseModel, Field
+from typing_extensions import Annotated
+
+
+class CompressionArgs(BaseModel):
+    """Sparse Finetuning config for LLMCompressor."""
+
+    # Typing for recipe is set to Any due to:
+    # https://github.com/vllm-project/llm-compressor/issues/1319
+    recipe: Annotated[
+        Any,
+        Field(
+            description="The recipe containing the compression algorithms and hyperparameters to apply."
+        ),
+    ]
+
+    save_compressed: Annotated[
+        bool,
+        Field(
+            default=False,
+            description="Whether to save the compressed model after training.",
+        ),
+    ]
+
+
+class LLMCompressorArgs(BaseModel):
+    """LLMCompressor configuration BaseModel."""
+
+    llmcompressor: Annotated[
+        CompressionArgs,
+        Field(
+            description="Arguments enabling compression pathways through the LLM Compressor plugins"
+        ),
+    ]
171  src/axolotl/integrations/llm_compressor/plugin.py  Normal file
@@ -0,0 +1,171 @@
+"""
+Sparse Finetuning plugin for Axolotl — enables handling of sparse neural networks
+by maintaining masks for zero weights during training.
+"""
+
+import logging
+from functools import wraps
+from typing import Any, Callable, Concatenate, ParamSpec, TypeVar
+
+from llmcompressor import active_session, create_session
+from llmcompressor.core import callbacks as session_callbacks
+from llmcompressor.recipe import Recipe
+from torch.nn import Module
+from transformers.trainer import Trainer
+from transformers.trainer_callback import TrainerCallback, TrainerControl, TrainerState
+from transformers.training_args import TrainingArguments
+
+from axolotl.integrations.base import BasePlugin
+
+P = ParamSpec("P")  # Params for generic function signatures
+R = TypeVar("R")  # Return type for generic function signatures
+
+LOG = logging.getLogger("axolotl.integrations.llm_compressor")
+
+
+class LLMCompressorCallbackHandler(TrainerCallback):
+    """
+    Trainer callback for Sparse Finetuning.
+    Maintains sparsity patterns during training by applying masks after optimization steps,
+    ensuring zero-weight updates are canceled out.
+    """
+
+    def __init__(self, trainer: Trainer, recipe: Any):
+        """
+        Initialize the Sparse Finetuning callback handler.
+
+        Args:
+            trainer (Trainer): Huggingface Trainer instance.
+            recipe (Recipe | dict): Sparse finetuning recipe to apply.
+        """
+        super().__init__()
+        self.trainer = trainer
+        self.recipe = (
+            Recipe.model_validate(recipe) if not isinstance(recipe, Recipe) else recipe
+        )
+        self.original_compute_loss = trainer.compute_loss
+        self.trainer.compute_loss = compute_loss_wrapper(self.trainer.compute_loss)
+        create_session()
+
+    def on_train_begin(
+        self,
+        args: TrainingArguments,
+        state: TrainerState,
+        control: TrainerControl,
+        **kwargs,
+    ) -> None:
+        """
+        Called at the beginning of training. Initializes the compression session.
+
+        Args:
+            args (TrainingArguments): Training arguments.
+            state (TrainerState): Trainer state.
+            control (TrainerControl): Trainer control.
+        """
+        super().on_train_begin(args, state, control, **kwargs)
+        self.trainer.accelerator.wait_for_everyone()
+        active_session().initialize(
+            model=self.trainer.model,
+            optimizer=self.trainer.optimizer,
+            start=state.epoch,
+            recipe=self.recipe,
+        )
+        self.trainer.accelerator.wait_for_everyone()
+
+    def on_step_begin(
+        self,
+        args: TrainingArguments,
+        state: TrainerState,
+        control: TrainerControl,
+        **kwargs,
+    ) -> None:
+        """
+        Called at the beginning of a training step. Triggers batch_start callback.
+        """
+        super().on_step_begin(args, state, control, **kwargs)
+        session_callbacks.batch_start()
+
+    def on_step_end(
+        self,
+        args: TrainingArguments,
+        state: TrainerState,
+        control: TrainerControl,
+        **kwargs,
+    ) -> None:
+        """
+        Called at the end of a training step. Triggers optimizer and batch_end callbacks.
+        """
+        super().on_step_end(args, state, control, **kwargs)
+        session_callbacks.optim_pre_step()
+        session_callbacks.optim_post_step()
+        session_callbacks.batch_end()
+
+    def on_train_end(
+        self,
+        args: TrainingArguments,
+        state: TrainerState,
+        control: TrainerControl,
+        **kwargs,
+    ) -> None:
+        """
+        Called at the end of training. Finalizes the compression session and
+        restores the original loss function.
+        """
+        super().on_train_end(args, state, control, **kwargs)
+        active_session().finalize()
+        self.trainer.compute_loss = self.original_compute_loss
+
+
+class LLMCompressorPlugin(BasePlugin):
+    """
+    Sparse Finetuning plugin for Axolotl integration.
+    """
+
+    def get_input_args(self) -> str:
+        """
+        Returns the path to the plugin's argument definition.
+
+        Returns:
+            str: Dotted path to the LLMCompressorArgs class.
+        """
+        return "axolotl.integrations.llm_compressor.args.LLMCompressorArgs"
+
+    def add_callbacks_post_trainer(self, cfg: Any, trainer: Trainer) -> list:
+        """
+        Adds Sparse Finetuning callback to the Trainer instance.
+
+        Args:
+            cfg (Any): Configuration object containing the sparse recipe.
+            trainer (Trainer): Huggingface Trainer instance.
+
+        Returns:
+            list: List containing the configured callback instances.
+        """
+        LOG.info("Adding Sparse Finetuning callback to the trainer")
+        callback = LLMCompressorCallbackHandler(
+            trainer=trainer,
+            recipe=cfg.llmcompressor.recipe,
+        )
+        return [callback]
+
+
+def compute_loss_wrapper(
+    compute_loss_func: Callable[Concatenate[Module, P], R],
+) -> Callable[Concatenate[Module, P], R]:
+    """
+    Wraps the loss computation function to trigger the loss_calculated callback.
+
+    Args:
+        compute_loss_func (Callable): Original loss computation function.
+
+    Returns:
+        Callable: Wrapped function that also invokes the loss_calculated callback.
+    """
+
+    @wraps(compute_loss_func)
+    def compute_and_notify(model: Module, *args: P.args, **kwargs: P.kwargs) -> R:
+        loss = compute_loss_func(model, *args, **kwargs)
+        if active_session().lifecycle.initialized_ and model.training:
+            session_callbacks.loss_calculated(loss=loss)
+        return loss
+
+    return compute_and_notify
40  src/axolotl/integrations/llm_compressor/utils.py  Normal file
@@ -0,0 +1,40 @@
+"""Utilities for llmcompressor integration with axolotl."""
+
+from typing import Union
+
+from llmcompressor.transformers.sparsification.compressed_tensors_utils import (
+    modify_save_pretrained,
+)
+from transformers import PreTrainedModel, Trainer
+
+
+def save_compressed_model(
+    model: PreTrainedModel,
+    output_dir: Union[str, bytes],
+    trainer: Trainer,
+    safe_serialization: bool = False,
+    save_compressed: bool = False,
+) -> None:
+    """
+    Synchronize processes, apply compression hooks, and save the model.
+
+    Args:
+        model (PreTrainedModel): The model to be saved.
+        output_dir (str or bytes): Path where the model files will be written.
+        trainer (Trainer): Hugging Face Trainer for process synchronization.
+        safe_serialization (bool): Use safe serialization if True.
+        save_compressed (bool): Write compressed tensors if True.
+    """
+    trainer.accelerator.wait_for_everyone()
+
+    # Only the main process writes the files
+    if not trainer.accelerator.is_main_process:
+        return
+
+    modify_save_pretrained(model)
+    model.save_pretrained(
+        output_dir,
+        safe_serialization=safe_serialization,
+        save_compressed=save_compressed,
+        skip_sparsity_compression_stats=not save_compressed,
+    )
@@ -12,10 +12,8 @@ import torch
 import torch.distributed as dist
 from accelerate.logging import get_logger

-from axolotl.logging_config import configure_logging
 from axolotl.monkeypatch.utils import get_cu_seqlens_from_pos_ids

-configure_logging()
 LOG = get_logger(__name__)
@@ -23,22 +23,42 @@ from axolotl.utils.dict import DictDefault

 LOG = get_logger(__name__)

-ORIGINAL_QKV_CODE = """
-query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
-key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
-value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
-""".lstrip(
-    "\n"
-)
-
-PATCHED_QKV_CODE = """
-query_states, key_states, value_states = self.apply_qkv(hidden_states)
-query_states = query_states.view(hidden_shape).transpose(1, 2)
-key_states = key_states.view(hidden_shape).transpose(1, 2)
-value_states = value_states.view(hidden_shape).transpose(1, 2)
-""".lstrip(
-    "\n"
-)
+QKV_PATCHES = [
+    (
+        """
+query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+""".lstrip(
+            "\n"
+        ),
+        """
+query_states, key_states, value_states = self.apply_qkv(hidden_states)
+query_states = query_states.view(hidden_shape).transpose(1, 2)
+key_states = key_states.view(hidden_shape).transpose(1, 2)
+value_states = value_states.view(hidden_shape).transpose(1, 2)
+""".lstrip(
+            "\n"
+        ),
+    ),
+    (
+        """
+query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
+key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
+value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+""".lstrip(
+            "\n"
+        ),
+        """
+query_states, key_states, value_states = self.apply_qkv(hidden_states)
+query_states = self.q_norm(query_states.view(hidden_shape)).transpose(1, 2)
+key_states = self.k_norm(key_states.view(hidden_shape)).transpose(1, 2)
+value_states = value_states.view(hidden_shape).transpose(1, 2)
+""".lstrip(
+            "\n"
+        ),
+    ),
+]

 ORIGINAL_O_CODE = """
 attn_output = self.o_proj(attn_output)
@@ -128,10 +148,11 @@ def get_attention_cls_from_config(cfg: DictDefault) -> Type[nn.Module]:
     try:
         # Dynamically import the module and attention class
        module_path = f"transformers.models.{model_type}.modeling_{model_type}"
-        module = __import__(
-            module_path, fromlist=[f"{model_type.capitalize()}Attention"]
-        )
-        attention_cls = getattr(module, f"{model_type.capitalize()}Attention")
+        model_cls_prefix = "".join(
+            [part.capitalize() for part in model_type.split("_")]
+        )
+        module = __import__(module_path, fromlist=[f"{model_cls_prefix}Attention"])
+        attention_cls = getattr(module, f"{model_cls_prefix}Attention")
+
         return attention_cls
     except (ImportError, AttributeError) as e:
@@ -168,10 +189,18 @@ def patch_self_attn_lora(cfg: DictDefault):
     attention_cls._original_forward = self_attn_forward
     self_attn_forward, _ = detab_code(self_attn_forward)

-    assert ORIGINAL_QKV_CODE in self_attn_forward, "Original QKV code not found"
+    assert any(
+        qkv_options[0] in self_attn_forward for qkv_options in QKV_PATCHES
+    ), "Original QKV code not found"
     assert ORIGINAL_O_CODE in self_attn_forward, "Original O code not found"

-    self_attn_forward = self_attn_forward.replace(ORIGINAL_QKV_CODE, PATCHED_QKV_CODE)
+    for qkv_orig, qkv_patched in QKV_PATCHES:
+        if qkv_orig in self_attn_forward:
+            self_attn_forward = self_attn_forward.replace(
+                qkv_orig,
+                qkv_patched,
+            )
+            break
     self_attn_forward = self_attn_forward.replace(ORIGINAL_O_CODE, PATCHED_O_CODE)
     self_attn_forward = self_attn_forward.replace(
         "def forward(",
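A standalone sketch of why the class-name prefix derivation above is needed: plain `str.capitalize()` mangles underscored model types, which is exactly what `model_cls_prefix` fixes:

```python
def model_cls_prefix(model_type: str) -> str:
    # mirrors the derivation in get_attention_cls_from_config above
    return "".join(part.capitalize() for part in model_type.split("_"))

assert "qwen3_moe".capitalize() == "Qwen3_moe"      # old behavior: wrong class name
assert model_cls_prefix("qwen3_moe") == "Qwen3Moe"  # resolves Qwen3MoeAttention
assert model_cls_prefix("llama") == "Llama"         # unchanged for simple types
```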
@@ -18,6 +18,8 @@ SUPPORTED_MULTIPACK_MODEL_TYPES = [
     "mixtral",
     "qwen2",
     "qwen2_moe",
+    "qwen3",
+    "qwen3_moe",
     "falcon",
     "phi",
     "phi3",
0  src/axolotl/monkeypatch/trainer/__init__.py  Normal file
42  src/axolotl/monkeypatch/trainer/lr.py  Normal file
@@ -0,0 +1,42 @@
+"""
+monkeypatch for Trainer _get_learning_rate method
+"""
+
+import logging
+
+import torch
+
+LOG = logging.getLogger(__name__)
+
+
+# TODO remove this patch once https://github.com/huggingface/transformers/pull/37881 is included in a release
+def _get_learning_rate(self):
+    if self.is_deepspeed_enabled:
+        # with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may
+        # not run for the first few dozen steps while loss scale is too large, and thus during
+        # that time `get_last_lr` will fail if called during that warm up stage, so work around it:
+        try:
+            last_lr = self.lr_scheduler.get_last_lr()[0]
+        except AssertionError as e:
+            if "need to call step" in str(e):
+                LOG.warning(
+                    "tried to get lr value before scheduler/optimizer started stepping, returning lr=0"
+                )
+                last_lr = 0
+            else:
+                raise
+    else:
+        if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
+            last_lr = self.optimizer.param_groups[0]["lr"]
+        else:
+            last_lr = self.lr_scheduler.get_last_lr()[0]
+
+    if torch.is_tensor(last_lr):
+        last_lr = last_lr.item()
+    return last_lr
+
+
+def patch_trainer_get_lr():
+    from transformers.trainer import Trainer
+
+    Trainer._get_learning_rate = _get_learning_rate  # pylint: disable=protected-access
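Usage is a single call before the trainer starts logging learning rates; a hedged sketch of the call site:

```python
from axolotl.monkeypatch.trainer.lr import patch_trainer_get_lr

patch_trainer_get_lr()  # Trainer._get_learning_rate now returns plain floats
```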
@@ -4,7 +4,7 @@ HF Chat Templates prompt strategy

 import logging
 from collections import defaultdict
-from typing import Any, Dict, List, Optional, Set, Union
+from typing import Any, Dict, List, Set, Union

 from pydantic import BaseModel
 from transformers import ProcessorMixin
@@ -29,12 +29,12 @@ class ChatTemplatePrompter(Prompter):
         chat_template: str,
         processor=None,
         max_length=2048,
-        message_property_mappings: Optional[Dict[str, str]] = None,
-        message_field_training: Optional[str] = None,
-        message_field_training_detail: Optional[str] = None,
+        message_property_mappings: Dict[str, str] | None = None,
+        message_field_training: str | None = None,
+        message_field_training_detail: str | None = None,
         field_messages: str = "messages",
         field_system: str = "system",
-        roles: Optional[Dict[str, List[str]]] = None,
+        roles: Dict[str, List[str]] | None = None,
         drop_system_message: bool = False,
     ):
         # check if message_property_mappings is None or empty dict
@@ -42,6 +42,7 @@ class ChatTemplatePrompter(Prompter):
             message_property_mappings = {
                 "role": "role",
                 "content": "content",
+                "reasoning_content": "reasoning_content",
             }

         if roles:
@@ -65,7 +66,7 @@ class ChatTemplatePrompter(Prompter):
         self.field_messages = field_messages
         self.field_system = field_system
         self.tokenizer = tokenizer
-        self.processor: Optional[ProcessorMixin] = processor
+        self.processor: ProcessorMixin | None = processor
         self.chat_template = chat_template
         self.max_length = max_length
         self.drop_system_message = drop_system_message
@@ -224,11 +225,11 @@ class ChatTemplateStrategy(PromptTokenizingStrategy):
         tokenizer,
         train_on_inputs: bool,
         sequence_len: int,
-        roles_to_train: Optional[List[str]] = None,
-        train_on_eos: Optional[str] = None,
-        train_on_eot: Optional[str] = None,
-        eot_tokens: Optional[List[str]] = None,
-        split_thinking: Optional[bool] = False,
+        roles_to_train: list[str] | None = None,
+        train_on_eos: str | None = None,
+        train_on_eot: str | None = None,
+        eot_tokens: list[str] | None = None,
+        split_thinking: bool | None = False,
     ):
         super().__init__(prompter, tokenizer, train_on_inputs, sequence_len)
         self.prompter: ChatTemplatePrompter = prompter
@@ -661,16 +662,46 @@ class ChatTemplateStrategy(PromptTokenizingStrategy):
             # if the role is assistant, we want to use reasoning_content
             if self.split_thinking and transformed_message["role"] == "assistant":
                 content = transformed_message["content"]
-                pairs = [("<think>", "</think>"), ("<reasoning>", "</reasoning>")]
-                for pair in pairs:
-                    if pair[0] in content and pair[1] in content:
-                        start_idx = content.find(pair[0])
-                        end_idx = content.find(pair[1])
-                        thinking_content = content[start_idx + len(pair[0]) : end_idx]
+                thinking_pairs = [
+                    ("<think>", "</think>"),
+                    ("<reasoning>", "</reasoning>"),
+                    ("<|begin_of_thought|>", "<|end_of_thought|>"),
+                ]
+                content_pairs = [("<|begin_of_solution|>", "<|end_of_solution|>")]
+                for tpair in thinking_pairs:
+                    # check if the thinking pair is in the content
+                    if tpair[0] in content and tpair[1] in content:
+                        # find the start and end index of the thinking pair
+                        t_start_idx = content.find(tpair[0])
+                        t_end_idx = content.find(tpair[1])
+
+                        # get the thinking content
+                        thinking_content = content[t_start_idx + len(tpair[0]) : t_end_idx]
                         transformed_message["reasoning_content"] = thinking_content.strip()
-                        transformed_message["content"] = content[
-                            end_idx + len(pair[1]) :
-                        ].lstrip()
+
+                        # take the remainder of the content and strip the leading
+                        # whitespace left behind by the thinking tokens
+                        remainder = content[t_end_idx + len(tpair[1]) :].lstrip()
+
+                        # check if the content pair is in the remainder
+                        cpair_found = False
+                        for cpair in content_pairs:
+                            if cpair[0] in remainder and cpair[1] in remainder:
+                                # find the start and end index of the content pair
+                                c_start_idx = remainder.find(cpair[0])
+                                c_end_idx = remainder.find(cpair[1])
+
+                                # extract the solution content
+                                content_content = remainder[
+                                    c_start_idx + len(cpair[0]) : c_end_idx
+                                ]
+                                transformed_message["content"] = content_content.strip()
+                                cpair_found = True
+                                break
+
+                        # else, the content is the remainder
+                        if not cpair_found:
+                            transformed_message["content"] = remainder
                         break

             # Determine which keys in the original message were not mapped
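A standalone sketch of what the extended `split_thinking` parsing above produces for the new `<|begin_of_thought|>`/`<|begin_of_solution|>` delimiters (illustrative input, not repo code):

```python
content = (
    "<|begin_of_thought|>chain of thought here<|end_of_thought|>\n"
    "<|begin_of_solution|>final answer<|end_of_solution|>"
)
t0, t1 = "<|begin_of_thought|>", "<|end_of_thought|>"
c0, c1 = "<|begin_of_solution|>", "<|end_of_solution|>"

reasoning = content[content.find(t0) + len(t0) : content.find(t1)].strip()
remainder = content[content.find(t1) + len(t1) :].lstrip()
solution = remainder[remainder.find(c0) + len(c0) : remainder.find(c1)].strip()

assert reasoning == "chain of thought here"  # -> message["reasoning_content"]
assert solution == "final answer"            # -> message["content"]
```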
@@ -714,7 +745,7 @@ class StrategyLoader:
         self,
         tokenizer,
         cfg,
-        ds_cfg: Optional[Union[Dict[str, Any], DatasetConfig]] = None,
+        ds_cfg: Union[Dict[str, Any], DatasetConfig] | None = None,
         processor=None,
     ):
         if ds_cfg is None:
@@ -30,7 +30,6 @@ from axolotl.core.trainers.mixins.sequence_parallel import (
     SequenceParallelContextManager,
 )
 from axolotl.integrations.base import PluginManager
-from axolotl.logging_config import configure_logging
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.distributed import cleanup_distributed
 from axolotl.utils.freeze import freeze_layers_except
@@ -42,7 +41,6 @@ try:
 except ImportError:
     BetterTransformer = None

-configure_logging()
 LOG = get_logger(__name__)


@@ -296,8 +294,23 @@ def save_trained_model(
         trainer.model.save_pretrained(
             cfg.output_dir, safe_serialization=safe_serialization
         )

     model.save_pretrained(cfg.output_dir, safe_serialization=safe_serialization)

+    if hasattr(cfg, "llmcompressor") and cfg.llmcompressor:
+        # TODO: add integration support so this can be implemented completely within the plugin
+        from axolotl.integrations.llm_compressor.utils import (
+            save_compressed_model,
+        )
+
+        save_compressed_model(
+            model=model,
+            output_dir=cfg.output_dir,
+            trainer=trainer,
+            safe_serialization=safe_serialization,
+            save_compressed=cfg.llmcompressor.save_compressed,
+        )
+

 def create_model_card(cfg: DictDefault, trainer: Trainer):
     """
@@ -43,3 +43,12 @@ def set_pytorch_cuda_alloc_conf():
         os.environ["PYTORCH_CUDA_ALLOC_CONF"] = (
             "expandable_segments:True,roundup_power2_divisions:16"
         )
+
+
+def patch_optimized_env():
+    """
+    Patch environment variables to improve VRAM usage and increase download speed
+    """
+    if os.getenv("HF_HUB_ENABLE_HF_TRANSFER") is None:
+        os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+    set_pytorch_cuda_alloc_conf()
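A hedged usage sketch for the new helper (the module path is assumed for illustration):

```python
from axolotl.utils.environment import patch_optimized_env  # hypothetical module path

# call once at startup, before model download and CUDA allocations
patch_optimized_env()
```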
@@ -3,6 +3,7 @@
 from __future__ import annotations

 import gc
+import json
 import logging
 import os
 import traceback
@@ -808,11 +809,44 @@ class SaveAxolotlConfigtoWandBCallback(TrainerCallback):
                     artifact.add_file(temp_file.name)
                     wandb.log_artifact(artifact)
                     wandb.save(temp_file.name)
                     LOG.info(
                         "The Axolotl config has been saved to the WandB run under files."
                     )
             except (FileNotFoundError, ConnectionError) as err:
                 LOG.warning(f"Error while saving Axolotl config to WandB: {err}")

+            if args.deepspeed:
+                try:
+                    # sync config to top level in run, cannot delete file right away because wandb schedules it to be synced even w/policy = 'now', so let OS delete it later.
+                    with NamedTemporaryFile(
+                        mode="w",
+                        delete=False,
+                        suffix=".json",
+                        prefix="deepspeed_config_",
+                    ) as temp_file:
+                        skip_upload = False
+                        if isinstance(args.deepspeed, dict):
+                            json.dump(args.deepspeed, temp_file, indent=4)
+                        elif isinstance(args.deepspeed, str) and os.path.exists(
+                            args.deepspeed
+                        ):
+                            copyfile(args.deepspeed, temp_file.name)
+                        else:
+                            skip_upload = True
+                    if not skip_upload:
+                        artifact = wandb.Artifact(
+                            f"deepspeed-config-{wandb.run.id}",
+                            type="deepspeed-config",
+                        )
+                        artifact.add_file(temp_file.name)
+                        wandb.log_artifact(artifact)
+                        wandb.save(temp_file.name)
+                        LOG.info(
+                            "The DeepSpeed config has been saved to the WandB run under files."
+                        )
+                except (FileNotFoundError, ConnectionError) as err:
+                    LOG.warning(f"Error while saving DeepSpeed config to WandB: {err}")
+
         return control
@@ -59,7 +59,7 @@ def choose_device(cfg):

 def resolve_dtype(cfg):
     if (
-        cfg.bf16 == "auto" and not cfg.use_ray
+        not cfg.fp16 and cfg.bf16 == "auto" and not cfg.use_ray
     ):  # if we use ray we want to defer this check to the worker node
         if is_torch_bf16_gpu_available():
             LOG.debug("bf16 support detected, enabling for this configuration.")
@@ -67,7 +67,7 @@ def resolve_dtype(cfg):
         else:
             LOG.debug("bf16 support not detected, disabling for this configuration.")
             cfg.bf16 = False
-        if cfg.fp16 is None:
+        if cfg.fp16 is None and not cfg.float16:
             cfg.fp16 = True

     if cfg.device == "mps":
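The added `not cfg.fp16` guard means an explicit fp16 request is now honored on bf16-capable GPUs rather than being auto-upgraded; an illustrative config:

```yaml
bf16: auto
fp16: true  # previously the auto-detection branch could override this
```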
@@ -69,17 +69,27 @@ def barrier():
         dist.barrier()


-def is_main_process():
+def is_main_process(use_environ=False):
     """
     Check if the current process is the main process. If not in distributed mode,
     always return `True`.
+
+    Args:
+        - use_environ (bool, optional): Use environment variable to determine main process.
+
+    Returns:
+        - bool: `True` if the current process is the main process, `False` otherwise.
     """
+    if use_environ:
+        return os.environ.get("LOCAL_RANK", "0") == "0"
     if not is_distributed():
         return True
     return dist.get_rank() == 0


-def is_local_main_process():
+def is_local_main_process(use_environ=False):
+    if use_environ:
+        return os.environ.get("LOCAL_RANK", "0") == "0"
     return PartialState().is_local_main_process


@@ -99,17 +109,6 @@ def cleanup_distributed():
     torch.distributed.destroy_process_group()


-@contextmanager
-def zero_only():
-    """
-    Context manager that only runs the enclosed block on the main rank.
-    """
-    if is_main_process():
-        yield
-    else:
-        yield None
-
-
 @contextmanager
 def zero_first(is_main):
     """
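The `use_environ=True` path exists for callers like the Liger and Cut Cross Entropy plugins above, which log while patching, before `torch.distributed` or `PartialState` is initialized. A hedged sketch of the intended pattern:

```python
import os

from axolotl.utils.distributed import is_main_process

# torchrun/accelerate export LOCAL_RANK per process, so this check is safe
# even before the process group exists
os.environ.setdefault("LOCAL_RANK", "0")  # single-process fallback
if is_main_process(use_environ=True):
    print("rank 0: emit one-time log lines here")
```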
@@ -2,6 +2,13 @@

 from functools import partial

+import torch
+from torch.utils.checkpoint import (
+    CheckpointPolicy,
+    checkpoint,
+    create_selective_checkpoint_contexts,
+)
+
 from axolotl.utils.gradient_checkpointing.unsloth import (
     Unsloth_Offloaded_Gradient_Checkpointer,
 )
@@ -18,3 +25,32 @@ def hf_grad_checkpoint_offload_wrapper(
         ),
         *args,
     )
+
+
+aten = torch.ops.aten
+compute_intensive_ops = [
+    aten.mm.default,
+    aten.bmm.default,
+    aten.addmm.default,
+]
+
+
+def policy_fn(ctx, op, *args, **kwargs):
+    if op in compute_intensive_ops:
+        return CheckpointPolicy.MUST_SAVE
+    else:
+        return CheckpointPolicy.PREFER_RECOMPUTE
+
+
+context_fn = partial(create_selective_checkpoint_contexts, policy_fn)
+
+
+def checkpoint_w_policy(
+    decoder_layer, *args, use_reentrant=None
+):  # pylint: disable=unused-argument
+    return checkpoint(
+        decoder_layer,
+        *args,
+        use_reentrant=use_reentrant,
+        context_fn=context_fn,
+    )
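
Note: this is selective activation checkpointing. Outputs of the compute-intensive matmul ops (`aten.mm`, `aten.bmm`, `aten.addmm`) are saved, while everything else is recomputed in backward, so it keeps more memory than full checkpointing but skips recomputing the most expensive ops. A usage sketch, assuming `checkpoint_w_policy` is importable from `axolotl.utils.gradient_checkpointing` as defined above; the toy module stands in for a real decoder layer:

    import torch
    import torch.nn as nn

    from axolotl.utils.gradient_checkpointing import checkpoint_w_policy

    layer = nn.Sequential(nn.Linear(16, 16), nn.GELU(), nn.Linear(16, 16))
    x = torch.randn(2, 16, requires_grad=True)

    # mm/addmm results are kept; cheap ops like GELU are recomputed in backward.
    out = checkpoint_w_policy(layer, x, use_reentrant=False)
    out.sum().backward()

`torch.utils.checkpoint.checkpoint` only honors `context_fn` when `use_reentrant=False`, which is why the sketch passes it explicitly.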
@@ -68,7 +68,7 @@ from axolotl.utils.distributed import (
     get_device_count,
     get_device_type,
     is_local_main_process,
-    zero_only,
+    is_main_process,
 )
 from axolotl.utils.gradient_checkpointing import hf_grad_checkpoint_offload_wrapper
 from axolotl.utils.lora_embeddings import get_linear_embedding_layers
@@ -141,6 +141,22 @@ def check_model_config(cfg: DictDefault, model_config: PretrainedConfig):
         hasattr(model_config, "quantization_config")
         and model_config.quantization_config
     )
+
+    # Detect compressed-tensors config
+    is_compressed_tensors_config = (
+        quant_config_exists
+        and model_config.quantization_config.get("quant_method") == "compressed-tensors"
+    )
+
+    if is_compressed_tensors_config:
+        if model_config.quantization_config.get("config_groups"):
+            LOG.warning(
+                "Found `config_groups` in a compressed-tensors config. "
+                "QAT integration with llmcompressor is not tested."
+            )
+        # Skip further quant checks for compressed-tensors
+        return
+
     quant_config_method_is_gptq = (
         quant_config_exists
         and "quant_method" in model_config.quantization_config
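
Note: an illustrative `quantization_config`, as it might appear in a compressed-tensors checkpoint's `config.json`, that the new early-return branch matches. The `format` and `sparsity_config` values are hypothetical placeholders; `quant_method` is the only key the check reads:

    quantization_config = {
        "quant_method": "compressed-tensors",
        "format": "sparse-24-bitmask",
        "sparsity_config": {"sparsity_structure": "2:4"},
    }
    assert quantization_config.get("quant_method") == "compressed-tensors"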
@@ -437,7 +453,7 @@ def load_tokenizer(cfg):
         {"additional_special_tokens": additional_special_tokens}
     )

-    with zero_only():
+    if is_main_process(use_environ=True):
         LOG.debug(f"EOS: {tokenizer.eos_token_id} / {tokenizer.eos_token}")
         LOG.debug(f"BOS: {tokenizer.bos_token_id} / {tokenizer.bos_token}")
         LOG.debug(f"PAD: {tokenizer.pad_token_id} / {tokenizer.pad_token}")
@@ -190,7 +190,7 @@ class MultipackBatchSampler(BatchSampler):
         self.len_across_ranks = None

         if self.sequential and not isinstance(sampler, SequentialSampler):
-            LOG.warn(
+            LOG.warning(
                 "using sequential sample packing with non-sequential sampler, did you want to also enable curriculum_sampling?"
             )

@@ -512,10 +512,17 @@ class AxolotlInputConfig(
     @model_validator(mode="before")
     @classmethod
     def hint_sample_packing_padding(cls, data):
-        if data.get("sample_packing") and not data.get("pad_to_sequence_len"):
-            LOG.warning(
-                "`pad_to_sequence_len: true` is recommended when using sample_packing"
-            )
+        if data.get("sample_packing"):
+            pad_to_sequence_len = data.get("pad_to_sequence_len")
+            if pad_to_sequence_len is False:
+                LOG.warning(
+                    "`pad_to_sequence_len: true` is recommended when using sample_packing"
+                )
+            elif pad_to_sequence_len is None:
+                LOG.info(
+                    "Setting `pad_to_sequence_len: true` to prevent memory leaks when sample_packing"
+                )
+                data["pad_to_sequence_len"] = True
         return data

     @model_validator(mode="before")
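
Note: the validator now distinguishes an explicit `pad_to_sequence_len: false` (respected, with a warning) from an unset value (auto-set to `true`). A standalone restatement of the branch logic on plain dicts, omitting the logging; the `hint` name is a simplified stand-in for the validator above:

    def hint(data):
        if data.get("sample_packing"):
            if data.get("pad_to_sequence_len") is None:
                data["pad_to_sequence_len"] = True  # auto-set, logged at INFO
        return data

    assert hint({"sample_packing": True})["pad_to_sequence_len"] is True
    assert hint({"sample_packing": True, "pad_to_sequence_len": False})[
        "pad_to_sequence_len"
    ] is False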
@@ -1150,6 +1157,18 @@ class AxolotlInputConfig(

         return data

+    @model_validator(mode="before")
+    @classmethod
+    def check_grpo_peft_liger(cls, data):
+        if (
+            data.get("rl") == "grpo"
+            and data.get("trl", {})
+            and data.get("trl").get("use_liger_loss")
+            and data.get("adapter")
+        ):
+            raise ValueError("PEFT + GRPO + Liger is not yet supported")
+        return data
+
     @model_validator(mode="after")
     def check_sequence_parallel_degree(self):
         if not self.sequence_parallel_degree:
@@ -1315,6 +1334,57 @@ class AxolotlConfigWCapabilities(AxolotlInputConfig):
         )
         return data

+    @model_validator(mode="before")
+    @classmethod
+    def check_auto_enable_lora_kernels(cls, data):
+        # Only proceed if using LoRA or QLoRA adapter
+        if data.get("rl"):
+            # RL trainers not tested so don't enable kernels by default
+            return data
+        if data.get("adapter") in ["lora", "qlora"]:
+            # Skip if already set, using unsloth optimizations, or using 8-bit
+            unsloth_fields = ["unsloth_lora_mlp", "unsloth_lora_qkv", "unsloth_lora_o"]
+            kernel_fields = ["lora_mlp_kernel", "lora_qkv_kernel", "lora_o_kernel"]
+            if (
+                any(data.get(k) is not None for k in kernel_fields)
+                or any(data.get(k) for k in unsloth_fields)
+                or data.get("adapter") == "lora"
+                and data.get("load_in_8bit")
+            ):
+                return data
+
+            # Check multi-GPU compatibility
+            capabilities = data.get("capabilities")
+            is_multi_gpu = capabilities and capabilities.get("n_gpu", 0) > 1
+            is_fsdp = data.get("fsdp") is not None
+            is_fsdp2 = (
+                data.get("fsdp_config") is not None
+                and str(data.get("fsdp_config").get("fsdp_version")) == "2"
+            )
+
+            if (
+                not is_multi_gpu
+                or (is_multi_gpu and not is_fsdp)
+                or (is_multi_gpu and is_fsdp2)
+            ):
+                # Auto-enable kernels if not explicitly set by user
+                if data.get("lora_mlp_kernel") is None:
+                    data["lora_mlp_kernel"] = True
+
+                if data.get("lora_qkv_kernel") is None:
+                    data["lora_qkv_kernel"] = True
+
+                if data.get("lora_o_kernel") is None:
+                    data["lora_o_kernel"] = True
+
+                LOG.warning(
+                    "Auto-enabling LoRA kernel optimizations for faster training. "
+                    + "Please explicitly set `lora_*_kernel` config values to `false` to disable. "
+                    + "See https://docs.axolotl.ai/docs/lora_optims.html for more info."
+                )
+
+        return data
+
     @model_validator(mode="before")
     @classmethod
     def check_adopt_torch_version(cls, data):
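
Note: kernels auto-enable only for LoRA/QLoRA, never for RL runs, and only when the topology is single GPU, multi-GPU without FSDP, or multi-GPU with FSDP v2. The skip condition relies on Python operator precedence: it parses as `A or B or (adapter == "lora" and load_in_8bit)`. An illustrative config that would get all three kernel flags set:

    data = {
        "adapter": "qlora",
        "capabilities": {"n_gpu": 1},  # single GPU, no fsdp/fsdp_config keys
    }
    # After validation: lora_mlp_kernel, lora_qkv_kernel, and lora_o_kernel are
    # all True, with a warning pointing at the lora_optims docs page.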
@@ -67,6 +67,12 @@ class TRLConfig(BaseModel):
         default=False,
         json_schema_extra={"description": "Whether to log completions"},
     )
+    num_completions_to_print: int | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Number of completions to print. If `log_completions` is `True`, this will be the number of completions logged."
+        },
+    )
     sync_ref_model: bool | None = Field(
         default=False,
         json_schema_extra={
@@ -133,3 +139,25 @@ class TRLConfig(BaseModel):
             "description": "Epsilon value for clipping in the GRPO algorithm."
         },
     )
+    epsilon_high: float | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Upper-bound epsilon value for clipping in the GRPO algorithm."
+        },
+    )
+    use_liger_loss: bool | None = Field(
+        default=None,
+        json_schema_extra={"description": "Whether to use Liger loss for GRPO."},
+    )
+    loss_type: str | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": "Specifies the loss formulation to use. Supported values are `grpo`, `bnpo`, and `dr_grpo`."
+        },
+    )
+    mask_truncated_completions: bool = Field(
+        default=False,
+        json_schema_extra={
+            "description": "When enabled, truncated completions are excluded from the loss calculation."
+        },
+    )
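
Note: a hypothetical axolotl config fragment (expressed as a Python dict rather than YAML) exercising the new GRPO knobs; the `0.28` value is only an example, and the fields default to `None`/`False` as in the diff:

    trl = {
        "epsilon_high": 0.28,          # asymmetric upper clip bound
        "use_liger_loss": False,       # PEFT + GRPO + Liger is rejected by the new validator
        "loss_type": "dr_grpo",        # one of: grpo, bnpo, dr_grpo
        "mask_truncated_completions": True,
    }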
@@ -597,6 +597,8 @@ def prepare_optim_env(cfg):
         os.environ["ACCELERATE_MIXED_PRECISION"] = "bf16"
     elif cfg.fp16:
         os.environ["ACCELERATE_MIXED_PRECISION"] = "fp16"
+    else:
+        os.environ["ACCELERATE_MIXED_PRECISION"] = "no"


 def prepare_opinionated_env(cfg):
@@ -72,7 +72,7 @@ class LogHooksPlugin(BasePlugin):
             f.write("get_trainer_cls\n")

     def create_lr_scheduler(
-        self, cfg, trainer, optimizer
+        self, cfg, trainer, optimizer, num_training_steps
     ):  # pylint: disable=unused-argument
         with open(
             self.base_dir.joinpath("plugin_hooks.log"), "a", encoding="utf-8"
@@ -172,7 +172,7 @@ class TestPluginHooks:
         assert "post_model_load" in file_contents
         # assert "create_optimizer" in file_contents  # not implemented yet
         assert "get_trainer_cls" in file_contents
-        # assert "create_lr_scheduler" in file_contents  # not implemented yet
+        assert "create_lr_scheduler" in file_contents
         assert "add_callbacks_pre_trainer" in file_contents
         assert "add_callbacks_post_trainer" in file_contents
         assert "post_train" in file_contents
tests/e2e/integrations/test_llm_compressor.py (new file, 111 lines)
@@ -0,0 +1,111 @@
+"""
+E2E smoke tests for LLMCompressorPlugin integration
+"""
+
+from pathlib import Path
+
+import pytest
+
+from axolotl.cli.args import TrainerCliArgs
+from axolotl.common.datasets import load_datasets
+from axolotl.train import train
+from axolotl.utils.config import normalize_config, prepare_plugins, validate_config
+from axolotl.utils.dict import DictDefault
+
+from tests.e2e.utils import (
+    check_model_output_exists,
+    require_llmcompressor,
+    require_torch_2_4_1,
+)
+
+MODELS = [
+    "nm-testing/llama2.c-stories42M-pruned2.4-compressed",
+    "nm-testing/llama2.c-stories42M-gsm8k-sparse-only-compressed",
+]
+
+
+@pytest.mark.parametrize(
+    "base_model", MODELS, ids=["no-checkpoint-recipe", "with-checkpoint-recipe"]
+)
+@pytest.mark.parametrize(
+    "save_compressed", [True, False], ids=["save_compressed", "save_uncompressed"]
+)
+class TestLLMCompressorIntegration:
+    """
+    e2e tests for axolotl.integrations.llm_compressor.LLMCompressorPlugin
+    """
+
+    @require_llmcompressor
+    @require_torch_2_4_1
+    def test_llmcompressor_plugin(
+        self, temp_dir, base_model: str, save_compressed: bool
+    ):
+        from llmcompressor import active_session
+
+        # core cfg
+        cfg = DictDefault(
+            {
+                "base_model": base_model,
+                "plugins": ["axolotl.integrations.llm_compressor.LLMCompressorPlugin"],
+                "sequence_len": 1024,
+                "val_set_size": 0.05,
+                "special_tokens": {"pad_token": "<|endoftext|>"},
+                "datasets": [{"path": "mhenrichsen/alpaca_2k_test", "type": "alpaca"}],
+                "num_epochs": 1,
+                "micro_batch_size": 2,
+                "gradient_accumulation_steps": 2,
+                "output_dir": temp_dir,
+                "learning_rate": 1e-5,
+                "optimizer": "adamw_torch_fused",
+                "lr_scheduler": "cosine",
+                "save_safetensors": True,
+                "bf16": "auto",
+                "max_steps": 5,
+                "llmcompressor": {
+                    "recipe": {
+                        "finetuning_stage": {
+                            "finetuning_modifiers": {
+                                "ConstantPruningModifier": {
+                                    "targets": [
+                                        "re:.*q_proj.weight",
+                                        "re:.*k_proj.weight",
+                                        "re:.*v_proj.weight",
+                                        "re:.*o_proj.weight",
+                                        "re:.*gate_proj.weight",
+                                        "re:.*up_proj.weight",
+                                        "re:.*down_proj.weight",
+                                    ],
+                                    "start": 0,
+                                },
+                            },
+                        },
+                    },
+                    "save_compressed": save_compressed,
+                },
+            }
+        )
+
+        prepare_plugins(cfg)
+        cfg = validate_config(cfg)
+        normalize_config(cfg)
+        cli_args = TrainerCliArgs()
+        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
+
+        try:
+            train(cfg=cfg, dataset_meta=dataset_meta)
+            check_model_output_exists(temp_dir, cfg)
+            _check_llmcompressor_model_outputs(temp_dir, save_compressed)
+        finally:
+            active_session().reset()
+
+
+def _check_llmcompressor_model_outputs(temp_dir, save_compressed):
+    if save_compressed:
+        assert (Path(temp_dir) / "recipe.yaml").exists()
+
+        from compressed_tensors import ModelCompressor
+        from compressed_tensors.config import Sparse24BitMaskConfig
+
+        compressor = ModelCompressor.from_pretrained(temp_dir)
+        assert compressor is not None
+        assert isinstance(compressor.sparsity_config, Sparse24BitMaskConfig)
@@ -2,14 +2,19 @@

 # pylint: disable=redefined-outer-name

+from pathlib import Path
+
 import pytest
 import torch
+import yaml
 from accelerate.state import PartialState
 from peft import PeftModelForCausalLM, get_peft_config
 from transformers import AutoModelForCausalLM, LlamaForCausalLM
 from transformers.models.llama.configuration_llama import LlamaConfig
 from transformers.models.llama.modeling_llama import LlamaAttention
+from transformers.models.qwen3_moe.modeling_qwen3_moe import Qwen3MoeAttention
+
+from axolotl.cli.config import load_cfg
 from axolotl.kernels.lora import (
     apply_lora_mlp_geglu,
     apply_lora_mlp_swiglu,
@@ -66,29 +71,36 @@ def small_llama_model():
     return LlamaForCausalLM(LlamaConfig(**config))


-def test_attention_patching_integration():
+@pytest.mark.parametrize(
+    "model_name,attention_cls",
+    [
+        ("HuggingFaceTB/SmolLM2-135M", LlamaAttention),
+        ("Qwen/Qwen3-30B-A3B", Qwen3MoeAttention),
+    ],
+)
+def test_attention_patching_integration(model_name, attention_cls):
     """Test attention patching in integration context."""
-    cfg = {"base_model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0"}
+    cfg = {"base_model": model_name}

     # Store the original implementation
-    original_forward = getattr(LlamaAttention, "forward")
+    original_forward = getattr(attention_cls, "forward")

     # Apply patch
     patch_self_attn_lora(cfg)

     # Get the new forward method
-    patched_forward = LlamaAttention.forward
+    patched_forward = attention_cls.forward

     # Check the forward method was replaced
     assert original_forward is not patched_forward
     assert patched_forward.__name__ == "axolotl_attn_forward"

     # Check original implementation was stored
-    assert hasattr(LlamaAttention, "_original_forward")
+    assert hasattr(attention_cls, "_original_forward")

     # Clean up
-    setattr(LlamaAttention, "forward", original_forward)
-    delattr(LlamaAttention, "_original_forward")
+    setattr(attention_cls, "forward", original_forward)
+    delattr(attention_cls, "_original_forward")


 def test_swiglu_mlp_integration(small_llama_model):
@@ -413,3 +425,42 @@ def test_kernel_training_integration():
     # Verify correct activation function
     layer = model.model.model.layers[0]
     assert layer.mlp.forward.__func__ is apply_lora_mlp_swiglu
+
+
+def test_kernel_training_integration_auto_enable(temp_dir):
+    """Test model loading with auto-enabled kernel patches."""
+    # Create minimal config without explicitly setting kernel options
+    cfg = DictDefault(
+        {
+            "base_model": "HuggingFaceTB/SmolLM2-135M",
+            "tokenizer_config": "HuggingFaceTB/SmolLM2-135M",
+            "learning_rate": 0.000001,
+            "datasets": [
+                {
+                    "path": "mhenrichsen/alpaca_2k_test",
+                    "type": "alpaca",
+                }
+            ],
+            "micro_batch_size": 1,
+            "gradient_accumulation_steps": 1,
+            "adapter": "lora",
+            "lora_r": 8,
+            "lora_alpha": 16,
+            "lora_dropout": 0.0,
+            "lora_target_linear": True,
+            "sequence_len": 1024,
+        }
+    )
+
+    # Write cfg to yaml file
+    path = Path(temp_dir) / "config.yaml"
+    with open(path, "w", encoding="utf-8") as fout:
+        fout.write(yaml.dump(cfg.to_dict(), Dumper=yaml.Dumper))
+
+    # Load config
+    cfg = load_cfg(str(path))
+
+    # Verify kernel options were auto-enabled in the config
+    assert cfg.lora_mlp_kernel is True
+    assert cfg.lora_qkv_kernel is True
+    assert cfg.lora_o_kernel is True
tests/e2e/test_evaluate.py (new file, 65 lines)
@@ -0,0 +1,65 @@
+"""E2E smoke test for evaluate CLI command"""
+
+import os
+from pathlib import Path
+
+import yaml
+from accelerate.test_utils import execute_subprocess_async
+from transformers.testing_utils import get_torch_dist_unique_port
+
+from axolotl.utils.dict import DictDefault
+
+os.environ["WANDB_DISABLED"] = "true"
+
+
+class TestE2eEvaluate:
+    """Test cases for evaluate CLI"""
+
+    def test_evaluate(self, temp_dir):
+        # pylint: disable=duplicate-code
+        cfg = DictDefault(
+            {
+                "base_model": "JackFram/llama-68m",
+                "tokenizer_type": "LlamaTokenizer",
+                "sequence_len": 1024,
+                "val_set_size": 0.02,
+                "special_tokens": {
+                    "unk_token": "<unk>",
+                    "bos_token": "<s>",
+                    "eos_token": "</s>",
+                },
+                "datasets": [
+                    {
+                        "path": "mhenrichsen/alpaca_2k_test",
+                        "type": "alpaca",
+                    },
+                ],
+                "num_epochs": 1,
+                "micro_batch_size": 8,
+                "gradient_accumulation_steps": 1,
+                "output_dir": temp_dir,
+                "learning_rate": 0.00001,
+                "optimizer": "adamw_torch_fused",
+                "lr_scheduler": "cosine",
+                "max_steps": 20,
+            }
+        )
+
+        # write cfg to yaml file
+        Path(temp_dir).mkdir(parents=True, exist_ok=True)
+        with open(Path(temp_dir) / "config.yaml", "w", encoding="utf-8") as fout:
+            fout.write(yaml.dump(cfg.to_dict(), Dumper=yaml.Dumper))
+
+        execute_subprocess_async(
+            [
+                "accelerate",
+                "launch",
+                "--num-processes",
+                "2",
+                "--main_process_port",
+                f"{get_torch_dist_unique_port()}",
+                "-m",
+                "axolotl.cli.evaluate",
+                str(Path(temp_dir) / "config.yaml"),
+            ]
+        )
@@ -105,7 +105,25 @@ def require_vllm(test_case):
         return False

     return unittest.skipUnless(
-        is_vllm_installed(), "test requires a vllm to be installed"
+        is_vllm_installed(), "test requires vllm to be installed"
+    )(test_case)
+
+
+def require_llmcompressor(test_case):
+    """
+    Decorator marking a test that requires llmcompressor to be installed
+    """
+
+    def is_llmcompressor_installed():
+        try:
+            import llmcompressor  # pylint: disable=unused-import  # noqa: F401
+
+            return True
+        except ImportError:
+            return False
+
+    return unittest.skipUnless(
+        is_llmcompressor_installed(), "test requires llmcompressor to be installed"
     )(test_case)
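
Note: usage sketch for the new decorator; the decorated test is skipped, not failed, when the package is missing (the test name is hypothetical):

    @require_llmcompressor
    def test_sparse_finetune_smoke():
        import llmcompressor  # noqa: F401  # safe: only runs when installed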
@@ -648,7 +648,7 @@ class TestValidation(BaseValidation):
             DictDefault(
                 {
                     "sample_packing": True,
-                    "pad_to_sequence_len": None,
+                    "pad_to_sequence_len": False,
                     "flash_attention": True,
                 }
             )
@@ -662,6 +662,26 @@ class TestValidation(BaseValidation):
                 for record in self._caplog.records
             )

+    def test_packing_autoset(self, minimal_cfg):
+        cfg = (
+            DictDefault(
+                {
+                    "sample_packing": True,
+                    "pad_to_sequence_len": None,
+                    "flash_attention": True,
+                }
+            )
+            | minimal_cfg
+        )
+        with self._caplog.at_level(logging.INFO):
+            cfg = validate_config(cfg)
+            assert any(
+                "Setting `pad_to_sequence_len: true` to prevent memory leaks when sample_packing"
+                in record.message
+                for record in self._caplog.records
+            )
+        assert cfg.pad_to_sequence_len is True
+
     def test_merge_lora_no_bf16_fail(self, minimal_cfg):
         """
         This is assumed to be run on a CPU machine, so bf16 is not supported.
@@ -34,7 +34,31 @@ def messages_w_reasoning_fixture():
                     "content": "<think>lorem</think>\nwelcome",
                 },
             ]
-        }
+        },
+        {
+            "messages": [
+                {
+                    "role": "user",
+                    "content": "hello",
+                },
+                {
+                    "role": "assistant",
+                    "content": "<|begin_of_thought|>lorem<|end_of_thought|>\n<|begin_of_solution|>welcome\n<|end_of_solution|>",
+                },
+            ]
+        },
+        {
+            "messages": [
+                {
+                    "role": "user",
+                    "content": "hello",
+                },
+                {
+                    "role": "assistant",
+                    "content": "<reasoning>lorem</reasoning>\nwelcome",
+                },
+            ]
+        },
     ]
 )
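
Note: the fixture now covers three reasoning-delimiter styles: `<think>...</think>`, `<|begin_of_thought|>...<|end_of_thought|>` paired with a `<|begin_of_solution|>` block, and `<reasoning>...</reasoning>`. A standalone sketch (not the library's implementation) of extracting `reasoning_content` for the first and last styles:

    import re

    PATTERNS = [
        re.compile(r"<think>(.*?)</think>\s*", re.DOTALL),
        re.compile(r"<reasoning>(.*?)</reasoning>\s*", re.DOTALL),
    ]


    def split_reasoning(text: str) -> tuple[str | None, str]:
        for pat in PATTERNS:
            match = pat.search(text)
            if match:
                return match.group(1), pat.sub("", text, count=1)
        return None, text


    assert split_reasoning("<reasoning>lorem</reasoning>\nwelcome") == ("lorem", "welcome")

The thought/solution variant additionally needs the solution wrapper stripped, which this sketch does not attempt.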
@@ -83,36 +107,37 @@ class TestSplitThinking:
                 }
             ),
         )
-        transformed_prompt = strategy.get_conversation_thread(messages_w_reasoning[0])
-        assert transformed_prompt[0]["role"] == "user"
-        assert transformed_prompt[1]["role"] == "assistant"
-        assert transformed_prompt[1]["reasoning_content"] == "lorem"
-        assert transformed_prompt[1]["content"] == "welcome"
-
-        res = strategy.tokenize_prompt(messages_w_reasoning[0])
-        input_ids = res["input_ids"]
-        # fmt: off
-        expected_input_ids = [
-            151644,  # im_start
-            872,  # user
-            198,  # \n
-            14990,  # hello
-            151645,  # im_end
-            198,  # \n
-            151644,  # im_start
-            77091,  # assistant
-            198,  # \n
-            151667,  # think
-            198,  # \n
-            385, 1826,  # lorem
-            198,  # \n
-            151668,  # /think
-            271,  # \n
-            34084,  # welcome
-            151645,  # im_end
-            198,  # \n
-        ]
-        # fmt: on
-        assert (
-            input_ids == expected_input_ids
-        ), f"Input IDs mismatch: {input_ids} != {expected_input_ids}"
+        for conversation in messages_w_reasoning:
+            transformed_prompt = strategy.get_conversation_thread(conversation)
+            assert transformed_prompt[0]["role"] == "user"
+            assert transformed_prompt[1]["role"] == "assistant"
+            assert transformed_prompt[1]["reasoning_content"] == "lorem"
+            assert transformed_prompt[1]["content"] == "welcome"
+
+            res = strategy.tokenize_prompt(conversation)
+            input_ids = res["input_ids"]
+            # fmt: off
+            expected_input_ids = [
+                151644,  # im_start
+                872,  # user
+                198,  # \n
+                14990,  # hello
+                151645,  # im_end
+                198,  # \n
+                151644,  # im_start
+                77091,  # assistant
+                198,  # \n
+                151667,  # think
+                198,  # \n
+                385, 1826,  # lorem
+                198,  # \n
+                151668,  # /think
+                271,  # \n
+                34084,  # welcome
+                151645,  # im_end
+                198,  # \n
+            ]
+            # fmt: on
+            assert (
+                input_ids == expected_input_ids
+            ), f"Input IDs mismatch: {input_ids} != {expected_input_ids}"