Compare commits

22 commits: `fsdp-fft...device-mes`
| SHA1 |
|---|
| 3ade0b81db |
| 756a34f0fe |
| 198f7cd893 |
| fefa95e350 |
| b33dc07a77 |
| dcbff16983 |
| 2f8037fee6 |
| de4ea2d1f2 |
| 7ed92e61c2 |
| 9caa3eb699 |
| 5b0b774e38 |
| c3fc529bfc |
| 957c956f89 |
| f07802f9fa |
| 9f917245f6 |
| 649c19aba3 |
| 5aac4bc284 |
| e29931259b |
| b1d2921222 |
| 803fed3e90 |
| 68a3c7678a |
| f18925fb4b |
`.github/workflows/multi-gpu-e2e.yml` (vendored; 8 changed lines)

```diff
@@ -18,6 +18,13 @@ jobs:
           pytorch: 2.3.1
           axolotl_extras:
           num_gpus: 2
+        - cuda: 121
+          cuda_version: 12.1.1
+          python_version: "3.11"
+          pytorch: 2.3.1
+          axolotl_extras:
+          num_gpus: 2
+          nightly_build: "true"
     runs-on: [self-hosted, modal]
     timeout-minutes: 120
     steps:
@@ -39,6 +46,7 @@ jobs:
           echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
           echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
           echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
+          echo "NIGHTLY_BUILD=${{ matrix.nightly_build }}" >> $GITHUB_ENV
       - name: Run tests job on Modal
         run: |
           modal run cicd.multigpu
```
`.github/workflows/tests-nightly.yml` (vendored; new file, 116 lines)

```yaml
name: Tests Nightly against upstream main
on:
  workflow_dispatch:
  schedule:
    - cron: '0 0 * * *'  # Runs at 00:00 UTC every day

jobs:
  pre-commit:
    name: pre-commit
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.10"
          cache: 'pip'  # caching pip dependencies
      - uses: pre-commit/action@v3.0.0
        env:
          SKIP: no-commit-to-branch

  pytest:
    name: PyTest
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python_version: ["3.10", "3.11"]
    timeout-minutes: 20

    steps:
      - name: Check out repository code
        uses: actions/checkout@v3

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python_version }}
          cache: 'pip'  # caching pip dependencies

      - name: Update requirements.txt
        run: |
          sed -i 's#^transformers.*#transformers @ git+https://github.com/huggingface/transformers.git@main#' requirements.txt
          sed -i 's#^peft.*#peft @ git+https://github.com/huggingface/peft.git@main#' requirements.txt
          sed -i 's#^accelerate.*#accelerate @ git+https://github.com/huggingface/accelerate.git@main#' requirements.txt
          sed -i 's#^bitsandbytes.*#bitsandbytes @ git+https://github.com/bitsandbytes-foundation/bitsandbytes.git@main#' requirements.txt

      - name: Install dependencies
        run: |
          pip3 install --upgrade pip
          pip3 install --upgrade packaging
          pip3 install -U -e .
          pip3 install -r requirements-tests.txt

      - name: Run tests
        run: |
          pytest --ignore=tests/e2e/ tests/

      - name: cleanup pip cache
        run: |
          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;

  docker-e2e-tests:
    if: github.repository_owner == 'axolotl-ai-cloud'
    # this job needs to be run on self-hosted GPU runners...
    runs-on: [self-hosted, modal]
    timeout-minutes: 60
    needs: [pre-commit, pytest]

    strategy:
      fail-fast: false
      matrix:
        include:
          - cuda: 121
            cuda_version: 12.1.1
            python_version: "3.10"
            pytorch: 2.3.1
            num_gpus: 1
            axolotl_extras: mamba-ssm
            nightly_build: "true"
          - cuda: 121
            cuda_version: 12.1.1
            python_version: "3.11"
            pytorch: 2.3.1
            num_gpus: 1
            axolotl_extras: mamba-ssm
            nightly_build: "true"
          - cuda: 124
            cuda_version: 12.4.1
            python_version: "3.11"
            pytorch: 2.4.0
            num_gpus: 1
            axolotl_extras:
            nightly_build: "true"
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Install Modal
        run: |
          python -m pip install --upgrade pip
          pip install modal==0.63.64 jinja2
      - name: Update env vars
        run: |
          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
          echo "PYTORCH_VERSION=${{ matrix.pytorch}}" >> $GITHUB_ENV
          echo "AXOLOTL_ARGS=${{ matrix.axolotl_args}}" >> $GITHUB_ENV
          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras}}" >> $GITHUB_ENV
          echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
          echo "NIGHTLY_BUILD=${{ matrix.nightly_build }}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        run: |
          modal run cicd.tests
```
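Note the effect of the `Update requirements.txt` step: the pinned `transformers`, `peft`, `accelerate`, and `bitsandbytes` requirements are rewritten to their upstream `main` branches, so a red nightly run flags an upstream regression (or an axolotl incompatibility) before any release-version bump lands.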
`README.md` (81 changed lines)

```diff
@@ -1,5 +1,9 @@
 # Axolotl
 
+
+
+
+
 Axolotl is a tool designed to streamline the fine-tuning of various AI models, offering support for multiple configurations and architectures.
 
 Features:
@@ -22,39 +26,49 @@ Features:
 <td>
 
 ## Table of Contents
-- [Introduction](#axolotl)
-- [Supported Features](#axolotl-supports)
-- [Quickstart](#quickstart-)
-- [Environment](#environment)
-- [Docker](#docker)
-- [Conda/Pip venv](#condapip-venv)
-- [Cloud GPU](#cloud-gpu) - Latitude.sh, JarvisLabs, RunPod
-- [Bare Metal Cloud GPU](#bare-metal-cloud-gpu)
-- [Windows](#windows)
-- [Mac](#mac)
-- [Google Colab](#google-colab)
-- [Launching on public clouds via SkyPilot](#launching-on-public-clouds-via-skypilot)
-- [Launching on public clouds via dstack](#launching-on-public-clouds-via-dstack)
-- [Dataset](#dataset)
-- [Config](#config)
-- [Train](#train)
-- [Inference](#inference-playground)
-- [Merge LORA to Base](#merge-lora-to-base)
-- [Special Tokens](#special-tokens)
-- [All Config Options](#all-config-options)
-- Advanced Topics
-- [Multipack](./docs/multipack.qmd)<svg width="24" height="24" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><path d="M17 13.5v6H5v-12h6m3-3h6v6m0-6-9 9" class="icon_svg-stroke" stroke="#666" stroke-width="1.5" fill="none" fill-rule="evenodd" stroke-linecap="round" stroke-linejoin="round"></path></svg>
-- [RLHF & DPO](./docs/rlhf.qmd)<svg width="24" height="24" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><path d="M17 13.5v6H5v-12h6m3-3h6v6m0-6-9 9" class="icon_svg-stroke" stroke="#666" stroke-width="1.5" fill="none" fill-rule="evenodd" stroke-linecap="round" stroke-linejoin="round"></path></svg>
-- [Dataset Pre-Processing](./docs/dataset_preprocessing.qmd)<svg width="24" height="24" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><path d="M17 13.5v6H5v-12h6m3-3h6v6m0-6-9 9" class="icon_svg-stroke" stroke="#666" stroke-width="1.5" fill="none" fill-rule="evenodd" stroke-linecap="round" stroke-linejoin="round"></path></svg>
-- [Unsloth](./docs/unsloth.qmd)<svg width="24" height="24" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><path d="M17 13.5v6H5v-12h6m3-3h6v6m0-6-9 9" class="icon_svg-stroke" stroke="#666" stroke-width="1.5" fill="none" fill-rule="evenodd" stroke-linecap="round" stroke-linejoin="round"></path></svg>
-- [Common Errors](#common-errors-)
-- [Tokenization Mismatch b/w Training & Inference](#tokenization-mismatch-bw-inference--training)
-- [Debugging Axolotl](#debugging-axolotl)
-- [Need Help?](#need-help-)
-- [Badge](#badge-)
-- [Community Showcase](#community-showcase)
-- [Contributing](#contributing-)
-- [Sponsors](#sponsors-)
+- [Axolotl](#axolotl)
+- [Table of Contents](#table-of-contents)
+- [Axolotl supports](#axolotl-supports)
+- [Quickstart ⚡](#quickstart-)
+- [Usage](#usage)
+- [Advanced Setup](#advanced-setup)
+- [Environment](#environment)
+- [Docker](#docker)
+- [Conda/Pip venv](#condapip-venv)
+- [Cloud GPU](#cloud-gpu)
+- [Bare Metal Cloud GPU](#bare-metal-cloud-gpu)
+- [LambdaLabs](#lambdalabs)
+- [GCP](#gcp)
+- [Windows](#windows)
+- [Mac](#mac)
+- [Google Colab](#google-colab)
+- [Launching on public clouds via SkyPilot](#launching-on-public-clouds-via-skypilot)
+- [Launching on public clouds via dstack](#launching-on-public-clouds-via-dstack)
+- [Dataset](#dataset)
+- [Config](#config)
+- [All Config Options](#all-config-options)
+- [Train](#train)
+- [Preprocess dataset](#preprocess-dataset)
+- [Multi-GPU](#multi-gpu)
+- [DeepSpeed](#deepspeed)
+- [FSDP](#fsdp)
+- [FSDP + QLoRA](#fsdp--qlora)
+- [Weights \& Biases Logging](#weights--biases-logging)
+- [Special Tokens](#special-tokens)
+- [Inference Playground](#inference-playground)
+- [Merge LORA to base](#merge-lora-to-base)
+- [Common Errors 🧰](#common-errors-)
+- [Tokenization Mismatch b/w Inference \& Training](#tokenization-mismatch-bw-inference--training)
+- [Debugging Axolotl](#debugging-axolotl)
+- [Need help? 🙋](#need-help-)
+- [Badge ❤🏷️](#badge-️)
+- [Community Showcase](#community-showcase)
+- [Contributing 🤝](#contributing-)
+- [Sponsors 🤝❤](#sponsors-)
+- [💎 Diamond Sponsors - Contact directly](#-diamond-sponsors---contact-directly)
+- [🥇 Gold Sponsors - $5000/mo](#-gold-sponsors---5000mo)
+- [🥈 Silver Sponsors - $1000/mo](#-silver-sponsors---1000mo)
+- [🥉 Bronze Sponsors - $500/mo](#-bronze-sponsors---500mo)
 
 </td>
 <td>
@@ -96,6 +110,7 @@ Features:
 | RWKV | ✅ | ❓ | ❓ | ❓ | ❓ | ❓ | ❓ |
 | Qwen | ✅ | ✅ | ✅ | ❓ | ❓ | ❓ | ❓ |
 | Gemma | ✅ | ✅ | ✅ | ❓ | ❓ | ✅ | ❓ |
+| Jamba | ✅ | ✅ | ✅ | ❓ | ❓ | ✅ | ❓ |
 
 ✅: supported
 ❌: not supported
```
(Jinja-templated Dockerfile; file name not preserved in the extract)

```diff
@@ -8,6 +8,7 @@ ENV BNB_CUDA_VERSION="{{ CUDA }}"
 ENV PYTORCH_VERSION="{{ PYTORCH_VERSION }}"
 ENV GITHUB_REF="{{ GITHUB_REF }}"
 ENV GITHUB_SHA="{{ GITHUB_SHA }}"
+ENV NIGHTLY_BUILD="{{ NIGHTLY_BUILD }}"
 
 RUN apt-get update && \
     apt-get install -y --allow-change-held-packages vim curl nano libnccl2 libnccl-dev
@@ -23,6 +24,13 @@ RUN git fetch origin +$GITHUB_REF && \
 
 # If AXOLOTL_EXTRAS is set, append it in brackets
 RUN pip install causal_conv1d
+RUN if [ "$NIGHTLY_BUILD" = "true" ] ; then \
+        sed -i 's#^transformers.*#transformers @ git+https://github.com/huggingface/transformers.git@main#' requirements.txt; \
+        sed -i 's#^peft.*#peft @ git+https://github.com/huggingface/peft.git@main#' requirements.txt; \
+        sed -i 's#^accelerate.*#accelerate @ git+https://github.com/huggingface/accelerate.git@main#' requirements.txt; \
+        sed -i 's#^bitsandbytes.*#bitsandbytes @ git+https://github.com/bitsandbytes-foundation/bitsandbytes.git@main#' requirements.txt; \
+    fi
+
 RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
         pip install -e .[deepspeed,flash-attn,optimizers,$AXOLOTL_EXTRAS] $AXOLOTL_ARGS; \
     else \
```
(Modal CI image build script; file name not preserved in the extract)

```diff
@@ -28,6 +28,7 @@ df_args = {
     "CUDA": os.environ.get("CUDA", "121"),
     "GITHUB_REF": os.environ.get("GITHUB_REF", "refs/heads/main"),
     "GITHUB_SHA": os.environ.get("GITHUB_SHA", ""),
+    "NIGHTLY_BUILD": os.environ.get("NIGHTLY_BUILD", ""),
 }
 
 dockerfile_contents = df_template.render(**df_args)
```
(Unsloth docs; file name not preserved in the extract)

````diff
@@ -34,7 +34,7 @@ unsloth_lora_o: true
 ```
 
 These options are composable and can be used with multi-gpu finetuning
-```
+```yaml
 unsloth_cross_entropy_loss: true
 unsloth_rms_norm: true
 unsloth_rope: true
````
(Multi-GPU status notes; file name not preserved in the extract)

```diff
@@ -6,5 +6,5 @@
 - ✅ qlora w/ deepspeed Zero-3 needs at least 2x GPUs and 67GiB VRAM (wtf?)
 - ✅ qlora single-gpu, ~51GiB VRAM
 - ✅ multipack
-- ❓ FSDP
+- ✅ FSDP
 - ❓ 8-bit LoRA
```
`examples/jamba/qlora_fsdp_large.yaml` (new file, 61 lines)

```yaml
base_model: ai21labs/AI21-Jamba-1.5-Large
tokenizer_type: AutoTokenizer

load_in_4bit: true
strict: false
use_tensorboard: true
datasets:
  - path: cgato/SlimOrcaDedupCleaned
    type: chat_template
    chat_template: jamba
    drop_system_message: true
dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: jamba-large-fsdp-qlora-ft
save_safetensors: true
adapter: qlora
sequence_len: 2048
sample_packing: true
pad_to_sequence_len: true

lora_r: 16
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules: [down_proj,gate_proj,in_proj,k_proj,o_proj,out_proj,q_proj,up_proj,v_proj,x_proj]
lora_target_linear: false

gradient_accumulation_steps: 4
micro_batch_size: 1
num_epochs: 2
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 0.00001

train_on_inputs: false
group_by_length: false
bf16: true
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: true
logging_steps: 1
flash_attention: true

warmup_steps: 10
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: false
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: JambaAttentionDecoderLayer,JambaMambaDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
```
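Assuming the standard axolotl entrypoint (the same one the multi-GPU e2e test further down invokes), this config would be launched with something like `accelerate launch -m axolotl.cli.train examples/jamba/qlora_fsdp_large.yaml`.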
`examples/llama-3/fft-4b-fsdp-tp.yaml` (new file, 62 lines)

```yaml
base_model: nvidia/Llama-3.1-Minitron-4B-Width-Base
model_type: LlamaForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: mlabonne/FineTome-100k
    type: chat_template
    split: train
    train_on_eos: turn
dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: ./outputs/out

sequence_len: 2048
sample_packing: true
pad_to_sequence_len: true

wandb_project: device_mesh-test
wandb_entity: axolotl-ai
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 1
micro_batch_size: 4
num_epochs: 1
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 2e-5

train_on_inputs: false
group_by_length: true
bf16: true
fp16:
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
early_stopping_patience:
resume_from_checkpoint:
logging_steps: 1
xformers_attention:
flash_attention: true
eager_attention:

warmup_steps: 100
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0
fsdp:
  - auto_wrap
fsdp_config:
  fsdp_use_orig_params: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
special_tokens:
  pad_token: <|end_of_text|>
```
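This example pairs with the `FSDP_TP_SIZE` environment variable read by the trainer-builder change further down; a 4-GPU run would presumably look like `FSDP_TP_SIZE=2 accelerate launch -m axolotl.cli.train examples/llama-3/fft-4b-fsdp-tp.yaml`, giving a 2 (data-parallel) by 2 (tensor-parallel) device mesh. The `wandb_project: device_mesh-test` entry marks this as a test config for that work.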
(Qwen2 example config; file name not preserved in the extract)

```diff
@@ -72,4 +72,5 @@ fsdp_config:
   fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
   fsdp_transformer_layer_cls_to_wrap: Qwen2DecoderLayer
   fsdp_state_dict_type: FULL_STATE_DICT
+  fsdp_sharding_strategy: FULL_SHARD
 special_tokens:
```
(Pretraining example config; file name not preserved in the extract)

```diff
@@ -9,9 +9,9 @@ strict: false
 
 max_steps: 200
 pretraining_dataset:
-  path: c4
-  name: en
-  type: pretrain
+  - path: allenai/c4
+    name: en
+    type: pretrain
dataset_prepared_path:
 val_set_size: 0.0
 output_dir: ./outputs/model-out
```
`requirements.txt`

```diff
@@ -21,11 +21,11 @@ optimum==1.16.2
 hf_transfer
 colorama
 numba
-numpy>=1.24.4
+numpy>=1.24.4,<=2.0.1
 # qlora things
 evaluate==0.4.1
 scipy
-scikit-learn==1.2.2
+scikit-learn==1.4.2
 pynvml
 art
 fschat @ git+https://github.com/lm-sys/FastChat.git@27a05b04a35510afb1d767ae7e5990cbd278f8fe
```
`setup.py` (2 changed lines)

```diff
@@ -80,7 +80,7 @@ setup(
     dependency_links=dependency_links,
     extras_require={
         "flash-attn": [
-            "flash-attn==2.6.2",
+            "flash-attn==2.6.3",
         ],
         "fused-dense-lib": [
             "fused-dense-lib @ git+https://github.com/Dao-AILab/flash-attention@v2.6.2#subdirectory=csrc/fused_dense_lib",
```
`src/axolotl/cli/merge_sharded_fsdp_weights.py` (new file, 204 lines)

```python
"""
This module provides a CLI to merge sharded FSDP model checkpoints into a single combined checkpoint
"""
import json
import logging
import os
import shutil
from pathlib import Path
from typing import Dict, Union

import fire
import torch
import torch.distributed.checkpoint as dist_cp
import torch.distributed.checkpoint.format_utils as dist_cp_format_utils
import transformers
from accelerate.utils import (
    SAFE_WEIGHTS_INDEX_NAME,
    SAFE_WEIGHTS_NAME,
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
    is_torch_version,
)
from dotenv import load_dotenv
from huggingface_hub import split_torch_state_dict_into_shards
from safetensors.torch import save_file as safe_save_file
from torch.distributed.checkpoint.format_utils import _EmptyStateDictLoadPlanner

from axolotl.cli import load_cfg, print_axolotl_text_art
from axolotl.common.cli import TrainerCliArgs

LOG = logging.getLogger("axolotl.cli.merge_sharded_fsdp_weights")


class BFloat16CastPlanner(_EmptyStateDictLoadPlanner):
    """
    A custom planner to cast tensors to bfloat16 on the fly during loading.
    """

    def commit_tensor(self, read_item, tensor):  # pylint: disable=unused-argument
        tensor.copy_(tensor.to(torch.bfloat16))


def _distributed_checkpoint_to_merged_weights(
    checkpoint_dir: Union[str, Path],
    save_path: str,
    safe_serialization: bool = False,
    max_shard_size: str = "5GB",
):
    """
    Passthrough to `torch.distributed.checkpoint.format_utils.dcp_to_torch_save`

    Will save under `save_path` as either `model.safetensors` or `pytorch_model.bin`.
    """

    state_dict: Dict = {}
    save_path_ = Path(save_path)
    save_path_.mkdir(exist_ok=True)
    dist_cp_format_utils._load_state_dict(  # pylint: disable=protected-access
        state_dict,
        storage_reader=dist_cp.FileSystemReader(checkpoint_dir),
        planner=BFloat16CastPlanner(),
        no_dist=True,
    )

    # To handle if state is a dict like {model: {...}}
    if len(state_dict.keys()) == 1:
        state_dict = state_dict[list(state_dict)[0]]

    # Ensure all tensors are in bfloat16
    for key, value in state_dict.items():
        if isinstance(value, torch.Tensor) and value.dtype != torch.bfloat16:
            state_dict[key] = value.to(torch.bfloat16)

    weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME

    filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(
        ".safetensors", "{suffix}.safetensors"
    )
    state_dict_split = split_torch_state_dict_into_shards(
        state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size
    )
    # Save index if sharded
    index = None
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }

    # Save the model
    filename_to_tensors = state_dict_split.filename_to_tensors.items()

    for shard_file, tensors in filename_to_tensors:
        shard = {tensor: state_dict[tensor] for tensor in tensors}

        if safe_serialization:
            safe_save_file(
                shard, os.path.join(save_path_, shard_file), metadata={"format": "pt"}
            )
        else:
            torch.save(shard, os.path.join(save_path_, shard_file))

    if index is not None:
        save_index_file = (
            SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME
        )
        save_index_file = os.path.join(save_path_, save_index_file)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as fout:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            fout.write(content)

    return save_path_


def merge_fsdp_weights(
    checkpoint_dir: str,
    output_path: str,
    safe_serialization: bool = False,
    remove_checkpoint_dir: bool = False,
):
    """
    Merge the weights from sharded FSDP model checkpoints into a single combined checkpoint. Should be used if
    `SHARDED_STATE_DICT` was used for the model. Weights will be saved to `{output_path}/model.safetensors` if
    `safe_serialization` else `pytorch_model.bin`.

    Note: this is a CPU-bound process.

    Args:
        checkpoint_dir (`str`):
            The directory containing the FSDP checkpoints (can be either the model or optimizer).
        output_path (`str`):
            The path to save the merged checkpoint.
        safe_serialization (`bool`, *optional*, defaults to `False`):
            Whether to save the merged weights with safetensors (recommended).
        remove_checkpoint_dir (`bool`, *optional*, defaults to `False`):
            Whether to remove the checkpoint directory after merging.
    """
    checkpoint_dir_ = Path(checkpoint_dir)
    from accelerate.state import PartialState

    if not is_torch_version(">=", "2.3.0"):
        raise ValueError("`merge_fsdp_weights` requires PyTorch >= 2.3.0")

    # Verify that the checkpoint directory exists
    if not checkpoint_dir_.exists():
        model_path_exists = (checkpoint_dir_ / "pytorch_model_fsdp_0").exists()
        optimizer_path_exists = (checkpoint_dir_ / "optimizer_0").exists()
        err = f"Tried to load from {checkpoint_dir_} but couldn't find a valid metadata file."
        if model_path_exists and optimizer_path_exists:
            err += (
                " However, potential model and optimizer checkpoint directories exist."
            )
            err += f" Please pass in either {checkpoint_dir_}/pytorch_model_fsdp_0 or {checkpoint_dir_}/optimizer_0"
            err += " instead."
        elif model_path_exists:
            err += " However, a potential model checkpoint directory exists."
            err += (
                f" Please try passing in {checkpoint_dir_}/pytorch_model_fsdp_0 instead."
            )
        elif optimizer_path_exists:
            err += " However, a potential optimizer checkpoint directory exists."
            err += f" Please try passing in {checkpoint_dir_}/optimizer_0 instead."
        raise ValueError(err)

    # To setup `save` to work
    state = PartialState()
    if state.is_main_process:
        LOG.info(f"Merging FSDP weights from {checkpoint_dir_}")
        save_path = _distributed_checkpoint_to_merged_weights(
            checkpoint_dir_, output_path, safe_serialization
        )
        LOG.info(f"Successfully merged FSDP weights and saved to {save_path}")
        if remove_checkpoint_dir:
            LOG.info(f"Removing old checkpoint directory {checkpoint_dir_}")
            shutil.rmtree(checkpoint_dir_)
    state.wait_for_everyone()


def do_cli(config: Path = Path("examples/"), **kwargs):
    # pylint: disable=duplicate-code
    print_axolotl_text_art()
    parser = transformers.HfArgumentParser(TrainerCliArgs)
    parsed_cli_args, _ = parser.parse_args_into_dataclasses(
        return_remaining_strings=True
    )
    parsed_cli_args.merge_lora = True

    parsed_cfg = load_cfg(
        config,
        **kwargs,
    )

    fsdp_dir = Path(parsed_cfg.output_dir) / "pytorch_model_fsdp_0"
    merge_fsdp_weights(
        checkpoint_dir=str(fsdp_dir),
        output_path=str(Path(parsed_cfg.output_dir) / "merged"),
        safe_serialization=True,
    )


if __name__ == "__main__":
    load_dotenv()
    fire.Fire(do_cli)
```
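For reference, a minimal programmatic use of the new helper might look like the sketch below. The `outputs/out` paths are hypothetical placeholders; they assume a finished FSDP run whose `output_dir` contains a `pytorch_model_fsdp_0/` directory written with `SHARDED_STATE_DICT`.

```python
# Minimal sketch, not an official entrypoint: merge a sharded FSDP checkpoint
# into a single safetensors model. The paths below are hypothetical.
from axolotl.cli.merge_sharded_fsdp_weights import merge_fsdp_weights

merge_fsdp_weights(
    checkpoint_dir="outputs/out/pytorch_model_fsdp_0",
    output_path="outputs/out/merged",
    safe_serialization=True,  # writes model.safetensors (plus an index if sharded)
)
```

Config-driven use mirrors `do_cli` above: `python -m axolotl.cli.merge_sharded_fsdp_weights your_config.yaml` resolves `pytorch_model_fsdp_0` under the config's `output_dir` automatically.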
(CLI entrypoint; file name not preserved in the extract)

```diff
@@ -82,7 +82,14 @@ def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs):
         # "copying from a non-meta parameter in the checkpoint to a meta parameter in the current model"
         warnings.simplefilter("ignore")
         with init_empty_weights(include_buffers=True):
-            AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
+            # fmt: off
+            try:
+                AutoModelForCausalLM.from_pretrained(
+                    model_name, trust_remote_code=True
+                )
+            except Exception as exc:  # pylint: disable=broad-exception-caught,unused-variable # nosec B110 # noqa F841
+                pass
+            # fmt: on
 
     LOG.info(
         Fore.GREEN
```
(Trainer builder; file name not preserved in the extract)

```diff
@@ -20,6 +20,14 @@ from typing import Dict, List, Literal, Optional, Type, Union
 
 import torch
 import transformers
 from datasets import Dataset
+from torch.distributed._tensor import Replicate, Shard
+from torch.distributed.tensor.parallel import (
+    ColwiseParallel,
+    PrepareModuleInput,
+    RowwiseParallel,
+    SequenceParallel,
+    parallelize_module,
+)
 from torch.optim.lr_scheduler import OneCycleLR
 from torch.utils.data import BatchSampler, DataLoader, RandomSampler, SequentialSampler
 from transformers import (
@@ -1233,6 +1241,20 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             training_arguments_kwargs["fsdp_config"] = {
                 k.lstrip("fsdp_"): v for k, v in dict(self.cfg.fsdp_config).items()
             }
+            # FIXME: hardcoded testing sizes
+            tp_size = int(os.environ.get("FSDP_TP_SIZE", 0))
+            if tp_size > 0:
+                world_size = int(os.environ.get("WORLD_SIZE", 1))
+                dp_size = world_size // tp_size
+                from torch.distributed.device_mesh import init_device_mesh
+
+                device_mesh = init_device_mesh(
+                    "cuda", (dp_size, tp_size), mesh_dim_names=("dp", "tp")
+                )
+                dp_mesh = device_mesh["dp"]
+                tp_mesh = device_mesh["tp"]
+                training_arguments_kwargs["fsdp_config"]["device_mesh"] = dp_mesh
+                self.parallelize_model(tp_mesh)
 
         if self.cfg.adapter == "qlora":
             training_arguments_kwargs["qlora"] = True
```
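For context, here is a standalone sketch of the 2-D mesh the hunk above builds. It assumes four visible GPUs, `FSDP_TP_SIZE=2`, and a `torchrun`-style launcher that sets `WORLD_SIZE`; it mirrors the code above rather than any public axolotl API.

```python
# Sketch: build a (dp, tp) device mesh the way the trainer builder does.
import os

import torch.distributed as dist
from torch.distributed.device_mesh import init_device_mesh

dist.init_process_group("nccl")  # torchrun supplies RANK/WORLD_SIZE
tp_size = int(os.environ.get("FSDP_TP_SIZE", "2"))  # assumed value
dp_size = dist.get_world_size() // tp_size  # e.g. 4 GPUs -> a (2, 2) mesh
mesh = init_device_mesh("cuda", (dp_size, tp_size), mesh_dim_names=("dp", "tp"))

# FSDP shards parameters across mesh["dp"], while the tensor-parallel plan in
# parallelize_model() splits individual projections across mesh["tp"].
```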
```diff
@@ -1605,6 +1627,67 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
 
         return trainer
 
+    def parallelize_model(self, device_mesh, loss_parallel=False):
+        # FIXME hardcoded for llama
+        tp_mesh = device_mesh["tp"]
+
+        parallelize_module(
+            self.model,
+            tp_mesh,
+            {
+                "lm_head": ColwiseParallel(
+                    input_layouts=Shard(1),
+                    output_layouts=Shard(-1) if loss_parallel else Replicate(),
+                    use_local_output=not loss_parallel,
+                ),
+            },
+        )
+        parallelize_module(
+            self.model.model,
+            tp_mesh,
+            {
+                "embed_tokens": RowwiseParallel(
+                    input_layouts=Replicate(),
+                    output_layouts=Shard(1),
+                ),
+                "norm": SequenceParallel(),
+            },
+        )
+
+        for _, transformer_block in enumerate(self.model.model.layers):
+            layer_plan = {
+                "input_layernorm": SequenceParallel(),
+                "self_attn": PrepareModuleInput(
+                    input_layouts=(Shard(1),),
+                    desired_input_layouts=(Replicate()),
+                ),
+                "self_attn.q_proj": ColwiseParallel(),
+                "self_attn.k_proj": ColwiseParallel(),
+                "self_attn.v_proj": ColwiseParallel(),
+                "self_attn.o_proj": RowwiseParallel(output_layouts=Shard(1)),
+                "post_attention_layernorm": SequenceParallel(),
+                "mlp": PrepareModuleInput(
+                    input_layouts=(Shard(1),),
+                    desired_input_layouts=(Replicate(),),
+                ),
+                "mlp.gate_proj": ColwiseParallel(),
+                "mlp.up_proj": ColwiseParallel(),
+                "mlp.down_proj": RowwiseParallel(output_layouts=Shard(1)),
+            }
+            self_attn = transformer_block.self_attn
+            self_attn.num_heads = self_attn.num_heads // tp_mesh.size()
+            self_attn.num_key_value_heads = (
+                self_attn.num_key_value_heads // tp_mesh.size()
+            )
+
+            # TODO need to fix self_attn.rotary_emb
+
+            parallelize_module(
+                transformer_block,
+                tp_mesh,
+                layer_plan,
+            )
+
     def build_collator(
         self, training_args: AxolotlTrainingArguments, is_eval=False, **kwargs
     ):
```
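The in-place division of `num_heads` and `num_key_value_heads` is needed because `ColwiseParallel` on the q/k/v projections shards their outputs across the `tp` mesh dimension, so each rank's attention forward only ever materializes `num_heads / tp_size` (and `num_key_value_heads / tp_size`) heads. The FIXME and TODO markers flag that this plan is Llama-only and that `rotary_emb` is not yet handled.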
```diff
@@ -1846,6 +1929,8 @@ class HFRLTrainerBuilder(TrainerBuilderBase):
         )
         if self.cfg.fsdp:
             ensure_dtype(dpo_trainer.model, dtype=self.cfg.torch_dtype)
+            if self.cfg.rl in ["dpo", "ipo"] and dpo_trainer.ref_model:
+                ensure_dtype(dpo_trainer.ref_model, dtype=self.cfg.torch_dtype)
 
         dpo_trainer = self.hook_post_create_trainer(dpo_trainer)
         for callback in self.get_post_trainer_create_callbacks(dpo_trainer):
```
(Multipack patch registry; file name not preserved in the extract)

```diff
@@ -17,6 +17,7 @@ SUPPORTED_MULTIPACK_MODEL_TYPES = [
     "qwen2_moe",
     "falcon",
     "phi",
     "phi3",
     "gemma",
+    "gemma2",
     "gemmoe",
```
(Chat template prompt strategy; file name not preserved in the extract)

```diff
@@ -357,7 +357,7 @@ def load(tokenizer, cfg, ds_cfg: Optional[Dict[str, Any]] = None):
         "train_on_inputs": cfg.train_on_inputs,
         "sequence_len": cfg.sequence_len,
         "roles_to_train": ds_cfg.get("roles_to_train", ["gpt", "assistant"]),
-        "train_on_eos": ds_cfg.get("train_on_eos", "last"),
+        "train_on_eos": ds_cfg.get("train_on_eos", "turn"),
     }
 
     strategy = ChatTemplateStrategy(
```
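With the default flipped from `last` to `turn`, the EOS token is included in the training labels at the end of every trainable assistant turn rather than only after the final one; datasets can still override this via `train_on_eos` in their config, as `examples/llama-3/fft-4b-fsdp-tp.yaml` above does explicitly.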
(Alpaca prompter; file name not preserved in the extract)

```diff
@@ -65,8 +65,10 @@ class AlpacaPrompter(Prompter):
             self.system_format = "<|im_start|>system\n{system}<|im_end|>\n"
         elif self.prompt_style == PromptStyle.PHI.value:
             self.turn_format = "<|user|>\n{instruction}<|end|>{input}<|assistant|>"
-            self.turn_no_input_format = "<|user|>\n{instruction}<|end|><|assistant|>"
-            self.system_format = "<|system|>{system}\n"
+            self.turn_no_input_format = (
+                "<|user|>\n{instruction}<|end|>\n<|assistant|>\n"
+            )
+            self.system_format = "<|system|>\n{system}<|end|>\n"
 
     def _build_result(self, instruction, input_text, output):
         # returns the full prompt from instruction and optional input
```
(Training entrypoint; file name not preserved in the extract)

```diff
@@ -12,6 +12,7 @@ import torch
 import transformers.modelcard
 from accelerate import Accelerator
 from accelerate.logging import get_logger
+from accelerate.utils import save_fsdp_model
 from datasets import Dataset
 from peft import PeftModel
 from pkg_resources import get_distribution  # type: ignore
@@ -194,9 +195,12 @@ def train(
         if hasattr(module, "_post_training"):
             module._post_training(model, name)  # pylint: disable=protected-access
 
+    state_dict_type = "FULL_STATE_DICT"
     if trainer.is_fsdp_enabled:
-        trainer.accelerator.state.fsdp_plugin.set_state_dict_type("FULL_STATE_DICT")
-        LOG.info("Set FSDP state dict type to FULL_STATE_DICT for saving.")
+        if cfg.fsdp_final_state_dict_type:
+            state_dict_type = cfg.fsdp_final_state_dict_type
+        trainer.accelerator.state.fsdp_plugin.set_state_dict_type(state_dict_type)
+        LOG.info(f"Set FSDP state dict type to {state_dict_type} for saving.")
 
     if cfg.relora_steps:
         if cfg.adapter == "lora" and not (cfg.load_in_4bit or cfg.load_in_8bit):
@@ -208,7 +212,18 @@ def train(
     # TODO do we need this fix? https://huggingface.co/docs/accelerate/usage_guides/fsdp#saving-and-loading
     # only save on rank 0, otherwise it corrupts output on multi-GPU when multiple processes attempt to write the same file
     if cfg.fsdp:
-        trainer.save_model(cfg.output_dir)
+        if (
+            state_dict_type == "SHARDED_STATE_DICT"
+            and cfg.fsdp_config.fsdp_state_dict_type == "SHARDED_STATE_DICT"
+        ):
+            save_fsdp_model(
+                trainer.accelerator.state.fsdp_plugin,
+                trainer.accelerator,
+                trainer.model,
+                cfg.output_dir,
+            )
+        elif state_dict_type == "FULL_STATE_DICT":
+            trainer.save_model(cfg.output_dir)
     elif cfg.deepspeed and is_deepspeed_zero3_enabled():
         # Copied over from: https://github.com/huggingface/accelerate/blob/5ae611118057232f441055f7ef9ba0b0f2b8d533/docs/source/usage_guides/deepspeed.md#saving-and-loading
         trainer.accelerator.wait_for_everyone()
```
File diff suppressed because one or more lines are too long
(Config schema; file name not preserved in the extract)

```diff
@@ -190,6 +190,7 @@ class ChatTemplate(str, Enum):
     llama3 = "llama3"  # pylint: disable=invalid-name
     phi_3 = "phi_3"  # pylint: disable=invalid-name
     deepseek_v2 = "deepseek_v2"  # pylint: disable=invalid-name
+    jamba = "jamba"  # pylint: disable=invalid-name
 
 
 class LoftQConfig(BaseModel):
@@ -321,6 +322,8 @@ class ModelInputConfig(BaseModel):
     )
     trust_remote_code: Optional[bool] = None
 
+    model_kwargs: Optional[Dict[str, Any]] = None
+
     @field_validator("trust_remote_code")
     @classmethod
     def hint_trust_remote_code(cls, trust_remote_code):
@@ -614,6 +617,8 @@ class AxolotlInputConfig(
     flash_attn_fuse_mlp: Optional[bool] = None
     flash_optimum: Optional[bool] = None
 
+    eager_attention: Optional[bool] = None
+
     unsloth_cross_entropy_loss: Optional[bool] = None
     unsloth_lora_mlp: Optional[bool] = None
     unsloth_lora_qkv: Optional[bool] = None
@@ -624,6 +629,9 @@ class AxolotlInputConfig(
     deepspeed: Optional[Union[str, Dict[str, Any]]] = None
     fsdp: Optional[List[str]] = None
     fsdp_config: Optional[Dict[str, Any]] = None
+    fsdp_final_state_dict_type: Optional[
+        Literal["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
+    ] = None
 
     val_set_size: Optional[float] = Field(default=0.0)
@@ -1144,6 +1152,20 @@ class AxolotlInputConfig(
         )
         return data
 
+    @model_validator(mode="before")
+    @classmethod
+    def check_fsdp_sharded_state_dict_w_safetensors(cls, data):
+        if (
+            data.get("fsdp")
+            and data.get("save_safetensors")
+            and data.get("fsdp_config")
+            and data["fsdp_config"].get("fsdp_state_dict_type") == "SHARDED_STATE_DICT"
+        ):
+            raise ValueError(
+                "FSDP SHARDED_STATE_DICT not compatible with save_safetensors"
+            )
+        return data
+
     @model_validator(mode="before")
     @classmethod
     def check_causal_lm_evals(cls, data):
```
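In practice this validator makes the incompatibility fail fast at config-load time: a sharded FSDP checkpoint is a directory of per-rank `torch.distributed.checkpoint` files rather than a single weights file, so it cannot be written as safetensors directly. The new `merge_sharded_fsdp_weights` CLI above is the path from `SHARDED_STATE_DICT` output to a consolidated safetensors model.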
```diff
@@ -1263,6 +1285,19 @@ class AxolotlConfigWCapabilities(AxolotlInputConfig):
 
         return data
 
+    @model_validator(mode="before")
+    @classmethod
+    def check_hopper_8bit_lora(cls, data):
+        is_sm_90: bool = (
+            data["capabilities"]
+            and data["capabilities"].get("compute_capability") == "sm_90"
+        )
+        if data.get("adapter") and data.get("load_in_8bit") and is_sm_90:
+            # see https://github.com/bitsandbytes-foundation/bitsandbytes/issues/538#issuecomment-2262945464
+            raise ValueError("8-bit LoRA is not supported on Hopper GPUs")
+
+        return data
+
     @model_validator(mode="before")
     @classmethod
     def check_fsdp_deepspeed(cls, data):
```
(Pretraining tokenization utils; file name not preserved in the extract)

```diff
@@ -18,10 +18,10 @@ LOG = logging.getLogger("axolotl")
 
 
 def encode_pretraining(
-    tokenizer: PreTrainedTokenizerBase, max_tokens: int, examples: List[str]
+    tokenizer: PreTrainedTokenizerBase, max_tokens: int, examples: Dict[str, List]
 ) -> Dict[str, List]:
     res = tokenizer(
-        examples,
+        examples["text"],
         truncation=True,
         max_length=max_tokens - 2,
         add_special_tokens=True,
```
(Model loading utils; file name not preserved in the extract)

```diff
@@ -544,7 +544,9 @@ def load_model(
             "bnb_4bit_quant_type": "nf4",
             "bnb_4bit_quant_storage": torch.bfloat16,
         }
-        if cfg.model_config_type in ["jamba", "qwen2_moe"] and not cfg.deepspeed:
+        if cfg.model_config_type in ["jamba", "qwen2_moe"] and not (
+            cfg.deepspeed or cfg.fsdp
+        ):
             # for some reason, this causes the loss to be off by an order of magnitude
             # but deepspeed needs this still in bfloat16
             bnb_config["bnb_4bit_quant_storage"] = torch.float32
@@ -589,16 +591,10 @@ def load_model(
                 "flash_attention_2"
             )
         else:
-            if model_config.model_type in SUPPORTED_MULTIPACK_MODEL_TYPES:
-                model_kwargs["attn_implementation"] = "flash_attention_2"
-                model_config._attn_implementation = (  # pylint: disable=protected-access
-                    "flash_attention_2"
-                )
-            else:
-                model_kwargs["attn_implementation"] = "eager"
-                model_config._attn_implementation = (  # pylint: disable=protected-access
-                    "eager"
-                )
+            model_kwargs["attn_implementation"] = "flash_attention_2"
+            model_config._attn_implementation = (  # pylint: disable=protected-access
+                "flash_attention_2"
+            )
     elif cfg.sdp_attention:
         model_kwargs["attn_implementation"] = "sdpa"
         model_config._attn_implementation = "sdpa"  # pylint: disable=protected-access
@@ -655,19 +651,11 @@ def load_model(
         if "device_map" in model_kwargs:
             del model_kwargs["device_map"]
 
-        if cfg.fsdp and not cfg.adapter and cfg.local_rank != 0:
-            with init_empty_weights():
-                model = AutoModelForCausalLM.from_pretrained(
-                    base_model,
-                    config=model_config,
-                    **model_kwargs,
-                )
-        else:
-            model = AutoModelForCausalLM.from_pretrained(
-                base_model,
-                config=model_config,
-                **model_kwargs,
-            )
+        model = AutoModelForCausalLM.from_pretrained(
+            base_model,
+            config=model_config,
+            **model_kwargs,
+        )
 
     if cfg.flash_attention and not inference:
         from axolotl.monkeypatch.llama_attn_hijack_flash import (
```
```diff
@@ -1108,9 +1096,20 @@ def load_lora(model, cfg, inference=False, config_only=False):
 
 def ensure_dtype(model, dtype=torch.bfloat16):
     for name, module in model.named_modules():
+        weight_mismatch = False
+        bias_mismatch = False
         try:
-            if module.weight.dtype != dtype:
-                print(f"Converting module {name}: {module.weight.dtype} -> {dtype}")
-                module.to(dtype)
+            weight_mismatch = module.weight.dtype != dtype
         except AttributeError:
             pass
+        try:
+            bias_mismatch = module.bias.dtype != dtype
+        except AttributeError:
+            pass
+
+        if weight_mismatch:
+            print(f"Converting module {name}.weight: {module.weight.dtype} -> {dtype}")
+        if bias_mismatch:
+            print(f"Converting module {name}.bias: {module.bias.dtype} -> {dtype}")
+        if weight_mismatch or bias_mismatch:
+            module.to(dtype)
```
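The rewritten `ensure_dtype` checks weights and biases separately and calls `module.to(dtype)` at most once per module; the old version cast inside the weight check, so a module whose weight already matched but whose bias did not was silently skipped.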
(Trainer utils; file name not preserved in the extract)

```diff
@@ -390,13 +390,24 @@ def calculate_total_num_steps(cfg, train_dataset, update=True):
     return total_num_steps
 
 
+def setup_torch_compile_env(cfg):
+    if cfg.torch_compile:
+        if not cfg.torch_compile_backend:
+            os.environ["ACCELERATE_DYNAMO_BACKEND"] = "INDUCTOR"
+        else:
+            os.environ["ACCELERATE_DYNAMO_BACKEND"] = cfg.torch_compile_backend.upper()
+
+
 def setup_deepspeed_env(cfg, stage=None):
     from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig
 
     os.environ["ACCELERATE_USE_DEEPSPEED"] = "true"
     os.environ["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = cfg.deepspeed
     if stage:
         os.environ["ACCELERATE_DEEPSPEED_ZERO_STAGE"] = str(stage)
         if stage == 3:
             os.environ["ACCELERATE_DEEPSPEED_ZERO3_INIT"] = "true"
     HfTrainerDeepSpeedConfig(cfg.deepspeed)
 
 
 def setup_fsdp_envs(cfg):
@@ -434,6 +445,8 @@ def prepare_optim_env(cfg):
         stage = deepspeed_config.get("zero_optimization", {}).get("stage", None)
         setup_deepspeed_env(cfg, stage=stage)
 
+    setup_torch_compile_env(cfg)
+
     if (cfg.bf16 == "auto" and is_torch_bf16_gpu_available()) or cfg.bf16 is True:
         os.environ["ACCELERATE_MIXED_PRECISION"] = "bf16"
     elif cfg.fp16:
```
`tests/e2e/multigpu/test_qwen2.py` (new file, 98 lines)

```python
"""
E2E tests for multigpu qwen2
"""

import logging
import os
import unittest
from pathlib import Path

import yaml
from accelerate.test_utils import execute_subprocess_async

from axolotl.utils.dict import DictDefault

from ..utils import with_temp_dir

LOG = logging.getLogger("axolotl.tests.e2e.multigpu")
os.environ["WANDB_DISABLED"] = "true"


class TestMultiGPUQwen2(unittest.TestCase):
    """
    Test case for Qwen2 models using QLoRA with FSDP
    """

    @with_temp_dir
    def test_qlora_fsdp_dpo(self, temp_dir):
        # pylint: disable=duplicate-code
        cfg = DictDefault(
            {
                "base_model": "Qwen/Qwen2-1.5B",
                "load_in_4bit": True,
                "rl": "dpo",
                "chat_template": "chatml",
                "sequence_len": 2048,
                "adapter": "qlora",
                "lora_r": 8,
                "lora_alpha": 16,
                "lora_dropout": 0.05,
                "lora_target_linear": True,
                "val_set_size": 0.05,
                "datasets": [
                    {
                        "path": "Intel/orca_dpo_pairs",
                        "split": "train",
                        "type": "chatml.intel",
                    },
                ],
                "num_epochs": 1,
                "max_steps": 100,
                "warmup_steps": 20,
                "micro_batch_size": 4,
                "gradient_accumulation_steps": 2,
                "output_dir": temp_dir,
                "learning_rate": 0.00001,
                "optimizer": "adamw_torch",
                "lr_scheduler": "cosine",
                "flash_attention": True,
                "bf16": "auto",
                "tf32": True,
                "gradient_checkpointing": True,
                "gradient_checkpointing_kwargs": {
                    "use_reentrant": False,
                },
                "fsdp": [
                    "full_shard",
                    "auto_wrap",
                ],
                "fsdp_config": {
                    "fsdp_limit_all_gathers": True,
                    "fsdp_offload_params": False,
                    "fsdp_sync_module_states": True,
                    "fsdp_use_orig_params": False,
                    "fsdp_cpu_ram_efficient_loading": False,
                    "fsdp_transformer_layer_cls_to_wrap": "Qwen2DecoderLayer",
                    "fsdp_state_dict_type": "FULL_STATE_DICT",
                    "fsdp_auto_wrap_policy": "TRANSFORMER_BASED_WRAP",
                    "fsdp_sharding_strategy": "FULL_SHARD",
                },
            }
        )

        # write cfg to yaml file
        Path(temp_dir).mkdir(parents=True, exist_ok=True)
        with open(Path(temp_dir) / "config.yaml", "w", encoding="utf-8") as fout:
            fout.write(yaml.dump(cfg.to_dict(), Dumper=yaml.Dumper))

        execute_subprocess_async(
            [
                "accelerate",
                "launch",
                "--num-processes",
                "2",
                "-m",
                "axolotl.cli.train",
                str(Path(temp_dir) / "config.yaml"),
            ]
        )
```
(encode_pretraining tests; file name not preserved in the extract)

```diff
@@ -35,7 +35,7 @@ class TestEncodePretraining(unittest.TestCase):
                 "hello, hello",
             ]
         }
-        result = encode_pretraining(self.tokenizer, self.max_tokens, examples["text"])
+        result = encode_pretraining(self.tokenizer, self.max_tokens, examples)
 
         self.assertEqual(len(result["input_ids"]), 3)
```
(Alpaca prompter tests; file name not preserved in the extract)

```diff
@@ -42,6 +42,19 @@ class AlpacaPrompterTest(unittest.TestCase):
         assert "USER:" not in res
         assert "ASSISTANT:" not in res
 
+    def test_prompt_style_w_phi(self):
+        prompter = AlpacaPrompter(prompt_style=PromptStyle.PHI.value)
+        res = next(prompter.build_prompt("tell me a joke about the following"))
+        assert (
+            """<|system|>
+Below is an instruction that describes a task. Write a response that appropriately completes the request.<|end|>
+<|user|>
+tell me a joke about the following<|end|>
+<|assistant|>
+"""
+            == res
+        )
+
     def test_prompt_style_w_chat(self):
         prompter = AlpacaPrompter(prompt_style=PromptStyle.CHAT.value)
         res = next(
```