Compare commits

1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | 8836986a92 |  |
.github/workflows/base.yml (vendored): 7 changes

@@ -28,12 +28,7 @@ jobs:
 - cuda: "118"
   cuda_version: 11.8.0
   python_version: "3.10"
-  pytorch: 2.1.1
-  torch_cuda_arch_list: "7.0 7.5 8.0 8.6 9.0+PTX"
-- cuda: "121"
-  cuda_version: 12.1.0
-  python_version: "3.10"
-  pytorch: 2.1.1
+  pytorch: 2.1.0
   torch_cuda_arch_list: "7.0 7.5 8.0 8.6 9.0+PTX"
 steps:
 - name: Checkout
.github/workflows/main.yml (vendored): 51 changes

@@ -27,56 +27,38 @@ jobs:
 - cuda: 118
   cuda_version: 11.8.0
   python_version: "3.10"
-  pytorch: 2.1.1
-  axolotl_extras:
-- cuda: 121
-  cuda_version: 12.1.0
-  python_version: "3.10"
-  pytorch: 2.1.1
+  pytorch: 2.1.0
   axolotl_extras:
 runs-on: [self-hosted, gpu, docker]
 steps:
 - name: Checkout
-  uses: actions/checkout@v4
+  uses: actions/checkout@v3
 - name: Docker metadata
   id: metadata
-  uses: docker/metadata-action@v5
+  uses: docker/metadata-action@v3
   with:
     images: winglian/axolotl
-- name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
-  uses: docker/login-action@v3
+  uses: docker/login-action@v2
   with:
     username: ${{ secrets.DOCKERHUB_USERNAME }}
     password: ${{ secrets.DOCKERHUB_TOKEN }}
-# guidance for testing before pushing: https://docs.docker.com/build/ci/github-actions/test-before-push/
-- name: Build and export to Docker
-  uses: docker/build-push-action@v5
+- name: Set up Docker Buildx
+  uses: docker/setup-buildx-action@v2
+- name: Build
+  uses: docker/build-push-action@v4
   with:
     context: .
-    load: true
     build-args: |
      BASE_TAG=${{ github.ref_name }}-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}
      CUDA=${{ matrix.cuda }}
      PYTORCH_VERSION=${{ matrix.pytorch }}
    file: ./docker/Dockerfile
+    push: ${{ github.event_name != 'pull_request' }}
    tags: |
      ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
      ${{ (matrix.is_latest) && format('{0}-latest', steps.metadata.outputs.tags) || '' }}
    labels: ${{ steps.metadata.outputs.labels }}
-- name: Unit Tests
-  run: |
-    docker run --rm ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }} pytest --ignore=tests/e2e/ /workspace/axolotl/tests/
-- name: Push to Docker Hub
-  if: github.event_name != 'pull_request'
-  run: |
-    docker push ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
-    latest_tag=${{ (matrix.is_latest) && format('{0}-latest', steps.metadata.outputs.tags) || '' }}
-    if [ -n "$latest_tag" ]; then
-      docker push "$latest_tag"
-    fi

 build-axolotl-runpod:
   needs: build-axolotl
   if: github.repository_owner == 'OpenAccess-AI-Collective'

@@ -98,31 +80,26 @@ jobs:
 - cuda: 118
   cuda_version: 11.8.0
   python_version: "3.10"
-  pytorch: 2.1.1
-  axolotl_extras:
-- cuda: 121
-  cuda_version: 12.1.0
-  python_version: "3.10"
-  pytorch: 2.1.1
+  pytorch: 2.1.0
   axolotl_extras:
 runs-on: [self-hosted, gpu, docker]
 steps:
 - name: Checkout
-  uses: actions/checkout@v4
+  uses: actions/checkout@v3
 - name: Docker metadata
   id: metadata
-  uses: docker/metadata-action@v5
+  uses: docker/metadata-action@v3
   with:
     images: winglian/axolotl-runpod
 - name: Login to Docker Hub
-  uses: docker/login-action@v3
+  uses: docker/login-action@v2
   with:
     username: ${{ secrets.DOCKERHUB_USERNAME }}
     password: ${{ secrets.DOCKERHUB_TOKEN }}
 - name: Set up Docker Buildx
   uses: docker/setup-buildx-action@v2
 - name: Build
-  uses: docker/build-push-action@v5
+  uses: docker/build-push-action@v4
   with:
     context: .
     build-args: |
.github/workflows/tests.yml (vendored): 3 changes

@@ -71,9 +71,8 @@ jobs:

 - name: Install dependencies
   run: |
-    pip3 install --extra-index-url https://download.pytorch.org/whl/cu118 -U torch==2.0.1
     pip3 uninstall -y transformers accelerate
-    pip3 install -U -e .[flash-attn,mamba-ssm]
+    pip3 install -U -e .[flash-attn]
     pip3 install -r requirements-tests.txt

 - name: Run e2e tests
@@ -8,9 +8,6 @@ ignore_missing_imports = True
 [mypy-axolotl.monkeypatch.*]
 ignore_errors = True

-[mypy-axolotl.models.mixtral.*]
-ignore_errors = True
-
 [mypy-axolotl.models.phi.*]
 ignore_errors = True

README.md: 187 changes

@@ -25,10 +25,8 @@ Features:
 - [Installation](#installation)
   - [Docker](#docker)
   - [Conda/Pip venv](#condapip-venv)
-  - [Runpod](#runpod)
   - [LambdaLabs](#lambdalabs)
   - [Windows](#windows)
-  - [Launching on public clouds via SkyPilot](#launching-on-public-clouds-via-skypilot)
 - [Dataset](#dataset)
   - [How to Add Custom Prompts](#how-to-add-custom-prompts)
   - [How to Use Custom Pretokenized Dataset](#how-to-use-your-custom-pretokenized-dataset)

@@ -36,9 +34,7 @@ Features:
 - [Train](#train)
 - [Inference](#inference)
 - [Merge LORA to Base](#merge-lora-to-base)
-- [Special Tokens](#special-tokens)
 - [Common Errors](#common-errors-)
-  - [Tokenization Mismatch b/w Training & Inference](#tokenization-mismatch-bw-inference--training)
 - [Need Help?](#need-help-)
 - [Badge](#badge-)
 - [Community Showcase](#community-showcase)
@@ -67,21 +63,18 @@ Features:

 ## Axolotl supports

 |             | fp16/fp32 | lora | qlora | gptq | gptq w/flash attn | flash attn | xformers attn |
-|-------------|:----------|:-----|-------|------|-------------------|------------|--------------|
+|----------|:----------|:-----|-------|------|-------------------|------------|--------------|
 | llama       | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Mistral     | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Mixtral-MoE | ✅ | ✅ | ✅ | ❓ | ❓ | ❓ | ❓ |
-| Pythia      | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❓ |
-| cerebras    | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❓ |
-| btlm        | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❓ |
-| mpt         | ✅ | ❌ | ❓ | ❌ | ❌ | ❌ | ❓ |
-| falcon      | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❓ |
-| gpt-j       | ✅ | ✅ | ✅ | ❌ | ❌ | ❓ | ❓ |
-| XGen        | ✅ | ❓ | ✅ | ❓ | ❓ | ❓ | ✅ |
-| phi         | ✅ | ✅ | ✅ | ❓ | ❓ | ❓ | ❓ |
-| RWKV        | ✅ | ❓ | ❓ | ❓ | ❓ | ❓ | ❓ |
-| Qwen        | ✅ | ✅ | ✅ | ❓ | ❓ | ❓ | ❓ |
+| Pythia      | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❓ |
+| cerebras    | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❓ |
+| btlm        | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❓ |
+| mpt         | ✅ | ❌ | ❓ | ❌ | ❌ | ❌ | ❓ |
+| falcon      | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❓ |
+| gpt-j       | ✅ | ✅ | ✅ | ❌ | ❌ | ❓ | ❓ |
+| XGen        | ✅ | ❓ | ✅ | ❓ | ❓ | ❓ | ✅ |
+| phi         | ✅ | ✅ | ✅ | ❓ | ❓ | ❓ | ❓ |
+| RWKV        | ✅ | ❓ | ❓ | ❓ | ❓ | ❓ | ❓ |


 ## Quickstart ⚡
@@ -90,19 +83,14 @@ Get started with Axolotl in just a few steps! This quickstart guide will walk yo

 **Requirements**: Python >=3.9 and Pytorch >=2.0.

-`pip3 install "axolotl[flash-attn,deepspeed] @ git+https://github.com/OpenAccess-AI-Collective/axolotl"`
-
-### For developers
 ```bash
 git clone https://github.com/OpenAccess-AI-Collective/axolotl
 cd axolotl

 pip3 install packaging
 pip3 install -e '.[flash-attn,deepspeed]'
-```
+pip3 install -U git+https://github.com/huggingface/peft.git

-### Usage
-```bash
 # finetune lora
 accelerate launch -m axolotl.cli.train examples/openllama-3b/lora.yml

@@ -123,6 +111,7 @@ accelerate launch -m axolotl.cli.inference examples/openllama-3b/lora.yml \
 ```bash
 docker run --gpus '"all"' --rm -it winglian/axolotl:main-py3.10-cu118-2.0.1
 ```
+- `winglian/axolotl-runpod:main-latest`: for runpod or use this [direct link](https://runpod.io/gsc?template=v2ickqhz9s&ref=6i7fkpdz)

 Or run on the current files for development:

@@ -137,15 +126,13 @@ accelerate launch -m axolotl.cli.inference examples/openllama-3b/lora.yml \
 A more powerful Docker command to run would be this:

 ```bash
-docker run --privileged --gpus '"all"' --shm-size 10g --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=volume,src=axolotl,target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface winglian/axolotl:main-py3.10-cu118-2.0.1
+docker run --gpus '"all"' --rm -it --name axolotl --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --mount type=volume,src=axolotl,target=/workspace/axolotl -v ${HOME}/.cache/huggingface:/root/.cache/huggingface winglian/axolotl:main-py3.10-cu118-2.0.1
 ```

 It additionally:
 * Prevents memory issues when running e.g. deepspeed (e.g. you could hit SIGBUS/signal 7 error) through `--ipc` and `--ulimit` args.
 * Persists the downloaded HF data (models etc.) and your modifications to axolotl code through `--mount`/`-v` args.
 * The `--name` argument simply makes it easier to refer to the container in vscode (`Dev Containers: Attach to Running Container...`) or in your terminal.
-* The `--privileged` flag gives all capabilities to the container.
-* The `--shm-size 10g` argument increases the shared memory size. Use this if you see `exitcode: -7` errors using deepspeed.

 [More information on nvidia website](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#setincshmem)

@@ -167,10 +154,6 @@ accelerate launch -m axolotl.cli.inference examples/openllama-3b/lora.yml \
 ```
 Get the token at huggingface.co/settings/tokens

-#### Runpod
-
-Use `winglian/axolotl-runpod:main-latest` or use this [direct link](https://runpod.io/gsc?template=v2ickqhz9s&ref=6i7fkpdz)
-
 #### LambdaLabs
 <details>

@@ -218,28 +201,6 @@ Use `winglian/axolotl-runpod:main-latest` or use this [direct link](https://runp
 #### Windows
 Please use WSL or Docker!

-
-#### Launching on public clouds via SkyPilot
-To launch on GPU instances (both on-demand and spot instances) on 7+ clouds (GCP, AWS, Azure, OCI, and more), you can use [SkyPilot](https://skypilot.readthedocs.io/en/latest/index.html):
-```bash
-pip install "skypilot-nightly[gcp,aws,azure,oci,lambda,kubernetes,ibm,scp]"  # choose your clouds
-sky check
-```
-Get the [example YAMLs](https://github.com/skypilot-org/skypilot/tree/master/llm/axolotl) of using Axolotl to finetune `mistralai/Mistral-7B-v0.1`:
-```
-git clone https://github.com/skypilot-org/skypilot.git
-cd skypilot/llm/axolotl
-```
-Use one command to launch:
-```bash
-# On-demand
-HF_TOKEN=xx sky launch axolotl.yaml --env HF_TOKEN
-
-# Managed spot (auto-recovery on preemption)
-HF_TOKEN=xx BUCKET=<unique-name> sky spot launch axolotl-spot.yaml --env HF_TOKEN --env BUCKET
-```
-
 ### Dataset

 Axolotl supports a variety of dataset formats. Below are some of the formats you can use.
@@ -249,17 +210,10 @@ Have dataset(s) in one of the following format (JSONL recommended):
 ```json
 {"instruction": "...", "input": "...", "output": "..."}
 ```
-- `sharegpt`: conversations where `from` is `human`/`gpt`. (optional: `system` to override default system prompt)
+- `sharegpt`: conversations where `from` is `human`/`gpt`
 ```json
 {"conversations": [{"from": "...", "value": "..."}]}
 ```
-- `llama-2`: the json is the same format as `sharegpt` above, with the following config (see the [config section](#config) for more details)
-```yml
-datasets:
-  - path: <your-path>
-    type: sharegpt
-    conversation: llama-2
-```
 - `completion`: raw corpus
 ```json
 {"text": "..."}
@@ -443,12 +397,6 @@ See [examples](examples) for quick start. It is recommended to duplicate and mod
   - path: knowrohit07/know_sql
     type: context_qa.load_v2
     train_on_split: validation

-  # loading from s3 or gcs
-  # s3 creds will be loaded from the system default and gcs only supports public access
-  dataset:
-    - path: s3://path_to_ds # Accepts folder with arrow/parquet or file path like above. Supports s3, gcs.
-      ...
 ```

 - loading
@@ -511,23 +459,6 @@ is_falcon_derived_model:
 is_llama_derived_model:
 # Please note that if you set this to true, `padding_side` will be set to "left" by default
 is_mistral_derived_model:
-is_qwen_derived_model:
-
-# optional overrides to the base model configuration
-model_config:
-  # RoPE Scaling https://github.com/huggingface/transformers/pull/24653
-  rope_scaling:
-    type: # linear | dynamic
-    factor: # float
-
-# optional overrides to the bnb 4bit quantization configuration
-# https://huggingface.co/docs/transformers/main/main_classes/quantization#transformers.BitsAndBytesConfig
-bnb_config_kwargs:
-  # These are default values
-  llm_int8_has_fp16_weight: false
-  bnb_4bit_quant_type: nf4
-  bnb_4bit_use_double_quant: true
-

 # Whether you are training a 4-bit GPTQ quantized model
 gptq: true
@@ -552,7 +483,7 @@ float16: true

 # A list of one or more datasets to finetune the model with
 datasets:
-  # HuggingFace dataset repo | s3://,gs:// path | "json" for local dataset, make sure to fill data_files
+  # HuggingFace dataset repo | "json" for local dataset, make sure to fill data_files
   - path: vicgalle/alpaca-gpt4
     # The type of prompt to use for training. [alpaca, sharegpt, gpteacher, oasst, reflection]
     type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>
@@ -560,12 +491,9 @@ datasets:
     data_files: # Optional[str] path to source data files
     shards: # Optional[int] number of shards to split data into
     name: # Optional[str] name of dataset configuration to load
-    train_on_split: train # Optional[str] name of dataset split to load from

     # Optional[str] fastchat conversation type, only used with type: sharegpt
     conversation: # Options (see Conversation 'name'): https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
-    field_human: # Optional[str]. Human key to use for conversation.
-    field_model: # Optional[str]. Assistant key to use for conversation.

   # Custom user prompt
   - path: repo
@@ -589,9 +517,6 @@ datasets:
   # For `completion` datsets only, uses the provided field instead of `text` column
   field:

-# Saves the desired chat template to the tokenizer_config.json for easier inferencing
-# Currently supports chatml and inst (mistral/mixtral)
-chat_template: chatml
 # Axolotl attempts to save the dataset as an arrow after packing the data together so
 # subsequent training attempts load faster, relative path
 dataset_prepared_path: data/last_run_prepared
@@ -634,12 +559,6 @@ eval_sample_packing:
 sample_packing_eff_est:
 total_num_tokens:

-# Passed through to transformers when loading the model when launched without accelerate
-# Use `sequential` when training w/ model parallelism to limit memory
-device_map:
-# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model.
-max_memory:
-
 # If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model
 adapter: lora
 # If you already have a lora model trained that you want to load, put that here.
@@ -683,13 +602,11 @@ relora_warmup_steps: # Number of per-restart warmup steps
 relora_cpu_offload: # True to perform lora weight merges on cpu during restarts, for modest gpu memory savings

 # wandb configuration if you're using it
-# Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.
 wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb
 wandb_project: # Your wandb project name
 wandb_entity: # A wandb Team name if using a Team
 wandb_watch:
-wandb_name: # Set the name of your wandb run
-wandb_run_id: # Set the ID of your wandb run
+wandb_run_id: # Set the name of your wandb run
 wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training

 # Where to save the full-finetuned model to
@@ -707,16 +624,13 @@ gradient_accumulation_steps: 1
 micro_batch_size: 2
 eval_batch_size:
 num_epochs: 4
-warmup_steps: 100 # cannot use with warmup_ratio
-warmup_ratio: 0.05 # cannot use with warmup_steps
+warmup_steps: 100
 learning_rate: 0.00003
 lr_quadratic_warmup:
 logging_steps:
-eval_steps: # Leave empty to eval at each epoch, integers for every N steps. decimal for fraction of total steps
-evals_per_epoch: # number of times per epoch to run evals, mutually exclusive with eval_steps
 save_strategy: # Set to `no` to skip checkpoint saves
 save_steps: # Leave empty to save at each epoch
-saves_per_epoch: # number of times per epoch to save a checkpoint, mutually exclusive with save_steps
+eval_steps: # Leave empty to eval at each epoch, integers for every N steps. decimal for fraction of total steps
 save_total_limit: # Checkpoints saved at a time
 # Maximum number of iterations to train for. It precedes num_epochs which means that
 # if both are set, num_epochs will not be guaranteed.
@@ -726,9 +640,6 @@ max_steps:
 eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0
 eval_table_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128

-loss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)
-loss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)
-
 # Save model as safetensors (require safetensors package)
 save_safetensors:

@@ -795,7 +706,7 @@ max_grad_norm:
 # Augmentation techniques
 # NEFT https://arxiv.org/abs/2310.05914, set this to a number (paper default is 5) to add noise to embeddings
 # currently only supported on Llama and Mistral
-neftune_noise_alpha:
+noisy_embedding_alpha:

 # Whether to bettertransformers
 flash_optimum:
@@ -810,6 +721,15 @@ flash_attn_fuse_mlp: # Whether to fuse part of the MLP into a single operation
 # Whether to use scaled-dot-product attention
 # https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
 sdp_attention:
+# Landmark attention (only llama)
+landmark_attention:
+# xpos RoPE see https://github.com/kaiokendev/cutoff-len-is-context-len/blob/main/util/xpos_rope_llama_monkey_patch.py
+# LLaMA only
+xpos_rope:
+# RoPE Scaling https://github.com/huggingface/transformers/pull/24653
+rope_scaling:
+  type: # linear | dynamic
+  factor: # float

 # Resume from a specific checkpoint dir
 resume_from_checkpoint:
@@ -976,40 +896,19 @@ fsdp_config:

 ##### Weights & Biases Logging

-Make sure your `WANDB_API_KEY` environment variable is set (recommended) or you login to wandb with `wandb login`.
-
 - wandb options
 ```yaml
 wandb_mode:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 ```

-##### Special Tokens
+### Inference

-It is important to have special tokens like delimiters, end-of-sequence, beginning-of-sequence in your tokenizer's vocabulary. This will help you avoid tokenization issues and help your model train better. You can do this in axolotl like this:
-
-```yml
-special_tokens:
-  bos_token: "<s>"
-  eos_token: "</s>"
-  unk_token: "<unk>"
-tokens: # these are delimiters
-  - "<|im_start|>"
-  - "<|im_end|>"
-```
-
-When you include these tokens in your axolotl config, axolotl adds these tokens to the tokenizer's vocabulary.
-
-### Inference Playground
-
-Axolotl allows you to load your model in an interactive terminal playground for quick experimentation.
-The config file is the same config file used for training.
-
-Pass the appropriate flag to the inference command, depending upon what kind of model was trained:
-
+Pass the appropriate flag to the train command:
 - Pretrained LORA:
 ```bash
@@ -1038,7 +937,7 @@ Please use `--sample_packing False` if you have it on and receive the error simi
 Add below flag to train command above

 ```bash
-python3 -m axolotl.cli.merge_lora examples/your_config.yml --lora_model_dir="./completed-model"
+python3 -m axolotl.cli.merge_lora examples/your_config.yml --lora_model_dir="./completed-model" --load_in_8bit=False --load_in_4bit=False
 ```

 If you run out of CUDA memory, you can try to merge in system RAM with
@@ -1059,10 +958,6 @@ Please reduce any below
 - `gradient_accumulation_steps`
 - `sequence_len`

-If it does not help, try running without deepspeed and without accelerate (replace "accelerate launch" with "python") in the command.
-
-Using adamw_bnb_8bit might also save you some memory.
-
 > `failed (exitcode: -9)`

 Usually means your system has run out of system memory.
@@ -1085,20 +980,6 @@ It's safe to ignore it.

 See the [NCCL](docs/nccl.md) guide.

-
-### Tokenization Mismatch b/w Inference & Training
-
-For many formats, Axolotl constructs prompts by concatenating token ids _after_ tokenizing strings. The reason for concatenating token ids rather than operating on strings is to maintain precise accounting for attention masks.
-
-If you decode a prompt constructed by axolotl, you might see spaces between tokens (or lack thereof) that you do not expect, especially around delimiters and special tokens. When you are starting out with a new format, you should always do the following:
-
-1. Materialize some data using `python -m axolotl.cli.preprocess your_config.yml --debug`, and then decode the first few rows with your model's tokenizer.
-2. During inference, right before you pass a tensor of token ids to your model, decode these tokens back into a string.
-3. Make sure the inference string from #2 looks **exactly** like the data you fine tuned on from #1, including spaces and new lines. If they aren't the same adjust your inference server accordingly.
-4. As an additional troubleshooting step, you can look look at the token ids between 1 and 2 to make sure they are identical.
-
-Having misalignment between your prompts during training and inference can cause models to perform very poorly, so it is worth checking this. See [this blog post](https://hamel.dev/notes/llm/05_tokenizer_gotchas.html) for a concrete example.
-
 ## Need help? 🙋♂️

 Join our [Discord server](https://discord.gg/HhrNrHJPRb) where we can help you
@@ -24,6 +24,16 @@
       "weight_decay": "auto"
     }
   },
+  "scheduler": {
+    "type": "WarmupDecayLR",
+    "params": {
+      "warmup_min_lr": "auto",
+      "warmup_max_lr": "auto",
+      "warmup_num_steps": "auto",
+      "warmup_type": "linear",
+      "total_num_steps": "auto"
+    }
+  },
   "gradient_accumulation_steps": "auto",
   "train_batch_size": "auto",
   "train_micro_batch_size_per_gpu": "auto",
@@ -28,6 +28,16 @@
       "weight_decay": "auto"
     }
   },
+  "scheduler": {
+    "type": "WarmupDecayLR",
+    "params": {
+      "warmup_min_lr": "auto",
+      "warmup_max_lr": "auto",
+      "warmup_num_steps": "auto",
+      "warmup_type": "linear",
+      "total_num_steps": "auto"
+    }
+  },
   "gradient_accumulation_steps": "auto",
   "train_batch_size": "auto",
   "train_micro_batch_size_per_gpu": "auto",
@@ -32,6 +32,16 @@
       "weight_decay": "auto"
     }
   },
+  "scheduler": {
+    "type": "WarmupDecayLR",
+    "params": {
+      "warmup_min_lr": "auto",
+      "warmup_max_lr": "auto",
+      "warmup_num_steps": "auto",
+      "warmup_type": "linear",
+      "total_num_steps": "auto"
+    }
+  },
   "gradient_accumulation_steps": "auto",
   "train_batch_size": "auto",
   "train_micro_batch_size_per_gpu": "auto",
@@ -1,39 +0,0 @@
-{
-  "zero_optimization": {
-    "stage": 3,
-    "overlap_comm": true,
-    "contiguous_gradients": true,
-    "sub_group_size": 0,
-    "reduce_bucket_size": "auto",
-    "stage3_prefetch_bucket_size": "auto",
-    "stage3_param_persistence_threshold": "auto",
-    "stage3_max_live_parameters": 0,
-    "stage3_max_reuse_distance": 0,
-    "stage3_gather_16bit_weights_on_model_save": true
-  },
-  "bf16": {
-    "enabled": true
-  },
-  "fp16": {
-    "enabled": "auto",
-    "auto_cast": false,
-    "loss_scale": 0,
-    "initial_scale_power": 32,
-    "loss_scale_window": 1000,
-    "hysteresis": 2,
-    "min_loss_scale": 1
-  },
-  "optimizer": {
-    "type": "AdamW",
-    "params": {
-      "lr": "auto",
-      "betas": "auto",
-      "eps": "auto",
-      "weight_decay": "auto"
-    }
-  },
-  "gradient_accumulation_steps": "auto",
-  "train_batch_size": "auto",
-  "train_micro_batch_size_per_gpu": "auto",
-  "wall_clock_breakdown": false
-}
@@ -1,47 +0,0 @@
-{
-  "zero_optimization": {
-    "stage": 3,
-    "offload_optimizer": {
-      "device": "cpu",
-      "pin_memory": true
-    },
-    "offload_param": {
-      "device": "cpu",
-      "pin_memory": true
-    },
-    "overlap_comm": true,
-    "contiguous_gradients": true,
-    "sub_group_size": 0,
-    "reduce_bucket_size": "auto",
-    "stage3_prefetch_bucket_size": "auto",
-    "stage3_param_persistence_threshold": "auto",
-    "stage3_max_live_parameters": 0,
-    "stage3_max_reuse_distance": 0,
-    "stage3_gather_16bit_weights_on_model_save": true
-  },
-  "bf16": {
-    "enabled": "auto"
-  },
-  "fp16": {
-    "enabled": "auto",
-    "auto_cast": false,
-    "loss_scale": 0,
-    "initial_scale_power": 32,
-    "loss_scale_window": 1000,
-    "hysteresis": 2,
-    "min_loss_scale": 1
-  },
-  "optimizer": {
-    "type": "AdamW",
-    "params": {
-      "lr": "auto",
-      "betas": "auto",
-      "eps": "auto",
-      "weight_decay": "auto"
-    }
-  },
-  "gradient_accumulation_steps": "auto",
-  "train_batch_size": "auto",
-  "train_micro_batch_size_per_gpu": "auto",
-  "wall_clock_breakdown": false
-}
@@ -10,7 +10,7 @@ ARG PYTORCH_VERSION="2.0.1"
 ENV PYTORCH_VERSION=$PYTORCH_VERSION

 RUN apt-get update && \
-    apt-get install -y --allow-change-held-packages vim curl nano libnccl2 libnccl-dev
+    apt-get install -y vim curl

 WORKDIR /workspace


@@ -19,15 +19,13 @@ RUN git clone --depth=1 https://github.com/OpenAccess-AI-Collective/axolotl.git
 WORKDIR /workspace/axolotl

 # If AXOLOTL_EXTRAS is set, append it in brackets
+RUN sed -i "s/torch==.*/torch==$PYTORCH_VERSION/" requirements.txt
 RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
         pip install -e .[deepspeed,flash-attn,$AXOLOTL_EXTRAS]; \
     else \
         pip install -e .[deepspeed,flash-attn]; \
     fi

-# So we can test the Docker image
-RUN pip install pytest
-
 # fix so that git fetch/pull from remote works
 RUN git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" && \
     git config --get remote.origin.fetch
@@ -4,7 +4,6 @@ FROM winglian/axolotl:$BASE_TAG
 ENV HF_DATASETS_CACHE="/workspace/data/huggingface-cache/datasets"
 ENV HUGGINGFACE_HUB_CACHE="/workspace/data/huggingface-cache/hub"
 ENV TRANSFORMERS_CACHE="/workspace/data/huggingface-cache/hub"
-ENV HF_HOME="/workspace/data/huggingface-cache/hub"

 COPY scripts/runpod-entrypoint.sh /root/runpod-entrypoint.sh

@@ -35,7 +35,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 output_dir: btlm-out

@@ -72,8 +72,8 @@ gptq_groupsize:
 gptq_model_v1:

 warmup_steps: 32
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps:
+save_steps:
 save_total_limit:

 debug:

@@ -24,7 +24,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./qlora-out
 batch_size: 4

@@ -49,8 +49,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 10
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps: 0.05
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.1
@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -54,8 +54,8 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps: 0.05
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.0

@@ -31,7 +31,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -56,8 +56,8 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps: 0.05
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.0

@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -54,8 +54,8 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps: 0.05
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.0

@@ -31,7 +31,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -56,8 +56,8 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps: 0.05
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.0

@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -54,8 +54,8 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps: 0.05
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.0

@@ -31,7 +31,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -56,8 +56,8 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps: 0.05
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -26,7 +26,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./falcon-7b
 batch_size: 2

@@ -51,8 +51,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 40
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps: 5
+save_steps: 43
 debug:
 deepspeed:
 weight_decay: 0.0

@@ -40,7 +40,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./qlora-out

@@ -80,8 +80,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 10
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps: 5
+save_steps: 10
 debug:
 deepspeed:
 weight_decay: 0.000001
|
|||||||
@@ -26,7 +26,7 @@ lora_fan_in_fan_out:
|
|||||||
wandb_project:
|
wandb_project:
|
||||||
wandb_entity:
|
wandb_entity:
|
||||||
wandb_watch:
|
wandb_watch:
|
||||||
wandb_name:
|
wandb_run_id:
|
||||||
wandb_log_model:
|
wandb_log_model:
|
||||||
output_dir: ./falcon-7b
|
output_dir: ./falcon-7b
|
||||||
batch_size: 2
|
batch_size: 2
|
||||||
@@ -51,8 +51,8 @@ flash_attention:
|
|||||||
gptq_groupsize:
|
gptq_groupsize:
|
||||||
gptq_model_v1:
|
gptq_model_v1:
|
||||||
warmup_steps: 40
|
warmup_steps: 40
|
||||||
evals_per_epoch: 4
|
eval_steps: 5
|
||||||
saves_per_epoch: 1
|
save_steps: 43
|
||||||
debug:
|
debug:
|
||||||
deepspeed:
|
deepspeed:
|
||||||
weight_decay: 0.0
|
weight_decay: 0.0
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ lora_fan_in_fan_out:
|
|||||||
wandb_project:
|
wandb_project:
|
||||||
wandb_entity:
|
wandb_entity:
|
||||||
wandb_watch:
|
wandb_watch:
|
||||||
wandb_name:
|
wandb_run_id:
|
||||||
wandb_log_model:
|
wandb_log_model:
|
||||||
output_dir: ./qlora-out
|
output_dir: ./qlora-out
|
||||||
gradient_accumulation_steps: 2
|
gradient_accumulation_steps: 2
|
||||||
@@ -46,8 +46,8 @@ flash_attention:
|
|||||||
gptq_groupsize:
|
gptq_groupsize:
|
||||||
gptq_model_v1:
|
gptq_model_v1:
|
||||||
warmup_steps: 10
|
warmup_steps: 10
|
||||||
evals_per_epoch: 4
|
eval_steps: 0.05
|
||||||
saves_per_epoch: 1
|
save_steps:
|
||||||
debug:
|
debug:
|
||||||
deepspeed:
|
deepspeed:
|
||||||
weight_decay: 0.1
|
weight_decay: 0.1
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./jeopardy-bot-7b
 gradient_accumulation_steps: 1

@@ -42,8 +42,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 20
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps: 110
+save_steps: 660
 debug:
 deepspeed:
 weight_decay: 0.1

@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 1

@@ -58,9 +58,9 @@ flash_attn_fuse_qkv: false
 flash_attn_fuse_mlp: true

 warmup_steps: 100
-evals_per_epoch: 4
+eval_steps: 0.05
 eval_table_size:
-saves_per_epoch: 1
+save_steps:
 debug:
 deepspeed: #deepspeed/zero2.json # multi-gpu only
 weight_decay: 0.1
@@ -32,7 +32,7 @@ lora_target_linear:
 lora_fan_in_fan_out:
 wandb_project:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./model-out
 gradient_accumulation_steps: 1

@@ -62,8 +62,8 @@ flash_attention:
 sdp_attention:
 flash_optimum:
 warmup_steps: 100
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps:
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.1

@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -54,10 +54,10 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-evals_per_epoch: 4
+eval_steps: 0.05
 eval_table_size:
 eval_table_max_new_tokens: 128
-saves_per_epoch: 1
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -31,7 +31,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -56,9 +56,9 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-evals_per_epoch: 4
+eval_steps: 0.05
 eval_table_size:
-saves_per_epoch: 1
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.0

@@ -35,7 +35,7 @@ relora_cpu_offload: false
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -60,8 +60,8 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-evals_per_epoch: 4
-saves_per_epoch: 1
+eval_steps: 0.05
+save_steps: 50
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -1,4 +1,4 @@
-base_model: PY007/TinyLlama-1.1B-intermediate-step-715k-1.5T
+base_model: PY007/TinyLlama-1.1B-step-50K-105b

 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer

@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -54,9 +54,9 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-evals_per_epoch: 4
+eval_steps: 0.05
 eval_table_size:
-saves_per_epoch: 1
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -1,61 +0,0 @@
-base_model: state-spaces/mamba-2.8b
-model_type: MambaLMHeadModel
-tokenizer_type: AutoTokenizer
-tokenizer_config: EleutherAI/gpt-neox-20b
-
-load_in_8bit: false
-load_in_4bit: false
-strict: false
-
-datasets:
-  - path: mhenrichsen/alpaca_2k_test
-    type: alpaca
-dataset_prepared_path:
-val_set_size: 0.0
-output_dir: ./out
-
-sequence_len: 2048
-sample_packing: false
-pad_to_sequence_len: false
-
-wandb_project:
-wandb_entity:
-wandb_watch:
-wandb_name:
-wandb_log_model:
-
-gradient_accumulation_steps: 4
-micro_batch_size: 1
-num_epochs: 2
-optimizer: paged_adamw_8bit
-lr_scheduler: cosine
-learning_rate: 5e-5
-
-train_on_inputs: false
-group_by_length: true
-
-bf16: true
-fp16: false
-tf32: true
-
-gradient_checkpointing: false
-early_stopping_patience:
-resume_from_checkpoint:
-local_rank:
-logging_steps: 1
-xformers_attention:
-flash_attention:
-
-warmup_steps: 10
-evals_per_epoch: 4
-eval_table_size:
-eval_table_max_new_tokens: 128
-saves_per_epoch: 1
-debug:
-deepspeed:
-weight_decay: 0.0
-fsdp:
-fsdp_config:
-special_tokens:
-tokens:
-save_safetensors: False
@@ -17,12 +17,11 @@ output_dir: ./out
 sequence_len: 8192
 sample_packing: true
 pad_to_sequence_len: true
-eval_sample_packing: false

 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -47,10 +46,10 @@ xformers_attention:
 flash_attention: true

 warmup_steps: 10
-evals_per_epoch: 4
+eval_steps: 0.05
 eval_table_size:
 eval_table_max_new_tokens: 128
-saves_per_epoch: 1
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -1,91 +0,0 @@
-base_model: mistralai/Mixtral-8x7B-v0.1
-model_type: AutoModelForCausalLM
-tokenizer_type: LlamaTokenizer
-trust_remote_code: true
-
-load_in_8bit: false
-load_in_4bit: true
-strict: false
-
-datasets:
-  - path: tatsu-lab/alpaca
-    type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.0
-output_dir: ./qlora-out
-
-## You can optionally freeze the entire model and unfreeze a subset of parameters
-unfrozen_parameters:
-#  - lm_head.*
-#  - model.embed_tokens.*
-#  - model.layers.2[0-9]+.block_sparse_moe.gate.*
-#  - model.layers.2[0-9]+.block_sparse_moe.experts.*
-#  - model.layers.3[0-9]+.block_sparse_moe.gate.*
-#  - model.layers.3[0-9]+.block_sparse_moe.experts.*
-
-model_config:
-  output_router_logits: true
-
-adapter: qlora
-lora_model_dir:
-
-sequence_len: 4096
-sample_packing: true
-pad_to_sequence_len: true
-
-lora_r: 32
-lora_alpha: 16
-lora_dropout: 0.05
-lora_target_linear: true
-lora_fan_in_fan_out:
-#lora_target_modules:
-#  - gate
-#  - q_proj
-#  - k_proj
-#  - v_proj
-#  - o_proj
-#  - w1
-#  - w2
-#  - w3
-
-wandb_project:
-wandb_entity:
-wandb_watch:
-wandb_name:
-wandb_log_model:
-
-gradient_accumulation_steps: 2
-micro_batch_size: 1
-num_epochs: 1
-optimizer: adamw_bnb_8bit
-lr_scheduler: cosine
-learning_rate: 0.0002
-
-train_on_inputs: false
-group_by_length: false
-bf16: true
-fp16: false
-tf32: false
-
-gradient_checkpointing: true
-early_stopping_patience:
-resume_from_checkpoint:
-local_rank:
-logging_steps: 1
-xformers_attention:
-flash_attention: true
-
-loss_watchdog_threshold: 5.0
-loss_watchdog_patience: 3
-
-warmup_steps: 10
-evals_per_epoch: 4
-eval_table_size:
-eval_table_max_new_tokens: 128
-saves_per_epoch: 1
-debug:
-deepspeed: deepspeed/zero2.json
-weight_decay: 0.0
-fsdp:
-fsdp_config:
-special_tokens:
@@ -11,7 +11,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path: last_run_prepared
-val_set_size: 0.1
+val_set_size: 0.05
 output_dir: ./qlora-out

 adapter: qlora

@@ -38,7 +38,7 @@ lora_target_modules:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -62,14 +62,11 @@ logging_steps: 1
 xformers_attention:
 flash_attention: true

-loss_watchdog_threshold: 5.0
-loss_watchdog_patience: 3
-
 warmup_steps: 10
-evals_per_epoch: 4
+eval_steps: 0.05
 eval_table_size:
 eval_table_max_new_tokens: 128
-saves_per_epoch: 1
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -21,7 +21,7 @@ lora_fan_in_fan_out: false
 wandb_project: mpt-alpaca-7b
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./mpt-alpaca-7b
 gradient_accumulation_steps: 1

@@ -44,8 +44,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 20
-evals_per_epoch: 4
+eval_steps: 110
-saves_per_epoch: 1
+save_steps: 660
 debug:
 deepspeed:
 weight_decay: 0.0001

@@ -23,7 +23,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./openllama-out
 gradient_accumulation_steps: 1

@@ -49,8 +49,8 @@ flash_attention: true
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 20
-evals_per_epoch: 4
+eval_steps: 0.05
-saves_per_epoch: 1
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.1
@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./lora-out
 gradient_accumulation_steps: 1

@@ -54,8 +54,8 @@ flash_attention: true
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 20
-evals_per_epoch: 4
+eval_steps: 0.05
-saves_per_epoch: 1
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.1

@@ -23,7 +23,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./qlora-out
 gradient_accumulation_steps: 1

@@ -48,8 +48,8 @@ flash_attention: true
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 20
-evals_per_epoch: 4
+eval_steps: 0.05
-saves_per_epoch: 1
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.1
@@ -1,5 +1,5 @@
 base_model: microsoft/phi-1_5
-model_type: PhiForCausalLM
+model_type: MixFormerSequentialForCausalLM
 tokenizer_type: AutoTokenizer
 is_llama_derived_model: false
 trust_remote_code: true

@@ -31,7 +31,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 1

@@ -59,8 +59,8 @@ xformers_attention:
 flash_attention:

 warmup_steps: 100
-evals_per_epoch: 4
+eval_steps: 0.05
-saves_per_epoch: 1
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.1

@@ -31,7 +31,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:

 gradient_accumulation_steps: 1

@@ -59,8 +59,8 @@ xformers_attention:
 flash_attention:

 warmup_steps: 100
-evals_per_epoch: 4
+eval_steps: 0.05
-saves_per_epoch: 1
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0.1

@@ -24,7 +24,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./pythia-12b
 gradient_accumulation_steps: 1

@@ -18,7 +18,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./lora-alpaca-pythia
 gradient_accumulation_steps: 1

@@ -33,5 +33,5 @@ early_stopping_patience:
 resume_from_checkpoint:
 local_rank:
 weight_decay: 0.1
-evals_per_epoch: 4
+eval_steps: 0.05
 logging_steps: 1
@@ -1,68 +0,0 @@
-base_model: Qwen/Qwen-7B
-model_type: AutoModelForCausalLM
-tokenizer_type: AutoTokenizer
-
-is_qwen_derived_model: true
-trust_remote_code: true
-
-load_in_8bit: true
-load_in_4bit: false
-strict: false
-
-datasets:
-  - path: mhenrichsen/alpaca_2k_test
-    type: alpaca
-dataset_prepared_path:
-val_set_size: 0.05
-output_dir: ./lora-out
-
-sequence_len: 2048 # supports up to 8192
-sample_packing: false
-pad_to_sequence_len:
-
-adapter: lora
-lora_model_dir:
-lora_r: 32
-lora_alpha: 16
-lora_dropout: 0.05
-lora_target_linear: true
-lora_fan_in_fan_out:
-
-wandb_project:
-wandb_entity:
-wandb_watch:
-wandb_name:
-wandb_log_model:
-
-gradient_accumulation_steps: 4
-micro_batch_size: 2
-num_epochs: 4
-optimizer: adamw_bnb_8bit
-lr_scheduler: cosine
-learning_rate: 0.0002
-
-train_on_inputs: false
-group_by_length: false
-bf16: true
-fp16: false
-tf32: false
-
-gradient_checkpointing: false
-early_stopping_patience:
-resume_from_checkpoint:
-local_rank:
-logging_steps: 1
-xformers_attention:
-flash_attention:
-
-warmup_steps: 10
-evals_per_epoch: 4
-eval_table_size:
-eval_table_max_new_tokens: 128
-saves_per_epoch: 1
-debug:
-deepspeed:
-weight_decay: 0.0
-fsdp:
-fsdp_config:
-special_tokens:
@@ -1,68 +0,0 @@
-base_model: Qwen/Qwen-7B
-model_type: AutoModelForCausalLM
-tokenizer_type: AutoTokenizer
-
-is_qwen_derived_model: true
-trust_remote_code: true
-
-load_in_8bit: false
-load_in_4bit: true
-strict: false
-
-datasets:
-  - path: mhenrichsen/alpaca_2k_test
-    type: alpaca
-dataset_prepared_path:
-val_set_size: 0.05
-output_dir: ./lora-out
-
-sequence_len: 2048 # supports up to 8192
-sample_packing: false
-pad_to_sequence_len:
-
-adapter: qlora
-lora_model_dir:
-lora_r: 32
-lora_alpha: 16
-lora_dropout: 0.05
-lora_target_linear: true
-lora_fan_in_fan_out:
-
-wandb_project:
-wandb_entity:
-wandb_watch:
-wandb_name:
-wandb_log_model:
-
-gradient_accumulation_steps: 4
-micro_batch_size: 2
-num_epochs: 4
-optimizer: adamw_bnb_8bit
-lr_scheduler: cosine
-learning_rate: 0.0002
-
-train_on_inputs: false
-group_by_length: false
-bf16: true
-fp16: false
-tf32: false
-
-gradient_checkpointing: false
-early_stopping_patience:
-resume_from_checkpoint:
-local_rank:
-logging_steps: 1
-xformers_attention:
-flash_attention:
-
-warmup_steps: 10
-evals_per_epoch: 4
-eval_table_size:
-eval_table_max_new_tokens: 128
-saves_per_epoch: 1
-debug:
-deepspeed:
-weight_decay: 0.0
-fsdp:
-fsdp_config:
-special_tokens:
@@ -22,7 +22,7 @@ lora_fan_in_fan_out: false
 wandb_project: redpajama-alpaca-3b
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./redpajama-alpaca-3b
 batch_size: 4

@@ -45,8 +45,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 20
-evals_per_epoch: 4
+eval_steps: 110
-saves_per_epoch: 1
+save_steps: 660
 debug:
 deepspeed:
 weight_decay: 0.0001

@@ -21,7 +21,7 @@ lora_fan_in_fan_out:
 wandb_project: lora-replit
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./lora-replit
 batch_size: 8

@@ -45,8 +45,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 20
-evals_per_epoch: 4
+eval_steps: 50
-saves_per_epoch: 1
+save_steps:
 debug:
 deepspeed:
 weight_decay: 0

@@ -38,7 +38,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_name:
+wandb_run_id:
 wandb_log_model:
 output_dir: ./qlora-out

@@ -78,8 +78,8 @@ flash_attention:
 gptq_groupsize:
 gptq_model_v1:
 warmup_steps: 10
-evals_per_epoch: 4
+eval_steps: 50
-saves_per_epoch: 1
+save_steps: 50
 debug:
 deepspeed:
 weight_decay: 0.0
@@ -1,64 +0,0 @@
-base_model: models/yayi2-30b
-model_type: AutoModelForCausalLM
-tokenizer_type: AutoTokenizer
-is_mistral_derived_model: false
-trust_remote_code: true
-model_revision: refs/pr/5
-
-load_in_8bit: false
-load_in_4bit: false
-strict: false
-
-datasets:
-  - path: mhenrichsen/alpaca_2k_test
-    type: alpaca
-dataset_prepared_path:
-val_set_size: 0.05
-output_dir: ./out
-
-sequence_len: 2048
-sample_packing: false
-pad_to_sequence_len: false
-eval_sample_packing: false
-
-wandb_project:
-wandb_entity:
-wandb_watch:
-wandb_name:
-wandb_log_model:
-
-gradient_accumulation_steps: 1
-micro_batch_size: 1
-num_epochs: 1
-optimizer: adamw_bnb_8bit
-lr_scheduler: cosine
-learning_rate: 0.000005
-
-train_on_inputs: false
-group_by_length: false
-bf16: true
-fp16: false
-tf32: false
-
-gradient_checkpointing: true
-early_stopping_patience:
-resume_from_checkpoint:
-local_rank:
-logging_steps: 1
-xformers_attention:
-flash_attention: true
-
-warmup_steps: 10
-evals_per_epoch: 4
-eval_table_size:
-eval_table_max_new_tokens: 128
-saves_per_epoch: 1
-debug:
-deepspeed: deepspeed/zero3_cpu.json
-weight_decay: 0.0
-fsdp:
-fsdp_config:
-special_tokens:
-  bos_token: "<s>"
-  eos_token: "</s>"
-  unk_token: "<unk>"
@@ -1,76 +0,0 @@
-base_model: wenge-research/yayi2-30b
-model_type: AutoModelForCausalLM
-tokenizer_type: AutoTokenizer
-is_mistral_derived_model: false
-trust_remote_code: true
-model_revision: refs/pr/5
-
-load_in_8bit: false
-load_in_4bit: true
-strict: false
-
-datasets:
-  - path: mhenrichsen/alpaca_2k_test
-    type: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.1
-output_dir: ./qlora-out
-
-adapter: qlora
-lora_model_dir:
-
-sequence_len: 2048 # Fits in 40gb VRAM. Can easily do 4096 in A100 80 or a A6000
-sample_packing: false
-pad_to_sequence_len: false
-
-lora_r: 32
-lora_alpha: 16
-lora_dropout: 0.05
-lora_target_linear: true
-lora_fan_in_fan_out:
-lora_target_modules:
-
-wandb_project: yayi2
-wandb_entity:
-wandb_watch:
-wandb_name:
-wandb_log_model:
-
-gradient_accumulation_steps: 1
-micro_batch_size: 1
-num_epochs: 1
-optimizer: adamw_bnb_8bit
-lr_scheduler: cosine
-learning_rate: 0.0005
-
-train_on_inputs: false
-group_by_length: false
-bf16: true
-fp16: false
-tf32: false
-
-gradient_checkpointing: true
-early_stopping_patience:
-resume_from_checkpoint:
-local_rank:
-logging_steps: 1
-xformers_attention:
-flash_attention: false
-
-loss_watchdog_threshold: 5.0
-loss_watchdog_patience: 3
-
-warmup_steps: 10
-evals_per_epoch: 4
-eval_table_size:
-eval_table_max_new_tokens: 128
-saves_per_epoch: 1
-debug:
-deepspeed:
-weight_decay: 0.0
-fsdp:
-fsdp_config:
-special_tokens:
-  bos_token: "<s>"
-  eos_token: "</s>"
-  unk_token: "<unk>"
@@ -1,5 +0,0 @@
-# Overview
-
-This is an example of a Yi-34B-Chat configuration. It demonstrates that it is possible to finetune a 34B model on a GPU with 24GB of VRAM.
-
-Tested on an RTX 4090 with `python -m axolotl.cli.train examples/mistral/qlora.yml`, a single epoch of finetuning on the alpaca dataset using qlora runs in 47 mins, using 97% of available memory.
@@ -1,76 +0,0 @@
-base_model: 01-ai/Yi-34B-Chat
-model_type: LlamaForCausalLM
-tokenizer_type: LlamaTokenizer
-is_mistral_derived_model: false
-is_llama_derived_model: true
-load_in_8bit: false
-load_in_4bit: true
-strict: false
-sequence_len: 1024
-bf16: true
-fp16: false
-tf32: false
-flash_attention: true
-special_tokens:
-  bos_token: "<|startoftext|>"
-  eos_token: "<|endoftext|>"
-  unk_token: "<unk>"
-
-# Data
-datasets:
-  - path: mhenrichsen/alpaca_2k_test
-    type: alpaca
-warmup_steps: 10
-
-# Iterations
-num_epochs: 1
-
-# Evaluation
-val_set_size: 0.1
-evals_per_epoch: 5
-eval_table_size:
-eval_table_max_new_tokens: 128
-eval_sample_packing: false
-eval_batch_size: 1
-
-# LoRA
-output_dir: ./qlora-out
-adapter: qlora
-lora_model_dir:
-lora_r: 32
-lora_alpha: 16
-lora_dropout: 0.05
-lora_target_linear: true
-lora_fan_in_fan_out:
-lora_target_modules:
-
-# Sampling
-sample_packing: false
-pad_to_sequence_len: false
-
-# Batching
-gradient_accumulation_steps: 4
-micro_batch_size: 1
-gradient_checkpointing: true
-
-# wandb
-wandb_project:
-
-# Optimizer
-optimizer: paged_adamw_8bit
-lr_scheduler: cosine
-learning_rate: 0.0002
-
-# Misc
-train_on_inputs: false
-group_by_length: false
-early_stopping_patience:
-resume_from_checkpoint:
-local_rank:
-logging_steps: 1
-xformers_attention:
-debug:
-deepspeed:
-weight_decay: 0
-fsdp:
-fsdp_config:
@@ -1,21 +1,22 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
 --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/
-auto-gptq==0.5.1
+torch==2.0.1
+auto-gptq==0.4.2
 packaging
 peft==0.6.0
-transformers==4.36.2
+transformers @ git+https://github.com/huggingface/transformers.git@acc394c4f5e1283c19783581790b3dc3105a3697
-tokenizers==0.15.0
 bitsandbytes>=0.41.1
-accelerate==0.24.1
+accelerate @ git+https://github.com/huggingface/accelerate@80da9cfb09bb3cc9f1b385cb55d6b90d025a5fd9
 deepspeed
 addict
 fire
 PyYAML>=6.0
-datasets>=2.15.0
+datasets
-flash-attn==2.3.3
+flash-attn>=2.3.0
 sentencepiece
 wandb
 einops
-xformers==0.0.22
+xformers>=0.0.22
 optimum==1.13.2
 hf_transfer
 colorama

@@ -29,11 +30,5 @@ scipy
 scikit-learn==1.2.2
 pynvml
 art
-fschat==0.2.34
+fschat==0.2.29
-gradio==3.50.2
+gradio
-tensorboard
-
-# remote filesystems
-s3fs
-gcsfs
-# adlfs

20 setup.py
@@ -1,7 +1,5 @@
 """setup.py for axolotl"""

-from importlib.metadata import PackageNotFoundError, version
-
 from setuptools import find_packages, setup

@@ -24,13 +22,12 @@ def parse_requirements():
             # Handle standard packages
             _install_requires.append(line)

-    try:
-        torch_version = version("torch")
-        if torch_version.startswith("2.1.1"):
-            _install_requires.pop(_install_requires.index("xformers==0.0.22"))
-            _install_requires.append("xformers==0.0.23")
-    except PackageNotFoundError:
-        pass
+    # TODO(wing) remove once xformers release supports torch 2.1.0
+    if "torch==2.1.0" in _install_requires:
+        _install_requires.pop(_install_requires.index("xformers>=0.0.22"))
+        _install_requires.append(
+            "xformers @ git+https://github.com/facebookresearch/xformers.git@main"
+        )

     return _install_requires, _dependency_links

@@ -49,13 +46,10 @@ setup(
     dependency_links=dependency_links,
     extras_require={
         "flash-attn": [
-            "flash-attn==2.3.3",
+            "flash-attn>=2.3.0",
         ],
         "deepspeed": [
            "deepspeed",
         ],
-        "mamba-ssm": [
-            "mamba-ssm==1.0.1",
-        ],
     },
 )
@@ -29,7 +29,6 @@ from axolotl.utils.dict import DictDefault
 from axolotl.utils.distributed import is_main_process
 from axolotl.utils.models import load_tokenizer
 from axolotl.utils.tokenization import check_dataset_labels
-from axolotl.utils.trainer import prepare_optim_env
 from axolotl.utils.wandb_ import setup_wandb_env_vars

 project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

@@ -47,7 +46,7 @@ def print_axolotl_text_art(suffix=None):
     ascii_text = " axolotl"
     if suffix:
         ascii_text += f" x {suffix}"
-    ascii_art = text2art(ascii_text, font=font)
+    ascii_art = text2art(" axolotl", font=font)

     if is_main_process():
         print(ascii_art)

@@ -72,7 +71,7 @@ def do_merge_lora(

    LOG.info("running merge of LoRA with base model")
    model = model.merge_and_unload()
-   model.to(dtype=cfg.torch_dtype)
+   model.to(dtype=torch.float16)

    if cfg.local_rank == 0:
        LOG.info(f"saving merged model to: {str(Path(cfg.output_dir) / 'merged')}")

@@ -103,7 +102,15 @@ def do_inference(
        importlib.import_module("axolotl.prompters"), prompter
    )

-   model = model.to(cfg.device, dtype=cfg.torch_dtype)
+   if cfg.landmark_attention:
+       from axolotl.monkeypatch.llama_landmark_attn import set_model_mem_id
+
+       set_model_mem_id(model, tokenizer)
+       model.set_mem_cache_args(
+           max_seq_len=255, mem_freq=50, top_k=5, max_cache_size=None
+       )
+
+   model = model.to(cfg.device)

    while True:
        print("=" * 80)

@@ -168,7 +175,15 @@ def do_inference_gradio(
        importlib.import_module("axolotl.prompters"), prompter
    )

-   model = model.to(cfg.device, dtype=cfg.torch_dtype)
+   if cfg.landmark_attention:
+       from axolotl.monkeypatch.llama_landmark_attn import set_model_mem_id
+
+       set_model_mem_id(model, tokenizer)
+       model.set_mem_cache_args(
+           max_seq_len=255, mem_freq=50, top_k=5, max_cache_size=None
+       )
+
+   model = model.to(cfg.device)

    def generate(instruction):
        if not instruction:

@@ -281,8 +296,6 @@ def load_cfg(config: Path = Path("examples/"), **kwargs):

    validate_config(cfg)

-   prepare_optim_env(cfg)
-
    normalize_config(cfg)

    setup_wandb_env_vars(cfg)

@@ -18,15 +18,7 @@ def do_cli(config: Path = Path("examples/"), **kwargs):
        return_remaining_strings=True
    )
    parsed_cli_args.merge_lora = True
-   parsed_cfg = load_cfg(
-       config,
-       merge_lora=True,
-       load_in_8bit=False,
-       load_in_4bit=False,
-       flash_attention=False,
-       **kwargs
-   )
+   parsed_cfg = load_cfg(config, merge_lora=True, **kwargs)

    do_merge_lora(cfg=parsed_cfg, cli_args=parsed_cli_args)
@@ -22,8 +22,8 @@ LOG = logging.getLogger("axolotl.cli.train")

 def do_cli(config: Path = Path("examples/"), **kwargs):
     # pylint: disable=duplicate-code
-    parsed_cfg = load_cfg(config, **kwargs)
     print_axolotl_text_art()
+    parsed_cfg = load_cfg(config, **kwargs)
     check_accelerate_default_config()
     check_user_token()
     parser = transformers.HfArgumentParser((TrainerCliArgs))
@@ -9,9 +9,9 @@ import math
 import sys
 from abc import abstractmethod
 from dataclasses import dataclass, field
-from functools import wraps
+from functools import partial
 from pathlib import Path
-from typing import Optional
+from typing import Optional, Union

 import torch
 import transformers

@@ -25,16 +25,13 @@ from axolotl.monkeypatch.relora import ReLoRACallback, ReLoRAScheduler
 from axolotl.utils.callbacks import (
     EvalFirstStepCallback,
     GPUStatsCallback,
-    LossWatchDogCallback,
     SaveAxolotlConfigtoWandBCallback,
     SaveBetterTransformerModelCallback,
     bench_eval_callback_factory,
     log_prediction_callback_factory,
 )
-from axolotl.utils.collators import (
-    BatchSamplerDataCollatorForSeq2Seq,
-    MambaDataCollator,
-)
+from axolotl.utils.collators import BatchSamplerDataCollatorForSeq2Seq
+from axolotl.utils.dataloader import MultipackDistributedDataloader
 from axolotl.utils.samplers import MultipackBatchSampler
 from axolotl.utils.schedulers import get_cosine_schedule_with_quadratic_warmup

@@ -52,9 +49,6 @@ class AxolotlTrainingArguments(TrainingArguments):
     Extend the base TrainingArguments for axolotl helpers
     """

-    model_type: Optional[str] = field(
-        default=None, metadata={"help": "HF model configuration model_type."}
-    )
     lr_quadratic_warmup: bool = field(
         default=False,
         metadata={"help": "Use quadratic warmup for cosine scheduling."},

@@ -120,7 +114,6 @@ class AxolotlTrainer(Trainer):
     """

     args = None  # type: AxolotlTrainingArguments
-    tag_names = ["axolotl"]

     def __init__(self, *args, num_epochs=1, bench_data_collator=None, **kwargs):
         self.num_epochs = num_epochs
@@ -222,7 +215,9 @@ class AxolotlTrainer(Trainer):
             )
         return super().get_train_dataloader()

-    def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
+    def get_eval_dataloader(
+        self, eval_dataset: Optional[Dataset] = None
+    ) -> Union[DataLoader, MultipackDistributedDataloader]:
         if self.args.sample_packing and self.args.eval_sample_packing is not False:
             eval_dataset = (
                 eval_dataset if eval_dataset is not None else self.eval_dataset

@@ -265,7 +260,7 @@ class AxolotlTrainer(Trainer):
     def get_bench_dataloader(
         self,
         bench_dataset: Dataset,
-    ) -> DataLoader:
+    ) -> Union[DataLoader, MultipackDistributedDataloader]:
         dataloader_params = {
             "batch_size": self.args.eval_batch_size,
             "collate_fn": self.bench_data_collator,

@@ -291,69 +286,12 @@ class AxolotlTrainer(Trainer):
         # return (loss, outputs) if return_outputs else loss
         return super().compute_loss(model, inputs, return_outputs=return_outputs)

-    def _sanitize_kwargs_for_tagging(self, tag_names, kwargs=None):
-        if isinstance(tag_names, str):
-            tag_names = [tag_names]
-
-        if kwargs is not None:
-            if "tags" not in kwargs:
-                kwargs["tags"] = tag_names
-            elif "tags" in kwargs and isinstance(kwargs["tags"], list):
-                kwargs["tags"].extend(tag_names)
-            elif "tags" in kwargs and isinstance(kwargs["tags"], str):
-                tag_names.append(kwargs["tags"])
-                kwargs["tags"] = tag_names
-
-        return kwargs
-
-    @wraps(Trainer.push_to_hub)
-    def push_to_hub(self, *args, **kwargs) -> str:
-        """
-        Overwrite the `push_to_hub` method in order to force-add the tags when pushing the
-        model on the Hub. Please refer to `~transformers.Trainer.push_to_hub` for more details.
-        """
-        kwargs = self._sanitize_kwargs_for_tagging(
-            tag_names=self.tag_names, kwargs=kwargs
-        )
-
-        return super().push_to_hub(*args, **kwargs)
-
-
-class AxolotlMambaTrainer(AxolotlTrainer):
-    """
-    Mamba specific trainer to handle loss calculation
-    """
-
-    tag_names = ["axolotl", "mamba"]
-
-    def compute_loss(
-        self,
-        model,
-        inputs,
-        return_outputs=False,  # pylint: disable=unused-argument
-    ):
-        input_ids = inputs.pop("input_ids")
-        lm_logits = model(input_ids).logits
-
-        labels = input_ids.to(lm_logits.device)
-        shift_logits = lm_logits[:, :-1, :].contiguous()
-        labels = labels[:, 1:].contiguous()
-
-        loss_fct = torch.nn.CrossEntropyLoss()
-        lm_loss = loss_fct(
-            shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1)
-        )
-
-        return lm_loss
-
-
 class OneCycleLRSchedulerTrainer(AxolotlTrainer):
     """
     Trainer subclass that uses the OneCycleLR scheduler
     """

-    tag_names = ["axolotl", "onecycle"]

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.lr_scheduler = None
@@ -383,8 +321,6 @@ class ReLoRATrainer(AxolotlTrainer):
     Trainer subclass that uses the OneCycleLR scheduler
     """

-    tag_names = ["axolotl", "relora"]
-
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.lr_scheduler = None

@@ -497,9 +433,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
                 SaveAxolotlConfigtoWandBCallback(self.cfg.axolotl_config_path)
             )

-        if self.cfg.loss_watchdog_threshold is not None:
-            callbacks.append(LossWatchDogCallback(self.cfg))
-
         return callbacks

     def get_post_trainer_create_callbacks(self, trainer):

@@ -528,19 +461,14 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             return OneCycleLRSchedulerTrainer
         if self.cfg.relora_steps:
             return ReLoRATrainer
-        if self.cfg.model_config_type == "mamba":
-            return AxolotlMambaTrainer
         return AxolotlTrainer

     def build(self, total_num_steps):
-        warmup_steps = None
-        if self.cfg.warmup_steps is not None:
-            warmup_steps = self.cfg.warmup_steps
-        elif self.cfg.warmup_ratio is not None:
-            warmup_steps = max(int(self.cfg.warmup_ratio * total_num_steps), 0)
-        else:
-            warmup_steps = min(int(0.03 * total_num_steps), 100)
+        warmup_steps = (
+            self.cfg.warmup_steps
+            if self.cfg.warmup_steps is not None
+            else min(int(0.03 * total_num_steps), 100)
+        )

         logging_steps = (
             self.cfg.logging_steps
             if self.cfg.logging_steps is not None

@@ -555,6 +483,10 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         training_arguments_kwargs["fp16"] = (
             self.cfg.fp16 and not self.cfg.bf16
         ) or False
+        if self.cfg.fp8:
+            training_arguments_kwargs["fp16"] = False
+            training_arguments_kwargs["bf16"] = False
+
         training_arguments_kwargs["tf32"] = self.cfg.tf32
         training_arguments_kwargs["warmup_steps"] = warmup_steps
         training_arguments_kwargs["logging_steps"] = logging_steps

@@ -597,7 +529,7 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         if self.cfg.hub_strategy:
             training_arguments_kwargs["hub_strategy"] = self.cfg.hub_strategy

-        if self.cfg.save_safetensors is not None:
+        if self.cfg.save_safetensors:
             training_arguments_kwargs["save_safetensors"] = self.cfg.save_safetensors

         if self.cfg.sample_packing_eff_est:

@@ -618,16 +550,16 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
                 "dataloader_prefetch_factor"
             ] = self.cfg.dataloader_prefetch_factor

-        if self.cfg.val_set_size == 0:
-            # no eval set, so don't eval
-            training_arguments_kwargs["evaluation_strategy"] = "no"
-        elif self.cfg.eval_steps:
+        if self.cfg.eval_steps:
             training_arguments_kwargs["evaluation_strategy"] = "steps"
             training_arguments_kwargs["eval_steps"] = self.cfg.eval_steps
         elif self.cfg.evaluation_strategy:
             training_arguments_kwargs[
                 "evaluation_strategy"
             ] = self.cfg.evaluation_strategy
+        elif self.cfg.val_set_size == 0:
+            # no eval set, so don't eval
+            training_arguments_kwargs["evaluation_strategy"] = "no"
         else:
             # we have an eval set, but no steps defined, default to use epoch
             training_arguments_kwargs["evaluation_strategy"] = "epoch"
@@ -715,7 +647,7 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         training_arguments_kwargs["group_by_length"] = self.cfg.group_by_length
         training_arguments_kwargs["report_to"] = "wandb" if self.cfg.use_wandb else None
         training_arguments_kwargs["run_name"] = (
-            self.cfg.wandb_name if self.cfg.use_wandb else None
+            self.cfg.wandb_run_id if self.cfg.use_wandb else None
         )
         training_arguments_kwargs["optim"] = (
             self.cfg.optimizer if self.cfg.optimizer else "adamw_hf"

@@ -726,9 +658,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             and self.cfg.lr_scheduler not in ("one_cycle", "log_sweep")
             else "cosine"
         )
-        training_arguments_kwargs["lr_scheduler_kwargs"] = (
-            self.cfg.lr_scheduler_kwargs if self.cfg.lr_scheduler_kwargs else {}
-        )
         training_arguments_kwargs["weight_decay"] = (
             self.cfg.weight_decay if self.cfg.weight_decay is not None else 0.0
         )

@@ -736,9 +665,7 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             self.cfg.sample_packing if self.cfg.sample_packing else False
         )
         training_arguments_kwargs["eval_sample_packing"] = (
-            self.cfg.sample_packing
-            if self.cfg.eval_sample_packing is not False
-            else False
+            self.cfg.sample_packing if self.cfg.sample_packing else False
         )
         training_arguments_kwargs[
             "sample_packing_seq_len_multiplier"

@@ -748,13 +675,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         training_arguments_kwargs = self.hook_pre_create_training_args(
             training_arguments_kwargs
         )
-        training_arguments_kwargs["model_type"] = self.cfg.model_config_type
-
-        if self.cfg.neftune_noise_alpha is not None:
-            training_arguments_kwargs[
-                "neftune_noise_alpha"
-            ] = self.cfg.neftune_noise_alpha
-
         training_args = (
             AxolotlTrainingArguments(  # pylint: disable=unexpected-keyword-arg
                 **training_arguments_kwargs,

@@ -780,6 +700,26 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             # https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html
             data_collator_kwargs["pad_to_multiple_of"] = 64

+        if self.cfg.is_llama_derived_model and self.cfg.landmark_attention:
+            from axolotl.monkeypatch.llama_landmark_attn import (
+                add_mem_tokens,
+                get_mem_id,
+                set_model_mem_id,
+            )
+
+            set_model_mem_id(self.model, self.tokenizer)
+
+            LOG.info("Adding landmark attention tokens to dataset")
+
+            for dataset in [self.train_dataset, self.eval_dataset]:
+                dataset = dataset.map(
+                    partial(
+                        add_mem_tokens, mem_freq=50, mem_id=get_mem_id(self.tokenizer)
+                    ),
+                    batched=False,
+                    num_proc=32,
+                )
+
         trainer_cls = self._get_trainer_cls()
         trainer_kwargs, trainer_cls = self.hook_pre_create_trainer(
             trainer_kwargs, trainer_cls

@@ -789,7 +729,11 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             train_dataset=self.train_dataset,
             eval_dataset=self.eval_dataset,
             args=training_args,
-            data_collator=self.build_collator(**data_collator_kwargs),
+            data_collator=BatchSamplerDataCollatorForSeq2Seq(
+                self.tokenizer,
+                return_tensors="pt",
+                **data_collator_kwargs,
+            ),
             bench_data_collator=transformers.DataCollatorForSeq2Seq(
                 self.tokenizer,
                 return_tensors="pt",

@@ -809,13 +753,3 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             ] = self.cfg.micro_batch_size

         return trainer
-
-    def build_collator(self, **kwargs):
-        if self.cfg.model_config_type == "mamba":
-            return MambaDataCollator(tokenizer=self.tokenizer)
-
-        return BatchSamplerDataCollatorForSeq2Seq(
-            self.tokenizer,
-            return_tensors="pt",
-            **kwargs,
-        )
@@ -1,12 +0,0 @@
-"""
-Modeling module for Mamba models
-"""
-
-
-def fix_mamba_attn_for_loss():
-    from mamba_ssm.models import mixer_seq_simple
-
-    from .modeling_mamba import MambaLMHeadModel as MambaLMHeadModelFixed
-
-    mixer_seq_simple.MambaLMHeadModel = MambaLMHeadModelFixed
-    return mixer_seq_simple.MambaLMHeadModel  # pylint: disable=invalid-name
@@ -1,42 +0,0 @@
-"""
-HF Transformers MambaConfig
-"""
-from transformers import PretrainedConfig
-
-
-class MambaConfig(PretrainedConfig):
-    """
-    modeling configuration for state space model/mamba
-    """
-
-    model_type = "mamba"
-
-    def __init__(
-        self,
-        vocab_size=50280,
-        d_model=2560,
-        n_layer=64,
-        rms_norm=True,
-        residual_in_fp32=True,
-        fused_add_norm=True,
-        pad_vocab_size_multiple=8,
-        pad_token_id=50277,
-        bos_token_id=0,
-        eos_token_id=0,
-        tie_word_embeddings=False,
-        **kwargs,
-    ):
-        self.vocab_size = vocab_size
-        self.d_model = d_model
-        self.n_layer = n_layer
-        self.rms_norm = rms_norm
-        self.residual_in_fp32 = residual_in_fp32
-        self.fused_add_norm = fused_add_norm
-        self.pad_vocab_size_multiple = pad_vocab_size_multiple
-        super().__init__(
-            pad_token_id=pad_token_id,
-            bos_token_id=bos_token_id,
-            eos_token_id=eos_token_id,
-            tie_word_embeddings=tie_word_embeddings,
-            **kwargs,
-        )
@@ -1,128 +0,0 @@
|
|||||||
# pylint: skip-file
|
|
||||||
import os
|
|
||||||
from collections import namedtuple
|
|
||||||
from functools import partial
|
|
||||||
from typing import Optional, Union
|
|
||||||
|
|
||||||
import torch
|
|
||||||
from mamba_ssm.models.mixer_seq_simple import MixerModel, _init_weights
|
|
||||||
from mamba_ssm.utils.generation import GenerationMixin
|
|
||||||
from mamba_ssm.utils.hf import load_config_hf, load_state_dict_hf
|
|
||||||
from torch import nn
|
|
||||||
from torch.nn import CrossEntropyLoss
|
|
||||||
|
|
||||||
from axolotl.models.mamba.configuration_mamba import MambaConfig
|
|
||||||
|
|
||||||
|
|
||||||
class MambaLMHeadModel(nn.Module, GenerationMixin):
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
d_model: int,
|
|
||||||
n_layer: int,
|
|
||||||
vocab_size: int,
|
|
||||||
initializer_cfg=None,
|
|
||||||
pad_vocab_size_multiple: int = 1,
|
|
||||||
device=None,
|
|
||||||
dtype=None,
|
|
||||||
**backbone_kwargs,
|
|
||||||
) -> None:
|
|
||||||
factory_kwargs = {"device": device, "dtype": dtype}
|
|
||||||
super().__init__()
|
|
||||||
if vocab_size % pad_vocab_size_multiple != 0:
|
|
||||||
vocab_size += pad_vocab_size_multiple - (
|
|
||||||
vocab_size % pad_vocab_size_multiple
|
|
||||||
)
|
|
||||||
self.config = MambaConfig(
|
|
||||||
vocab_size=vocab_size,
|
|
||||||
d_model=d_model,
|
|
||||||
n_layer=n_layer,
|
|
||||||
            pad_vocab_size_multiple=pad_vocab_size_multiple,
        )
        self.backbone = MixerModel(
            d_model=d_model,
            n_layer=n_layer,
            vocab_size=vocab_size,
            initializer_cfg=initializer_cfg,
            **backbone_kwargs,
            **factory_kwargs,
        )
        self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)

        # Initialize weights and apply final processing
        self.apply(
            partial(
                _init_weights,
                n_layer=n_layer,
                **(initializer_cfg if initializer_cfg is not None else {}),
            )
        )
        self.tie_weights()

    def tie_weights(self):
        self.lm_head.weight = self.backbone.embedding.weight

    def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
        return self.backbone.allocate_inference_cache(
            batch_size, max_seqlen, dtype=dtype, **kwargs
        )

    def forward(
        self,
        input_ids,
        position_ids=None,
        inference_params=None,
        num_last_tokens=0,
        labels=None,
        **kwargs,
    ):
        """
        "position_ids" is just to be compatible with Transformer generation. We don't use it.
        num_last_tokens: if > 0, only return the logits for the last n tokens
        """
        hidden_states = self.backbone(input_ids, inference_params=inference_params)
        if num_last_tokens > 0:
            hidden_states = hidden_states[:, -num_last_tokens:]
        lm_logits = self.lm_head(hidden_states)

        CausalLMOutput = namedtuple("CausalLMOutput", ["logits"])
        return CausalLMOutput(logits=lm_logits)

        loss = None
        if labels is not None:
            logits = lm_logits
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
            CausalLMOutput = namedtuple("CausalLMOutput", ["logits", "loss"])
            print(loss)
            return CausalLMOutput(logits=lm_logits, loss=loss)

        else:
            CausalLMOutput = namedtuple("CausalLMOutput", ["logits"])
            return CausalLMOutput(logits=lm_logits)

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        state_dict: Optional[dict] = None,
        safe_serialization: Optional[bool] = None,  # pylint: disable=unused-argument
    ):
        if state_dict is None:
            state_dict = self.state_dict()
        torch.save(state_dict, os.path.join(save_directory, "pytorch_model.bin"))

    @classmethod
    def from_pretrained(cls, pretrained_model_name, device=None, dtype=None, **kwargs):
        config = load_config_hf(pretrained_model_name)
        model = cls(**config, device=device, dtype=dtype, **kwargs)
        model.load_state_dict(
            load_state_dict_hf(pretrained_model_name, device={"": device}, dtype=dtype)
        )
        return model
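
A minimal usage sketch for the class above; the checkpoint name and vocabulary size are illustrative assumptions, not part of the diff:

    # Hypothetical checkpoint; anything loadable by load_config_hf/load_state_dict_hf would do.
    import torch

    model = MambaLMHeadModel.from_pretrained(
        "state-spaces/mamba-130m", device="cuda", dtype=torch.bfloat16
    )
    input_ids = torch.randint(0, 50277, (1, 16), device="cuda")  # 50277 = assumed vocab size
    output = model(input_ids, num_last_tokens=1)
    print(output.logits.shape)  # expected: torch.Size([1, 1, vocab_size])
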
@@ -3,6 +3,4 @@ MixFormers model architecture used for phi models
 """
 
 from .configuration_mixformer_sequential import MixFormerSequentialConfig  # noqa
-from .configuration_phi import PhiConfig  # noqa
 from .modeling_mixformer_sequential import MixFormerSequentialForCausalLM  # noqa
-from .modeling_phi import PhiForCausalLM  # noqa
@@ -1,65 +0,0 @@
# pylint: skip-file
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import math
from typing import Optional

from transformers import PretrainedConfig


class PhiConfig(PretrainedConfig):
    """Phi configuration."""

    model_type = "phi"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size: int = 50304,
        n_positions: int = 2048,
        n_embd: int = 1024,
        n_layer: int = 20,
        n_inner: Optional[int] = None,
        n_head: int = 16,
        n_head_kv: Optional[int] = None,
        rotary_dim: Optional[int] = 32,
        activation_function: Optional[str] = "gelu_new",
        flash_attn: bool = False,
        flash_rotary: bool = False,
        fused_dense: bool = False,
        attn_pdrop: float = 0.0,
        embd_pdrop: float = 0.0,
        resid_pdrop: float = 0.0,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        tie_word_embeddings: bool = False,
        pad_vocab_size_multiple: int = 64,
        **kwargs
    ) -> None:
        self.vocab_size = int(
            math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple
        )
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_inner = n_inner
        self.n_head = n_head
        self.n_head_kv = n_head_kv
        self.rotary_dim = min(rotary_dim, n_embd // n_head)
        self.activation_function = activation_function
        self.flash_attn = flash_attn
        self.flash_rotary = flash_rotary
        self.fused_dense = fused_dense
        self.attn_pdrop = attn_pdrop
        self.embd_pdrop = embd_pdrop
        self.resid_pdrop = resid_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
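
A quick numeric illustration of the `pad_vocab_size_multiple` rounding in the `__init__` above (a sketch, not part of the diff):

    config = PhiConfig(vocab_size=50257, pad_vocab_size_multiple=64)
    # math.ceil(50257 / 64) * 64 == 50304, so the stored vocab size is padded up:
    assert config.vocab_size == 50304
    assert config.hidden_size == config.n_embd  # attribute_map aliases the HF names
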
(File diff suppressed because it is too large.)
@@ -82,44 +82,15 @@ def get_turns(  # pylint: disable=too-many-return-statements
                 else:
                     yield role + ":", ""
             return
-        if self.sep_style == SeparatorStyle.LLAMA2 and self.name != "mistral":
+        if self.sep_style == SeparatorStyle.LLAMA2:
+            seps = [self.sep, self.sep2]
             if self.system_message:
-                if self.messages:
-                    # For llama, the system message is incorporated into the first human instruction
-                    first_role, first_msg = self.messages[0]
-                    if first_role == self.roles[0]:
-                        system_prompt += first_msg
-                        self.messages.pop(0)
                 yield "", system_prompt
-            for i, (role, message) in enumerate(self.messages):
+            else:
+                yield "", "[INST] "
+            for i, (role, message) in enumerate(self.messages[1:]):
                 if message:
-                    if (i % 2 == 0 and not self.system_message) or (
-                        i % 2 != 0 and self.system_message
-                    ):
-                        role = "<s> " + role
-                    yield role + " ", message
-                else:
-                    yield role, ""
-            return
-        if self.sep_style == SeparatorStyle.LLAMA2 and self.name == "mistral":
-            contains_sys_msg = False
-            if self.system_message:
-                contains_sys_msg = True
-                if self.messages:
-                    # There is no clear guidance on how to handle system messages in Mistral so we just prepend it to the first human instruction seperated by a newline
-                    first_role, first_msg = self.messages[0]
-                    if first_role == self.roles[0]:
-                        system_prompt = self.system_template.format(
-                            system_message=" " + self.system_message
-                        )
-                        system_prompt += first_msg
-                        self.messages.pop(0)
-                        yield "", system_prompt
-            for i, (role, message) in enumerate(self.messages):
-                if message and i == 0 and not contains_sys_msg:
-                    yield "", system_prompt.strip() + " " + message  # if there is no system message, we need to make sure there is the a `<s> [INST]` at the beginning of the first instruction.
-                elif message:
-                    yield role + " ", message
+                    yield role + " ", message + seps[i % 2]
                 else:
                     yield role, ""
             return
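
For reference, the turn generator above is typically consumed by joining the yielded (role, message) pairs back into a prompt string; a rough sketch in which the `conv` object setup is assumed:

    # Assumed setup: `conv` is a llama-2 style Conversation instance with its roles configured.
    conv.append_message(conv.roles[0], "Hello!")
    conv.append_message(conv.roles[1], "Hi, how can I help?")
    prompt = "".join(role + message for role, message in conv.get_turns())
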
@@ -321,8 +321,6 @@ def flashattn_forward(
     # only on first autoregressive step q,k,v have same seqlen
     is_causal = key_states.shape == query_states.shape
 
-    dropout_rate = 0.0 if not self.training else getattr(self, "attention_dropout", 0.0)
-
     if cu_seqlens is not None and max_seqlen is not None and cu_seqlens.dim() == 1:
         # special handling using sample packing
         qkv = torch.stack(
@@ -332,12 +330,7 @@ def flashattn_forward(
         qkv = rearrange(qkv, "b s ... -> (b s) ...")
 
         output = flash_attn_varlen_qkvpacked_func(
-            qkv,
-            cu_seqlens,
-            max_seqlen,
-            dropout_p=dropout_rate,
-            softmax_scale=None,
-            causal=True,
+            qkv, cu_seqlens, max_seqlen, 0.0, softmax_scale=None, causal=True
         )
         output = rearrange(output, "(b s) ... -> b s ...", b=bsz)
     elif query_states.shape == key_states.shape:
@@ -360,7 +353,7 @@ def flashattn_forward(
             qkv_unpad,
             cu_seqlens_q,
             max_seqlen_q,
-            dropout_p=dropout_rate,
+            0.0,
             softmax_scale=None,
             causal=is_causal,
         )
@@ -373,7 +366,6 @@ def flashattn_forward(
         output = flash_attn_kvpacked_func(
             query_states,
             torch.stack([key_states, value_states], 2),
-            dropout_p=dropout_rate,
            causal=is_causal,
         )
     else:
@@ -406,7 +398,7 @@ def flashattn_forward(
             cu_seqlens_k,
             max_seqlen_q,
             max_seqlen_k,
-            dropout_p=dropout_rate,
+            0.0,
             softmax_scale=None,
             causal=is_causal,
         )
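
The packed-sequence branch above depends on `cu_seqlens` and `max_seqlen` describing where each packed sample starts; a toy illustration of the expected shapes (not the actual `get_cu_seqlens_from_pos_ids` implementation):

    import torch

    # Two samples of lengths 3 and 5 packed into a single row of 8 tokens.
    position_ids = torch.tensor([[0, 1, 2, 0, 1, 2, 3, 4]])
    cu_seqlens = torch.tensor([0, 3, 8], dtype=torch.int32)  # cumulative sample boundaries
    max_seqlen = 5
    # flash_attn_varlen_qkvpacked_func(qkv, cu_seqlens, max_seqlen, ...) then attends
    # within [0, 3) and [3, 8) independently, so packed samples never see each other.
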
@@ -25,8 +25,6 @@ def sdp_attention_forward(
     past_key_value: Optional[Tuple[torch.Tensor]] = None,
     output_attentions: bool = False,
     use_cache: bool = False,
-    padding_mask: Optional[torch.LongTensor] = None,  # pylint: disable=unused-argument
-    **kwargs,  # pylint: disable=unused-argument
 ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
     # pylint: disable=duplicate-code
     bsz, q_len, _ = hidden_states.size()
@@ -29,8 +29,6 @@ def xformers_forward(
     past_key_value: Optional[Tuple[torch.Tensor]] = None,
     output_attentions: bool = False,
     use_cache: bool = False,
-    padding_mask: Optional[torch.LongTensor] = None,  # pylint: disable=unused-argument
-    **kwargs,  # pylint: disable=unused-argument
 ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
     # pylint: disable=duplicate-code
     bsz, q_len, _ = hidden_states.size()
src/axolotl/monkeypatch/llama_landmark_attn.py: 1249 lines (file diff suppressed because it is too large)
@@ -201,8 +201,6 @@ def flashattn_forward(
     # only on first autoregressive step q,k,v have same seqlen
     is_causal = key_states.shape == query_states.shape
 
-    dropout_rate = 0.0 if not self.training else getattr(self, "attention_dropout", 0.0)
-
     if cu_seqlens is not None and max_seqlen is not None and cu_seqlens.dim() == 1:
         # special handling using sample packing
         qkv = torch.stack(
@@ -215,7 +213,7 @@ def flashattn_forward(
             qkv,
             cu_seqlens,
             max_seqlen,
-            dropout_p=dropout_rate,
+            0.0,
             softmax_scale=None,
             causal=True,
             window_size=window_size,
@@ -241,7 +239,7 @@ def flashattn_forward(
             qkv_unpad,
             cu_seqlens_q,
             max_seqlen_q,
-            dropout_p=dropout_rate,
+            0.0,
             softmax_scale=None,
             causal=is_causal,
             window_size=window_size,
@@ -255,7 +253,6 @@ def flashattn_forward(
         output = flash_attn_kvpacked_func(
             query_states,
             torch.stack([key_states, value_states], 2),
-            dropout_p=dropout_rate,
             causal=is_causal,
             window_size=window_size,
         )
@@ -289,7 +286,7 @@ def flashattn_forward(
             cu_seqlens_k,
             max_seqlen_q,
             max_seqlen_k,
-            dropout_p=dropout_rate,
+            0.0,
             softmax_scale=None,
             causal=is_causal,
             window_size=window_size,
@@ -1,22 +0,0 @@
"""
Patches to support multipack for mixtral
"""
import transformers


def replace_mixtral_attn_with_multipack_flash_attn():
    from .modeling_mixtral import (
        MixtralMultipackFlashAttention2,
        mixtral_decoder_layer_forward,
        mixtral_model_forward,
    )

    transformers.models.mixtral.modeling_mixtral.MixtralDecoderLayer.forward = (
        mixtral_decoder_layer_forward
    )
    transformers.models.mixtral.modeling_mixtral.MixtralModel.forward = (
        mixtral_model_forward
    )
    transformers.models.mixtral.modeling_mixtral.MISTRAL_ATTENTION_CLASSES[
        "flash_attention_2"
    ] = MixtralMultipackFlashAttention2
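
A sketch of how a patch like this is typically applied, before the model is instantiated so the patched classes are picked up; the model id and kwargs are illustrative, not part of the diff:

    from transformers import AutoModelForCausalLM

    replace_mixtral_attn_with_multipack_flash_attn()  # patch first
    model = AutoModelForCausalLM.from_pretrained(
        "mistralai/Mixtral-8x7B-v0.1",  # illustrative model id
        attn_implementation="flash_attention_2",
        torch_dtype="auto",
    )
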
@@ -1,379 +0,0 @@
"""
Mixtral modeling for multipack
"""
# pylint: disable=missing-module-docstring,unused-argument,protected-access,pointless-string-statement,duplicate-code
import logging
import warnings
from typing import List, Optional, Tuple, Union

import torch
from einops import rearrange
from flash_attn import flash_attn_varlen_qkvpacked_func
from transformers import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from transformers.modeling_outputs import MoeModelOutputWithPast
from transformers.models.mixtral.modeling_mixtral import (
    MixtralFlashAttention2,
    apply_rotary_pos_emb,
    repeat_kv,
)

from axolotl.monkeypatch.utils import get_cu_seqlens_from_pos_ids

LOG = logging.getLogger("axolotl.monkeypatch.mixtral")


class MixtralMultipackFlashAttention2(MixtralFlashAttention2):
    """
    Custom multipack implementation w flash attention 2
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._flash_attn_uses_top_left_mask = True

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cu_seqlens: Optional[torch.Tensor] = None,
        max_seqlen: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
            )
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(
            bsz, q_len, self.num_heads, self.head_dim
        ).transpose(1, 2)
        key_states = key_states.view(
            bsz, q_len, self.num_key_value_heads, self.head_dim
        ).transpose(1, 2)
        value_states = value_states.view(
            bsz, q_len, self.num_key_value_heads, self.head_dim
        ).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(
            query_states, key_states, cos, sin, position_ids
        )

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(
                key_states, value_states, self.layer_idx, cache_kwargs
            )

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        if cu_seqlens is not None and max_seqlen is not None and cu_seqlens.dim() == 1:
            # special handling using sample packing
            qkv = torch.stack(
                [query_states, key_states, value_states], dim=2
            )  # [bsz, nh, 3, q_len, hd]
            qkv = qkv.transpose(1, 3)  # [bsz, q_len, 3, nh, hd]
            qkv = rearrange(qkv, "b s ... -> (b s) ...")

            attn_output = flash_attn_varlen_qkvpacked_func(
                qkv,
                cu_seqlens,
                max_seqlen,
                dropout_p=self.attention_dropout,
                softmax_scale=None,
                causal=True,
            )
            attn_output = rearrange(attn_output, "(b s) ... -> b s ...", b=bsz)

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


def mixtral_decoder_layer_forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: Optional[bool] = False,
    output_router_logits: Optional[bool] = False,
    use_cache: Optional[bool] = False,
    cu_seqlens: Optional[torch.Tensor] = None,
    max_seqlen: Optional[torch.Tensor] = None,
    **kwargs,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
    if "padding_mask" in kwargs:
        warnings.warn(
            "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
        )
    """
    Args:
        hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
        attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
            `(batch, sequence_length)` where padding elements are indicated by 0.
        past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under
            returned tensors for more detail.
        output_router_logits (`bool`, *optional*):
            Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
            should not be returned during inference.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
    """

    residual = hidden_states

    hidden_states = self.input_layernorm(hidden_states)

    # Self Attention
    hidden_states, self_attn_weights, present_key_value = self.self_attn(
        hidden_states=hidden_states,
        attention_mask=attention_mask,
        position_ids=position_ids,
        past_key_value=past_key_value,
        output_attentions=output_attentions,
        use_cache=use_cache,
        cu_seqlens=cu_seqlens,
        max_seqlen=max_seqlen,
    )
    hidden_states = residual + hidden_states

    # Fully Connected
    residual = hidden_states
    hidden_states = self.post_attention_layernorm(hidden_states)
    hidden_states, router_logits = self.block_sparse_moe(hidden_states)
    hidden_states = residual + hidden_states

    outputs = (hidden_states,)

    if output_attentions:
        outputs += (self_attn_weights,)

    if use_cache:
        outputs += (present_key_value,)

    if output_router_logits:
        outputs += (router_logits,)

    return outputs


def mixtral_model_forward(
    self,
    input_ids: torch.LongTensor = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    output_router_logits: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple, MoeModelOutputWithPast]:
    output_attentions = (
        output_attentions
        if output_attentions is not None
        else self.config.output_attentions
    )
    output_router_logits = (
        output_router_logits
        if output_router_logits is not None
        else self.config.output_router_logits
    )
    output_hidden_states = (
        output_hidden_states
        if output_hidden_states is not None
        else self.config.output_hidden_states
    )
    use_cache = use_cache if use_cache is not None else self.config.use_cache

    return_dict = (
        return_dict if return_dict is not None else self.config.use_return_dict
    )

    # retrieve input_ids and inputs_embeds
    if input_ids is not None and inputs_embeds is not None:
        raise ValueError(
            "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
        )
    if input_ids is not None:
        batch_size, seq_length = input_ids.shape
    elif inputs_embeds is not None:
        batch_size, seq_length, _ = inputs_embeds.shape
    else:
        raise ValueError(
            "You have to specify either decoder_input_ids or decoder_inputs_embeds"
        )

    past_key_values_length = 0

    if use_cache:
        use_legacy_cache = not isinstance(past_key_values, Cache)
        if use_legacy_cache:
            past_key_values = DynamicCache.from_legacy_cache(past_key_values)
        past_key_values_length = past_key_values.get_usable_length(seq_length)

    cu_seqlens = None
    max_seqlen = None
    if position_ids is None:
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        position_ids = torch.arange(
            past_key_values_length,
            seq_length + past_key_values_length,
            dtype=torch.long,
            device=device,
        )
        position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
    else:
        position_ids = position_ids.view(-1, seq_length).long()
        cu_seqlens, max_seqlen = get_cu_seqlens_from_pos_ids(position_ids)
        cu_seqlens = cu_seqlens.squeeze()

    if inputs_embeds is None:
        inputs_embeds = self.embed_tokens(input_ids)

    if attention_mask is not None and self._use_flash_attention_2 and use_cache:
        is_padding_right = attention_mask[:, -1].sum().item() != batch_size
        if is_padding_right:
            raise ValueError(
                "You are attempting to perform batched generation with padding_side='right'"
                " this may lead to unexpected behaviour for Flash Attention version of Mixtral. Make sure to "
                " call `tokenizer.padding_side  = 'left'` before tokenizing the input. "
            )

    if self._use_flash_attention_2:
        # 2d mask is passed through the layers
        attention_mask = (
            attention_mask
            if (attention_mask is not None and 0 in attention_mask)
            else None
        )
    else:
        # 4d mask is passed through the layers
        attention_mask = _prepare_4d_causal_attention_mask(
            attention_mask,
            (batch_size, seq_length),
            inputs_embeds,
            past_key_values_length,
            sliding_window=self.config.sliding_window,
        )

    hidden_states = inputs_embeds

    if self.gradient_checkpointing and self.training:
        if use_cache:
            LOG.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

    # decoder layers
    all_hidden_states = () if output_hidden_states else None
    all_self_attns = () if output_attentions else None
    all_router_logits = () if output_router_logits else None
    next_decoder_cache = None

    for decoder_layer in self.layers:
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if self.gradient_checkpointing and self.training:
            layer_outputs = self._gradient_checkpointing_func(
                decoder_layer.__call__,
                hidden_states,
                attention_mask,
                position_ids,
                past_key_values,
                output_attentions,
                output_router_logits,
                use_cache,
                cu_seqlens,
                max_seqlen,
            )
        else:
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                output_router_logits=output_router_logits,
                use_cache=use_cache,
                cu_seqlens=cu_seqlens,
                max_seqlen=max_seqlen,
            )

        hidden_states = layer_outputs[0]

        if use_cache:
            next_decoder_cache = layer_outputs[2 if output_attentions else 1]

        if output_attentions:
            all_self_attns += (layer_outputs[1],)

        if output_router_logits:
            all_router_logits += (layer_outputs[-1],)

    hidden_states = self.norm(hidden_states)

    # add hidden states from the last decoder layer
    if output_hidden_states:
        all_hidden_states += (hidden_states,)

    next_cache = None
    if use_cache:
        next_cache = (
            next_decoder_cache.to_legacy_cache()
            if use_legacy_cache
            else next_decoder_cache
        )

    if not return_dict:
        return tuple(
            v
            for v in [
                hidden_states,
                next_cache,
                all_hidden_states,
                all_self_attns,
                all_router_logits,
            ]
            if v is not None
        )

    return MoeModelOutputWithPast(
        last_hidden_state=hidden_states,
        past_key_values=next_cache,
        hidden_states=all_hidden_states,
        attentions=all_self_attns,
        router_logits=all_router_logits,
    )
src/axolotl/monkeypatch/neft_embeddings.py: 65 lines (normal file)
@@ -0,0 +1,65 @@
"""
patches implemented through the trainer hooks to enable NEFT/noisy embeddings per https://arxiv.org/abs/2310.05914
"""
import torch
from peft import PeftModel
from transformers import PreTrainedModel


def patch_neft(alpha, model):
    embeddings = None
    if isinstance(model, PreTrainedModel):
        embeddings = model.get_input_embeddings()
    if isinstance(model, PeftModel):
        embeddings = model.base_model.get_input_embeddings()
    if not embeddings:
        raise ValueError(f"unhandled model class for neft: {model.__class__.__name__}")
    embeddings.noisy_embedding_alpha = alpha
    old_forward = embeddings.forward

    # This hack seems to be needed to properly use a custom forward pass
    # all credits to: https://discuss.pytorch.org/t/how-can-i-replace-the-forward-method-of-a-predefined-torchvision-model-with-my-customized-forward-function/54224/11
    bound_method = neft_forward.__get__(  # pylint: disable=no-value-for-parameter
        embeddings, embeddings.__class__
    )
    setattr(embeddings, "forward", bound_method)

    embeddings._old_forward = old_forward  # pylint: disable=protected-access
    return model


def unpatch_neft(model):
    embeddings = None
    if isinstance(model, PreTrainedModel):
        embeddings = model.get_input_embeddings()
    if isinstance(model, PeftModel):
        embeddings = model.base_model.get_input_embeddings()
    if not embeddings:
        raise ValueError(f"unhandled model class for neft: {model.__class__.__name__}")
    if hasattr(embeddings, "_old_forward"):
        embeddings.forward = embeddings._old_forward  # pylint: disable=protected-access
        del embeddings._old_forward  # pylint: disable=protected-access
    del embeddings.noisy_embedding_alpha


def neft_forward(self, inputs: torch.Tensor):
    embeddings = self._old_forward(inputs)  # pylint: disable=protected-access

    if self.training:
        dims = torch.tensor(embeddings.size(1) * embeddings.size(2))
        mag_norm = self.noisy_embedding_alpha / torch.sqrt(dims)
        embeddings = embeddings + torch.zeros_like(embeddings).uniform_(
            -mag_norm, mag_norm
        )

    return embeddings


def pretrain_hook(cfg, trainer):
    if cfg.noisy_embedding_alpha:
        trainer.model = patch_neft(cfg.noisy_embedding_alpha, trainer.model)


def post_train_hook(cfg, trainer):
    if cfg.noisy_embedding_alpha:
        unpatch_neft(trainer.model)
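
For reference, the noise magnitude in `neft_forward` follows the NEFTune rule mag_norm = alpha / sqrt(L * d), where L is the sequence length and d the embedding width; a quick numeric sketch with illustrative values:

    import torch

    alpha = 5.0
    seq_len, hidden = 512, 4096                       # illustrative shapes
    dims = torch.tensor(float(seq_len * hidden))
    mag_norm = float(alpha / torch.sqrt(dims))        # ~= 0.00345
    noise = torch.zeros(1, seq_len, hidden).uniform_(-mag_norm, mag_norm)
    assert noise.abs().max() <= mag_norm
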
src/axolotl/monkeypatch/xpos_rope_llama_monkey_patch.py: 94 lines (normal file)
@@ -0,0 +1,94 @@
# pylint: skip-file
"""
Copied from https://github.com/kaiokendev/cutoff-len-is-context-len/blob/main/util/xpos_rope_llama_monkey_patch.py
"""
import torch
import transformers
import transformers.models.llama.modeling_llama
from einops import rearrange


class XposRotaryEmbedding(torch.nn.Module):
    def __init__(
        self,
        dim,
        max_position_embeddings=2048,
        base=10000,
        device=None,
        scale_base=2048,
        use_xpos=True,
    ):
        super().__init__()
        self.max_seq_len_cached = max_position_embeddings
        self.scale_base = scale_base

        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        t = torch.arange(self.max_seq_len_cached, device=device).type_as(inv_freq)
        freqs = torch.einsum("i , j -> i j", t, inv_freq)
        freqs = torch.cat((freqs, freqs), dim=-1)

        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.register_buffer("freqs_cached", freqs, persistent=False)

        if not use_xpos:
            self.register_buffer("scale", None)
            self.register_buffer("scale_cached", torch.ones(1))
            return

        scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
        power = (t - (self.max_seq_len_cached // 2)) / self.scale_base
        scale_cached = scale ** rearrange(power, "n -> n 1")
        scale_cached = torch.cat((scale_cached, scale_cached), dim=-1)

        self.register_buffer("scale", scale, persistent=False)
        self.register_buffer("scale_cached", scale_cached, persistent=False)

    def forward(
        self,
        x,
        seq_len,
    ):
        if seq_len > self.max_seq_len_cached:
            self.max_seq_len_cached = seq_len
            t = torch.arange(self.max_seq_len_cached, device=x.device).type_as(
                self.inv_freq
            )
            freqs = torch.einsum("i , j -> i j", t, self.inv_freq)
            freqs = torch.cat((freqs, freqs), dim=-1).to(dtype=x.dtype)

            self.register_buffer("freqs_cached", freqs)

            if self.scale is None:
                self.register_buffer(
                    "scale_cached", torch.ones(1, device=x.device).to(dtype=x.dtype)
                )

                return self.freqs_cached.to(dtype=x.dtype), self.scale_cached

            power = (t - (seq_len // 2)) / self.scale_base
            scale = self.scale ** rearrange(power, "n -> n 1")
            scale = torch.cat((scale, scale), dim=-1).to(dtype=x.dtype)
            self.register_buffer("scale_cached", scale)

        return self.freqs_cached.to(dtype=x.dtype), self.scale_cached.to(dtype=x.dtype)


def rotate_half(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, freqs, scale=1, position_ids=None):
    freqs = freqs[position_ids, :]
    if scale.shape[-1] != 1:
        scale = scale[position_ids, :]

    q_embed = (q * freqs.cos() * scale) + (rotate_half(q) * freqs.sin() * scale)
    k_embed = (k * freqs.cos() * 1 / scale) + (rotate_half(k) * freqs.sin() * 1 / scale)

    return q_embed, k_embed


def replace_llama_rope_with_xpos_rope():
    transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = XposRotaryEmbedding
    transformers.models.llama.modeling_llama.apply_rotary_pos_emb = apply_rotary_pos_emb
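
A sketch of applying the patch, which has to run before the Llama model is instantiated so the replacement rotary-embedding class is the one constructed; the model id is illustrative:

    from transformers import AutoModelForCausalLM

    replace_llama_rope_with_xpos_rope()
    model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")  # illustrative
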
@@ -81,9 +81,8 @@ class LLama2ChatTokenizingStrategy(PromptTokenizingStrategy):
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.tokenizer.add_special_tokens(
-            {"pad_token": getattr(self.tokenizer, "pad_token", "<pad>")}
-        )
+        self.sequence_len = 4096
+        self.tokenizer.add_special_tokens({"pad_token": "<pad>"})
         # https://huggingface.co/meta-llama/Llama-2-7b-chat-hf/blob/main/added_tokens.json
 
     def tokenize_prompt(self, prompt):
@@ -13,7 +13,7 @@ register_conv_template(
         system_message="You are a helpful assistant.",
         roles=["<|im_start|>user", "<|im_start|>assistant"],
         sep_style=SeparatorStyle.CHATML,
-        sep="<|im_end|>",
+        sep="<|im_end|>\n",
     )
 )
 
@@ -39,23 +39,6 @@ def load(tokenizer, cfg, ds_cfg: Optional[Dict[str, Any]] = None):
     return strategy
 
 
-def load_ultrachat(tokenizer, cfg, ds_cfg: Optional[Dict[str, Any]] = None):
-    conversation = (
-        ds_cfg["conversation"] if ds_cfg and "conversation" in ds_cfg else None
-    )
-    strategy = UltrachatShareGPTPromptTokenizingStrategy(
-        ShareGPTPrompterV2(
-            conversation=conversation,
-        ),
-        tokenizer,
-        cfg.train_on_inputs,
-        cfg.sequence_len,
-    )
-    if ds_cfg and "strict" in ds_cfg:
-        strategy.strict = ds_cfg["strict"]
-    return strategy
-
-
 def load_role(tokenizer, cfg):
     return SimpleRoleShareGPTPromptTokenizingStrategy(
         ShareGPTPrompterV2(),
@@ -126,17 +109,3 @@ class GuanacoShareGPTPromptTokenizingStrategy(ShareGPTPromptTokenizingStrategy):
             {"from": role_map[t["role"]], "value": t["text"]} for t in conversations
         ]
         return turns
-
-
-class UltrachatShareGPTPromptTokenizingStrategy(SimpleShareGPTPromptTokenizingStrategy):
-    """
-    sharegpt strategy that remaps ultrachat data to sharegpt format
-    """
-
-    def get_conversation_thread(self, prompt):
-        conversations = prompt["messages"]
-        role_map = {"user": "human", "assistant": "gpt"}
-        turns = [
-            {"from": role_map[t["role"]], "value": t["content"]} for t in conversations
-        ]
-        return turns
@@ -22,19 +22,13 @@ class PromptStyle(Enum):
     CHATML = "chatml"
 
 
-class Prompter:
-    """
-    Base prompter class for all prompters
-    """
-
-
-class AlpacaPrompter(Prompter):
+class AlpacaPrompter:
     """
     Base class for alpaca prompters
     """
 
-    system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request."
-    system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request."
+    system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
+    system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
     system_format: str = "{system}"
     turn_format: str
     turn_no_input_format: str
@@ -75,7 +69,7 @@ class AlpacaPrompter(Prompter):
         else:
             res = (
                 self.system_format.format(system=self.system_no_input_prompt)
-                if self.system_no_input_prompt
+                if self.system_prompt
                 else ""
             ) + self.turn_no_input_format.format(instruction=instruction)
         if output:
@@ -165,7 +159,7 @@ class NomicGPT4AllPrompter(AlpacaPrompter):
     """
 
 
-class ReflectAlpacaPrompter(Prompter):
+class ReflectAlpacaPrompter:
     """
     Prompter for ReflectAlpaca
     """
@@ -260,7 +254,7 @@ SHAREGPT_ASSERTION_FAILED_ROLE = (
 )
 
 
-class ShareGPTPrompter(Prompter):  # pylint: disable=too-few-public-methods
+class ShareGPTPrompter:  # pylint: disable=too-few-public-methods
     """
     A prompter that generates prompts for the ShareGPT
     """
@@ -355,7 +349,7 @@ class ShareGPTPrompterV2(ShareGPTPrompter):
     )
 
 
-class UnsupportedPrompter(Prompter):
+class UnsupportedPrompter:
     """
     A dummy class for custom prompters
     """
@@ -12,13 +12,12 @@ import transformers.modelcard
 from accelerate.logging import get_logger
 from datasets import Dataset
 from optimum.bettertransformer import BetterTransformer
-from pkg_resources import get_distribution  # type: ignore
 from transformers.deepspeed import is_deepspeed_zero3_enabled
 
 from axolotl.common.cli import TrainerCliArgs
 from axolotl.logging_config import configure_logging
+from axolotl.monkeypatch import neft_embeddings
 from axolotl.utils.dict import DictDefault
-from axolotl.utils.freeze import freeze_parameters_except
 from axolotl.utils.models import load_model, load_tokenizer
 from axolotl.utils.trainer import setup_trainer
 
@@ -79,15 +78,11 @@ def train(
     )
     resume_from_checkpoint = cfg.resume_from_checkpoint
 
-    if cfg.unfrozen_parameters:
-        freeze_parameters_except(model, cfg.unfrozen_parameters)
-
     trainer = setup_trainer(
         cfg, train_dataset, eval_dataset, model, tokenizer, total_num_steps
     )
 
-    if hasattr(model, "config"):
-        model.config.use_cache = False
+    model.config.use_cache = False
 
     # go ahead and presave, so we have the adapter config available to inspect
     if peft_config:
@@ -97,8 +92,7 @@ def train(
     if not Path(cfg.output_dir).is_dir():
         os.makedirs(cfg.output_dir, exist_ok=True)
     tokenizer.save_pretrained(str(Path(cfg.output_dir)))
-    if hasattr(model, "config"):
-        model.config.save_pretrained(str(Path(cfg.output_dir)))
+    model.config.save_pretrained(str(Path(cfg.output_dir)))
 
     # In case we want to stop early with ctrl+c, this is a nice to have to save the pretrained model
     if cfg.local_rank == 0:
@@ -116,12 +110,6 @@ def train(
     badge_markdown = """[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)"""
     transformers.modelcard.AUTOGENERATED_TRAINER_COMMENT += f"\n{badge_markdown}"
 
-    if getattr(cfg, "axolotl_config_path"):
-        raw_axolotl_cfg = Path(cfg.axolotl_config_path)
-        version = get_distribution("axolotl").version
-        if raw_axolotl_cfg.is_file():
-            transformers.modelcard.AUTOGENERATED_TRAINER_COMMENT += f"\n<details><summary>See axolotl config</summary>\n\naxolotl version: `{version}`\n```yaml\n{raw_axolotl_cfg.read_text(encoding='utf-8')}\n```\n\n</details><br>\n"
-
     LOG.info("Starting trainer...")
     if cfg.group_by_length:
         LOG.info("hang tight... sorting dataset for group_by_length")
@@ -186,19 +174,21 @@ def train(
     return model, tokenizer
 
 
-def pretrain_hooks(_cfg, _trainer):
+def pretrain_hooks(cfg, trainer):
     """
     Run hooks right before kicking off the training
     :param cfg:
     :param trainer:
     :return:
     """
+    neft_embeddings.pretrain_hook(cfg, trainer)
 
 
-def post_train_hooks(_cfg, _trainer):
+def post_train_hooks(cfg, trainer):
     """
     Run hooks right after training completes
     :param cfg:
    :param trainer:
     :return:
     """
+    neft_embeddings.post_train_hook(cfg, trainer)
@@ -4,8 +4,6 @@ from __future__ import annotations
 
 import logging
 import os
-from shutil import copyfile
-from tempfile import NamedTemporaryFile
 from typing import TYPE_CHECKING, Dict, List
 
 import evaluate
@@ -126,36 +124,6 @@ class GPUStatsCallback(
         return control
 
 
-class LossWatchDogCallback(TrainerCallback):
-    """Callback to track loss and stop training if loss is too high"""
-
-    def __init__(self, cfg):
-        self.cfg = cfg
-        self.logged = False
-        self.violations = 0
-        self.threshold = cfg.loss_watchdog_threshold
-        self.patience = cfg.loss_watchdog_patience or 3
-
-    def on_step_end(
-        self,
-        _args: TrainingArguments,
-        state: TrainerState,
-        control: TrainerControl,
-        **_kwargs,
-    ):
-        if len(state.log_history) > 0 and "loss" in state.log_history[-1]:
-            if state.log_history[-1]["loss"] > self.threshold:
-                self.violations += 1
-                if self.violations >= self.patience:
-                    LOG.warning(
-                        "Loss is too high, stopping training (loss_watchdog_threshold)"
-                    )
-                    control.should_training_stop = True
-            else:
-                self.violations = 0
-        return control
-
-
 def bench_eval_callback_factory(trainer, tokenizer):
     accuracy = evaluate.load("accuracy")
     abcd_idx = [
@@ -563,15 +531,10 @@ class SaveAxolotlConfigtoWandBCallback(TrainerCallback):
     ):
         if is_main_process():
             try:
-                # sync config to top level in run, cannot delete file right away because wandb schedules it to be synced even w/policy = 'now', so let OS delete it later.
-                with NamedTemporaryFile(
-                    mode="w", delete=False, suffix=".yml", prefix="axolotl_config_"
-                ) as temp_file:
-                    copyfile(self.axolotl_config_path, temp_file.name)
-                    wandb.save(temp_file.name)
-                    LOG.info(
-                        "The Axolotl config has been saved to the WandB run under files."
-                    )
+                artifact = wandb.Artifact(name="axolotl-config", type="config")
+                artifact.add_file(local_path=self.axolotl_config_path)
+                wandb.run.log_artifact(artifact)
+                LOG.info("Axolotl config has been saved to WandB as an artifact.")
             except (FileNotFoundError, ConnectionError) as err:
                 LOG.warning(f"Error while saving Axolotl config to WandB: {err}")
         return control
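
For context, callbacks such as the removed `LossWatchDogCallback` are attached to the Hugging Face trainer; a minimal sketch in which the `trainer` and `cfg` objects are assumed to already exist:

    cfg.loss_watchdog_threshold = 2.0   # stop if the logged loss stays above this value
    cfg.loss_watchdog_patience = 3      # ...for this many consecutive logged steps
    trainer.add_callback(LossWatchDogCallback(cfg))
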
@@ -1,29 +0,0 @@
"""
This module provides functionality for selecting chat templates based on user choices.
These templates are used for formatting messages in a conversation.
"""


def chat_templates(user_choice: str):
    """
    Finds the correct chat_template for the tokenizer_config.

    Args:
        user_choice (str): The user's choice of template.

    Returns:
        str: The chosen template string.

    Raises:
        ValueError: If the user_choice is not found in the templates.
    """

    templates = {
        "inst": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",  # I don't know what this one is called. Used by Mistral/Mixtral.
        "chatml": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
    }

    if user_choice in templates:
        return templates[user_choice]

    raise ValueError(f"Template '{user_choice}' not found.")
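
A usage sketch for the helper above; the tokenizer id is illustrative, and the commented output is what the chatml Jinja template would be expected to render:

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")  # illustrative
    tokenizer.chat_template = chat_templates("chatml")
    text = tokenizer.apply_chat_template(
        [{"role": "user", "content": "hello"}],
        tokenize=False,
        add_generation_prompt=True,
    )
    # expected: "<|im_start|>user\nhello<|im_end|>\n<|im_start|>assistant\n"
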
@@ -2,16 +2,12 @@
 DataCollator for axolotl to pad labels and position_ids for packed sequences
 """
 from dataclasses import dataclass
-from typing import Any, Dict, Optional, Sequence, Union
+from typing import Any, Optional, Union
 
 import numpy as np
-import torch
-import transformers
 from transformers import PreTrainedTokenizerBase
 from transformers.utils import PaddingStrategy
 
-IGNORE_INDEX = -100
-
-
 @dataclass
 class DataCollatorForSeq2Seq:
@@ -150,31 +146,3 @@ class BatchSamplerDataCollatorForSeq2Seq(DataCollatorForSeq2Seq):
             chunked_data[feature] = np.concatenate(arrays)
         features = [chunked_data]
         return super().__call__(features, return_tensors=return_tensors)
-
-
-@dataclass
-class MambaDataCollator:
-    """
-    Collator for State Space Models (Mamba)
-    """
-
-    tokenizer: transformers.PreTrainedTokenizer
-
-    def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
-        input_ids, labels = tuple(
-            [torch.LongTensor(instance[key]) for instance in instances]
-            for key in ("input_ids", "labels")
-        )
-        input_ids = torch.nn.utils.rnn.pad_sequence(
-            input_ids,
-            batch_first=True,
-            padding_value=self.tokenizer.pad_token_id,
-        )
-        labels = torch.nn.utils.rnn.pad_sequence(
-            labels, batch_first=True, padding_value=IGNORE_INDEX
-        )
-
-        return {
-            "input_ids": input_ids,
-            "labels": labels,
-        }
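
A sketch of how the removed `MambaDataCollator` pads a batch; the tokenizer and token values are illustrative:

    collator = MambaDataCollator(tokenizer=tokenizer)  # any tokenizer with pad_token_id set
    batch = collator(
        [
            {"input_ids": [1, 2, 3], "labels": [1, 2, 3]},
            {"input_ids": [4, 5], "labels": [4, 5]},
        ]
    )
    # batch["input_ids"].shape == (2, 3); short rows are padded, labels with IGNORE_INDEX (-100)
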
@@ -27,7 +27,7 @@ def choose_device(cfg):
|
|||||||
|
|
||||||
cfg.device = get_device()
|
cfg.device = get_device()
|
||||||
if cfg.world_size == 1:
|
if cfg.world_size == 1:
|
||||||
cfg.device_map = cfg.device_map or "auto"
|
cfg.device_map = "auto"
|
||||||
else:
|
else:
|
||||||
if cfg.device.startswith("cuda"):
|
if cfg.device.startswith("cuda"):
|
||||||
cfg.device_map = {"": torch.cuda.current_device()}
|
cfg.device_map = {"": torch.cuda.current_device()}
|
||||||
@@ -70,22 +70,15 @@ def normalize_config(cfg):
|
|||||||
else:
|
else:
|
||||||
torch.backends.cuda.matmul.allow_tf32 = cfg.tf32 or False
|
torch.backends.cuda.matmul.allow_tf32 = cfg.tf32 or False
|
||||||
|
|
||||||
if cfg.bf16 or cfg.bfloat16:
|
if cfg.fp8:
|
||||||
|
cfg.torch_dtype = torch.bfloat16
|
||||||
|
elif cfg.bf16 or cfg.bfloat16:
|
||||||
cfg.torch_dtype = torch.bfloat16
|
cfg.torch_dtype = torch.bfloat16
|
||||||
elif cfg.load_in_8bit or cfg.fp16 or cfg.float16:
|
elif cfg.load_in_8bit or cfg.fp16 or cfg.float16:
|
||||||
cfg.torch_dtype = torch.float16
|
cfg.torch_dtype = torch.float16
|
||||||
else:
|
else:
|
||||||
cfg.torch_dtype = torch.float32
|
cfg.torch_dtype = torch.float32
|
||||||
|
|
||||||
if cfg.saves_per_epoch:
|
|
||||||
save_steps = 1.0 / (cfg.saves_per_epoch * cfg.num_epochs)
|
|
||||||
if save_steps < 1.0: # prevent saves on every step
|
|
||||||
cfg.save_steps = save_steps
|
|
||||||
if cfg.evals_per_epoch:
|
|
||||||
eval_steps = 1.0 / (cfg.evals_per_epoch * cfg.num_epochs)
|
|
||||||
if eval_steps < 1.0: # prevent evals on every step
|
|
||||||
cfg.eval_steps = eval_steps
|
|
||||||
|
|
||||||
cfg.dataset_processes = cfg.dataset_processes or os.cpu_count()
|
cfg.dataset_processes = cfg.dataset_processes or os.cpu_count()
|
||||||
|
|
||||||
if not cfg.base_model_config:
|
if not cfg.base_model_config:
|
||||||
@@ -131,19 +124,6 @@ def normalize_config(cfg):
|
|||||||
or (cfg.model_type and "mistral" in cfg.model_type.lower())
|
or (cfg.model_type and "mistral" in cfg.model_type.lower())
|
||||||
)
|
)
|
||||||
|
|
||||||
cfg.is_qwen_derived_model = (
|
|
||||||
(
|
|
||||||
hasattr(model_config, "model_type")
|
|
||||||
and model_config.model_type
|
|
||||||
in [
|
|
||||||
"qwen",
|
|
||||||
]
|
|
||||||
)
|
|
||||||
or cfg.is_qwen_derived_model
|
|
||||||
or "qwen" in cfg.base_model.lower()
|
|
||||||
or (cfg.model_type and "qwen" in cfg.model_type.lower())
|
|
||||||
)
|
|
||||||
|
|
||||||
if isinstance(cfg.learning_rate, str):
|
if isinstance(cfg.learning_rate, str):
|
||||||
cfg.learning_rate = float(cfg.learning_rate)
|
cfg.learning_rate = float(cfg.learning_rate)
|
||||||
|
|
||||||
@@ -187,11 +167,7 @@ def validate_config(cfg):
             "batch_size is not recommended. Please use gradient_accumulation_steps instead.",
             "To calculate the equivalent gradient_accumulation_steps, divide batch_size / micro_batch_size / number of gpus.",
         )
-    if (
-        cfg.eval_batch_size
-        and cfg.micro_batch_size
-        and cfg.eval_batch_size != cfg.micro_batch_size
-    ):
+    if cfg.eval_batch_size != cfg.micro_batch_size:
         LOG.warning(
             "eval_batch_size != micro_batch_size. This can lead to VRAM instability."
         )
@@ -361,27 +337,6 @@ def validate_config(cfg):
                 cfg.datasets[idx].type = cfg.datasets[idx].type.replace(
                     "sharegpt_simple", "sharegpt"
                 )

-    if cfg.saves_per_epoch and cfg.save_steps:
-        raise ValueError(
-            "save_steps and saves_per_epoch are mutually exclusive and cannot be used together."
-        )
-    if cfg.saves_per_epoch and cfg.save_strategy and cfg.save_strategy != "steps":
-        raise ValueError(
-            "save_strategy must be empty or set to `steps` when used with saves_per_epoch."
-        )
-    if cfg.evals_per_epoch and cfg.eval_steps:
-        raise ValueError(
-            "eval_steps and evals_per_epoch are mutually exclusive and cannot be used together."
-        )
-    if (
-        cfg.evals_per_epoch
-        and cfg.evaluation_strategy
-        and cfg.evaluation_strategy != "steps"
-    ):
-        raise ValueError(
-            "evaluation_strategy must be empty or set to `steps` when used with evals_per_epoch."
-        )
     if cfg.save_strategy and cfg.save_steps and cfg.save_strategy != "steps":
         raise ValueError(
             "save_strategy and save_steps mismatch. Please set save_strategy to 'steps' or remove save_steps."
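
Note: the checks removed above exist because saves_per_epoch and evals_per_epoch are converted into fractional save_steps/eval_steps by normalize_config, so mixing the two styles is ambiguous. A short sketch of the arithmetic under example values (illustrative numbers only):

# saves_per_epoch: 4 over num_epochs: 1 becomes a fractional save_steps of 0.25,
# i.e. a checkpoint every quarter of an epoch; that is why an explicit save_steps
# alongside it is rejected by the '-' side of the hunk above.
saves_per_epoch, num_epochs = 4, 1
save_steps = 1.0 / (saves_per_epoch * num_epochs)
assert save_steps == 0.25
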
@@ -416,52 +371,6 @@ def validate_config(cfg):
                 "If you want to full finetune, please turn off load_in_8bit and load_in_4bit."
             )

-    if cfg.rope_scaling:
-        LOG.warning("`rope_scaling` should now be be a key under `model_config`")
-
-    if cfg.warmup_steps and cfg.warmup_ratio:
-        raise ValueError("warmup_steps and warmup_ratio are mutually exclusive")
-
-    if cfg.is_qwen_derived_model and cfg.gradient_checkpointing:
-        LOG.warning(
-            "Gradient checkpointing is broken for Qwen models for transformers>=4.35.0, except main branch."
-        )
-
-    if cfg.wandb_run_id and not cfg.wandb_name:
-        cfg.wandb_name = cfg.wandb_run_id
-
-        LOG.warning(
-            "wandb_run_id sets the ID of the run. If you would like to set the name, please use wandb_name instead."
-        )
-
-    if cfg.noisy_embedding_alpha is not None:
-        # Deprecated, use neftune_noise_alpha
-        LOG.warning("noisy_embedding_alpha is deprecated, use neftune_noise_alpha")
-        if cfg.neftune_noise_alpha is None:
-            cfg.neftune_noise_alpha = cfg.noisy_embedding_alpha
-        else:
-            # User is providing both; bail and have them sort out their settings
-            raise ValueError(
-                "noisy_embedding_alpha is deprecated, use neftune_noise_alpha; both are set, please remove the deprecated noisy_embedding_alpha setting"
-            )
-
-    if cfg.neftune_noise_alpha is not None and cfg.neftune_noise_alpha <= 0.0:
-        raise ValueError("neftune_noise_alpha must be > 0.0")
-
-    if (
-        cfg.adapter
-        and cfg.tokens
-        and (
-            not cfg.lora_modules_to_save
-            or not all(
-                x in cfg.lora_modules_to_save for x in ["embed_tokens", "lm_head"]
-            )
-        )
-    ):
-        raise ValueError(
-            "lora_modules_to_save not properly set yet adding new tokens. Please add `embed_tokens` and `lm_head` to `lora_modules_to_save`."
-        )
-
     # TODO
     # MPT 7b
     # https://github.com/facebookresearch/bitsandbytes/issues/25
@@ -3,7 +3,7 @@ import functools
 import hashlib
 import logging
 from pathlib import Path
-from typing import Dict, List, Tuple, Union
+from typing import Any, Dict, List, Tuple, Union

 import torch
 from datasets import (
@@ -34,7 +34,6 @@ from axolotl.prompters import (
     JeopardyPrompter,
     MultipleChoiceConcisePrompter,
     MultipleChoiceExplainPrompter,
-    Prompter,
     ReflectAlpacaPrompter,
     SummarizeTLDRPrompter,
     UnsupportedPrompter,
@@ -79,14 +78,6 @@ def prepare_dataset(cfg, tokenizer):
         train_dataset, eval_dataset = process_datasets_for_packing(
             cfg, train_dataset, eval_dataset, tokenizer
         )

-    if eval_dataset and cfg.sample_packing and cfg.eval_sample_packing is not False:
-        total_eval_steps = calculate_total_num_steps(cfg, eval_dataset, update=False)
-        if total_eval_steps == 0:
-            raise ValueError(
-                "eval dataset split is too small for sample_packing. You should set `eval_sample_packing: False`. "
-            )
-
     if cfg.max_steps:
         total_num_steps = min(
             calculate_total_num_steps(cfg, train_dataset), cfg.max_steps
@@ -99,7 +90,7 @@ def prepare_dataset(cfg, tokenizer):

 def load_tokenized_prepared_datasets(
     tokenizer, cfg, default_dataset_prepared_path
-) -> Tuple[DatasetDict, List[Prompter]]:
+) -> DatasetDict:
     tokenizer_name = tokenizer.__class__.__name__
     ds_hash = str(
         md5(
@@ -107,12 +98,7 @@ def load_tokenized_prepared_datasets(
             str(cfg.sequence_len)
             + "@"
             + "|".join(
-                sorted(
-                    [
-                        f"{d.path}:{d.type}:{d.shards}:{d.conversation}"
-                        for d in cfg.datasets
-                    ]
-                )
+                sorted([f"{d.path}:{d.type}:{d.shards}" for d in cfg.datasets])
             )
             + "|"
             + tokenizer_name
@@ -178,66 +164,6 @@ def load_tokenized_prepared_datasets(
             except (FileNotFoundError, ConnectionError):
                 pass

-            ds_from_cloud = False
-            storage_options = {}
-            remote_file_system = None
-            if config_dataset.path.startswith("s3://"):
-                try:
-                    import aiobotocore.session  # type: ignore
-                    import s3fs  # type: ignore
-                except ImportError as exc:
-                    raise ImportError(
-                        "s3:// paths require aiobotocore and s3fs to be installed"
-                    ) from exc
-
-                # Takes credentials from ~/.aws/credentials for default profile
-                s3_session = aiobotocore.session.AioSession(profile="default")
-                storage_options = {"session": s3_session}
-                remote_file_system = s3fs.S3FileSystem(**storage_options)
-            elif config_dataset.path.startswith(
-                "gs://"
-            ) or config_dataset.path.startswith("gcs://"):
-                try:
-                    import gcsfs  # type: ignore
-                except ImportError as exc:
-                    raise ImportError(
-                        "gs:// or gcs:// paths require gcsfs to be installed"
-                    ) from exc
-
-                # gcsfs will use default credentials from the environment else anon
-                # https://gcsfs.readthedocs.io/en/latest/#credentials
-                storage_options = {"token": None}
-                remote_file_system = gcsfs.GCSFileSystem(**storage_options)
-            # TODO: Figure out how to get auth creds passed
-            # elif config_dataset.path.startswith("adl://") or config_dataset.path.startswith("abfs://"):
-            #     try:
-            #         import adlfs
-            #     except ImportError as exc:
-            #         raise ImportError(
-            #             "adl:// or abfs:// paths require adlfs to be installed"
-            #         ) from exc
-
-            #     # Gen 1
-            #     storage_options = {
-            #         "tenant_id": TENANT_ID,
-            #         "client_id": CLIENT_ID,
-            #         "client_secret": CLIENT_SECRET,
-            #     }
-            #     # Gen 2
-            #     storage_options = {
-            #         "account_name": ACCOUNT_NAME,
-            #         "account_key": ACCOUNT_KEY,
-            #     }
-
-            #     remote_file_system = adlfs.AzureBlobFileSystem(**storage_options)
-            try:
-                if remote_file_system and remote_file_system.exists(
-                    config_dataset.path
-                ):
-                    ds_from_cloud = True
-            except (FileNotFoundError, ConnectionError):
-                pass
-
             # prefer local dataset, even if hub exists
             local_path = Path(config_dataset.path)
             if local_path.exists():
@@ -251,8 +177,17 @@ def load_tokenized_prepared_datasets(
                     split=None,
                 )
             elif local_path.is_file():
-                ds_type = get_ds_type(config_dataset)
+                ds_type = "json"
+                if config_dataset.ds_type:
+                    ds_type = config_dataset.ds_type
+                elif ".parquet" in config_dataset.path:
+                    ds_type = "parquet"
+                elif ".arrow" in config_dataset.path:
+                    ds_type = "arrow"
+                elif ".csv" in config_dataset.path:
+                    ds_type = "csv"
+                elif ".txt" in config_dataset.path:
+                    ds_type = "text"
                 ds = load_dataset(
                     ds_type,
                     name=config_dataset.name,
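
Note: the inlined extension fallback above can be exercised on its own; a hedged sketch (the helper name infer_ds_type is illustrative, the mapping mirrors the hunk):

def infer_ds_type(path, ds_type=None):
    # explicit ds_type wins, then the file extension, defaulting to json
    if ds_type:
        return ds_type
    for ext, kind in ((".parquet", "parquet"), (".arrow", "arrow"), (".csv", "csv"), (".txt", "text")):
        if ext in path:
            return kind
    return "json"

assert infer_ds_type("data/train.parquet") == "parquet"
assert infer_ds_type("data/train.jsonl") == "json"
assert infer_ds_type("data/train.csv", ds_type="json") == "json"
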
@@ -272,22 +207,6 @@ def load_tokenized_prepared_datasets(
                     data_files=config_dataset.data_files,
                     token=use_auth_token,
                 )
-            elif ds_from_cloud and remote_file_system:
-                if remote_file_system.isdir(config_dataset.path):
-                    ds = load_from_disk(
-                        config_dataset.path,
-                        storage_options=storage_options,
-                    )
-                elif remote_file_system.isfile(config_dataset.path):
-                    ds_type = get_ds_type(config_dataset)
-                    ds = load_dataset(
-                        ds_type,
-                        name=config_dataset.name,
-                        data_files=config_dataset.path,
-                        streaming=False,
-                        split=None,
-                        storage_options=storage_options,
-                    )
             else:
                 if isinstance(config_dataset.data_files, str):
                     fp = hf_hub_download(
@@ -379,29 +298,11 @@ def load_tokenized_prepared_datasets(
     return dataset, prompters


-def get_ds_type(config_dataset: DictDefault):
-    """
-    Get the dataset type from the path if it's not specified
-    """
-    ds_type = "json"
-    if config_dataset.ds_type:
-        ds_type = config_dataset.ds_type
-    elif ".parquet" in config_dataset.path:
-        ds_type = "parquet"
-    elif ".arrow" in config_dataset.path:
-        ds_type = "arrow"
-    elif ".csv" in config_dataset.path:
-        ds_type = "csv"
-    elif ".txt" in config_dataset.path:
-        ds_type = "text"
-    return ds_type
-
-
 def load_prepare_datasets(
     tokenizer: PreTrainedTokenizerBase,
     cfg,
     default_dataset_prepared_path,
-) -> Tuple[Dataset, Dataset, List[Prompter]]:
+) -> Tuple[Dataset, Dataset, List[Any]]:
     max_packed_sequence_len = (
         cfg.max_packed_sequence_len if cfg.max_packed_sequence_len else cfg.sequence_len
     )
@@ -410,7 +311,7 @@ def load_prepare_datasets(
     )  # make sure we don't accidentally set it larger than sequence_len

     tokenizer_name = tokenizer.__class__.__name__
-    prompters: List[Prompter] = []
+    prompters = []
     if cfg.max_packed_sequence_len is not None:
         # see if we can go ahead and load the stacked dataset
         seed = f"@{str(cfg.seed)}" if cfg.seed else ""
@@ -544,13 +445,14 @@ def load_prepare_datasets(
             train_fingerprint = md5(to_hash_train)
             test_fingerprint = md5(to_hash_test)

-            dataset = dataset.train_test_split(
-                test_size=cfg.val_set_size,
-                shuffle=False,
-                seed=cfg.seed or 42,
-                train_new_fingerprint=train_fingerprint,
-                test_new_fingerprint=test_fingerprint,
-            )
+            with zero_first(is_main_process()):
+                dataset = dataset.train_test_split(
+                    test_size=cfg.val_set_size,
+                    shuffle=False,
+                    seed=cfg.seed or 42,
+                    train_new_fingerprint=train_fingerprint,
+                    test_new_fingerprint=test_fingerprint,
+                )

             train_dataset = dataset["train"]
             eval_dataset = dataset["test"]
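
Note: zero_first(is_main_process()) lets rank 0 run the block first and populate the datasets cache while the other ranks wait at a barrier, so every rank ends up loading the identical cached split; the explicit fingerprints keep the cache key stable across ranks. A rough sketch of the calling pattern, assuming the helpers live in axolotl.utils.distributed (values are illustrative):

from datasets import Dataset
from axolotl.utils.distributed import is_main_process, zero_first

dataset = Dataset.from_dict({"input_ids": [[1], [2], [3], [4]]})
with zero_first(is_main_process()):
    # rank 0 computes and caches the split first; other ranks then re-enter and
    # read the same cached result instead of recomputing it
    split = dataset.train_test_split(test_size=0.25, shuffle=False, seed=42)
print(len(split["train"]), len(split["test"]))
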
342  src/axolotl/utils/dataloader.py  Normal file
@@ -0,0 +1,342 @@
+# pylint: skip-file
+import hashlib
+import itertools
+import logging
+import math
+import time
+from queue import Queue
+from threading import Thread
+from typing import Any, Callable, List, Union
+
+import numba
+import numpy as np
+from torch.utils.data import DistributedSampler, Sampler
+
+LOG = logging.getLogger("axolotl.utils.dataloader")
+
+
+@numba.njit
+def ffd_check(a: np.ndarray, c: int, n: int):
+    # First-fit-decreasing bin packing
+    # Check if a[] could fit in n bins with capacity c
+    # https://en.wikipedia.org/wiki/First-fit-decreasing_bin_packing
+
+    a = np.sort(a)[::-1]
+    bins = np.full((n,), c, dtype=a.dtype)
+    for size in a:
+        not_found = True
+        for idx in range(n):
+            if bins[idx] >= size:
+                bins[idx] -= size
+                not_found = False
+                break
+
+        if not_found:
+            return False
+
+    return True
+
+
+@numba.njit
+def ffd_with_result(a: np.ndarray, c: int, start_index: int):
+    # First-fit-decreasing bin packing (with result return)
+
+    indices = np.argsort(a)[::-1]
+    a = a[indices]
+
+    bins: List[Any] = []
+    bins_result: List[Any] = []
+    for a_id, size in enumerate(a):
+        add_new = True
+        for idx in range(len(bins)):
+            if bins[idx] >= size:
+                bins[idx] -= size
+                bins_result[idx].append(indices[a_id] + start_index)
+                add_new = False
+                break
+
+        if add_new:
+            bins.append(c - size)
+            bins_result.append([indices[a_id] + start_index])
+
+    return bins_result, len(a)
+
+
+@numba.njit
+def allocate(
+    lengths: np.ndarray, lengths_cumsum: np.ndarray, rank: int, c: int, n: int
+):
+    """
+    :param lengths: array of lengths of each sample
+    :param lengths_cumsum: cumulative sum of consecutive lengths
+    :param rank: rank for this process
+    :param c: length of tokens per batch
+    :param n: number of ranks
+    :return:
+    """
+    # Dynamic batch allocator, similar to Multifit
+    # https://en.wikipedia.org/wiki/Multifit_algorithm
+    # ~99.5% efficiency on OpenChat training set (12 * 2048 ctx len)
+
+    s = 0
+    start_index = 0
+    result = []
+    result_totseqs = []
+
+    while True:
+        # binary search [left, right)
+        left = 1
+        right = 1 + np.searchsorted(lengths_cumsum[start_index:], s + c * n, "right")
+
+        while right - left > 1:
+            mid = (left + right) // 2
+            if ffd_check(lengths[start_index : start_index + mid], c, n):
+                left = mid
+            else:
+                right = mid
+
+        # use length left
+        batch, tot_seqs = ffd_with_result(
+            lengths[start_index : start_index + left], c, start_index
+        )
+        if len(batch) < n:
+            break
+
+        start_index += left
+        s = lengths_cumsum[start_index - 1]
+
+        # add local rank
+        result.append(batch[rank])
+        # add total seqs for all ranks
+        result_totseqs.append(tot_seqs)
+        # yield batch[rank], tot_seqs, s, len(result) * c * n
+    return result, result_totseqs, s, len(result) * c * n
+
+
+def chunk(iterable, n):
+    """
+    Chunk data into tuples of length n
+    """
+    # batched('ABCDEFG', 3) --> ABC DEF G
+    if n < 1:
+        raise ValueError("n must be at least one")
+    it = iter(iterable)
+    while batch := tuple(itertools.islice(it, n)):
+        yield batch
+
+
+def hash_indices(lst: List[int]) -> str:
+    # Convert the list of integers to a string representation
+    concatenated = ",".join(map(str, lst))
+
+    # Generate the hash
+    sha256 = hashlib.sha256()
+    sha256.update(concatenated.encode())
+
+    return sha256.hexdigest()
+
+
+class MultipackDistributedDataloader:
+    """Unpadded data loading using Multipack.
+    Adapted from https://github.com/imoneoi/openchat/blob/v3_fix_mle_loss/ochat/training_deepspeed/multipack_dataloader.py
+    Approximate (at most ~1.22x) the optimal solution of the identical-machines scheduling problem, which is NP-hard.
+    """
+
+    def __init__(
+        self,
+        dataset: Any,
+        collate_fn: Callable,
+        seq_max_length: int = 2048,
+        batch_size: int = 1,
+        sampler: Union[Sampler, DistributedSampler] = None,
+        packing_efficiency_estimate: float = 1.0,
+        sample_packing_seq_len_multiplier: int = 1,
+        device_count: int = 1,
+        prefetch_max: int = 1000,
+        num_epochs: int = 1,
+    ):
+        # Dataset
+        self.dataset = dataset
+        self.lengths = (
+            dataset.data.column("position_ids")
+            .to_pandas()
+            .apply(lambda x: x[-1] + 1)
+            .values
+        )
+        assert isinstance(self.lengths, np.ndarray)
+        assert batch_size % sample_packing_seq_len_multiplier == 0
+        assert batch_size >= sample_packing_seq_len_multiplier
+        self.sampler = sampler
+        self.batch_size = batch_size
+        self.sample_packing_seq_len_multiplier = sample_packing_seq_len_multiplier
+        self.seq_max_length = seq_max_length
+        self.batch_max_length = batch_size * seq_max_length
+        self.collate_fn = collate_fn
+        self.num_epochs = num_epochs
+
+        self.num_replicas = 1
+        self.rank = 0
+
+        # statistics
+        self.eff_total_used = 0
+        self.eff_total_slots = 0
+        self.packing_efficiency_estimate = packing_efficiency_estimate or 1.0
+        self.device_count = device_count
+
+        # maxsize is maximum number of samples in queue
+        self.prefetch_max = prefetch_max
+        self.queue: Queue = Queue(maxsize=prefetch_max)
+        self.thread = None
+
+    def _worker(self):
+        LOG.info(
+            f"[WORKER] Epochs: {self.num_epochs}, Samples: {self.len_w_stats()*self.batch_size}"
+        )
+        for epoch in range(self.num_epochs):
+            for sample in self._internal_batch_generator():
+                while True:
+                    if self.queue.full():
+                        time.sleep(1)
+                    else:
+                        break
+                self.queue.put(sample)
+
+        # stop the queue when epoch is done
+        self.queue.put(None)
+
+    def __iter__(self):
+        if hasattr(self.sampler, "set_epoch"):
+            new_epoch = self.sampler.epoch + 1
+            self.sampler.set_epoch(new_epoch)
+            LOG.info(f"calling sampler.set_epoch({new_epoch})")
+
+        if self.thread is None:
+            self.thread = Thread(target=self._worker, daemon=True)
+            self.thread.start()
+
+        while True:
+            item = self.queue.get()
+
+            if item is None:
+                break
+            yield item
+
+    def generate_batches(self, set_stats=False):
+        LOG.info("generating packed batches")
+        if self.sampler:
+            indices = [idx for idx in self.sampler]
+        else:
+            indices = range(0, len(self.dataset))
+
+        LOG.info(hash_indices(indices))
+        lengths = self.lengths[indices]
+        lengths_cumsum = np.cumsum(lengths)
+
+        batches, totseqs, total_used, total_slots = allocate(
+            lengths=lengths,
+            lengths_cumsum=lengths_cumsum,
+            rank=self.rank,
+            # c=self.batch_max_length,
+            c=self.seq_max_length * self.sample_packing_seq_len_multiplier,
+            n=self.num_replicas,
+        )
+
+        batches = [[indices[b_idx] for b_idx in batch] for batch in batches]
+
+        # statistics
+        if set_stats:
+            self.eff_total_used += total_used
+            self.eff_total_slots += total_slots
+
+        return batches, totseqs
+
+    def _internal_batch_generator(self):
+        all_batches, _ = self.generate_batches(set_stats=True)
+        features = self.dataset.features.keys()
+        len_remaining = self._len_est()
+        for batches in chunk(
+            all_batches, self.batch_size // self.sample_packing_seq_len_multiplier
+        ):
+            chunked_data = []
+            attn_mask_cum_idx = 0
+            for batch in batches:
+                concatenated = {}
+                batched_data = [self.dataset[batch_idx] for batch_idx in batch]
+                for feature in features:
+                    if feature == "length":
+                        continue
+                    if feature == "attention_mask":
+                        arrays = [
+                            (attn_mask_cum_idx + idx + 1) * np.array(item[feature])
+                            for idx, item in enumerate(batched_data)
+                            if feature in item
+                        ]
+                        attn_mask_cum_idx += len(batched_data)
+                        concatenated[feature] = np.concatenate(arrays)
+                    else:
+                        arrays = [
+                            np.array(item[feature])
+                            for item in batched_data
+                            if feature in item
+                        ]
+                        concatenated[feature] = np.concatenate(arrays)
+                chunked_data.append(concatenated)
+            yield self.collate_fn(chunked_data)
+            len_remaining -= 1
+            if not len_remaining:
+                return
+        # yield a no-op for cases where we don't have any data left to pack
+        for i in range(0, len_remaining):
+            yield self.collate_fn(
+                [
+                    {
+                        "input_ids": [0],
+                        "labels": [-100],
+                        "attention_mask": [True],
+                        "position_ids": [0],
+                    }
+                ]
+            )
+
+    def _len_est(self):
+        lengths_sum = np.sum(self.lengths)
+        lengths_sum_per_device = lengths_sum // self.device_count
+        LOG.info(
+            f"packing_efficiency_estimate: {self.packing_efficiency_estimate} "
+            f"total_num_tokens per device: {lengths_sum_per_device}"
+        )
+
+        # shave off 1% + 1 for dealing with variance in packing from random sampler to sampler
+        return (
+            math.floor(
+                0.99
+                * lengths_sum_per_device
+                / self.packing_efficiency_estimate
+                // self.seq_max_length
+                // self.batch_size
+            )
+            - 1
+        )
+
+    def __len__(self):
+        # this doesn't return the actual length b/c with distributed samplers, not all dataloaders get
+        # the same share of total tokens
+        # if not self.eff_total_used:
+        #     batches, _ = self.generate_batches(set_stats=True)
+        # LOG.info(
+        #     f"packing_efficiency_estimate: {self.packing_efficiency_estimate} "
+        #     f"actual packing efficiency: {self.efficiency()}"
+        # )
+        return max(1, self._len_est())
+
+    def len_w_stats(self):
+        if not self.eff_total_used:
+            batches, _ = self.generate_batches(set_stats=True)
+        LOG.info(
+            f"packing_efficiency_estimate: {self.packing_efficiency_estimate} "
+            f"actual packing efficiency: {self.efficiency()}"
+        )
+        return max(1, self._len_est())
+
+    def efficiency(self):
+        return self.eff_total_used / self.eff_total_slots
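
Note: the heart of the new file is the FFD/Multifit pair: ffd_check asks whether a set of sample lengths fits into n bins of capacity c, and allocate binary-searches the longest prefix of samples that still packs into one bin per rank. A small self-contained sketch with synthetic lengths (not taken from the repo's tests):

import numpy as np

from axolotl.utils.dataloader import allocate, ffd_check

lengths = np.array([512, 1024, 256, 2048, 768, 384, 896, 1536], dtype=np.int64)

# can all eight samples be packed into 4 bins of 2048 tokens each?
print(ffd_check(lengths.copy(), c=2048, n=4))

# pack greedily for a single rank with a 2048-token budget per packed sequence
batches, totseqs, used, slots = allocate(
    lengths=lengths,
    lengths_cumsum=np.cumsum(lengths),
    rank=0,
    c=2048,
    n=1,
)
print(batches)       # groups of sample indices per packed sequence
print(used / slots)  # achieved packing efficiency (~0.9 for these lengths)
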
@@ -1,38 +0,0 @@
-"""
-module to freeze/unfreeze parameters by name
-"""
-import logging
-import re
-
-from axolotl.utils.distributed import is_main_process
-
-LOG = logging.getLogger("axolotl.utils.freeze")
-
-
-def freeze_parameters_except(model, regex_patterns):
-    """
-    Freezes all layers of the given model except for the layers that match given regex patterns.
-    Periods in the patterns are treated as literal periods, not as wildcard characters.
-
-    Parameters:
-    - model (nn.Module): The PyTorch model to be modified.
-    - regex_patterns (list of str): List of regex patterns to match layer names to keep unfrozen.
-
-    Returns:
-    None; the model is modified in place.
-    """
-    # Escape periods and compile the regex patterns
-    compiled_patterns = [
-        re.compile(pattern.replace(".", "\\.")) for pattern in regex_patterns
-    ]
-
-    # First, freeze all parameters in the model
-    for param in model.parameters():
-        param.requires_grad = False
-
-    # Unfreeze layers that match the regex patterns
-    for name, param in model.named_parameters():
-        if any(pattern.match(name) for pattern in compiled_patterns):
-            if is_main_process():
-                LOG.debug(f"unfreezing {name}")
-            param.requires_grad = True
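
Note: for reference, the helper deleted above was used roughly like the sketch below; the tiny nn.Sequential stands in for a real PreTrainedModel and the pattern string is illustrative:

import torch.nn as nn

from axolotl.utils.freeze import freeze_parameters_except  # module removed by this hunk

model = nn.Sequential()
model.add_module("lm_head", nn.Linear(16, 16))
model.add_module("other", nn.Linear(16, 16))

# freeze everything, then unfreeze parameters whose names match the (literal-dot) patterns
freeze_parameters_except(model, ["lm_head"])
print([name for name, param in model.named_parameters() if param.requires_grad])
# -> ['lm_head.weight', 'lm_head.bias']
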
@@ -4,7 +4,6 @@ import math
 import os
 from typing import Optional, Tuple  # noqa: F401

-import addict
 import bitsandbytes as bnb
 import torch
 import transformers
@@ -18,65 +17,24 @@ from transformers import (  # noqa: F401
     AutoTokenizer,
     BitsAndBytesConfig,
     GPTQConfig,
+    LlamaConfig,
     PreTrainedModel,
     PreTrainedTokenizerBase,
 )
-from transformers.deepspeed import is_deepspeed_zero3_enabled

-from axolotl.models.mamba import fix_mamba_attn_for_loss
 from axolotl.prompt_tokenizers import LLAMA_DEFAULT_EOS_TOKEN
 from axolotl.utils.bench import log_gpu_memory_usage
-from axolotl.utils.chat_templates import chat_templates
 from axolotl.utils.dict import DictDefault

 LOG = logging.getLogger("axolotl")


-def check_model_config(cfg: DictDefault, model_config: AutoConfig):
-    quant_config_exists = hasattr(model_config, "quantization_config")
-    quant_config_method_is_gptq = (
-        quant_config_exists
-        and "quant_method" in model_config.quantization_config
-        and model_config.quantization_config["quant_method"] == "gptq"
-    )
-
-    if cfg.gptq and not quant_config_method_is_gptq:
-        raise ValueError(
-            "model_config.quantization_config is not set or quant_method is not set to gptq. "
-            "Please make sure to point to a GPTQ model."
-        )
-
-    if not cfg.gptq and quant_config_exists:
-        raise ValueError(
-            "model_config.quantization_config is set but `gptq` flag is not. "
-            "Please use the `gptq` flag to train quantized model or point to a non-quantized model."
-        )
-
-
 def load_model_config(cfg):
     model_config_name = cfg.base_model_config or cfg.base_model
     trust_remote_code = cfg.trust_remote_code is True
-    try:
-        model_config = AutoConfig.from_pretrained(
-            model_config_name, trust_remote_code=trust_remote_code
-        )
-    except ValueError as err:
-        if "mamba" in model_config_name:
-            return addict.Dict(
-                {
-                    "model_type": "mamba",
-                }
-            )
-        raise err
-
-    if cfg.model_config:
-        for key, val in cfg.model_config.items():
-            setattr(model_config, key, val)
-
-    check_model_config(cfg, model_config)
-
-    return model_config
+    return AutoConfig.from_pretrained(
+        model_config_name, trust_remote_code=trust_remote_code
+    )


 def load_tokenizer(cfg):
@@ -93,7 +51,7 @@ def load_tokenizer(cfg):
     if cfg.tokenizer_type:
         tokenizer_cls = getattr(transformers, cfg.tokenizer_type)

-    tokenizer_config = cfg.tokenizer_config or cfg.base_model_config or cfg.base_model
+    tokenizer_config = cfg.tokenizer_config or cfg.base_model_config
     tokenizer = tokenizer_cls.from_pretrained(
         tokenizer_config,
         trust_remote_code=cfg.trust_remote_code or False,
@@ -107,7 +65,6 @@ def load_tokenizer(cfg):
             "LlamaTokenizer",
             "LlamaTokenizerFast",
             "CodeLlamaTokenizer",
-            "CodeLlamaTokenizerFast",
         ]
         and hasattr(tokenizer, "pad_token")
         and not tokenizer.pad_token
@@ -123,57 +80,11 @@ def load_tokenizer(cfg):
     if cfg.is_mistral_derived_model and cfg.flash_attention and not cfg.sample_packing:
         tokenizer.padding_side = "left"

-    # Qwen base only has single token, so we need to set the special tokens
-    if cfg.is_qwen_derived_model:
-        token_ids = ["bos_token_id", "eos_token_id", "pad_token_id", "unk_token_id"]
-        for attr_name in token_ids:
-            if getattr(tokenizer, attr_name) is None:
-                setattr(tokenizer, attr_name, tokenizer.eod_id)
-
-        token_names = ["bos_token", "eos_token", "pad_token", "unk_token"]
-        for attr_name in token_names:
-            if getattr(tokenizer, attr_name) is None:
-                setattr(tokenizer, attr_name, "<|endoftext|>")
-
     if cfg.special_tokens:
         for k, val in cfg.special_tokens.items():
-            # check if new special token is not already in tokenizer and
-            # is adapter training to make sure lora_modules_to_save is set
-            if (
-                (getattr(tokenizer, k) is None or getattr(tokenizer, k) != val)
-                and cfg.adapter
-                and (
-                    not cfg.lora_modules_to_save
-                    or not all(
-                        x in cfg.lora_modules_to_save
-                        for x in ["embed_tokens", "lm_head"]
-                    )
-                )
-            ):
-                raise ValueError(
-                    "Please set lora_modules_to_save to ['embed_tokens', 'lm_head'] when using an adapter and changing the special tokens."
-                )
-
             tokenizer.add_special_tokens(
                 {k: AddedToken(val, rstrip=False, lstrip=False, normalized=False)}
             )

-        # If we add bos_token and eos_token, we need to update the post processor to
-        # handle them correctly.
-        # https://github.com/huggingface/transformers/pull/24132
-        bos_or_eos_in_special_tokens = (
-            "bos_token" in cfg.special_tokens and "eos_token" in cfg.special_tokens
-        )
-        if (
-            tokenizer.__class__.__name__
-            in (
-                "LlamaTokenizerFast",
-                "CodeLlamaTokenizerFast",
-            )
-            and bos_or_eos_in_special_tokens
-        ):
-            tokenizer.update_post_processor()
-
     if cfg.tokens:
         tokenizer.add_tokens(
             [
@@ -187,12 +98,6 @@ def load_tokenizer(cfg):
     LOG.debug(f"PAD: {tokenizer.pad_token_id} / {tokenizer.pad_token}")
     LOG.debug(f"UNK: {tokenizer.unk_token_id} / {tokenizer.unk_token}")

-    if cfg.chat_template:
-        tokenizer.chat_template = chat_templates(cfg.chat_template)
-    else:
-        LOG.info(
-            "No Chat template selected. Consider adding a chat template for easier inference."
-        )
     return tokenizer


@@ -205,6 +110,7 @@ def load_model(
     Load a model for a given configuration and tokenizer.
     """
     base_model = cfg.base_model
+    base_model_config = cfg.base_model_config
     model_type = cfg.model_type
     model_config = load_model_config(cfg)

@@ -254,6 +160,17 @@ def load_model(

             LOG.info("patching with sdp attention")
             hijack_llama_sdp_attention()
+    elif cfg.is_llama_derived_model and cfg.landmark_attention:
+        from axolotl.monkeypatch.llama_landmark_attn import (
+            MEM_TOKEN,
+            patch_llama_with_landmark_attn,
+        )
+
+        LOG.info("patching with landmark attention")
+        patch_llama_with_landmark_attn()
+
+        # Note: This might overwrite previous additional_special_tokens
+        tokenizer.add_special_tokens({"additional_special_tokens": [MEM_TOKEN]})

     if cfg.is_mistral_derived_model and cfg.flash_attention and cfg.sample_packing:
         from axolotl.monkeypatch.mistral_attn_hijack_flash import (
@@ -263,17 +180,13 @@ def load_model(
         LOG.info("patching with flash attention")
         replace_mistral_attn_with_flash_attn(packed=cfg.sample_packing)

-    if (
-        cfg.model_config_type == "mixtral"
-        and cfg.flash_attention
-        and cfg.sample_packing
-    ):
-        from axolotl.monkeypatch.mixtral import (
-            replace_mixtral_attn_with_multipack_flash_attn,
+    if cfg.is_llama_derived_model and cfg.xpos_rope:
+        from axolotl.monkeypatch.xpos_rope_llama_monkey_patch import (
+            replace_llama_rope_with_xpos_rope,
         )

-        LOG.info("patching with flash attention")
-        replace_mixtral_attn_with_multipack_flash_attn()
+        LOG.info("patching with xpos rope")
+        replace_llama_rope_with_xpos_rope()

     if (
         cfg.is_llama_derived_model
@@ -288,12 +201,8 @@ def load_model(
     model_kwargs = {}

     model_kwargs["device_map"] = cfg.device_map
-    model_kwargs["max_memory"] = cfg.max_memory
     model_kwargs["torch_dtype"] = cfg.torch_dtype

-    if is_deepspeed_zero3_enabled():
-        del model_kwargs["device_map"]
-
     if cfg.model_revision:
         model_kwargs["revision"] = cfg.model_revision
     if cfg.gptq:
@@ -308,50 +217,37 @@ def load_model(
             **model_config.quantization_config
         )
     if cfg.adapter == "qlora" and cfg.load_in_4bit:
-        bnb_config = {
-            "load_in_4bit": True,
-            "llm_int8_threshold": 6.0,
-            "llm_int8_has_fp16_weight": False,
-            "bnb_4bit_compute_dtype": cfg.torch_dtype,
-            "bnb_4bit_use_double_quant": True,
-            "bnb_4bit_quant_type": "nf4",
-        }
-
-        if cfg.bnb_config_kwargs:
-            bnb_config.update(cfg.bnb_config_kwargs)
-
         model_kwargs["quantization_config"] = BitsAndBytesConfig(
-            **bnb_config,
+            load_in_4bit=True,
+            llm_int8_threshold=6.0,
+            llm_int8_has_fp16_weight=False,
+            bnb_4bit_compute_dtype=cfg.torch_dtype,
+            bnb_4bit_use_double_quant=True,
+            bnb_4bit_quant_type="nf4",
         )
     # sample packing uses custom FA2 patch
-    if cfg.flash_attention:
-        if not cfg.sample_packing:
-            if (
-                cfg.is_llama_derived_model
-                or cfg.is_falcon_derived_model
-                or cfg.is_mistral_derived_model
-                or model_config.model_type == "mixtral"
-            ):
-                model_config._attn_implementation = (  # pylint: disable=protected-access
-                    "flash_attention_2"
-                )
-        else:
-            if model_config.model_type == "mixtral":
-                model_config._attn_implementation = (  # pylint: disable=protected-access
-                    "flash_attention_2"
-                )
-            else:
-                model_config._attn_implementation = (  # pylint: disable=protected-access
-                    "eager"
-                )
+    if cfg.flash_attention and not cfg.sample_packing:
+        if (
+            cfg.is_llama_derived_model
+            or cfg.is_falcon_derived_model
+            or cfg.is_mistral_derived_model
+        ):
+            model_kwargs["use_flash_attention_2"] = True

     try:
         if cfg.is_llama_derived_model and not cfg.trust_remote_code and not cfg.gptq:
             from transformers import LlamaForCausalLM

+            config_kwargs = {}
+            if cfg.rope_scaling:
+                config_kwargs["rope_scaling"] = cfg.rope_scaling
+            config = LlamaConfig.from_pretrained(
+                base_model_config,
+                **config_kwargs,
+            )
             model = LlamaForCausalLM.from_pretrained(
                 base_model,
-                config=model_config,
+                config=config,
                 load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
                 load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
                 **model_kwargs,
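
Note: the '-' side builds the 4-bit quantization config from a plain dict so that a cfg.bnb_config_kwargs mapping can override individual fields before BitsAndBytesConfig is constructed; the '+' side hard-codes the same defaults. A hedged sketch of the override path (the override value is just an example):

import torch

from transformers import BitsAndBytesConfig

bnb_config = {
    "load_in_4bit": True,
    "llm_int8_threshold": 6.0,
    "llm_int8_has_fp16_weight": False,
    "bnb_4bit_compute_dtype": torch.bfloat16,
    "bnb_4bit_use_double_quant": True,
    "bnb_4bit_quant_type": "nf4",
}
# anything the user puts under bnb_config_kwargs would be merged in here
bnb_config.update({"bnb_4bit_use_double_quant": False})
quantization_config = BitsAndBytesConfig(**bnb_config)
print(quantization_config.bnb_4bit_use_double_quant)  # False
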
@@ -396,99 +292,92 @@ def load_model(
             #     device=cfg.device,
             # )
             # model.train() # sets to train instead of eval mode
-        elif model_type == "PhiForCausalLM":
-            from axolotl.models.phi import PhiForCausalLM
+        elif model_type == "MixFormerSequentialForCausalLM":
+            from axolotl.models.phi import MixFormerSequentialForCausalLM

-            model = PhiForCausalLM.from_pretrained(
+            model = MixFormerSequentialForCausalLM.from_pretrained(
                 base_model,
                 load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
                 load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
                 **model_kwargs,
             )
-        elif model_type == "MambaLMHeadModel":
-            # FIXME this is janky at best and hacked together to make it work
-            MambaLMHeadModel = fix_mamba_attn_for_loss()  # pylint: disable=invalid-name
-
-            model_kwargs["dtype"] = model_kwargs["torch_dtype"]
-            model_kwargs["device"] = torch.cuda.current_device()
-            del model_kwargs["torch_dtype"]
-            del model_kwargs["device_map"]
-            del model_kwargs["max_memory"]
-
-            model = MambaLMHeadModel.from_pretrained(
-                base_model,
-                **model_kwargs,
-            )
         elif model_type and not cfg.trust_remote_code:
             if cfg.gptq:
                 model = AutoModelForCausalLM.from_pretrained(
                     base_model,
-                    config=model_config,
                     trust_remote_code=cfg.trust_remote_code or False,
                     **model_kwargs,
                 )
             else:
                 model = getattr(transformers, model_type).from_pretrained(
                     base_model,
-                    config=model_config,
                     load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
                     load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
                     trust_remote_code=cfg.trust_remote_code or False,
                     **model_kwargs,
                 )
         else:
+            config = AutoConfig.from_pretrained(
+                base_model,
+                trust_remote_code=cfg.trust_remote_code or False,
+            )
             # Shouldn't be a problem most of the time. will obviously error if the model doesn't support this
             # when training starts
             if (
-                hasattr(model_config, "max_seq_len")
-                and model_config.max_seq_len
-                and cfg.sequence_len > model_config.max_seq_len
+                hasattr(config, "max_seq_len")
+                and config.max_seq_len
+                and cfg.sequence_len > config.max_seq_len
             ):
-                model_config.max_seq_len = cfg.sequence_len
+                config.max_seq_len = cfg.sequence_len
                 LOG.warning(f"increasing context length to {cfg.sequence_len}")
             elif (
-                hasattr(model_config, "max_sequence_length")
-                and model_config.max_sequence_length
-                and cfg.sequence_len > model_config.max_sequence_length
+                hasattr(config, "max_sequence_length")
+                and config.max_sequence_length
+                and cfg.sequence_len > config.max_sequence_length
             ):
-                model_config.max_sequence_length = cfg.sequence_len
+                config.max_sequence_length = cfg.sequence_len
                 LOG.warning(f"increasing context length to {cfg.sequence_len}")
             if cfg.gptq:
                 model = AutoModelForCausalLM.from_pretrained(
                     base_model,
-                    config=model_config,
+                    config=config,
                     trust_remote_code=cfg.trust_remote_code or False,
                     **model_kwargs,
                 )
             else:
                 model = AutoModelForCausalLM.from_pretrained(
                     base_model,
-                    config=model_config,
+                    config=config,
                     load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
                     load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
                     trust_remote_code=cfg.trust_remote_code or False,
                     **model_kwargs,
                 )
     except Exception as err:  # pylint: disable=broad-exception-caught
+        LOG.error(
+            "Exception raised attempting to load model, retrying with AutoModelForCausalLM"
+        )
         LOG.exception(err)
-        raise err
+        model = AutoModelForCausalLM.from_pretrained(
+            base_model,
+            load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
+            load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
+            trust_remote_code=cfg.trust_remote_code or False,
+            **model_kwargs,
+        )

     embeddings_len = (
         math.ceil(len(tokenizer) / 32) * 32
         if cfg.resize_token_embeddings_to_32x
         else len(tokenizer)
     )
-    if (
-        hasattr(model, "get_input_embeddings")
-        and model.get_input_embeddings().num_embeddings < embeddings_len
-    ):
+    if model.get_input_embeddings().num_embeddings < embeddings_len:
         model.resize_token_embeddings(embeddings_len)
     else:
         model.tie_weights()

     if (
-        hasattr(model, "config")
-        and hasattr(model.config, "max_position_embeddings")
+        hasattr(model.config, "max_position_embeddings")
         and model.config.max_position_embeddings
         and cfg.sequence_len > model.config.max_position_embeddings
     ):
@@ -498,22 +387,20 @@ def load_model(
         model.config.max_position_embeddings = cfg.sequence_len

     if (
-        hasattr(model, "config")
-        and hasattr(model.config, "bos_token_id")
+        hasattr(model.config, "bos_token_id")
         and model.config.bos_token_id
         and model.config.bos_token_id != tokenizer.bos_token_id
     ):
         model.config.bos_token_id = tokenizer.bos_token_id

     if (
-        hasattr(model, "config")
-        and hasattr(model.config, "eos_token_id")
+        hasattr(model.config, "eos_token_id")
         and model.config.eos_token_id
         and model.config.eos_token_id != tokenizer.eos_token_id
     ):
         model.config.eos_token_id = tokenizer.eos_token_id

-    if hasattr(model, "device") and model.device.type == "cuda":
+    if model.device.type == "cuda":
         log_gpu_memory_usage(LOG, "after model load", model.device)

     # make sure these are fp32 per Ramesh et al. (2021)
@@ -528,22 +415,15 @@ def load_model(
                 module.to(torch.float32)

     needs_fa2_dtype = cfg.adapter or cfg.fsdp
-    skip_prepare_model_for_kbit_training = False
-
-    if cfg.model_config_type == "qwen" and cfg.adapter == "lora":
-        # Qwen doesn't play nicely with LoRA if this is enabled
-        skip_prepare_model_for_kbit_training = True
-
     if (cfg.adapter == "lora" and load_in_8bit) or (
         cfg.adapter == "qlora" and cfg.load_in_4bit
     ):
         LOG.info("converting PEFT model w/ prepare_model_for_kbit_training")
         if cfg.gradient_checkpointing:
             model.gradient_checkpointing_enable()
-        if not skip_prepare_model_for_kbit_training:
-            model = prepare_model_for_kbit_training(
-                model, use_gradient_checkpointing=cfg.gradient_checkpointing
-            )
+        model = prepare_model_for_kbit_training(
+            model, use_gradient_checkpointing=cfg.gradient_checkpointing
+        )
         needs_fa2_dtype = True

     # LlamaRMSNorm layers are in fp32 after kbit_training or full finetune, so we need to
@@ -572,8 +452,7 @@ def load_model(
         requires_grad.append(f"{name}: {param.requires_grad}")
     if len(requires_grad) == 0:
         LOG.warning("there are no parameters that require gradient updates")
-    if hasattr(model, "config"):
-        model.config.use_cache = False
+    model.config.use_cache = False

     if cfg.flash_optimum:
         model = BetterTransformer.transform(model)
@@ -181,16 +181,13 @@ class MultipackBatchSampler(BatchSampler):
         )

         # shave off 1% + 1 for dealing with variance in packing from random sampler to sampler
-        return max(
-            0,
-            (
-                world_size
-                * math.floor(
-                    0.99
-                    * lengths_sum_per_device
-                    / self.packing_efficiency_estimate
-                    // self.batch_max_len
-                )
-                - 1
-            ),
+        return (
+            world_size
+            * math.floor(
+                0.99
+                * lengths_sum_per_device
+                / self.packing_efficiency_estimate
+                // self.batch_max_len
+            )
+            - 1
         )
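
Note: both sides estimate the number of packed batches as tokens-per-device scaled by 0.99, divided by the packing-efficiency estimate and the per-batch token budget, minus one batch of slack; they differ only in the max(0, ...) clamp around the estimate. Worked numbers (illustrative values only):

import math

lengths_sum_per_device = 10_000_000      # example token count per device
packing_efficiency_estimate = 0.97
batch_max_len = 4 * 2048                 # micro_batch_size * sequence_len
world_size = 2

estimate = (
    world_size
    * math.floor(0.99 * lengths_sum_per_device / packing_efficiency_estimate // batch_max_len)
    - 1
)
print(estimate)  # 2489 packed batches; the max(0, ...) clamp only matters for tiny datasets
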
@@ -131,10 +131,8 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset, tokenizer):
         )

     # Phi doesn't want the attention_mask feature when training
-    if (
-        "CodeGenTokenizer" in tokenizer.__class__.__name__
-        or (cfg.is_mistral_derived_model and cfg.flash_attention)
-        or cfg.model_config_type == "mamba"
+    if "CodeGenTokenizer" in tokenizer.__class__.__name__ or (
+        cfg.is_mistral_derived_model and cfg.flash_attention
     ):
         train_dataset = train_dataset.remove_columns("attention_mask")
         if eval_dataset:
@@ -143,37 +141,32 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset, tokenizer):
     return train_dataset, eval_dataset


-def calculate_total_num_steps(cfg, train_dataset, update=True):
-    if not cfg.total_num_tokens:
-        total_num_tokens = np.sum(
-            train_dataset.data.column("input_ids")
-            .to_pandas()
-            .apply(lambda x: len(x))  # pylint: disable=unnecessary-lambda
-            .values
-        )
-        LOG.debug(f"total_num_tokens: {total_num_tokens}", main_process_only=True)
-        if update:
-            cfg.total_num_tokens = total_num_tokens
-
-    skip_estimates = cfg.model_config_type == "mamba"
-
-    if not skip_estimates and not cfg.total_supervised_tokens:
-        total_supervised_tokens = (
-            train_dataset.data.column("labels")
-            .to_pandas()
-            .apply(lambda x: np.sum(np.array(x) != -100))
-            .sum()
-        )
-        LOG.debug(
-            f"`total_supervised_tokens: {total_supervised_tokens}`",
-            main_process_only=True,
-        )
-        if update:
-            cfg.total_supervised_tokens = total_supervised_tokens
-
-    if not skip_estimates and cfg.sample_packing:
+def calculate_total_num_steps(cfg, train_dataset):
+    if cfg.sample_packing:
         # we have to drop anything longer then sequence len otherwise
         # flash attention with position ids fails
+        if not cfg.total_num_tokens:
+            total_num_tokens = np.sum(
+                train_dataset.data.column("input_ids")
+                .to_pandas()
+                .apply(lambda x: len(x))  # pylint: disable=unnecessary-lambda
+                .values
+            )
+            LOG.debug(f"total_num_tokens: {total_num_tokens}", main_process_only=True)
+            cfg.total_num_tokens = total_num_tokens
+
+        if not cfg.total_supervised_tokens:
+            total_supervised_tokens = (
+                train_dataset.data.column("labels")
+                .to_pandas()
+                .apply(lambda x: np.sum(np.array(x) != -100))
+                .sum()
+            )
+            LOG.debug(
+                f"`total_supervised_tokens: {total_supervised_tokens}`",
+                main_process_only=True,
+            )
+            cfg.total_supervised_tokens = total_supervised_tokens
+
         if cfg.sample_packing_eff_est:
             total_num_steps = (
@@ -238,8 +231,7 @@ def calculate_total_num_steps(cfg, train_dataset, update=True):
             sample_packing_eff_est = (
                 math.ceil(sample_packing_actual_eff_all * 100.0) / 100.0
             )
-            if update:
-                cfg.sample_packing_eff_est = sample_packing_eff_est
+            cfg.sample_packing_eff_est = sample_packing_eff_est
             LOG.debug(
                 f"sample_packing_eff_est: {cfg.sample_packing_eff_est}",
                 main_process_only=True,
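As a rough illustration of the bookkeeping in the hunks above (hypothetical toy data, not the real trainer path): total_num_tokens sums the length of every input_ids row, and total_supervised_tokens counts label positions that are not the ignore index -100.

    # Toy illustration of the two totals tracked above (made-up data).
    import numpy as np

    input_ids = [[1, 2, 3, 4], [5, 6, 7]]
    labels = [[-100, -100, 3, 4], [-100, 6, 7]]

    total_num_tokens = int(np.sum([len(row) for row in input_ids]))  # 7
    total_supervised_tokens = int(
        np.sum([np.sum(np.array(row) != -100) for row in labels])  # 4
    )
    print(total_num_tokens, total_supervised_tokens)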
@@ -271,15 +263,14 @@ def setup_fsdp_envs(cfg):
     ] = cfg.fsdp_config.fsdp_transformer_layer_cls_to_wrap


-def prepare_optim_env(cfg):
+def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer, total_num_steps):
     if cfg.fsdp:
         setup_fsdp_envs(cfg)
     elif cfg.deepspeed:
         os.environ["ACCELERATE_USE_DEEPSPEED"] = "true"
-        os.environ["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = cfg.deepspeed
-
-
-def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer, total_num_steps):
+    if cfg.fp8:
+        os.environ["ACCELERATE_MIXED_PRECISION"] = "fp8"
+
     trainer_builder = HFCausalTrainerBuilder(cfg, model, tokenizer)
     trainer_builder.train_dataset = train_dataset
     trainer_builder.eval_dataset = eval_dataset
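Both versions of this code only export environment variables that Accelerate later reads. A minimal stand-alone sketch of the same dispatch, assuming a simplified cfg namespace rather than axolotl's DictDefault:

    # Minimal sketch of the env-var dispatch shown above (simplified cfg).
    import os
    from types import SimpleNamespace

    cfg = SimpleNamespace(fsdp=None, deepspeed="deepspeed/zero2.json", fp8=False)

    if cfg.fsdp:
        pass  # setup_fsdp_envs(cfg) would export the FSDP-related variables
    elif cfg.deepspeed:
        os.environ["ACCELERATE_USE_DEEPSPEED"] = "true"
        os.environ["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = cfg.deepspeed
    if cfg.fp8:
        os.environ["ACCELERATE_MIXED_PRECISION"] = "fp8"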
@@ -2,20 +2,20 @@

 import os

-from axolotl.utils.dict import DictDefault
-
-
-def setup_wandb_env_vars(cfg: DictDefault):
-    for key in cfg.keys():
-        if key.startswith("wandb_"):
-            value = cfg.get(key, "")
-
-            if value and isinstance(value, str) and len(value) > 0:
-                os.environ[key.upper()] = value
-
-    # Enable wandb if project name is present
-    if cfg.wandb_project and len(cfg.wandb_project) > 0:
+
+def setup_wandb_env_vars(cfg):
+    if cfg.wandb_mode and cfg.wandb_mode == "offline":
+        os.environ["WANDB_MODE"] = cfg.wandb_mode
+    elif cfg.wandb_project and len(cfg.wandb_project) > 0:
+        os.environ["WANDB_PROJECT"] = cfg.wandb_project
         cfg.use_wandb = True
-        os.environ.pop("WANDB_DISABLED", None)  # Remove if present
+        if cfg.wandb_entity and len(cfg.wandb_entity) > 0:
+            os.environ["WANDB_ENTITY"] = cfg.wandb_entity
+        if cfg.wandb_watch and len(cfg.wandb_watch) > 0:
+            os.environ["WANDB_WATCH"] = cfg.wandb_watch
+        if cfg.wandb_log_model and len(cfg.wandb_log_model) > 0:
+            os.environ["WANDB_LOG_MODEL"] = cfg.wandb_log_model
+        if cfg.wandb_run_id and len(cfg.wandb_run_id) > 0:
+            os.environ["WANDB_RUN_ID"] = cfg.wandb_run_id
     else:
         os.environ["WANDB_DISABLED"] = "true"
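Both variants of setup_wandb_env_vars shown above simply translate wandb_* config keys into WANDB_* environment variables. A hedged usage sketch of the generic pattern, with made-up config values:

    # Hypothetical usage: any non-empty wandb_* string key becomes the
    # matching upper-cased WANDB_* environment variable.
    import os

    cfg = {"wandb_project": "my-project", "wandb_entity": "my-team", "wandb_watch": ""}

    for key, value in cfg.items():
        if key.startswith("wandb_") and isinstance(value, str) and value:
            os.environ[key.upper()] = value

    print(os.environ["WANDB_PROJECT"], os.environ["WANDB_ENTITY"])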
@@ -101,7 +101,6 @@ class TestLoraLlama(unittest.TestCase):
                 "learning_rate": 0.00001,
                 "optimizer": "adamw_torch",
                 "lr_scheduler": "cosine",
-                "bf16": True,
             }
         )
         normalize_config(cfg)
@@ -1,65 +0,0 @@
-"""
-E2E tests for lora llama
-"""
-
-import logging
-import os
-import unittest
-from pathlib import Path
-
-from axolotl.cli import load_datasets
-from axolotl.common.cli import TrainerCliArgs
-from axolotl.train import train
-from axolotl.utils.config import normalize_config
-from axolotl.utils.dict import DictDefault
-
-from .utils import with_temp_dir
-
-LOG = logging.getLogger("axolotl.tests.e2e")
-os.environ["WANDB_DISABLED"] = "true"
-
-
-class TestMistral(unittest.TestCase):
-    """
-    Test case for Llama models using LoRA
-    """
-
-    @with_temp_dir
-    def test_fft(self, temp_dir):
-        # pylint: disable=duplicate-code
-        cfg = DictDefault(
-            {
-                "base_model": "state-spaces/mamba-130m",
-                "model_type": "MambaLMHeadModel",
-                "tokenizer_type": "AutoTokenizer",
-                "tokenizer_config": "EleutherAI/gpt-neox-20b",
-                "flash_attention": False,
-                "sequence_len": 1024,
-                "load_in_8bit": False,
-                "val_set_size": 0.0,
-                "datasets": [
-                    {
-                        "path": "mhenrichsen/alpaca_2k_test",
-                        "type": "alpaca",
-                    },
-                ],
-                "gradient_checkpointing": False,
-                "num_epochs": 2,
-                "micro_batch_size": 2,
-                "gradient_accumulation_steps": 1,
-                "output_dir": temp_dir,
-                "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
-                "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": None,
-                "save_safetensors": False,
-            }
-        )
-        normalize_config(cfg)
-        cli_args = TrainerCliArgs()
-        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
-
-        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
-        assert (Path(temp_dir) / "pytorch_model.bin").exists()
@@ -31,7 +31,7 @@ class TestPhi(unittest.TestCase):
             {
                 "base_model": "microsoft/phi-1_5",
                 "trust_remote_code": True,
-                "model_type": "PhiForCausalLM",
+                "model_type": "MixFormerSequentialForCausalLM",
                 "tokenizer_type": "AutoTokenizer",
                 "sequence_len": 512,
                 "sample_packing": False,
@@ -76,7 +76,7 @@ class TestPhi(unittest.TestCase):
             {
                 "base_model": "microsoft/phi-1_5",
                 "trust_remote_code": True,
-                "model_type": "PhiForCausalLM",
+                "model_type": "MixFormerSequentialForCausalLM",
                 "tokenizer_type": "AutoTokenizer",
                 "sequence_len": 512,
                 "sample_packing": True,
@@ -1,95 +0,0 @@
-"""
-E2E tests for resuming training
-"""
-
-import logging
-import os
-import re
-import subprocess
-import unittest
-from pathlib import Path
-
-from transformers.utils import is_torch_bf16_gpu_available
-
-from axolotl.cli import load_datasets
-from axolotl.common.cli import TrainerCliArgs
-from axolotl.train import train
-from axolotl.utils.config import normalize_config
-from axolotl.utils.dict import DictDefault
-
-from .utils import most_recent_subdir, with_temp_dir
-
-LOG = logging.getLogger("axolotl.tests.e2e")
-os.environ["WANDB_DISABLED"] = "true"
-
-
-class TestResumeLlama(unittest.TestCase):
-    """
-    Test case for resuming training of llama models
-    """
-
-    @with_temp_dir
-    def test_resume_qlora(self, temp_dir):
-        # pylint: disable=duplicate-code
-        cfg = DictDefault(
-            {
-                "base_model": "JackFram/llama-68m",
-                "tokenizer_type": "LlamaTokenizer",
-                "sequence_len": 1024,
-                "sample_packing": True,
-                "flash_attention": True,
-                "load_in_4bit": True,
-                "adapter": "qlora",
-                "lora_r": 32,
-                "lora_alpha": 64,
-                "lora_dropout": 0.05,
-                "lora_target_linear": True,
-                "val_set_size": 0.1,
-                "special_tokens": {},
-                "datasets": [
-                    {
-                        "path": "vicgalle/alpaca-gpt4",
-                        "type": "alpaca",
-                    },
-                ],
-                "num_epochs": 2,
-                "micro_batch_size": 1,
-                "gradient_accumulation_steps": 1,
-                "output_dir": temp_dir,
-                "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
-                "lr_scheduler": "cosine",
-                "save_steps": 10,
-                "save_total_limit": 5,
-                "max_steps": 40,
-            }
-        )
-        if is_torch_bf16_gpu_available():
-            cfg.bf16 = True
-        else:
-            cfg.fp16 = True
-        normalize_config(cfg)
-        cli_args = TrainerCliArgs()
-        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
-
-        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
-
-        resume_cfg = cfg | DictDefault(
-            {
-                "resume_from_checkpoint": f"{temp_dir}/checkpoint-30/",
-            }
-        )
-        normalize_config(resume_cfg)
-        cli_args = TrainerCliArgs()
-
-        train(cfg=resume_cfg, cli_args=cli_args, dataset_meta=dataset_meta)
-        assert (Path(temp_dir) / "adapter_model.bin").exists()
-
-        tb_log_path_1 = most_recent_subdir(temp_dir + "/runs")
-        cmd = f"tensorboard --inspect --logdir {tb_log_path_1}"
-        res = subprocess.run(
-            cmd, shell=True, text=True, capture_output=True, check=True
-        )
-        pattern = r"first_step\s+(\d+)"
-        first_steps = int(re.findall(pattern, res.stdout)[0])
-        assert first_steps == 31
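The deleted resume test above hinges on resume_from_checkpoint pointing at an intermediate checkpoint directory. A minimal config sketch of the same idea; the paths and values here are illustrative only:

    # Illustrative only: a second training pass that resumes from a saved
    # checkpoint directory, mirroring the deleted e2e test above.
    base_cfg = {
        "output_dir": "./outputs/run1",
        "max_steps": 40,
        "save_steps": 10,
    }
    resume_cfg = {
        **base_cfg,
        "resume_from_checkpoint": "./outputs/run1/checkpoint-30/",
    }
    # A run with resume_cfg should continue from step 31, which is what the
    # deleted test asserted via TensorBoard's reported first_step.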
@@ -1,11 +1,10 @@
 """
 helper utils for tests
 """
-import os
 import shutil
 import tempfile
 from functools import wraps
-from pathlib import Path


 def with_temp_dir(test_func):
@@ -21,13 +20,3 @@ def with_temp_dir(test_func):
             shutil.rmtree(temp_dir)

     return wrapper
-
-
-def most_recent_subdir(path):
-    base_path = Path(path)
-    subdirectories = [d for d in base_path.iterdir() if d.is_dir()]
-    if not subdirectories:
-        return None
-    subdir = max(subdirectories, key=os.path.getctime)
-
-    return subdir
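The helper removed above simply returns the newest subdirectory of a path. A small self-contained sketch of the same behavior, with a hypothetical usage comment:

    # Sketch of the removed helper: newest subdirectory by creation time.
    import os
    from pathlib import Path


    def most_recent_subdir(path):
        base_path = Path(path)
        subdirectories = [d for d in base_path.iterdir() if d.is_dir()]
        if not subdirectories:
            return None
        return max(subdirectories, key=os.path.getctime)


    # e.g. locating the latest TensorBoard run directory written by a training job:
    # latest_run = most_recent_subdir("outputs/run1/runs")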
File diff suppressed because one or more lines are too long
@@ -2,7 +2,6 @@
 import json
 import logging
 import unittest
-from copy import deepcopy
 from pathlib import Path
 from typing import Optional

@@ -26,50 +25,6 @@ from axolotl.prompters import AlpacaPrompter, PromptStyle, ShareGPTPrompterV2

 LOG = logging.getLogger("axolotl")

-test_data = {
-    "multi_turn_sys": {
-        "conversations": [
-            {"from": "system", "value": "lorem"},
-            {"from": "human", "value": "abc"},
-            {"from": "gpt", "value": "ipsum"},
-            {"from": "human", "value": "123"},
-            {"from": "gpt", "value": "sit"},
-        ]
-    },
-    "single_turn_sys": {
-        "conversations": [
-            {"from": "system", "value": "lorem"},
-            {"from": "human", "value": "abc"},
-            {"from": "gpt", "value": "ipsum"},
-        ]
-    },
-    "single_turn_no_sys": {
-        "conversations": [
-            {"from": "human", "value": "abc"},
-            {"from": "gpt", "value": "ipsum"},
-        ]
-    },
-    "multi_turn_no_sys": {
-        "conversations": [
-            {"from": "human", "value": "abc"},
-            {"from": "gpt", "value": "ipsum"},
-            {"from": "human", "value": "123"},
-            {"from": "gpt", "value": "sit"},
-        ]
-    },
-}
-
-
-def prompt_strat(conversation, tokenizer):
-    "Helper function to create a prompt strategy for testing."
-    prompter = ShareGPTPrompterV2(conversation=conversation)
-    return ShareGPTPromptTokenizingStrategy(
-        prompter,
-        tokenizer,
-        False,
-        2048,
-    )
-
-
 class TestPromptTokenizationStrategies(unittest.TestCase):
     """
@@ -159,70 +114,6 @@ class TestPromptTokenizationStrategies(unittest.TestCase):
             in self._caplog.records[0].message
         )

-    def test_sharegpt_llama(self):
-        "Make sure the sharegpt/llama is tokenized and formatted correctly."
-        strat = prompt_strat("llama-2", self.tokenizer)
-
-        def tokenize(conv):
-            return strat.tokenize_prompt(deepcopy(conv))["input_ids"]
-
-        def decode(ids):
-            return strat.tokenizer.decode(ids)
-
-        # fmt: off
-        # System message, multi-turn conversations
-        mt_ids = tokenize(test_data['multi_turn_sys'])
-        assert decode(mt_ids) == '<s> [INST] <<SYS>>\nlorem\n<</SYS>>\n\nabc [/INST] ipsum</s><s> [INST] 123 [/INST] sit</s>'
-        assert mt_ids == [1, 518, 25580, 29962, 3532, 14816, 29903, 6778, 13, 29880, 3668, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 10736, 518, 29914, 25580, 29962, 23421, 2, 1, 518, 25580, 29962, 29871, 29896, 29906, 29941, 518, 29914, 25580, 29962, 7845, 2]
-
-        # System message, single-turn conversations
-        st_ids = tokenize(test_data['single_turn_sys'])
-        assert decode(st_ids) == '<s> [INST] <<SYS>>\nlorem\n<</SYS>>\n\nabc [/INST] ipsum</s>'
-        assert st_ids == [1, 518, 25580, 29962, 3532, 14816, 29903, 6778, 13, 29880, 3668, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 10736, 518, 29914, 25580, 29962, 23421, 2]
-
-        # No system message, single-turn
-        ns_ids = tokenize(test_data['single_turn_no_sys'])
-        assert decode(ns_ids) == '<s> [INST] abc [/INST] ipsum</s>'
-        assert ns_ids == [1, 518, 25580, 29962, 25638, 518, 29914, 25580, 29962, 23421, 2]
-
-        # No system message, multi-turn
-        ns_mt_ids = tokenize(test_data['multi_turn_no_sys'])
-        assert decode(ns_mt_ids) == '<s> [INST] abc [/INST] ipsum</s><s> [INST] 123 [/INST] sit</s>'
-        assert ns_mt_ids == [1, 518, 25580, 29962, 25638, 518, 29914, 25580, 29962, 23421, 2, 1, 518, 25580, 29962, 29871, 29896, 29906, 29941, 518, 29914, 25580, 29962, 7845, 2]
-        # fmt: on
-
-    def test_sharegpt_mistral(self):
-        "Make sure the sharegpt/mistral is tokenized and formatted correctly."
-        strat = prompt_strat("mistral", self.tokenizer)
-
-        def tokenize(conv):
-            return strat.tokenize_prompt(deepcopy(conv))["input_ids"]
-
-        def decode(ids):
-            return strat.tokenizer.decode(ids)
-
-        # fmt: off
-        # System message, multi-turn conversations
-        mt_ids = tokenize(test_data['multi_turn_sys'])
-        assert decode(mt_ids) == '<s> [INST] lorem\nabc [/INST] ipsum</s> [INST] 123 [/INST] sit</s>'
-        assert mt_ids == [1, 518, 25580, 29962, 301, 3668, 13, 10736, 518, 29914, 25580, 29962, 23421, 2, 518, 25580, 29962, 29871, 29896, 29906, 29941, 518, 29914, 25580, 29962, 7845, 2]
-
-        # System message, single-turn conversations
-        st_ids = tokenize(test_data['single_turn_sys'])
-        assert decode(st_ids) == '<s> [INST] lorem\nabc [/INST] ipsum</s>'
-        assert st_ids == [1, 518, 25580, 29962, 301, 3668, 13, 10736, 518, 29914, 25580, 29962, 23421, 2]
-
-        # No system message, single-turn
-        ns_ids = tokenize(test_data['single_turn_no_sys'])
-        assert decode(ns_ids) == '<s> [INST] abc [/INST] ipsum</s>'
-        assert ns_ids == [1, 518, 25580, 29962, 25638, 518, 29914, 25580, 29962, 23421, 2]
-
-        # No system message, multi-turn
-        ns_mt_ids = tokenize(test_data['multi_turn_no_sys'])
-        assert decode(ns_mt_ids) == '<s> [INST] abc [/INST] ipsum</s> [INST] 123 [/INST] sit</s>'
-        assert ns_mt_ids == [1, 518, 25580, 29962, 25638, 518, 29914, 25580, 29962, 23421, 2, 518, 25580, 29962, 29871, 29896, 29906, 29941, 518, 29914, 25580, 29962, 7845, 2]
-        # fmt: on
-
     def test_sharegpt_changes_roles(self):
         conversation = {
             "roles": ["USER", "CHARACTER"],
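The deleted assertions above capture the difference between the two chat formats: llama-2 wraps the system prompt in <<SYS>> markers and restarts <s> on every turn, while mistral folds the system prompt into the first [INST] block and keeps one <s>. A sketch of just the string templates that the removed tests decoded, not the tokenizer path itself:

    # Illustrative templates matching the decoded outputs asserted above.
    llama2_multi_turn = (
        "<s> [INST] <<SYS>>\nlorem\n<</SYS>>\n\nabc [/INST] ipsum</s>"
        "<s> [INST] 123 [/INST] sit</s>"
    )
    mistral_multi_turn = (
        "<s> [INST] lorem\nabc [/INST] ipsum</s> [INST] 123 [/INST] sit</s>"
    )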
@@ -3,8 +3,6 @@ Test cases for the tokenizer loading
 """
 import unittest

-import pytest
-
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.models import load_tokenizer

@@ -33,40 +31,6 @@ class TestTokenizers(unittest.TestCase):
         tokenizer = load_tokenizer(cfg)
         assert "Fast" not in tokenizer.__class__.__name__

-    def test_special_tokens_modules_to_save(self):
-        # setting special_tokens to new token
-        cfg = DictDefault(
-            {
-                "tokenizer_config": "huggyllama/llama-7b",
-                "adapter": "lora",
-                "special_tokens": {"bos_token": "[INST]"},
-            }
-        )
-        with pytest.raises(
-            ValueError,
-            match=r".*Please set lora_modules_to_save*",
-        ):
-            load_tokenizer(cfg)
-
-        # setting special_tokens but not changing from default
-        cfg = DictDefault(
-            {
-                "tokenizer_config": "huggyllama/llama-7b",
-                "adapter": "lora",
-                "special_tokens": {"bos_token": "<s>"},
-            }
-        )
-        load_tokenizer(cfg)
-
-        # non-adapter setting special_tokens
-        cfg = DictDefault(
-            {
-                "tokenizer_config": "huggyllama/llama-7b",
-                "special_tokens": {"bos_token": "[INST]"},
-            }
-        )
-        load_tokenizer(cfg)
-
-
 if __name__ == "__main__":
     unittest.main()
@@ -1,7 +1,6 @@
 """Module for testing the validation module"""

 import logging
-import os
 import unittest
 from typing import Optional

@@ -9,7 +8,6 @@ import pytest

 from axolotl.utils.config import validate_config
 from axolotl.utils.dict import DictDefault
-from axolotl.utils.wandb_ import setup_wandb_env_vars


 class ValidationTest(unittest.TestCase):
@@ -651,150 +649,3 @@ class ValidationTest(unittest.TestCase):
         )

         validate_config(cfg)
-
-    def test_warmup_step_no_conflict(self):
-        cfg = DictDefault(
-            {
-                "warmup_steps": 10,
-                "warmup_ratio": 0.1,
-            }
-        )
-
-        with pytest.raises(
-            ValueError,
-            match=r".*warmup_steps and warmup_ratio are mutually exclusive*",
-        ):
-            validate_config(cfg)
-
-        cfg = DictDefault(
-            {
-                "warmup_steps": 10,
-            }
-        )
-
-        validate_config(cfg)
-
-        cfg = DictDefault(
-            {
-                "warmup_ratio": 0.1,
-            }
-        )
-
-        validate_config(cfg)
-
-    def test_add_tokens_adapter(self):
-        cfg = DictDefault(
-            {"adapter": "qlora", "load_in_4bit": True, "tokens": ["<|imstart|>"]}
-        )
-
-        with pytest.raises(
-            ValueError,
-            match=r".*lora_modules_to_save not properly set yet adding new tokens*",
-        ):
-            validate_config(cfg)
-
-        cfg = DictDefault(
-            {
-                "adapter": "qlora",
-                "load_in_4bit": True,
-                "tokens": ["<|imstart|>"],
-                "lora_modules_to_save": ["embed_tokens"],
-            }
-        )
-
-        with pytest.raises(
-            ValueError,
-            match=r".*lora_modules_to_save not properly set yet adding new tokens*",
-        ):
-            validate_config(cfg)
-
-        cfg = DictDefault(
-            {
-                "adapter": "qlora",
-                "load_in_4bit": True,
-                "tokens": ["<|imstart|>"],
-                "lora_modules_to_save": ["embed_tokens", "lm_head"],
-            }
-        )
-
-        validate_config(cfg)
-
-
-class ValidationWandbTest(ValidationTest):
-    """
-    Validation test for wandb
-    """
-
-    def test_wandb_set_run_id_to_name(self):
-        cfg = DictDefault(
-            {
-                "wandb_run_id": "foo",
-            }
-        )
-
-        with self._caplog.at_level(logging.WARNING):
-            validate_config(cfg)
-            assert any(
-                "wandb_run_id sets the ID of the run. If you would like to set the name, please use wandb_name instead."
-                in record.message
-                for record in self._caplog.records
-            )
-
-        assert cfg.wandb_name == "foo" and cfg.wandb_run_id == "foo"
-
-        cfg = DictDefault(
-            {
-                "wandb_name": "foo",
-            }
-        )
-
-        validate_config(cfg)
-
-        assert cfg.wandb_name == "foo" and cfg.wandb_run_id is None
-
-    def test_wandb_sets_env(self):
-        cfg = DictDefault(
-            {
-                "wandb_project": "foo",
-                "wandb_name": "bar",
-                "wandb_run_id": "bat",
-                "wandb_entity": "baz",
-                "wandb_mode": "online",
-                "wandb_watch": "false",
-                "wandb_log_model": "checkpoint",
-            }
-        )
-
-        validate_config(cfg)
-
-        setup_wandb_env_vars(cfg)
-
-        assert os.environ.get("WANDB_PROJECT", "") == "foo"
-        assert os.environ.get("WANDB_NAME", "") == "bar"
-        assert os.environ.get("WANDB_RUN_ID", "") == "bat"
-        assert os.environ.get("WANDB_ENTITY", "") == "baz"
-        assert os.environ.get("WANDB_MODE", "") == "online"
-        assert os.environ.get("WANDB_WATCH", "") == "false"
-        assert os.environ.get("WANDB_LOG_MODEL", "") == "checkpoint"
-        assert os.environ.get("WANDB_DISABLED", "") != "true"
-
-    def test_wandb_set_disabled(self):
-        cfg = DictDefault({})
-
-        validate_config(cfg)
-
-        setup_wandb_env_vars(cfg)
-
-        assert os.environ.get("WANDB_DISABLED", "") == "true"
-
-        cfg = DictDefault(
-            {
-                "wandb_project": "foo",
-            }
-        )
-
-        validate_config(cfg)
-
-        setup_wandb_env_vars(cfg)
-
-        assert os.environ.get("WANDB_DISABLED", "") != "true"
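The deleted warmup test above encodes one simple rule: warmup_steps and warmup_ratio are mutually exclusive. A minimal sketch of that check as a hypothetical standalone validator, not axolotl's validate_config:

    # Hypothetical standalone check mirroring the rule the deleted test exercised.
    def check_warmup(cfg: dict) -> None:
        if cfg.get("warmup_steps") is not None and cfg.get("warmup_ratio") is not None:
            raise ValueError("warmup_steps and warmup_ratio are mutually exclusive")


    check_warmup({"warmup_steps": 10})   # ok
    check_warmup({"warmup_ratio": 0.1})  # ok
    # check_warmup({"warmup_steps": 10, "warmup_ratio": 0.1})  # would raise ValueError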