Compare commits: exp-expand...multipack (215 commits)
Commit SHAs (abbreviated):

81d60e96f0 168a7a09cc 231031a0e1 5daf7d5299 5491278a79 1514739f0f 896c1aebcf ef17e15483
69a235061b 687d889928 c4cf567b55 c49729d2bc 13ac4d8de2 19cf0bda99 f74edd5b56 d69da99c2c
66afb76a15 a692ad3f4c 41da98b982 9e64f42e0f b9b7d4ce92 9bed281867 e79c8e617e 71456955f5
3a783c04e4 1e5014acec a10da1caff 4066c78631 78a1e1fa12 bc8a2e5547 910ebe47f5 c146880a75
77bdb7d144 530809fd74 924bbfddec f150c027e3 5c39c006c9 612aabd8c4 af05883f75 05ab9092e3
7b57ed7618 3a38271276 8d20e0a3d3 de8ed229c3 478d8c7b8e 645c13592c 47d601fa23 756dfba97b
91ab0592af 0aeb7c7802 9bdd30cdfd d35278aaf1 9492d4ebb7 ad5ca4f734 cb9d3af5c0 c969f0a9dc
6d0ee4ba34 a81f52d575 1925eaf1e6 1ab3bf3e67 d7635b7148 88e17ffc50 baed440fa1 7925ddce86
6f849809c5 c16644d05e 945c4191a3 136522f9c9 556fe408b3 16bb6276a5 06674a11f2 3513885f43
06652c1c39 068fc48978 aaadacf6b3 5ff547dc70 dc77c8ebce 51a4c12242 4b43a66a0b 34ae69989f
7dc580b837 fd2c9814c9 2ba4ae8f46 93dacba228 8002ffb41f 74ef5cc083 5e616d91c0 94f310c7a6
8e568bbdae e21dab49fd 52cde69288 9a58e99e81 c7dee56b87 aac4b7691e f31a338cbb 4cd1deeef2
9ac16ed8d1 6b3f509d9e 336aa3fd48 d0d7eaa4f3 a6ebf57e82 280832cec2 a43bae9ff0 effbbf6dd1
c9a149f9e8 c530e4b9c8 f620706776 77762a5d6b 14668fa54e b565ecf0a1 fe0b76854e e944311442
e3e7b52a5b 974dc00a7d 572d1141e6 a6190c8094 563b6d89e6 cd0a6f6027 0e664a5ebc dd7d16d2eb
e285e24f7f 919727b4d7 5ffefee37f d9f713e4e3 958da70376 c4e4f8115c a808bf913f 01248253a3
759e8673ce 0c6f928601 eea2731a5e 1db46a9c72 ab5cd28acf 1a82082e91 1210dc8fd5 488a67d75a
71a43f8479 39619028a3 8792199799 1edc30c786 14163c15d9 41e4f6ca31 79e2a6f140 c2508987a6
215d775147 f36e227eaf 5878bb1f3a a03a7d7d8b fec6bcc3e6 931e606459 7f09106437 6b50200234
16f9e28048 b9083a7fc1 aefb2fc681 b5aa8d854c 4d6490bce2 b242b69e10 320beb20f4 bd3b537344
813cfa4c14 2e13ceff37 2a801b001a e44c9e0b3e 55b8542de8 febe902517 f4df266842 281dc3df59
2ef4634d45 7eae90333e c8242de725 2cfe9e9b16 79a8f52181 afaa0d2c01 bfd27ba55e babf0fdb71
a52f4816b0 81911d112c 52765ac588 73e9ea4069 f8d379883d 04a1b77307 2097a09d2d cfff94b123
2b222de5b6 df9528f865 193c73bce0 6abfd87d44 59bb2197ed 9a02e7e1ff 5b33e295bd 4ac9e251b7
c9c050316f ca11ae9689 328c3bce96 5cd2126439 12620f3089 4ab0c8b201 74ebbf4371 76a70fd739
618816d4df 91992cb8f5 84169d15b3 ecfe8d0a1a eee44a3b47 078a43eef8 33e1890086 1c38253692
496b83f778 ff68a95781 fb3d40f197 288fd62431 3c71c8debe 72bf8aafb6 8afb0fbaba

.github/workflows/base.yml (vendored, 3 changes)

@@ -12,6 +12,7 @@ jobs:
  # this job needs to be run on self-hosted GPU runners...
  runs-on: self-hosted
  strategy:
+ fail-fast: false
  matrix:
  include:
  - cuda: "118"
@@ -25,7 +26,7 @@ jobs:
  pytorch: 2.0.0
  axolotl_extras:
  - cuda: "117"
- cuda_version: 11.7.0
+ cuda_version: 11.7.1
  python_version: "3.9"
  pytorch: 1.13.1
  axolotl_extras:

.github/workflows/main.yml (vendored, 5 changes)

@@ -11,6 +11,7 @@ jobs:
  if: github.repository_owner == 'OpenAccess-AI-Collective'
  # this job needs to be run on self-hosted GPU runners...
  strategy:
+ fail-fast: false
  matrix:
  include:
  - cuda: cu118
@@ -29,7 +30,7 @@ jobs:
  pytorch: 2.0.0
  axolotl_extras: gptq
  - cuda: cu117
- cuda_version: 11.7.0
+ cuda_version: 11.7.1
  python_version: "3.9"
  pytorch: 1.13.1
  axolotl_extras:
@@ -84,7 +85,7 @@ jobs:
  pytorch: 2.0.0
  axolotl_extras: gptq
  - cuda: cu117
- cuda_version: 11.7.0
+ cuda_version: 11.7.1
  python_version: "3.9"
  pytorch: 1.13.1
  axolotl_extras:

.github/workflows/tests.yml (vendored, 1 change)

@@ -7,6 +7,7 @@ jobs:
  test:
  runs-on: ubuntu-latest
  strategy:
+ fail-fast: false
  matrix:
  python_version: ["3.9", "3.10"]
  timeout-minutes: 10

@@ -1,5 +1,5 @@
  default_language_version:
- python: python3.9
+ python: python3

  repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks

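The hunk above loosens the pre-commit hook interpreter from `python3.9` to `python3`. As a sketch of how these hooks are typically exercised locally (standard pre-commit commands; nothing here is specific to this repository):

```bash
pip3 install pre-commit
pre-commit install            # run the hooks automatically on git commit
pre-commit run --all-files    # or run them once across the whole tree
```
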
FAQS.md (3 changes)

@@ -2,3 +2,6 @@

  - Can you train StableLM with this? Yes, but only with a single GPU atm. Multi GPU support is coming soon! Just waiting on this [PR](https://github.com/huggingface/transformers/pull/22874)
  - Will this work with Deepspeed? That's still a WIP, but setting `export ACCELERATE_USE_DEEPSPEED=true` should work in some cases
+ - `Error invalid argument at line 359 in file /workspace/bitsandbytes/csrc/pythonInterface.c`
+ `/arrow/cpp/src/arrow/filesystem/s3fs.cc:2598: arrow::fs::FinalizeS3 was not called even though S3 was initialized.`
+ This could lead to a segmentation fault at exit. Try reinstalling bitsandbytes and transformers from source.

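The new FAQ entry above recommends reinstalling bitsandbytes and transformers from source. A minimal sketch of one way to do that, assuming the upstream GitHub repositories are the desired sources (bitsandbytes may need an additional CUDA-specific build step depending on your environment):

```bash
pip3 uninstall -y bitsandbytes transformers
pip3 install git+https://github.com/TimDettmers/bitsandbytes.git
pip3 install git+https://github.com/huggingface/transformers.git
```
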
README.md (230 changes)

@@ -16,13 +16,14 @@

  ## Axolotl supports

- | | fp16/fp32 | fp16/fp32 w/ lora | qlora | 4bit-quant | 4bit-quant w/flash attention | flash attention | xformers attention |
+ | | fp16/fp32 | lora | qlora | gptq | gptq w/ lora | gptq w/flash attn | flash attn | xformers attn |
- |---------|:----------|:------------------|------|------------|------------------------------|-----------------|--------------------|
+ |----------|:----------|:-----|-------|------|:-------------|-------------------|------------|---------------|
- | llama | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+ | llama | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
- | Pythia | ✅ | ✅ | ❓ | ❌ | ❌ | ❌ | ❓ |
+ | Pythia | ✅ | ✅ | ✅ | ❌ | ❓ | ❌ | ❌ | ❓ |
- | cerebras | ✅ | ✅ | ❓ | ❌ | ❌ | ❌ | ❓ |
+ | cerebras | ✅ | ✅ | ✅ | ❌ | ❓ | ❌ | ❌ | ✅ |
- | mpt | ✅ | ❌ | ❓ | ❌ | ❌ | ❌ | ❓ |
+ | mpt | ✅ | ❌ | ❓ | ❌ | ❓ | ❌ | ❌ | ❓ |
- | falcon | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❓ |
+ | falcon | ✅ | ✅ | ✅ | ❌ | ❓ | ❌ | ❌ | ✅ |
+ | gpt-j | ✅ | ✅ | ✅ | ❌ | ❓ | ❌ | ❓ | ✅ |


  ## Quickstart ⚡
@@ -33,14 +34,15 @@
  git clone https://github.com/OpenAccess-AI-Collective/axolotl

  pip3 install -e .
+ pip3 install -U git+https://github.com/huggingface/peft.git

  accelerate config

  # finetune lora
- accelerate launch scripts/finetune.py examples/lora-openllama-3b/config.yml
+ accelerate launch scripts/finetune.py examples/openllama-3b/lora.yml

  # inference
- accelerate launch scripts/finetune.py examples/lora-openllama-3b/config.yml \
+ accelerate launch scripts/finetune.py examples/openllama-3b/lora.yml \
  --inference --lora_model_dir="./lora-out"
  ```

@@ -50,10 +52,17 @@ accelerate launch scripts/finetune.py examples/lora-openllama-3b/config.yml \

  - Docker
  ```bash
- docker run --gpus '"all"' --rm -it winglian/axolotl:main
+ docker run --gpus '"all"' --rm -it winglian/axolotl:main-py3.9-cu118-2.0.0
+ ```
+ - `winglian/axolotl-runpod:main-py3.9-cu118-2.0.0`: for runpod
+ - `winglian/axolotl-runpod:main-py3.9-cu118-2.0.0-gptq`: for gptq
+ - `winglian/axolotl:dev`: dev branch (not usually up to date)
+
+ Or run on the current files for development:
+
+ ```sh
+ docker compose up -d
  ```
- - `winglian/axolotl:dev`: dev branch
- - `winglian/axolotl-runpod:main`: for runpod

  - Conda/Pip venv
  1. Install python **3.9**

@@ -61,9 +70,65 @@ accelerate launch scripts/finetune.py examples/lora-openllama-3b/config.yml \
  2. Install pytorch stable https://pytorch.org/get-started/locally/

  3. Install python dependencies with ONE of the following:
- - `pip3 install -e .` (recommended, supports QLoRA, no gptq/int4 support)
+ - Recommended, supports QLoRA, NO gptq/int4 support
- - `pip3 install -e .[gptq]` (next best if you don't need QLoRA, but want to use gptq)
+ ```bash
- - `pip3 install -e .[gptq_triton]`
+ pip3 install -e .
+ pip3 install -U git+https://github.com/huggingface/peft.git
+ ```
+ - gptq/int4 support, NO QLoRA
+ ```bash
+ pip3 install -e .[gptq]
+ ```
+ - same as above but not recommended
+ ```bash
+ pip3 install -e .[gptq_triton]
+ ```
+
+ - LambdaLabs
+ <details>
+
+ <summary>Click to Expand</summary>
+
+ 1. Install python
+ ```bash
+ sudo apt update
+ sudo apt install -y python3.9
+
+ sudo update-alternatives --install /usr/bin/python python /usr/bin/python3.9 1
+ sudo update-alternatives --config python # pick 3.9 if given option
+ python -V # should be 3.9
+
+ ```
+
+ 2. Install pip
+ ```bash
+ wget https://bootstrap.pypa.io/get-pip.py
+ python get-pip.py
+ ```
+
+ 3. Install torch
+ ```bash
+ pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
+ ```
+
+ 4. Axolotl
+ ```bash
+ git clone https://github.com/OpenAccess-AI-Collective/axolotl
+ cd axolotl
+
+ pip3 install -e . # change depend on needs
+ pip3 install protobuf==3.20.3
+ pip3 install -U requests
+ pip3 install -U --ignore-installed psutil
+ pip3 install -U scipy
+ pip3 install git+https://github.com/huggingface/peft.git # not for gptq
+ ```
+
+ 5. Set path
+ ```bash
+ export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH
+ ```
+ </details>

  ### Dataset

@@ -73,7 +138,7 @@ Have dataset(s) in one of the following format (JSONL recommended):
  ```json
  {"instruction": "...", "input": "...", "output": "..."}
  ```
- - `sharegpt`: conversations
+ - `sharegpt:chat`: conversations
  ```json
  {"conversations": [{"from": "...", "value": "..."}]}
  ```
@@ -114,13 +179,70 @@ Have dataset(s) in one of the following format (JSONL recommended):
  ```json
  {"article": "...", "summary": "..."}
  ```
+ - `alpaca_chat`: basic instruct for alpaca chat
- > Have some new format to propose? Check if it's already defined in [data.py](src/axolotl/utils/data.py) in `dev` branch!
+ ```json
+ {"instruction": "...", "input": "...", "response": "..."}
+ ```
+ - `alpaca_chat.load_qa`: question and answer for alpaca chat
+ ```json
+ {"question": "...", "answer": "..."}
+ ```
+ - `alpaca_chat.load_concise`: question and answer for alpaca chat, for concise answers
+ ```json
+ {"instruction": "...", "input": "...", "response": "..."}
+ ```
+ - `alpaca_chat.load_camel_ai`: question and answer for alpaca chat, for load_camel_ai
+ ```json
+ {"message_1": "...", "message_2": "..."}
+ ```
+ - `alpaca_w_system.load_open_orca`: support for open orca datasets with included system prompts, instruct
+ ```json
+ {"system_prompt": "...", "question": "...", "response": "..."}
+ ```
+ - `context_qa`: in context question answering from an article
+ ```json
+ {"article": "...", "question": "...", "answer": "..."}
+ ```
+ - `context_qa.load_404`: in context question answering from an article, with default response for no answer from context
+ ```json
+ {"article": "...", "unanswerable_question": "..."}
+ ```
+ - `creative_acr.load_answer`: instruction and revision
+ ```json
+ {"instruction": "...", "revision": "..."}
+ ```
+ - `creative_acr.load_critique`: critique
+ ```json
+ {"scores": "...", "critiques": "...", "instruction": "...", "answer": "..."}
+ ```
+ - `creative_acr.load_revise`: critique and revise
+ ```json
+ {"scores": "...", "critiques": "...", "instruction": "...", "answer": "...", "revision": "..."}
+ ```
+ - `pygmalion`: pygmalion
+ ```json
+ {"conversations": [{"role": "...", "value": "..."}]}
+ ```
+ - `sharegpt_simple.load_role`: conversations where `role` is used instead of `from`
+ ```json
+ {"conversations": [{"role": "...", "value": "..."}]}
+ ```
+ - `sharegpt_jokes`: creates a chat where bot is asked to tell a joke, then explain why the joke is funny
+ ```json
+ {"conversations": [{"title": "...", "text": "...", "explanation": "..."}]}
+ ```

  </details>

+ #### How to add custom prompts
+
+ 1. Add your method to a file in [prompt_strategies](src/axolotl/prompt_strategies). Please see other files as example.
+ 2. Use your custom file name as the dataset type `<prompt_strategies_file>.load_<load_fn>`.
+
  Optionally, download some datasets, see [data/README.md](data/README.md)



  ### Config

  See sample configs in [configs](configs) folder or [examples](examples) for quick start. It is recommended to duplicate and modify to your needs. The most important options are:

@@ -133,10 +255,18 @@ See sample configs in [configs](configs) folder or [examples](examples) for quic

  - dataset
  ```yaml
+ sequence_len: 2048 # max token length for prompt
+
+ # huggingface repo
  datasets:
- - path: vicgalle/alpaca-gpt4 # local or huggingface repo
+ - path: vicgalle/alpaca-gpt4
+ type: alpaca # format from earlier
+
+ # local
+ datasets:
+ - path: json
+ data_files: data.jsonl # or json
  type: alpaca # format from earlier
- sequence_len: 2048 # max token length / prompt
  ```

  - loading
@@ -146,6 +276,8 @@ See sample configs in [configs](configs) folder or [examples](examples) for quic
  bf16: true # require >=ampere
  fp16: true
  tf32: true # require >=ampere
+ bfloat16: true # require >=ampere, use instead of bf16 when you don't want AMP (automatic mixed precision)
+ float16: true # use instead of fp16 when you don't want AMP
  ```
  Note: Repo does not do 4-bit quantization.

@@ -173,6 +305,8 @@ base_model_ignore_patterns:
  # if the base_model repo on hf hub doesn't include configuration .json files,
  # you can set that here, or leave this empty to default to base_model
  base_model_config: ./llama-7b-hf
+ # you can specify to choose a specific model revision from huggingface hub
+ model_revision:
  # Optional tokenizer configuration override in case you want to use a different tokenizer
  # than the one defined in the base model
  tokenizer_config:
@@ -182,6 +316,8 @@ model_type: AutoModelForCausalLM
  tokenizer_type: AutoTokenizer
  # Trust remote code for untrusted source
  trust_remote_code:
+ # use_fast option for tokenizer loading from_pretrained, default to True
+ tokenizer_use_fast:

  # whether you are training a 4-bit GPTQ quantized model
  gptq: true
@@ -202,10 +338,10 @@ tf32: true # require >=ampere

  # a list of one or more datasets to finetune the model with
  datasets:
- # this can be either a hf dataset, or relative path
+ # hf dataset repo | "json" for local dataset, make sure to fill data_files
  - path: vicgalle/alpaca-gpt4
  # The type of prompt to use for training. [alpaca, sharegpt, gpteacher, oasst, reflection]
- type: alpaca # format OR format:prompt_style (chat/instruct)
+ type: alpaca # format | format:<prompt_style> (chat/instruct) | <prompt_strategies>.load_<load_fn>
  data_files: # path to source data files
  shards: # number of shards to split data into

@@ -214,6 +350,8 @@ datasets:
  dataset_prepared_path: data/last_run_prepared
  # push prepared dataset to hub
  push_dataset_to_hub: # repo path
+ # push checkpoints to hub
+ hub_model_id: # repo path
  # whether to use hf `use_auth_token` for loading datasets. Useful for fetching private datasets
  # required to be true when used in combination with `push_dataset_to_hub`
  hf_use_auth_token: # boolean

@@ -272,13 +410,18 @@ num_epochs: 3
  warmup_steps: 100
  learning_rate: 0.00003
  logging_steps:
+ save_steps:
+ eval_steps:
+
+ # save model as safetensors (require safetensors package)
+ save_safetensors:
+
  # whether to mask out or include the human's prompt from the training labels
  train_on_inputs: false
  # don't use this, leads to wonky training (according to someone on the internet)
  group_by_length: false

- # does not work with current implementation of 4-bit LoRA
+ # Whether to use gradient checkpointing https://huggingface.co/docs/transformers/v4.18.0/en/performance#gradient-checkpointing
  gradient_checkpointing: false

  # stop training after this many evaluation losses have increased in a row
@@ -300,7 +443,15 @@ log_sweep_max_lr:
  optimizer:
  # specify weight decay
  weight_decay:
+ # adamw hyperparams
+ adam_beta1:
+ adam_beta2:
+ adam_epsilon:
+ # Gradient clipping max norm
+ max_grad_norm:
+
+ # whether to bettertransformers
+ flash_optimum:
  # whether to use xformers attention patch https://github.com/facebookresearch/xformers:
  xformers_attention:
  # whether to use flash attention patch https://github.com/HazyResearch/flash-attention:
@@ -308,6 +459,11 @@ flash_attention: # require a100 for llama
  # whether to use scaled-dot-product attention
  # https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html
  sdp_attention:
+ # Landmark attention (only llama)
+ landmark_attention:
+ # xpos RoPE see https://github.com/kaiokendev/cutoff-len-is-context-len/blob/main/util/xpos_rope_llama_monkey_patch.py
+ # llama only
+ xpos_rope:
+
  # resume from a specific checkpoint dir
  resume_from_checkpoint:

@@ -375,11 +531,16 @@ Pass the appropriate flag to the train command:

  - Pretrained LORA:
  ```bash
- --inference --lora_model_dir ./completed-model
+ --inference --lora_model_dir="./lora-output-dir"
  ```
  - Full weights finetune:
  ```bash
- --inference --base_model ./completed-model
+ --inference --base_model="./completed-model"
+ ```
+ - Full weights finetune w/ a prompt from a text file:
+ ```bash
+ cat /tmp/prompt.txt | python scripts/finetune.py configs/your_config.yml \
+ --base_model="./completed-model" --inference --prompter=None --load_in_8bit=True
  ```

  ### Merge LORA to base
@@ -390,6 +551,12 @@ Add below flag to train command above
  --merge_lora --lora_model_dir="./completed-model" --load_in_8bit=False --load_in_4bit=False
  ```

+ If you run out of CUDA memory, you can try to merge in system RAM with
+
+ ```bash
+ CUDA_VISIBLE_DEVICES="" python3 scripts/finetune.py ...
+ ```
+
  ## Common Errors 🧰

  > Cuda out of memory

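For reference, putting the merge flags from the hunk above together with the train command shown in the quickstart gives a complete invocation along these lines (the config path and LoRA output directory are illustrative and should match your own run):

```bash
accelerate launch scripts/finetune.py examples/openllama-3b/lora.yml \
    --merge_lora --lora_model_dir="./lora-out" --load_in_8bit=False --load_in_4bit=False
```
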
@@ -397,6 +564,7 @@ Add below flag to train command above
  Please reduce any below
  - `micro_batch_size`
  - `eval_batch_size`
+ - `gradient_accumulation_steps`
  - `sequence_len`

  > RuntimeError: expected scalar type Float but found Half
@@ -407,7 +575,7 @@ Try set `fp16: true`

  Try to turn off xformers.

  ## Need help? 🙋♂️

  Join our [Discord server](https://discord.gg/HhrNrHJPRb) where we can help you

@@ -421,6 +589,16 @@ Building something cool with Axolotl? Consider adding a badge to your model card

  [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)

+ ## Community Showcase
+
+ Open Access AI Collective
+ - [Minotaur 13b](https://huggingface.co/openaccess-ai-collective/minotaur-13b)
+ - [Manticore 13b](https://huggingface.co/openaccess-ai-collective/manticore-13b)
+ - [Hippogriff 30b](https://huggingface.co/openaccess-ai-collective/hippogriff-30b-chat)
+
+ PocketDoc Labs
+ - [Dan's PersonalityEngine 13b LoRA](https://huggingface.co/PocketDoc/Dans-PersonalityEngine-13b-LoRA)
+
  ## Contributing 🤝

  Bugs? Please check for open issue else create a new [Issue](https://github.com/OpenAccess-AI-Collective/axolotl/issues/new).

@@ -1,15 +0,0 @@ (deleted file)
- compute_environment: LOCAL_MACHINE
- distributed_type: 'NO'
- downcast_bf16: 'no'
- gpu_ids: all
- machine_rank: 0
- main_training_function: main
- mixed_precision: bf16
- num_machines: 1
- num_processes: 1
- rdzv_backend: static
- same_network: true
- tpu_env: []
- tpu_use_cluster: false
- tpu_use_sudo: false
- use_cpu: false

@@ -1,40 +0,0 @@ (deleted file)
- base_model: cerebras/Cerebras-GPT-1.3B
- model_type: AutoModelForCausalLM
- tokenizer_type: AutoTokenizer
- load_in_8bit: true
- datasets:
- - path: data/alpaca_data_gpt4.jsonl
- type: alpaca
- - path: data/vicuna_cleaned.jsonl
- type: sharegpt
- - path: data/gpt4-instruct-similarity-0.6-dataset.jsonl
- type: gpteacher
- - path: data/roleplay-similarity_0.6-instruct-dataset.jsonl
- type: gpteacher
- dataset_prepared_path: last_run_prepared
- val_set_size: 0.05
- adapter: lora
- sequence_len: 2048
- lora_r: 8
- lora_alpha: 16
- lora_dropout: 0.05
- lora_target_modules:
- - c_attn
- lora_fan_in_fan_out: false
- wandb_project: pythia-1.4b-lora
- wandb_watch:
- wandb_run_id:
- wandb_log_model:
- output_dir: ./lora-alpaca
- gradient_accumulation_steps: 1
- micro_batch_size: 4
- num_epochs: 5
- learning_rate: 0.0003
- train_on_inputs: false
- group_by_length: false
- bf16: True
- tf32: True
- gradient_checkpointing:
- early_stopping_patience:
- resume_from_checkpoint:
- local_rank:

@@ -1,41 +0,0 @@ (deleted file)
- base_model: facebook/galactica-1.3b
- model_type: AutoModelForCausalLM
- tokenizer_type: AutoTokenizer
- load_in_8bit: false
- datasets:
- - path: tatsu-lab/alpaca
- type: alpaca
- dataset_prepared_path: last_run_prepared
- val_set_size: 0.1
- adapter:
- lora_model_dir:
- sequence_len: 1024
- max_packed_sequence_len: 1024
- lora_r: 8
- lora_alpha: 16
- lora_dropout: 0.05
- lora_target_modules:
- - q_proj
- - v_proj
- lora_fan_in_fan_out: false
- wandb_project:
- wandb_watch:
- wandb_run_id:
- wandb_log_model:
- output_dir: ./lora-llama-alpaca
- gradient_accumulation_steps: 1
- micro_batch_size: 16
- num_epochs: 3
- learning_rate: 0.00003
- train_on_inputs: false
- group_by_length: false
- bf16: false
- tf32: false
- early_stopping_patience:
- resume_from_checkpoint:
- local_rank:
- tokens:
- pad_token: "[PAD]"
- bos_token: "<s>"
- eos_token: "</s>"
- unk_token: "<unk>"

@@ -1,39 +0,0 @@ (deleted file)
- base_model: huggyllama/llama-13b
- model_type: LlamaForCausalLM
- tokenizer_type: LlamaTokenizer
- load_in_8bit: true
- datasets:
- - path: anon8231489123/ShareGPT_Vicuna_unfiltered
- data_files: ShareGPT_V3_unfiltered_cleaned_split_no_imsorry.json
- type: sharegpt
- dataset_prepared_path: last_run_prepared
- val_set_size: 0.002
- adapter:
- lora_model_dir:
- sequence_len: 2048
- lora_r: 8
- lora_alpha: 16
- lora_dropout: 0.05
- lora_target_modules:
- - q_proj
- - v_proj
- lora_fan_in_fan_out: false
- wandb_project:
- wandb_watch:
- wandb_run_id:
- wandb_log_model:
- output_dir: ./llama-13b-sharegpt
- gradient_accumulation_steps: 1
- micro_batch_size: 2
- warmup_steps: 1000
- save_steps:
- eval_steps:
- num_epochs: 5
- learning_rate: 0.00003
- train_on_inputs: false
- group_by_length: false
- bf16: true
- tf32: true
- early_stopping_patience: 5
- resume_from_checkpoint:
- local_rank:

@@ -1,44 +0,0 @@ (deleted file)
- base_model: huggyllama/llama-65b
- model_type: LlamaForCausalLM
- tokenizer_type: LlamaTokenizer
- load_in_8bit: true
- datasets:
- - path: data/alpaca_data_gpt4.jsonl
- type: alpaca
- - path: anon8231489123/ShareGPT_Vicuna_unfiltered
- data_files: ShareGPT_V3_unfiltered_cleaned_split_no_imsorry.json
- type: sharegpt
- - path: data/gpt4-instruct-similarity-0.6-dataset.jsonl
- type: gpteacher
- - path: data/roleplay-similarity_0.6-instruct-dataset.jsonl
- type: gpteacher
- dataset_prepared_path: last_run_prepared
- val_set_size: 0.04
- adapter: lora
- lora_model_dir:
- sequence_len: 2048
- lora_r: 8
- lora_alpha: 16
- lora_dropout: 0.05
- lora_target_modules:
- - q_proj
- - v_proj
- lora_fan_in_fan_out: false
- wandb_project: llama-65b-lora
- wandb_watch:
- wandb_run_id:
- wandb_log_model:
- output_dir: ./lora-llama-alpaca
- gradient_accumulation_steps: 1
- micro_batch_size: 16
- warmup_steps: 1000
- save_steps:
- num_epochs: 5
- learning_rate: 0.00003
- train_on_inputs: false
- group_by_length: false
- bf16: true
- tf32: true
- early_stopping_patience:
- resume_from_checkpoint:
- local_rank:

@@ -1,45 +0,0 @@ (deleted file)
- base_model: decapoda-research/llama-7b-hf-int4
- base_model_config: decapoda-research/llama-7b-hf
- model_type: LlamaForCausalLM
- tokenizer_type: LlamaTokenizer
- load_in_8bit: true
- datasets:
- - path: tatsu-lab/alpaca # original alpaca dataset
- type: alpaca
- dataset_prepared_path: data/last_run_prepared
- val_set_size: 0.04
- adapter: lora
- lora_model_dir:
- sequence_len: 2048
- max_packed_sequence_len: 1024
- lora_r: 8
- lora_alpha: 16
- lora_dropout: 0.05
- lora_target_modules:
- - q_proj
- - v_proj
- # - k_proj
- # - o_proj
- lora_fan_in_fan_out: false
- wandb_project:
- wandb_watch:
- wandb_run_id:
- wandb_log_model:
- output_dir: ./lora-test
- gradient_accumulation_steps: 1
- micro_batch_size: 2
- num_epochs: 3
- warmup_steps: 100
- learning_rate: 0.00003
- train_on_inputs: false
- group_by_length: false
- bf16: true
- tf32: true
- gradient_checkpointing: false
- early_stopping_patience: 3
- resume_from_checkpoint:
- auto_resume_from_checkpoints: true
- local_rank:
- load_4bit: true
- xformers_attention: true
- flash_attention:

@@ -1,41 +0,0 @@ (deleted file)
- base_model: huggyllama/llama-7b
- model_type: LlamaForCausalLM
- tokenizer_type: LlamaTokenizer
- load_in_8bit: true
- datasets:
- - path: data/alpaca_data_gpt4.jsonl
- type: alpaca
- - path: data/vicuna_cleaned.jsonl
- type: sharegpt
- - path: data/gpt4-instruct-similarity-0.6-dataset.jsonl
- type: gpteacher
- - path: data/roleplay-similarity_0.6-instruct-dataset.jsonl
- type: gpteacher
- dataset_prepared_path: last_run_prepared
- val_set_size: 0.04
- adapter: lora
- lora_model_dir:
- sequence_len: 2048
- lora_r: 8
- lora_alpha: 16
- lora_dropout: 0.05
- lora_target_modules:
- - q_proj
- - v_proj
- lora_fan_in_fan_out: false
- wandb_project: llama-7b-lora
- wandb_watch:
- wandb_run_id:
- wandb_log_model:
- output_dir: ./lora-llama-alpaca
- gradient_accumulation_steps: 1
- micro_batch_size: 16
- num_epochs: 5
- learning_rate: 0.00003
- train_on_inputs: false
- group_by_length: false
- bf16: true
- tf32: true
- early_stopping_patience:
- resume_from_checkpoint:
- local_rank:

@@ -1,45 +0,0 @@ (deleted file)
- base_model: decapoda-research/llama-7b-hf-int4
- base_model_config: decapoda-research/llama-7b-hf
- model_type: LlamaForCausalLM
- tokenizer_type: LlamaTokenizer
- load_in_8bit: true
- datasets:
- - path: tatsu-lab/alpaca # original alpaca dataset
- type: alpaca
- dataset_prepared_path: data/last_run_prepared
- val_set_size: 0.04
- adapter: lora
- lora_model_dir:
- sequence_len: 1024
- max_packed_sequence_len: 1024
- lora_r: 8
- lora_alpha: 16
- lora_dropout: 0.05
- lora_target_modules:
- - q_proj
- - v_proj
- # - k_proj
- # - o_proj
- lora_fan_in_fan_out: false
- wandb_project:
- wandb_watch:
- wandb_run_id:
- wandb_log_model:
- output_dir: ./lora-test
- gradient_accumulation_steps: 1
- micro_batch_size: 1
- num_epochs: 3
- warmup_steps: 100
- learning_rate: 0.00003
- train_on_inputs: false
- group_by_length: false
- bf16: true
- tf32: true
- gradient_checkpointing: false
- early_stopping_patience: 3
- resume_from_checkpoint:
- auto_resume_from_checkpoints: true
- local_rank:
- gptq: true
- xformers_attention: true
- flash_attention:

@@ -1,87 +0,0 @@ (deleted file)
- # this is the huggingface model that contains *.pt, *.safetensors, or *.bin files
- # this can also be a relative path to a model on disk
- base_model: decapoda-research/llama-7b-hf-int4
- # you can specify an ignore pattern if the model repo contains more than 1 model type (*.pt, etc)
- base_model_ignore_patterns:
- # if the base_model repo on hf hub doesn't include configuration .json files,
- # you can set that here, or leave this empty to default to base_model
- base_model_config: decapoda-research/llama-7b-hf
- # If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too
- model_type: AutoModelForCausalLM
- # Corresponding tokenizer for the model AutoTokenizer is a good choice
- tokenizer_type: AutoTokenizer
- # whether you are training a 4-bit quantized model
- load_4bit: true
- # this will attempt to quantize the model down to 8 bits and use adam 8 bit optimizer
- load_in_8bit: true
- # a list of one or more datasets to finetune the model with
- datasets:
- # this can be either a hf dataset, or relative path
- - path: vicgalle/alpaca-gpt4
- # The type of prompt to use for training. [alpaca, sharegpt, gpteacher, oasst, reflection]
- type: alpaca
- # axolotl attempts to save the dataset as an arrow after packing the data together so
- # subsequent training attempts load faster, relative path
- dataset_prepared_path: data/last_run_prepared
- # How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc
- val_set_size: 0.04
- # if you want to use lora, leave blank to train all parameters in original model
- adapter: lora
- # if you already have a lora model trained that you want to load, put that here
- lora_model_dir:
- # the maximum length of an input to train with, this should typically be less than 2048
- # as most models have a token/context limit of 2048
- sequence_len: 2048
- # max sequence length to concatenate training samples together up to
- # inspired by StackLLaMA. see https://huggingface.co/blog/stackllama#supervised-fine-tuning
- max_packed_sequence_len: 1024
- # lora hyperparameters
- lora_r: 8
- lora_alpha: 16
- lora_dropout: 0.05
- lora_target_modules:
- - q_proj
- - v_proj
- # - k_proj
- # - o_proj
- lora_fan_in_fan_out: false
- # wandb configuration if your're using it
- wandb_project:
- wandb_watch:
- wandb_run_id:
- wandb_log_model:
- # where to save the finsihed model to
- output_dir: ./completed-model
- # training hyperparameters
- gradient_accumulation_steps: 1
- batch_size:
- micro_batch_size: 2
- num_epochs: 3
- warmup_steps: 100
- learning_rate: 0.00003
- # whether to mask out or include the human's prompt from the training labels
- train_on_inputs: false
- # don't use this, leads to wonky training (according to someone on the internet)
- group_by_length: false
- # Use CUDA bf16
- bf16: true
- # Use CUDA tf32
- tf32: true
- # does not work with current implementation of 4-bit LoRA
- gradient_checkpointing: false
- # stop training after this many evaluation losses have increased in a row
- # https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback
- early_stopping_patience: 3
- # specify a scheduler to use with the optimizer. only one_cycle is supported currently
- lr_scheduler:
- # whether to use xformers attention patch https://github.com/facebookresearch/xformers:
- xformers_attention:
- # whether to use flash attention patch https://github.com/HazyResearch/flash-attention:
- flash_attention:
- # resume from a specific checkpoint dir
- resume_from_checkpoint:
- # if resume_from_checkpoint isn't set and you simply want it to start where it left off
- # be careful with this being turned on between different models
- auto_resume_from_checkpoints: false
- # don't mess with this, it's here for accelerate and torchrun
- local_rank:

@@ -1,56 +0,0 @@ (deleted file)
- base_model: stabilityai/stablelm-base-alpha-3b
- base_model_config: stabilityai/stablelm-base-alpha-3b
- load_in_8bit: false
- datasets:
- - path: vicgalle/alpaca-gpt4
- type: alpaca
- dataset_prepared_path: last_run_prepared
- val_set_size: 0.04
- adapter:
- lora_model_dir:
- sequence_len: 4096
- max_packed_sequence_len: 4096
- lora_r: 8
- lora_alpha: 16
- lora_dropout: 0.05
- lora_target_modules:
- - q_proj
- - v_proj
- lora_fan_in_fan_out: false
- wandb_project: stable-alpaca-3b
- wandb_watch:
- wandb_run_id:
- wandb_log_model:
- output_dir: ./stable-alpaca-3b
- gradient_accumulation_steps: 1
- micro_batch_size: 1
- num_epochs: 1
- optimizer: adamw_bnb_8bit
- torchdistx_path:
- lr_scheduler: cosine
- learning_rate: 0.0000002
- train_on_inputs: false
- group_by_length: false
- bf16: true
- tf32: true
- early_stopping_patience:
- resume_from_checkpoint:
- local_rank:
- logging_steps: 1
- xformers_attention: true
- flash_attention:
- gptq_groupsize:
- gptq_model_v1:
- warmup_steps: 100
- eval_steps: 50
- save_steps: 200
- debug:
- deepspeed:
- weight_decay: 0.01
- fsdp:
- fsdp_config:
- #tokens:
- # pad_token: "[PAD]"
- # bos_token: "<s>"
- # eos_token: "</s>"
- # unk_token: "<unk>"

@@ -1,45 +0,0 @@ (deleted file)
- base_model: anon8231489123/vicuna-13b-GPTQ-4bit-128g
- base_model_config: anon8231489123/vicuna-13b-GPTQ-4bit-128g
- model_type: LlamaForCausalLM
- tokenizer_type: LlamaTokenizer
- load_in_8bit: false
- load_4bit: true
- gptq_groupsize: 128
- gptq_model_v1: false
- datasets:
- # https://github.com/vaguenebula/AlpacaDataReflect/blob/main/alpaca_reflect_pruned.json
- - path: data/alpaca_reflect_pruned.jsonl
- type: reflection
- dataset_prepared_path: data/last_run_prepared
- val_set_size: 0.04
- adapter: lora
- lora_model_dir:
- sequence_len: 2048
- max_packed_sequence_len: 2048
- lora_r: 8
- lora_alpha: 16
- lora_dropout: 0.05
- lora_target_modules:
- - q_proj
- - v_proj
- # - k_proj
- # - o_proj
- lora_fan_in_fan_out: false
- wandb_project:
- wandb_watch:
- wandb_run_id:
- wandb_log_model:
- output_dir: ./lora-reflect
- gradient_accumulation_steps: 1
- micro_batch_size: 2
- num_epochs: 3
- learning_rate: 0.00003
- train_on_inputs: false
- group_by_length: false
- bf16: true
- tf32: true
- gradient_checkpointing: false
- early_stopping_patience: 3
- resume_from_checkpoint:
- local_rank:
- flash_attention: true

@@ -10,10 +10,10 @@ curl https://github.com/teknium1/GPTeacher/blob/main/Roleplay/roleplay-similarit
  ## Convert the JSON data files to JSONL.

  ```shell
- python3 ./scripts/alpaca_json_to_jsonl.py --input data/alpaca_data_gpt4.json > data/alpaca_data_gpt4.jsonl
+ python3 ./scripts/alpaca_json_to_jsonl.py --file data/alpaca_data_gpt4.json --output data/alpaca_data_gpt4.jsonl
- python3 ./scripts/alpaca_json_to_jsonl.py --input data/raw/vicuna_cleaned.json > data/vicuna_cleaned.jsonl
+ python3 ./scripts/alpaca_json_to_jsonl.py --file data/raw/vicuna_cleaned.json --output data/vicuna_cleaned.jsonl
- python3 ./scripts/alpaca_json_to_jsonl.py --input data/raw/roleplay-similarity_0.6-instruct-dataset.json > data/roleplay-similarity_0.6-instruct-dataset.jsonl
+ python3 ./scripts/alpaca_json_to_jsonl.py --file data/raw/roleplay-similarity_0.6-instruct-dataset.json --output data/roleplay-similarity_0.6-instruct-dataset.jsonl
- python3 ./scripts/alpaca_json_to_jsonl.py --input data/raw/gpt4-instruct-similarity-0.6-dataset.json > data/gpt4-instruct-similarity-0.6-dataset.jsonl
+ python3 ./scripts/alpaca_json_to_jsonl.py --file data/raw/gpt4-instruct-similarity-0.6-dataset.json --output data/gpt4-instruct-similarity-0.6-dataset.jsonl
  ```
  ---

docker-compose.yaml (new file, 20 lines)

@@ -0,0 +1,20 @@
+ # version: '3.8'
+ services:
+ axolotl:
+ build:
+ context: .
+ dockerfile: ./docker/Dockerfile
+ volumes:
+ - .:/workspace/axolotl
+ - ~/.cache/huggingface/:/root/.cache/huggingface/
+ # set environment variables
+ environment:
+ - WANDB_API_KEY=${WANDB_API_KEY}
+ deploy:
+ resources:
+ reservations:
+ devices:
+ - driver: nvidia
+ # count: 1
+ capabilities: [gpu]
+ command: tail -f /dev/null

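The new compose file builds the image locally and mounts the working tree plus the Hugging Face cache. A roughly equivalent plain `docker run` sketch, using the prebuilt image tag from the README instead of a local build (flags are standard Docker options; adjust paths for your setup):

```bash
docker run --gpus '"all"' --rm -it \
  -v "$(pwd)":/workspace/axolotl \
  -v ~/.cache/huggingface/:/root/.cache/huggingface/ \
  -e WANDB_API_KEY=$WANDB_API_KEY \
  winglian/axolotl:main-py3.9-cu118-2.0.0
```
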
@@ -13,8 +13,7 @@ RUN pip3 install --force-reinstall "peft @ git+https://github.com/huggingface/pe
  "accelerate @ git+https://github.com/huggingface/accelerate.git@main" \
  "transformers @ git+https://github.com/huggingface/transformers.git@main"

- RUN mkdir axolotl
+ RUN git clone --depth=1 https://github.com/OpenAccess-AI-Collective/axolotl.git
- COPY . axolotl/
  # If AXOLOTL_EXTRAS is set, append it in brackets
  RUN cd axolotl && \
  if [ "$AXOLOTL_EXTRAS" != "" ] ; then \

@@ -77,7 +77,7 @@ FROM base-builder
  RUN python3 -m pip uninstall -y apex
  RUN git clone https://github.com/NVIDIA/apex
  # `MAX_JOBS=1` disables parallel building to avoid cpu memory OOM when building image on GitHub Action (standard) runners
- RUN cd apex && MAX_JOBS=1 python3 -m pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check .
+ RUN cd apex && MAX_JOBS=1 python3 -m pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./

  RUN mkdir -p /workspace/builds
  COPY --from=bnb-builder /workspace/bitsandbytes /workspace/builds/bitsandbytes

@@ -97,4 +97,4 @@ RUN cd /workspace/builds/bitsandbytes && python3 setup.py install
  RUN git lfs install --skip-repo
  RUN pip3 install awscli && \
  # The base image ships with `pydantic==1.8.2` which is not working
- pip3 install -U --no-cache-dir pydantic
+ pip3 install -U --no-cache-dir pydantic==1.10.10

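The hunk above pins pydantic to 1.10.10 in the image. A quick, assumed sanity check after building (pydantic 1.x exposes a `VERSION` attribute):

```bash
python3 -c "import pydantic; print(pydantic.VERSION)"   # expect 1.10.10
```
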
examples/cerebras/qlora.yml (new file, 60 lines)

@@ -0,0 +1,60 @@
+ base_model: cerebras/Cerebras-GPT-1.3B
+ base_model_config: cerebras/Cerebras-GPT-1.3B
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+ push_dataset_to_hub:
+ datasets:
+ - path: teknium/GPT4-LLM-Cleaned
+ type: alpaca
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0.01
+ adapter: qlora
+ lora_model_dir:
+ sequence_len: 2048
+ max_packed_sequence_len: 2048
+ lora_r: 16
+ lora_alpha: 32
+ lora_dropout: 0.05
+ lora_target_modules:
+ - c_fc
+ - c_attn
+ - c_proj
+ lora_target_linear:
+ lora_fan_in_fan_out:
+ wandb_project:
+ wandb_watch:
+ wandb_run_id:
+ wandb_log_model:
+ output_dir: ./qlora-out
+ batch_size: 4
+ micro_batch_size: 4
+ num_epochs: 2
+ optimizer: paged_adamw_8bit
+ torchdistx_path:
+ lr_scheduler: cosine
+ learning_rate: 0.0002
+ train_on_inputs: false
+ group_by_length: true
+ bf16: true
+ fp16: false
+ tf32: true
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention: true
+ flash_attention:
+ gptq_groupsize:
+ gptq_model_v1:
+ warmup_steps: 10
+ eval_steps: 20
+ save_steps:
+ debug:
+ deepspeed:
+ weight_decay: 0.1
+ fsdp:
+ fsdp_config:
+ special_tokens:
+ pad_token: "<|endoftext|>"

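To try the new Cerebras QLoRA example above, the README's training entry point should apply directly; only the config path below comes from this diff, and everything else mirrors the documented quickstart:

```bash
accelerate launch scripts/finetune.py examples/cerebras/qlora.yml
```
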
@@ -23,7 +23,7 @@ lora_dropout: 0.0
  lora_target_modules:
  lora_target_linear: true
  lora_fan_in_fan_out:
- wandb_project: falcon-7b
+ wandb_project:
  wandb_watch:
  wandb_run_id:
  wandb_log_model:

examples/falcon/config-7b-qlora.yml (new file, 92 lines)

@@ -0,0 +1,92 @@
+ # 1b: tiiuae/falcon-rw-1b
+ # 40b: tiiuae/falcon-40b
+ base_model: tiiuae/falcon-7b
+ base_model_config: tiiuae/falcon-7b
+ # required by falcon custom model code: https://huggingface.co/tiiuae/falcon-7b/tree/main
+ trust_remote_code: true
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+ load_in_8bit: false
+ # enable 4bit for QLoRA
+ load_in_4bit: true
+ gptq: false
+ strict: false
+ push_dataset_to_hub:
+ datasets:
+ - path: QingyiSi/Alpaca-CoT
+ data_files:
+ - Chain-of-Thought/formatted_cot_data/gsm8k_train.json
+ type: "alpaca:chat"
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0.01
+ # enable QLoRA
+ adapter: qlora
+ lora_model_dir:
+ sequence_len: 2048
+ max_packed_sequence_len:
+
+ # hyperparameters from QLoRA paper Appendix B.2
+ # "We find hyperparameters to be largely robust across datasets"
+ lora_r: 64
+ lora_alpha: 16
+ # 0.1 for models up to 13B
+ # 0.05 for 33B and 65B models
+ lora_dropout: 0.05
+ # add LoRA modules on all linear layers of the base model
+ lora_target_modules:
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+
+ wandb_project:
+ wandb_watch:
+ wandb_run_id:
+ wandb_log_model:
+ output_dir: ./qlora-out
+
+ # QLoRA paper Table 9
+ # - 16 for 7b & 13b
+ # - 32 for 33b, 64 for 64b
+ # Max size tested on A6000
+ # - 7b: 40
+ # - 40b: 4
+ # decrease if OOM, increase for max VRAM utilization
+ micro_batch_size: 1
+ gradient_accumulation_steps: 2
+ num_epochs: 3
+ # Optimizer for QLoRA
+ optimizer: paged_adamw_32bit
+ torchdistx_path:
+ lr_scheduler: cosine
+ # QLoRA paper Table 9
+ # - 2e-4 for 7b & 13b
+ # - 1e-4 for 33b & 64b
+ learning_rate: 0.0002
+ train_on_inputs: false
+ group_by_length: false
+ bf16: true
+ fp16: false
+ tf32: true
+ gradient_checkpointing: true
+ # stop training after this many evaluation losses have increased in a row
+ # https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback
+ early_stopping_patience: 3
+ resume_from_checkpoint:
+ auto_resume_from_checkpoints: true
+ local_rank:
+ logging_steps: 1
+ xformers_attention: true
+ flash_attention:
+ gptq_groupsize:
+ gptq_model_v1:
+ warmup_steps: 10
+ eval_steps: 5
+ save_steps: 10
+ debug:
+ deepspeed:
+ weight_decay: 0.000001
+ fsdp:
+ fsdp_config:
+ special_tokens:
+ pad_token: "<|endoftext|>"
+ bos_token: ">>ABSTRACT<<"
+ eos_token: "<|endoftext|>"

@@ -23,7 +23,7 @@ lora_dropout: 0.0
 lora_target_modules:
 lora_target_linear: true
 lora_fan_in_fan_out:
-wandb_project: falcon-7b
+wandb_project:
 wandb_watch:
 wandb_run_id:
 wandb_log_model:
57  examples/gptj/qlora.yml  Normal file
@@ -0,0 +1,57 @@
base_model: EleutherAI/gpt-j-6b
base_model_config: EleutherAI/gpt-j-6b
load_in_8bit: false
load_in_4bit: true
strict: false
push_dataset_to_hub:
datasets:
  - path: teknium/GPT4-LLM-Cleaned
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.01
adapter: qlora
lora_model_dir:
sequence_len: 2048
max_packed_sequence_len:
lora_r: 8
lora_alpha: 32
lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true
lora_fan_in_fan_out:
wandb_project:
wandb_watch:
wandb_run_id:
wandb_log_model:
output_dir: ./qlora-out
gradient_accumulation_steps: 2
micro_batch_size: 2
num_epochs: 2
optimizer: paged_adamw_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0001
train_on_inputs: false
group_by_length: true
bf16: true
fp16: false
tf32: true
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 10
eval_steps: 20
save_steps:
debug:
deepspeed:
weight_decay: 0.1
fsdp:
fsdp_config:
special_tokens:
  pad_token: "<|endoftext|>"
@@ -3,6 +3,6 @@
 This is a good place to start for beginners. This will run on an NVIDIA RTX4090 with no other changes needed.

 ```shell
-accelerate launch scripts/finetune.py examples/4bit-lora-7b/config.yml
+accelerate launch scripts/finetune.py examples/gptq-lora-7b/config.yml
 ```
@@ -7,30 +7,28 @@ datasets:
   - path: openaccess-ai-collective/jeopardy
     type: jeopardy
 dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+val_set_size: 0.02
 adapter:
 lora_model_dir:
-sequence_len: 2048
-max_packed_sequence_len: 2048
-lora_r: 8
-lora_alpha: 16
-lora_dropout: 0.05
+sequence_len: 512
+max_packed_sequence_len:
+lora_r:
+lora_alpha:
+lora_dropout:
 lora_target_modules:
-  - q_proj
-  - v_proj
 lora_fan_in_fan_out: false
-wandb_project: jeopardy-bot-7b
+wandb_project:
 wandb_watch:
 wandb_run_id:
 wandb_log_model:
 output_dir: ./jeopardy-bot-7b
-gradient_accumulation_steps: 2
+gradient_accumulation_steps: 1
 micro_batch_size: 1
-num_epochs: 2
+num_epochs: 3
 optimizer: adamw_bnb_8bit
 torchdistx_path:
 lr_scheduler: cosine
-learning_rate: 0.0000002
+learning_rate: 0.00003
 train_on_inputs: false
 group_by_length: false
 bf16: true
@@ -48,11 +46,10 @@ eval_steps: 110
 save_steps: 660
 debug:
 deepspeed:
-weight_decay: 0.0001
+weight_decay: 0.1
 fsdp:
 fsdp_config:
 tokens:
-  pad_token: "[PAD]"
   bos_token: "<s>"
   eos_token: "</s>"
   unk_token: "<unk>"
16  examples/openllama-3b/README.md  Normal file
@@ -0,0 +1,16 @@
# openllama-3b

Basic full tune
```shell
accelerate launch scripts/finetune.py examples/openllama-3b/config.yml
```

LoRA
```shell
accelerate launch scripts/finetune.py examples/openllama-3b/lora.yml
```

QLoRA
```shell
accelerate launch scripts/finetune.py examples/openllama-3b/qlora.yml
```
62  examples/openllama-3b/config.yml  Normal file
@@ -0,0 +1,62 @@
base_model: openlm-research/open_llama_3b
base_model_config: openlm-research/open_llama_3b
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: false
load_in_4bit: false
strict: false
push_dataset_to_hub:
datasets:
  - path: teknium/GPT4-LLM-Cleaned
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.02
adapter:
lora_model_dir:
sequence_len: 256
max_packed_sequence_len:
lora_r:
lora_alpha:
lora_dropout:
lora_target_modules:
lora_target_linear:
lora_fan_in_fan_out:
wandb_project:
wandb_watch:
wandb_run_id:
wandb_log_model:
output_dir: ./openllama-out
gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 3
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
float16: true
bf16: false
fp16: false
tf32: false
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 10
eval_steps: 50
save_steps:
debug:
deepspeed:
weight_decay: 0.1
fsdp:
fsdp_config:
special_tokens:
  bos_token: "<s>"
  eos_token: "</s>"
  unk_token: "<unk>"
@@ -1,5 +1,5 @@
-base_model: openlm-research/open_llama_3b_600bt_preview
-base_model_config: openlm-research/open_llama_3b_600bt_preview
+base_model: openlm-research/open_llama_3b
+base_model_config: openlm-research/open_llama_3b
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
 load_in_8bit: true
@@ -49,7 +49,7 @@ early_stopping_patience:
 resume_from_checkpoint:
 local_rank:
 logging_steps: 1
-xformers_attention:
+xformers_attention: true
 flash_attention:
 gptq_groupsize:
 gptq_model_v1:
@@ -1,5 +1,5 @@
-base_model: openlm-research/open_llama_3b_600bt_preview
-base_model_config: openlm-research/open_llama_3b_600bt_preview
+base_model: openlm-research/open_llama_3b
+base_model_config: openlm-research/open_llama_3b
 model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
 load_in_8bit: false
@@ -1,4 +1,4 @@
-# Python 12B
+# Pythia 12B

 - Single-GPU A100 only (?)

@@ -7,4 +7,3 @@ python scripts/finetune.py examples/pythia-12b/config.yml
 ```

 ⚠️ Multiple-GPU A100 - Doesn't seem to work with multi-gpu without causing OOM! ⚠️
-
@@ -22,7 +22,7 @@ lora_dropout: 0.0
 lora_target_modules:
 lora_target_linear: true
 lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
-wandb_project: pythia-12b
+wandb_project:
 wandb_watch:
 wandb_run_id:
 wandb_log_model:
@@ -45,5 +45,5 @@ resume_from_checkpoint:
 local_rank:
 gradient_checkpointing: true
 fsdp:
-fsdp_transformer_layer_cls_to_wrap:
+fsdp_config:
 collator_pad_to_longest: true
@@ -1,36 +1,29 @@
 base_model: EleutherAI/pythia-1.4b-deduped
-model_type: GPTNeoXForCausalLM
-tokenizer_type: AutoTokenizer
+base_model_config: EleutherAI/pythia-1.4b-deduped
 load_in_8bit: true
 datasets:
-  - path: data/alpaca_data_gpt4.jsonl
+  - path: teknium/GPT4-LLM-Cleaned
     type: alpaca
-  - path: data/vicuna_cleaned.jsonl
-    type: sharegpt
-  - path: data/gpt4-instruct-similarity-0.6-dataset.jsonl
-    type: gpteacher
-  - path: data/roleplay-similarity_0.6-instruct-dataset.jsonl
-    type: gpteacher
 dataset_prepared_path: last_run_prepared
 val_set_size: 0.05
 adapter: lora
 lora_model_dir:
-sequence_len: 2048
-lora_r: 8
+sequence_len: 512
+lora_r: 16
 lora_alpha: 32
 lora_dropout: 0.05
 lora_target_modules:
   - query_key_value
-# - xxx
+lora_target_linear:
 lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
-wandb_project: pythia-1.4b-lora
+wandb_project:
 wandb_watch:
 wandb_run_id:
 wandb_log_model:
-output_dir: ./lora-alpaca
+output_dir: ./lora-alpaca-pythia
 gradient_accumulation_steps: 1
 micro_batch_size: 4
-num_epochs: 5
+num_epochs: 3
 learning_rate: 0.00001
 train_on_inputs: false
 group_by_length: false
@@ -39,3 +32,6 @@ tf32: True
 early_stopping_patience:
 resume_from_checkpoint:
 local_rank:
+weight_decay: 0.1
+eval_steps: 20
+logging_steps: 1
@@ -1,6 +0,0 @@
-# qlora-openllama-3b
-
-```shell
-accelerate launch scripts/finetune.py examples/qlora-openllama-3b/config.yml
-
-```
@@ -1,7 +1,7 @@
 base_model: togethercomputer/RedPajama-INCITE-Chat-3B-v1
 base_model_config: togethercomputer/RedPajama-INCITE-Chat-3B-v1
 model_type: GPTNeoXForCausalLM
-tokenizer_type: GPTNeoXTokenizer
+tokenizer_type: AutoTokenizer
 trust_remote_code:
 load_in_8bit: false
 datasets:
@@ -1,7 +1,6 @@
 peft @ git+https://github.com/huggingface/peft.git
 transformers @ git+https://github.com/huggingface/transformers.git
 bitsandbytes>=0.39.0
-accelerate
 addict
 fire
 PyYAML==6.0
@@ -18,3 +17,4 @@ evaluate==0.4.0
 rouge-score==0.1.2
 scipy
 scikit-learn==1.2.2
+numba
@@ -14,9 +14,8 @@ import torch
 import yaml

 # add src to the pythonpath so we don't need to pip install this
-from datasets import Dataset
 from optimum.bettertransformer import BetterTransformer
-from transformers import GenerationConfig
+from transformers import GenerationConfig, TextStreamer

 from axolotl.utils.data import load_prepare_datasets, load_pretraining_dataset
 from axolotl.utils.dict import DictDefault
@@ -50,7 +49,7 @@ def choose_device(cfg):

     cfg.device = get_device()
     if cfg.device_map != "auto":
-        if cfg.device == "cuda":
+        if cfg.device.startswith("cuda"):
             cfg.device_map = {"": cfg.local_rank}
         else:
             cfg.device_map = {"": cfg.device}
@@ -65,23 +64,43 @@ def get_multi_line_input() -> Optional[str]:
     return instruction


-def do_inference(cfg, model, tokenizer, prompter="AlpacaPrompter"):
-    tokenizer.add_special_tokens({"unk_token": "<unk>"})
-    tokenizer.add_special_tokens({"bos_token": "<s>"})
-    tokenizer.add_special_tokens({"eos_token": "</s>"})
+def do_inference(cfg, model, tokenizer, prompter: Optional[str]):
+    default_tokens = {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

-    prompter_module = getattr(importlib.import_module("axolotl.prompters"), prompter)
+    for token, symbol in default_tokens.items():
+        # If the token isn't already specified in the config, add it
+        if not (cfg.special_tokens and token in cfg.special_tokens):
+            tokenizer.add_special_tokens({token: symbol})
+
+    prompter_module = None
+    if prompter:
+        prompter_module = getattr(
+            importlib.import_module("axolotl.prompters"), prompter
+        )
+
+    if cfg.landmark_attention:
+        from axolotl.monkeypatch.llama_landmark_attn import set_model_mem_id
+
+        set_model_mem_id(model, tokenizer)
+        model.set_mem_cache_args(
+            max_seq_len=255, mem_freq=50, top_k=5, max_cache_size=None
+        )

     while True:
+        print("=" * 80)
         # support for multiline inputs
         instruction = get_multi_line_input()
         if not instruction:
             return
-        prompt: str = next(
-            prompter_module().build_prompt(instruction=instruction.strip("\n"))
-        )
+        if prompter_module:
+            prompt: str = next(
+                prompter_module().build_prompt(instruction=instruction.strip("\n"))
+            )
+        else:
+            prompt = instruction.strip()
         batch = tokenizer(prompt, return_tensors="pt", add_special_tokens=True)

+        print("=" * 40)
         model.eval()
         with torch.no_grad():
             generation_config = GenerationConfig(
@@ -100,10 +119,13 @@ def do_inference(cfg, model, tokenizer, prompter="AlpacaPrompter"):
                 output_hidden_states=False,
                 output_scores=False,
             )
+            streamer = TextStreamer(tokenizer)
             generated = model.generate(
                 inputs=batch["input_ids"].to(cfg.device),
                 generation_config=generation_config,
+                streamer=streamer,
             )
+        print("=" * 40)
         print(tokenizer.decode(generated["sequences"].cpu().tolist()[0]))

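The hunk above wires a `TextStreamer` into `model.generate` so inference output is printed token by token instead of only after generation finishes. A minimal standalone sketch of that behaviour (using `gpt2` purely as a placeholder model, not something this diff configures):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder model for illustration
model = AutoModelForCausalLM.from_pretrained("gpt2")

# TextStreamer decodes and prints tokens as generate() produces them.
streamer = TextStreamer(tokenizer)
inputs = tokenizer("The quick brown fox", return_tensors="pt")
model.generate(**inputs, max_new_tokens=20, streamer=streamer)
```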
@@ -153,7 +175,7 @@ def train(
     cfg_keys = cfg.keys()
     for k, _ in kwargs.items():
         # if not strict, allow writing to cfg even if it's not in the yml already
-        if k in cfg_keys or cfg.strict is False:
+        if k in cfg_keys or not cfg.strict:
             # handle booleans
             if isinstance(cfg[k], bool):
                 cfg[k] = bool(kwargs[k])
@@ -185,27 +207,30 @@ def train(
         cfg.fp16 = True
         cfg.bf16 = False

+    if cfg.tf32:
+        torch.backends.cuda.matmul.allow_tf32 = True
+
     # load the tokenizer first
     tokenizer_config = cfg.tokenizer_config or cfg.base_model_config
     logging.info(f"loading tokenizer... {tokenizer_config}")
     tokenizer = load_tokenizer(tokenizer_config, cfg.tokenizer_type, cfg)

-    if check_not_in(
-        ["inference", "shard", "merge_lora"], kwargs
+    if (
+        check_not_in(["shard", "merge_lora"], kwargs) and not cfg.inference
     ):  # don't need to load dataset for these
         if not cfg.pretraining_dataset:
             train_dataset, eval_dataset = load_prepare_datasets(
                 tokenizer, cfg, DEFAULT_DATASET_PREPARED_PATH
             )
         else:
-            if cfg.pretraining_dataset is True:
-                pretraining_dataset = "togethercomputer/RedPajama-Data-1T"
-            else:
-                pretraining_dataset = cfg.pretraining_dataset
             train_dataset = load_pretraining_dataset(
-                pretraining_dataset, tokenizer, max_tokens=cfg.sequence_len
+                cfg.pretraining_dataset,
+                tokenizer,
+                max_tokens=cfg.sequence_len,
+                seed=cfg.seed,
             )
-            train_dataset = Dataset.from_list(list(train_dataset))
+            # https://discuss.huggingface.co/t/how-to-use-huggingface-trainer-streaming-datasets-without-wrapping-it-with-torchdatas-iterablewrapper/25230
+            train_dataset = train_dataset.with_format("torch")
         eval_dataset = None

     if cfg.debug or "debug" in kwargs:
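The new `cfg.tf32` branch above is a thin wrapper around PyTorch's global TensorFloat-32 switch; what it toggles is simply:

```python
import torch

# What the cfg.tf32 branch enables: TF32 matmuls on Ampere-or-newer GPUs,
# trading a small amount of precision for throughput.
torch.backends.cuda.matmul.allow_tf32 = True
```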
@@ -230,7 +255,6 @@ def train(
         tokenizer,
         cfg,
         adapter=cfg.adapter,
-        inference=("inference" in kwargs),
     )

     if "merge_lora" in kwargs and cfg.adapter is not None:
@@ -243,30 +267,21 @@ def train(
         model.save_pretrained(str(Path(cfg.output_dir) / "merged"))
         return

-    if "inference" in kwargs:
+    if cfg.inference:
         logging.info("calling do_inference function")
-        do_inference(cfg, model, tokenizer)
+        prompter: Optional[str] = "AlpacaPrompter"
+        if "prompter" in kwargs:
+            if kwargs["prompter"] == "None":
+                prompter = None
+            else:
+                prompter = kwargs["prompter"]
+        do_inference(cfg, model, tokenizer, prompter=prompter)
         return

     if "shard" in kwargs:
         model.save_pretrained(cfg.output_dir)
         return

-    if cfg.debug:
-        logging.info("check_dataset_labels...")
-        check_dataset_labels(
-            train_dataset.select(
-                [random.randrange(0, len(train_dataset) - 1) for i in range(5)]  # nosec
-            ),
-            tokenizer,
-        )
-
-    if prepare_ds_only:
-        logging.info("Finished preparing dataset. Exiting...")
-        return
-
-    model.train()
-
     trainer = setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer)

     model.config.use_cache = False
@@ -311,6 +326,8 @@ def train(
             f"Using Auto-resume functionality to start with checkpoint at {resume_from_checkpoint}"
         )

+    if not Path(cfg.output_dir).is_dir():
+        os.makedirs(cfg.output_dir, exist_ok=True)
     if cfg.flash_optimum:
         with torch.backends.cuda.sdp_kernel(
             enable_flash=True, enable_math=True, enable_mem_efficient=True
@@ -33,12 +33,16 @@ class TokenizedPromptDataset(IterableDataset):

     def __iter__(self):
         iterator = iter(self.dataset)
+        count = 0
         # Loop through the entire dataset
         for example in iterator:
             try:
                 yield self.prompt_tokenizer.tokenize_prompt(example)
+                count += 1
             except InvalidDataException:
                 pass
+        if count == 0:
+            raise RuntimeError("Expected at least one datapoint in dataset.")


 # TODO this isn't the best since it can't interleave datasets
@@ -122,6 +126,7 @@ class ConstantLengthDataset(IterableDataset):
                 buffer_len = 0

             if example:
+                # FIXME
                 # just going to drop data points that are too long
                 if len(example["input_ids"]) <= self.seq_length:
                     input_ids = example["input_ids"]
1249  src/axolotl/monkeypatch/llama_landmark_attn.py  Normal file  (file diff suppressed because it is too large)
94  src/axolotl/monkeypatch/xpos_rope_llama_monkey_patch.py  Normal file
@@ -0,0 +1,94 @@
# pylint: skip-file
"""
Copied from https://github.com/kaiokendev/cutoff-len-is-context-len/blob/main/util/xpos_rope_llama_monkey_patch.py
"""
import torch
import transformers
import transformers.models.llama.modeling_llama
from einops import rearrange


class XposRotaryEmbedding(torch.nn.Module):
    def __init__(
        self,
        dim,
        max_position_embeddings=2048,
        base=10000,
        device=None,
        scale_base=2048,
        use_xpos=True,
    ):
        super().__init__()
        self.max_seq_len_cached = max_position_embeddings
        self.scale_base = scale_base

        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        t = torch.arange(self.max_seq_len_cached, device=device).type_as(inv_freq)
        freqs = torch.einsum("i , j -> i j", t, inv_freq)
        freqs = torch.cat((freqs, freqs), dim=-1)

        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.register_buffer("freqs_cached", freqs, persistent=False)

        if not use_xpos:
            self.register_buffer("scale", None)
            self.register_buffer("scale_cached", torch.ones(1))
            return

        scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
        power = (t - (self.max_seq_len_cached // 2)) / self.scale_base
        scale_cached = scale ** rearrange(power, "n -> n 1")
        scale_cached = torch.cat((scale_cached, scale_cached), dim=-1)

        self.register_buffer("scale", scale, persistent=False)
        self.register_buffer("scale_cached", scale_cached, persistent=False)

    def forward(
        self,
        x,
        seq_len,
    ):
        if seq_len > self.max_seq_len_cached:
            self.max_seq_len_cached = seq_len
            t = torch.arange(self.max_seq_len_cached, device=x.device).type_as(
                self.inv_freq
            )
            freqs = torch.einsum("i , j -> i j", t, self.inv_freq)
            freqs = torch.cat((freqs, freqs), dim=-1).to(dtype=x.dtype)

            self.register_buffer("freqs_cached", freqs)

            if self.scale is None:
                self.register_buffer(
                    "scale_cached", torch.ones(1, device=x.device).to(dtype=x.dtype)
                )

                return self.freqs_cached.to(dtype=x.dtype), self.scale_cached

            power = (t - (seq_len // 2)) / self.scale_base
            scale = self.scale ** rearrange(power, "n -> n 1")
            scale = torch.cat((scale, scale), dim=-1).to(dtype=x.dtype)
            self.register_buffer("scale_cached", scale)

        return self.freqs_cached.to(dtype=x.dtype), self.scale_cached.to(dtype=x.dtype)


def rotate_half(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, freqs, scale=1, position_ids=None):
    freqs = freqs[position_ids, :]
    if scale.shape[-1] != 1:
        scale = scale[position_ids, :]

    q_embed = (q * freqs.cos() * scale) + (rotate_half(q) * freqs.sin() * scale)
    k_embed = (k * freqs.cos() * 1 / scale) + (rotate_half(k) * freqs.sin() * 1 / scale)

    return q_embed, k_embed


def replace_llama_rope_with_xpos_rope():
    transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = XposRotaryEmbedding
    transformers.models.llama.modeling_llama.apply_rotary_pos_emb = apply_rotary_pos_emb
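The patch swaps `LlamaRotaryEmbedding` for the xpos variant at the module level, so it only takes effect if it is applied before the model is instantiated. A minimal sketch of how it could be used (the model name is only a placeholder; how a config flag wires this up elsewhere is not shown in this diff):

```python
from transformers import AutoModelForCausalLM

from axolotl.monkeypatch.xpos_rope_llama_monkey_patch import (
    replace_llama_rope_with_xpos_rope,
)

# Patch the transformers Llama module first, then load the model so the
# patched rotary embedding class is used when the layers are built.
replace_llama_rope_with_xpos_rope()
model = AutoModelForCausalLM.from_pretrained("openlm-research/open_llama_3b")
```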
@@ -6,7 +6,7 @@ from axolotl.prompt_tokenizers import (
     AlpacaPromptTokenizingStrategy,
     InstructionPromptTokenizingStrategy,
 )
-from axolotl.prompters import AlpacaPrompter, PromptStyle
+from axolotl.prompters import AlpacaPrompter, PromptStyle, UnpromptedPrompter


 def load(tokenizer, cfg):
@@ -18,6 +18,42 @@ def load(tokenizer, cfg):
     )


+class AlpacaConcisePrompter(AlpacaPrompter):
+    """
+    Alpaca Prompter extending the system prompt to ask for concise chat-instruct answers
+    """
+
+    system_prompt = "Below is an instruction from a USER that describes a task, paired with an input that provides further context. The ASSISTANT writes a response that concisely and appropriately completes the request.\n\n"
+    system_no_input_prompt = "Below is an instruction from a USER that describes a task. The ASSISTANT writes a response that appropriately and concisely completes the request.\n\n"
+
+
+class AlpacaChatPrompter(AlpacaPrompter):
+    """
+    Alpaca Chat Prompter extending the system prompt to for chat-instruct answers
+    """
+
+    system_prompt = "Below is an instruction from a USER that describes a task, paired with an input that provides further context. The ASSISTANT writes a response that concisely and appropriately completes the request.\n\n"
+    system_no_input_prompt = "Below is an instruction from a USER that describes a task. The ASSISTANT writes a response that appropriately and concisely completes the request.\n\n"
+
+    def __init__(self):  # pylint: disable=super-init-not-called
+        self.prompt_style = PromptStyle.CHAT.value
+        self.match_prompt_style()
+
+
+class NoSystemPrompter(AlpacaPrompter):
+    """
+    Null Prompter with no system prompts
+    """
+
+    system_prompt = ""
+    system_no_input_prompt = ""
+    turn_format = "{instruction} {input} "
+    turn_no_input_format = "{instruction} "
+
+    def __init__(self):  # pylint: disable=super-init-not-called
+        pass
+
+
 class AlpacaQAPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
     """
     Tokenizing strategy for AlpacaQA
@@ -31,9 +67,49 @@ class AlpacaQAPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
         )


-def load_qa(tokenizer, cfg):
-    return AlpacaQAPromptTokenizingStrategy(
-        AlpacaPrompter(PromptStyle.CHAT.value),
+class CamelAIPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
+    """
+    Tokenizing strategy for CamelAI datasets
+    """
+
+    def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
+        return (
+            prompt["message_1"],
+            "",
+            prompt["message_2"],
+        )
+
+
+def load_concise(tokenizer, cfg):
+    return AlpacaPromptTokenizingStrategy(
+        AlpacaConcisePrompter(PromptStyle.CHAT.value),
+        tokenizer,
+        cfg.train_on_inputs,
+        cfg.sequence_len,
+    )
+
+
+def load_qa(tokenizer, cfg):
+    return AlpacaQAPromptTokenizingStrategy(
+        AlpacaChatPrompter(),
+        tokenizer,
+        cfg.train_on_inputs,
+        cfg.sequence_len,
+    )
+
+
+def load_camel_ai(tokenizer, cfg):
+    return CamelAIPromptTokenizingStrategy(
+        AlpacaChatPrompter(),
+        tokenizer,
+        cfg.train_on_inputs,
+        cfg.sequence_len,
+    )
+
+
+def load_no_prompt(tokenizer, cfg):
+    return AlpacaPromptTokenizingStrategy(
+        UnpromptedPrompter(PromptStyle.CHAT.value),
         tokenizer,
         cfg.train_on_inputs,
         cfg.sequence_len,
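A quick sketch of what two of the prompters added above produce for a bare instruction (the instruction string is invented for illustration):

```python
from axolotl.prompt_strategies.alpaca_chat import AlpacaChatPrompter, NoSystemPrompter

# Chat-style prompter: chat system preamble + "USER: ...\nASSISTANT:" turn format.
print(next(AlpacaChatPrompter().build_prompt("Name three prime numbers.")))

# Null prompter: no system prompt, just the raw instruction as the turn.
print(next(NoSystemPrompter().build_prompt("Name three prime numbers.")))
```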
@@ -1,7 +1,7 @@
 """Module loading the AlpacaInstructPromptTokenizingStrategy class"""

 from axolotl.prompt_tokenizers import AlpacaPromptTokenizingStrategy
-from axolotl.prompters import AlpacaPrompter, PromptStyle
+from axolotl.prompters import AlpacaPrompter, PromptStyle, UnpromptedPrompter


 def load(tokenizer, cfg):
@@ -11,3 +11,12 @@ def load(tokenizer, cfg):
         cfg.train_on_inputs,
         cfg.sequence_len,
     )
+
+
+def load_no_prompt(tokenizer, cfg):
+    return AlpacaPromptTokenizingStrategy(
+        UnpromptedPrompter(PromptStyle.INSTRUCT.value),
+        tokenizer,
+        cfg.train_on_inputs,
+        cfg.sequence_len,
+    )
120  src/axolotl/prompt_strategies/alpaca_w_system.py  Normal file
@@ -0,0 +1,120 @@
"""
Prompt strategies loader for alpaca instruction datasets with system prompts
"""
from typing import Generator, Tuple, Union

from axolotl.prompt_tokenizers import PromptTokenizingStrategy
from axolotl.prompters import AlpacaPrompter, PromptStyle


class InstructionWSystemPromptTokenizingStrategy(PromptTokenizingStrategy):
    """
    Tokenizing strategy for instruction-based prompts.
    """

    def parse_instruction_fields(self, prompt) -> Tuple[str, str, str, str]:
        return (
            prompt["instruction"],
            prompt["input"] if "input" in prompt else "",
            prompt["output"],
            prompt["system"],
        )

    def tokenize_prompt(self, prompt):
        # pylint: disable=duplicate-code
        (
            instruction,
            input,  # pylint: disable=redefined-builtin
            response,
            system,
        ) = self.parse_instruction_fields(prompt)
        user_prompt = next(
            iter(
                self.prompter.build_prompt_w_system(
                    system,
                    instruction,
                    input,
                )
            )
        )
        tokenized_prompt = self._tokenize(user_prompt, add_eos_token=False)
        if not self.train_on_inputs:
            user_prompt_len = len(tokenized_prompt["input_ids"])
            # TODO this could be sped up using numpy array slicing
            tokenized_prompt["labels"] = [-100] * user_prompt_len
        tokenized_res_prompt = self._tokenize(
            response, strip_bos_token=True, add_eos_token=True
        )
        tokenized_prompt["input_ids"] += tokenized_res_prompt["input_ids"]
        tokenized_prompt["attention_mask"] += tokenized_res_prompt["attention_mask"]
        tokenized_prompt["labels"] += tokenized_res_prompt["input_ids"]

        return tokenized_prompt


class SystemDataPrompter(AlpacaPrompter):
    """
    Alpaca Style Prompter that uses system prompts from the dataset
    """

    def build_prompt_w_system(
        self,
        system: str,
        instruction: str,
        input: Union[None, str] = None,  # pylint: disable=redefined-builtin
        output: Union[None, str] = None,
    ) -> Generator[str, None, None]:
        # returns the full prompt from instruction and optional input
        # if a label (=response, =output) is provided, it's also appended.
        if input:
            res = system + self.turn_format.format(instruction=instruction, input=input)
        else:
            res = system + self.turn_no_input_format.format(instruction=instruction)
        if output:
            res = f"{res}{output}"
        yield res


class OpenOrcaPromptTokenizingStrategy(InstructionWSystemPromptTokenizingStrategy):
    """
    Tokenizing strategy for OpenOrca datasets
    """

    def parse_instruction_fields(self, prompt) -> Tuple[str, str, str, str]:
        return (
            prompt["question"],
            "",
            prompt["response"],
            prompt["system_prompt"],
        )


def load(tokenizer, cfg):
    return load_chat(tokenizer, cfg)


def load_instruct(tokenizer, cfg):
    return InstructionWSystemPromptTokenizingStrategy(
        SystemDataPrompter(PromptStyle.INSTRUCT.value),
        tokenizer,
        cfg.train_on_inputs,
        cfg.sequence_len,
    )


def load_chat(tokenizer, cfg):
    return InstructionWSystemPromptTokenizingStrategy(
        SystemDataPrompter(PromptStyle.CHAT.value),
        tokenizer,
        cfg.train_on_inputs,
        cfg.sequence_len,
    )


def load_open_orca(tokenizer, cfg):
    return OpenOrcaPromptTokenizingStrategy(
        SystemDataPrompter(PromptStyle.INSTRUCT.value),
        tokenizer,
        cfg.train_on_inputs,
        cfg.sequence_len,
    )
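To see what `build_prompt_w_system` yields, here is a small hedged example; the field values are invented, but the row shape mirrors `parse_instruction_fields` above:

```python
from axolotl.prompt_strategies.alpaca_w_system import SystemDataPrompter
from axolotl.prompters import PromptStyle

prompter = SystemDataPrompter(PromptStyle.CHAT.value)
prompt = next(
    prompter.build_prompt_w_system(
        "You are a terse assistant.\n\n",             # system text taken from the dataset row
        "Summarize the passage.",                     # instruction
        "The Nile is the longest river in Africa.",   # input
    )
)
print(prompt)
# -> "You are a terse assistant.\n\nUSER: Summarize the passage.\nThe Nile is ...\nASSISTANT:"
```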
67  src/axolotl/prompt_strategies/context_qa.py  Normal file
@@ -0,0 +1,67 @@
"""Module containing the classes for Context QA Prompt Tokenization Strategies"""
from typing import Tuple

from axolotl.prompt_tokenizers import InstructionPromptTokenizingStrategy
from axolotl.prompters import AlpacaPrompter, PromptStyle


# article, unanswerable_question, question, answer
def load_404(tokenizer, cfg):
    return AlpacaMissingInfoContextPromptTokenizingStrategy(
        AlpacaContextPrompter(PromptStyle.CHAT.value),
        tokenizer,
        cfg.train_on_inputs,
        cfg.sequence_len,
    )


def load(tokenizer, cfg):
    return AlpacaContextPromptTokenizingStrategy(
        AlpacaContextPrompter(PromptStyle.CHAT.value),
        tokenizer,
        cfg.train_on_inputs,
        cfg.sequence_len,
    )


class AlpacaContextPrompter(AlpacaPrompter):
    """
    Customized system prompted for concise QA
    """

    system_prompt = (
        "Use the following contextual information to concisely answer the question.\n"
    )
    system_no_input_prompt = (
        "Use the following contextual information to concisely answer the question.\n"
    )


class AlpacaContextPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
    """
    Tokenization Strategy to combine in-context article with a question and answer
    """

    def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
        return (
            prompt["article"] + "\n===\n" + prompt["question"],
            "",
            prompt["answer"],
        )


class AlpacaMissingInfoContextPromptTokenizingStrategy(
    InstructionPromptTokenizingStrategy
):
    """
    Tokenization Strategy to combine in-context article with a question that can't be answered
    from the context and a default response to that effect
    """

    def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
        return (
            prompt["article"] + "\n===\n" + prompt["unanswerable_question"],
            "",
            "The context provided does not contain any information about your inquiry. "
            "Therefore, I'm unable to answer your question based on the given context.",
        )
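For reference, a hypothetical row shape these strategies expect (values invented) and how `parse_instruction_fields` maps it:

```python
# Hypothetical row for the "load" strategy above; load_404 uses
# "unanswerable_question" instead of "question" and a fixed refusal response.
row = {
    "article": "The Nile flows north through eleven countries.",
    "question": "In which direction does the Nile flow?",
    "answer": "It flows north.",
}
# parse_instruction_fields(row) returns:
#   instruction = row["article"] + "\n===\n" + row["question"]
#   input       = ""
#   response    = row["answer"]
```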
28  src/axolotl/prompt_strategies/sharegpt_jokes.py  Normal file
@@ -0,0 +1,28 @@
"""Module for Jokes prompts using sharegpt style """
from axolotl.prompt_tokenizers import ShareGPTPromptTokenizingStrategy
from axolotl.prompters import PromptStyle, ShareGPTPrompter


def load(tokenizer, cfg):
    return SimpleJokesShareGPTPromptTokenizingStrategy(
        ShareGPTPrompter(PromptStyle.CHAT.value),
        tokenizer,
        cfg.train_on_inputs,
        cfg.sequence_len,
    )


class SimpleJokesShareGPTPromptTokenizingStrategy(ShareGPTPromptTokenizingStrategy):
    """
    Tokenization strategy for asking bot to tell a joke and then explain why its funny
    """

    # title, text, explanation
    def get_conversation_thread(self, prompt):
        title = "" if not prompt["title"] else prompt["title"] + " "
        return [
            {"from": "human", "value": "Tell me a joke."},
            {"from": "gpt", "value": title + prompt["text"]},
            {"from": "human", "value": "Why is that joke funny?"},
            {"from": "gpt", "value": prompt["explanation"]},
        ]
67  src/axolotl/prompt_strategies/sharegpt_simple.py  Normal file
@@ -0,0 +1,67 @@
"""Module containing the SimpleShareGPTPromptTokenizingStrategy class"""

from axolotl.prompt_tokenizers import ShareGPTPromptTokenizingStrategy
from axolotl.prompters import PromptStyle, ShareGPTPrompter


def load(tokenizer, cfg):
    return SimpleShareGPTPromptTokenizingStrategy(
        ShareGPTPrompter(PromptStyle.CHAT.value),
        tokenizer,
        cfg.train_on_inputs,
        cfg.sequence_len,
    )


def load_role(tokenizer, cfg):
    return SimpleRoleShareGPTPromptTokenizingStrategy(
        ShareGPTPrompter(PromptStyle.CHAT.value),
        tokenizer,
        cfg.train_on_inputs,
        cfg.sequence_len,
    )


def load_guanaco(tokenizer, cfg):
    return GuanacoShareGPTPromptTokenizingStrategy(
        ShareGPTPrompter(PromptStyle.CHAT.value),
        tokenizer,
        cfg.train_on_inputs,
        cfg.sequence_len,
    )


class SimpleShareGPTPromptTokenizingStrategy(ShareGPTPromptTokenizingStrategy):
    """
    basic sharegpt strategy to grab conversations from the sample row
    """

    def get_conversation_thread(self, prompt):
        return prompt["conversations"]


class SimpleRoleShareGPTPromptTokenizingStrategy(ShareGPTPromptTokenizingStrategy):
    """
    basic sharegpt strategy to grab conversations from the sample row, but uses role instead of from
    """

    def get_conversation_thread(self, prompt):
        conversations = prompt["conversations"]
        # remap role: prompter/assistant, text: ... => from: human/gpt, value: ...
        turns = [{"from": t["role"], "value": t["value"]} for t in conversations]
        return turns


class GuanacoShareGPTPromptTokenizingStrategy(ShareGPTPromptTokenizingStrategy):
    """
    sharegpt strategy that remaps oasst data to sharegpt format
    """

    def get_conversation_thread(self, prompt):
        conversations = prompt["conversations"]
        # remap role: prompter/assistant, text: ... => from: human/gpt, value: ...
        role_map = {"prompter": "human", "assistant": "gpt"}
        turns = [
            {"from": role_map[t["role"]], "value": t["text"]} for t in conversations
        ]
        return turns
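A sketch of the remapping `GuanacoShareGPTPromptTokenizingStrategy` performs on an oasst-style row (values invented for illustration):

```python
row = {
    "conversations": [
        {"role": "prompter", "text": "What does QLoRA add over LoRA?"},
        {"role": "assistant", "text": "4-bit quantized base weights with paged optimizers."},
    ]
}
# get_conversation_thread(row) remaps role/text to the sharegpt from/value form:
# [{"from": "human", "value": "What does QLoRA add over LoRA?"},
#  {"from": "gpt",   "value": "4-bit quantized base weights with paged optimizers."}]
```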
@@ -87,7 +87,9 @@ class InstructionPromptTokenizingStrategy(PromptTokenizingStrategy):
     Tokenizing strategy for instruction-based prompts.
     """

-    def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
+    def parse_instruction_fields(
+        self, prompt
+    ) -> Union[Tuple[str, str, str], Tuple[str, str, str, str]]:
         raise NotImplementedError

     def tokenize_prompt(self, prompt):
@@ -96,25 +98,27 @@ class InstructionPromptTokenizingStrategy(PromptTokenizingStrategy):
             input,  # pylint: disable=redefined-builtin
             response,
         ) = self.parse_instruction_fields(prompt)
-        full_prompt = self._build_full_prompt(instruction, input, response)
-        tokenized_full_prompt = self._tokenize(full_prompt)
-        if not self.train_on_inputs:
-            user_prompt = next(
-                iter(
-                    self.prompter.build_prompt(
-                        instruction,
-                        input,
-                    )
+        user_prompt = next(
+            iter(
+                self.prompter.build_prompt(
+                    instruction,
+                    input,
                 )
             )
-            tokenized_user_prompt = self._tokenize(user_prompt, add_eos_token=False)
-            user_prompt_len = len(tokenized_user_prompt["input_ids"])
+        )
+        tokenized_prompt = self._tokenize(user_prompt, add_eos_token=False)
+        if not self.train_on_inputs:
+            user_prompt_len = len(tokenized_prompt["input_ids"])
             # TODO this could be sped up using numpy array slicing
-            tokenized_full_prompt["labels"] = [
-                -100
-            ] * user_prompt_len + tokenized_full_prompt["labels"][user_prompt_len:]
+            tokenized_prompt["labels"] = [-100] * user_prompt_len
+        tokenized_res_prompt = self._tokenize(
+            response, strip_bos_token=True, add_eos_token=True
+        )
+        tokenized_prompt["input_ids"] += tokenized_res_prompt["input_ids"]
+        tokenized_prompt["attention_mask"] += tokenized_res_prompt["attention_mask"]
+        tokenized_prompt["labels"] += tokenized_res_prompt["input_ids"]

-        return tokenized_full_prompt
+        return tokenized_prompt

     def _build_full_prompt(
         self, instruction, input, response  # pylint: disable=redefined-builtin
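The refactor above tokenizes the user prompt and the response separately, masks the prompt portion of `labels` with -100 when `train_on_inputs` is false, and appends the response ids to both `input_ids` and `labels`. A toy sketch of the resulting tensors (token ids are invented, not from a real tokenizer):

```python
# Illustrative only: made-up token ids.
user_ids = [101, 2054, 2003]   # tokenized user prompt (no EOS appended)
resp_ids = [2023, 2028, 102]   # tokenized response (BOS stripped, EOS appended)

input_ids = user_ids + resp_ids
labels = [-100] * len(user_ids) + resp_ids  # only response tokens contribute to the loss
```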
@@ -436,7 +440,7 @@ def parse_tokenized_to_result(
     result: Dict[str, List[int]],
     current_len: int,
     res: Dict[str, List[int]],
-    labels: list[int],
+    labels: List[int],
     pad_token_id: Union[int, None] = None,
 ) -> Tuple[Dict[str, List[int]], int]:
     """
@@ -24,6 +24,8 @@ class AlpacaPrompter:

     system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
     system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
+    turn_format: str
+    turn_no_input_format: str
     prompt_style: Optional[PromptStyle] = None

     def __init__(self, prompt_style=PromptStyle.INSTRUCT.value):
@@ -32,23 +34,13 @@ class AlpacaPrompter:

     def match_prompt_style(self):
         if self.prompt_style == PromptStyle.INSTRUCT.value:
-            self.prompt_input = (
-                self.system_prompt
-                + "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
-            )
-            self.prompt_no_input = (
-                self.system_no_input_prompt
-                + "### Instruction:\n{instruction}\n\n### Response:\n"
-            )
-            self.response_split = "### Response:"
+            self.turn_format = "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
+            self.turn_no_input_format = (
+                "### Instruction:\n{instruction}\n\n### Response:\n"
+            )
         if self.prompt_style == PromptStyle.CHAT.value:
-            self.prompt_input = (
-                self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
-            )
-            self.prompt_no_input = (
-                self.system_no_input_prompt + "USER: {instruction}\nASSISTANT:"
-            )
-            self.response_split = "ASSISTANT:"
+            self.turn_format = "USER: {instruction}\n{input}\nASSISTANT:"
+            self.turn_no_input_format = "USER: {instruction}\nASSISTANT:"

     def build_prompt(
         self,
@@ -59,16 +51,17 @@ class AlpacaPrompter:
         # returns the full prompt from instruction and optional input
         # if a label (=response, =output) is provided, it's also appended.
         if input:
-            res = self.prompt_input.format(instruction=instruction, input=input)
+            res = self.system_prompt + self.turn_format.format(
+                instruction=instruction, input=input
+            )
         else:
-            res = self.prompt_no_input.format(instruction=instruction)
+            res = self.system_no_input_prompt + self.turn_no_input_format.format(
+                instruction=instruction
+            )
         if output:
             res = f"{res}{output}"
         yield res

-    def get_response(self, output: str) -> str:
-        return output.split(self.response_split)[1].strip()
-

 class UnpromptedPrompter(AlpacaPrompter):
     """
@@ -93,7 +86,10 @@ class MultipleChoiceExplainPrompter(AlpacaPrompter):
     """

     system_prompt = (
-        "Choose the answer that best answers the question. Explain your reasoning."
+        "Choose the answer that best answers the question. Explain your reasoning.\n"
+    )
+    system_no_input_prompt = (
+        "Choose the answer that best answers the question. Explain your reasoning.\n"
     )


@@ -102,7 +98,12 @@ class MultipleChoiceConcisePrompter(AlpacaPrompter):
     Prompter for multiple choice concise
     """

-    prompt_input = "Choose the answer that best answers the question. Be concise in your response.\n\nUSER: {instruction}\n{input}\nASSISTANT:\n"
+    system_prompt = "Choose the answer that best answers the question. Be concise in your response.\n\n"
+    system_no_input_prompt = "Choose the answer that best answers the question. Be concise in your response.\n\n"
+
+    def match_prompt_style(self):
+        self.turn_format = "USER: {instruction}\n{input}\nASSISTANT:"
+        self.turn_no_input_format = "USER: {instruction}\nASSISTANT:"


 class SummarizeTLDRPrompter(AlpacaPrompter):
@@ -110,9 +111,12 @@ class SummarizeTLDRPrompter(AlpacaPrompter):
     Prompter for summarize TLDR
     """

-    prompt_no_input = (
-        "USER: Summarize the following article as a TL;DR.\n{instruction}\nASSISTANT:"
-    )
+    system_prompt = ""
+    system_no_input_prompt = ""
+
+    def match_prompt_style(self):
+        self.turn_format = "USER: Summarize the following article as a TL;DR.\n{instruction}\n{input}\nASSISTANT:"
+        self.turn_no_input_format = "USER: Summarize the following article as a TL;DR.\n{instruction}\nASSISTANT:"


 class CompletionPrompter:
@@ -128,9 +132,6 @@ class CompletionPrompter:
     ) -> Generator[str, None, None]:
         yield instruction

-    def get_response(self, output: str) -> str:
-        return output.strip()
-

 class GPTeacherPrompter(AlpacaPrompter):
     """
@@ -210,9 +211,6 @@ class ReflectAlpacaPrompter:
         res = f"{res}{label}"
         yield res

-    def get_response(self, output: str) -> str:
-        return output.split(self.response_split)[1].strip()
-

 class SeparatorStyle(Enum):
     """Different separator style."""
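With the refactor above, `build_prompt` composes `system_prompt` plus a per-turn format instead of a single monolithic template. A quick hedged illustration of the two styles:

```python
from axolotl.prompters import AlpacaPrompter, PromptStyle

chat = AlpacaPrompter(PromptStyle.CHAT.value)
print(next(chat.build_prompt("List three primes.", "2, 3, 5, 7, 11")))
# -> system prompt + "USER: List three primes.\n2, 3, 5, 7, 11\nASSISTANT:"

instruct = AlpacaPrompter(PromptStyle.INSTRUCT.value)
print(next(instruct.build_prompt("List three primes.")))
# -> system prompt + "### Instruction:\nList three primes.\n\n### Response:\n"
```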
@@ -261,34 +259,33 @@ class Conversation:
         self.messages.append([role, message])


-conv_vicuna_v1_1 = Conversation(
-    system="A chat between a curious user and an artificial intelligence assistant. "
-    "The assistant gives helpful, detailed, and polite answers to the user's questions.",
-    roles=["USER", "ASSISTANT"],
-    messages=[],
-    offset=0,
-    sep_style=SeparatorStyle.TWO,
-    sep=" ",
-    sep2=" ",
-)
-
-
 class ShareGPTPrompter:  # pylint: disable=too-few-public-methods
     """
     A prompter that generates prompts for the ShareGPT
     """

-    def __init__(self, prompt_style=None):
+    def __init__(self, prompt_style=None, system_prompt: Optional[str] = None):
         if prompt_style != PromptStyle.CHAT.value:
             raise ValueError(
                 f"unsupported prompt_style for ShareGPTPrompter({prompt_style})"
             )
-
-        # def match_prompt_style(self):
-        #     if self.prompt_style == PromptStyle.chat.value:
-        #         self.prompt_input = self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
-        #         self.prompt_no_input = self.system_no_input_prompt + "USER: {instruction}\nASSISTANT:"
-        #         self.response_split = "ASSISTANT:"
+        system: str = (
+            system_prompt
+            if system_prompt
+            else (
+                "A chat between a curious user and an artificial intelligence assistant. "
+                "The assistant gives helpful, detailed, and polite answers to the user's questions."
+            )
+        )
+        self._conversation = Conversation(
+            system=system,
+            roles=["USER", "ASSISTANT"],
+            messages=[],
+            offset=0,
+            sep_style=SeparatorStyle.TWO,
+            sep=" ",
+            sep2=" ",
+        )

     def build_prompt(self, source) -> Generator[str, None, None]:
         # ignore the system prompt if provided
@@ -300,7 +297,7 @@ class ShareGPTPrompter:  # pylint: disable=too-few-public-methods
             # also happens on the data splitting leaving empty conversations
             raise IndexError

-        conv = conv_vicuna_v1_1.copy()
+        conv = self._conversation.copy()
         roles = {"human": conv.roles[0], "gpt": conv.roles[1]}

         try:
@@ -1,12 +1,12 @@
 """Module containing data utilities"""
+import functools
 import logging
 from hashlib import md5
 from pathlib import Path
 from typing import List, Tuple, Union
 
 import torch
-from datasets import Dataset, DatasetDict, IterableDataset, load_dataset, load_from_disk
+from datasets import Dataset, DatasetDict, load_dataset, load_from_disk
 from huggingface_hub import hf_hub_download
 from transformers import PreTrainedTokenizerBase
 
@@ -79,6 +79,13 @@ def load_tokenized_prepared_datasets(
     else:
         logging.info(f"Unable to find prepared dataset in {prepared_ds_path}")
         logging.info("Loading raw datasets...")
 
+        if cfg.seed:
+            seed = cfg.seed
+        else:
+            logging.info("No seed provided, using default seed of 42")
+            seed = 42
+
         datasets = []
         # pylint: disable=invalid-name
         for d in cfg.datasets:
@@ -95,13 +102,26 @@ def load_tokenized_prepared_datasets(
                 pass
 
            # prefer local dataset, even if hub exists
-            if Path(d.path).exists():
-                ds = load_dataset(
-                    "json",
-                    data_files=d.path,
-                    streaming=False,
-                    split=None,
-                )
+            local_path = Path(d.path)
+            if local_path.exists():
+                if local_path.is_dir():
+                    ds = load_dataset(
+                        d.path,
+                        data_files=d.data_files,
+                        streaming=False,
+                        split=None,
+                    )
+                elif local_path.is_file():
+                    ds = load_dataset(
+                        "json",
+                        data_files=d.path,
+                        streaming=False,
+                        split=None,
+                    )
+                else:
+                    raise ValueError(
+                        "unhandled dataset load: local path exists, but is neither a directory or a file"
+                    )
             elif ds_from_hub:
                 if d.data_files:
                     ds = load_dataset(
@@ -128,11 +148,11 @@ def load_tokenized_prepared_datasets(
            # support for using a subset of the data
            if d.shards:
                if "train" in ds:
-                    ds = ds.shuffle(seed=42)["train"].shard(
+                    ds = ds.shuffle(seed=seed)["train"].shard(
                        num_shards=d.shards, index=0
                    )
                else:
-                    ds = ds.shuffle(seed=42).shard(num_shards=d.shards, index=0)
+                    ds = ds.shuffle(seed=seed).shard(num_shards=d.shards, index=0)
            d_type = d.type
            d_type_split = d_type.split(":")
            d_base_type = d_type_split[0]
@@ -233,14 +253,21 @@ def load_tokenized_prepared_datasets(
                ds_wrapper = TokenizedPromptDataset(ds_strategy, ds)
                datasets.append(ds_wrapper)
            else:
-                logging.error(f"unhandled prompt tokenization strategy: {d.type}")
-                raise ValueError(f"unhandled prompt tokenization strategy: {d.type}")
+                suffix = ""
+                if ":load_" in d.type:
+                    suffix = f" Did you mean {d.type.replace(':load_', '.load_')}?"
+                logging.error(
+                    f"unhandled prompt tokenization strategy: {d.type}. {suffix}"
+                )
+                raise ValueError(
+                    f"unhandled prompt tokenization strategy: {d.type} {suffix}"
+                )
        logging.info("tokenizing, merging, and shuffling master dataset")
 
        samples: List[int] = []
        for d in datasets:
            samples = samples + list(d)
-        dataset = Dataset.from_list(samples).shuffle(seed=42)
+        dataset = Dataset.from_list(samples).shuffle(seed=seed)
        if cfg.local_rank == 0:
            logging.info(
                f"Saving merged prepared dataset to disk... {prepared_ds_path}"
@@ -392,32 +419,116 @@ def load_prepare_datasets(
     return train_dataset, eval_dataset
 
 
-class PretrainingDatasetWrapper(IterableDataset):
-    """
-    Wrapper for pretraining dataset that avoids loading the dataset into memory
-    """
-
-    def __init__(self, tokenizer, dataset_path, max_tokens=2048):
-        self.tokenizer = tokenizer
-        self.dataset_path = dataset_path
-        self.max_tokens = max_tokens
-
-    def __iter__(self):
-        buffer = []
-        for sample in load_dataset(
-            self.dataset_path,
-        )["train"].shuffle():
-            buffer += self.tokenizer(sample["text"])["input_ids"]
-            buffer += [self.tokenizer.eos_token_id]
-            while len(buffer) > self.max_tokens:
-                input_ids = torch.tensor(buffer[: self.max_tokens])
-                yield {
-                    "input_ids": input_ids,
-                    "attention_mask": torch.ones(input_ids.size()),
-                    "labels": input_ids,
-                }
-                buffer = buffer[self.max_tokens :]
-
-
-def load_pretraining_dataset(path, tokenizer, max_tokens=2048):
-    return PretrainingDatasetWrapper(tokenizer, path, max_tokens=max_tokens)
+def encode_pretraining(tokenizer, max_tokens, examples):
+    res = tokenizer(
+        examples["text"],
+        truncation=True,
+        max_length=max_tokens - 2,
+        add_special_tokens=True,
+    )
+    # Convert to PyTorch tensors
+    input_ids = [torch.tensor(seq) for seq in res["input_ids"]]
+    attention_mask = [torch.tensor(seq) for seq in res["attention_mask"]]
+    new_input_ids = []
+    new_attention_mask = []
+    # Append EOS and PAD tokens to input_ids, and correct attention_mask
+    for i, _ in enumerate(input_ids):
+        input_ids[i] = torch.cat(
+            (
+                input_ids[i],
+                torch.tensor([tokenizer.eos_token_id, tokenizer.pad_token_id]),
+            ),
+            dim=0,
+        )
+        attention_mask[i] = torch.cat((attention_mask[i], torch.tensor([1, 0])), dim=0)
+
+    # Concatenate tokens so that their lengths are less than max_tokens
+    buffer_input_ids = torch.tensor([], dtype=torch.long)
+    buffer_attention_mask = torch.tensor([], dtype=torch.long)
+
+    for ids, mask in zip(input_ids, attention_mask):
+        if buffer_input_ids.numel() == max_tokens:
+            new_input_ids.append(buffer_input_ids)
+            new_attention_mask.append(buffer_attention_mask)
+            buffer_input_ids = torch.tensor([], dtype=torch.long)
+            buffer_attention_mask = torch.tensor([], dtype=torch.long)
+            buffer_input_ids = torch.cat((buffer_input_ids, ids), dim=0)
+            buffer_attention_mask = torch.cat((buffer_attention_mask, mask), dim=0)
+        elif buffer_input_ids.numel() + ids.numel() <= max_tokens:
+            buffer_input_ids = torch.cat((buffer_input_ids, ids), dim=0)
+            buffer_attention_mask = torch.cat((buffer_attention_mask, mask), dim=0)
+        else:
+            buffer_input_ids = torch.cat(
+                (
+                    buffer_input_ids,
+                    torch.full(
+                        (max_tokens - buffer_input_ids.numel(),),
+                        tokenizer.pad_token_id,
+                        dtype=torch.long,
+                    ),
+                ),
+                dim=0,
+            )
+            buffer_attention_mask = torch.cat(
+                (
+                    buffer_attention_mask,
+                    torch.full(
+                        (max_tokens - buffer_attention_mask.numel(),),
+                        0,
+                        dtype=torch.long,
+                    ),
+                ),
+                dim=0,
+            )
+            new_input_ids.append(buffer_input_ids)
+            new_attention_mask.append(buffer_attention_mask)
+            buffer_input_ids = torch.tensor([], dtype=torch.long)
+            buffer_attention_mask = torch.tensor([], dtype=torch.long)
+
+            buffer_input_ids = torch.cat((buffer_input_ids, ids), dim=0)
+            buffer_attention_mask = torch.cat((buffer_attention_mask, mask), dim=0)
+
+    if buffer_input_ids.numel() > 0:  # for any leftover tokens
+        while buffer_input_ids.numel() < max_tokens:  # make all sequences equal in size
+            buffer_input_ids = torch.cat(
+                (
+                    buffer_input_ids,
+                    torch.full(
+                        (max_tokens - buffer_input_ids.numel(),),
+                        tokenizer.pad_token_id,
+                        dtype=torch.long,
+                    ),
+                ),
+                dim=0,
+            )
+            buffer_attention_mask = torch.cat(
+                (
+                    buffer_attention_mask,
+                    torch.full(
+                        (max_tokens - buffer_attention_mask.numel(),),
+                        0,
+                        dtype=torch.long,
+                    ),
+                ),
+                dim=0,
+            )
+        new_input_ids.append(buffer_input_ids)
+        new_attention_mask.append(buffer_attention_mask)
+
+    ret = {
+        "input_ids": [seq.tolist() for seq in new_input_ids],
+        "labels": [seq.tolist() for seq in new_input_ids],
+        "attention_mask": [seq.tolist() for seq in new_attention_mask],
+    }
+
+    logging.debug(len(ret["input_ids"]))
+    return ret
+
+
+def load_pretraining_dataset(path, tokenizer, max_tokens=2048, seed=42):
+    encode = functools.partial(encode_pretraining, tokenizer, max_tokens)
+    dataset = load_dataset(path, streaming=True, split="train")
+    dataset = dataset.shuffle(seed=seed, buffer_size=10_000)
+    # TODO dynamically figure out which columns/features to remove
+    dataset = dataset.map(encode, batched=True, remove_columns=["text", "meta"])
+    return dataset
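As a rough usage sketch (not part of the change set itself), the new streaming pretraining path can be exercised roughly as below; the tokenizer checkpoint and dataset path are placeholders, and the dataset is assumed to expose "text" and "meta" columns, which is what encode_pretraining's remove_columns list expects:

# Hypothetical usage sketch for the streaming pretraining loader above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")  # placeholder model
dataset = load_pretraining_dataset(
    "togethercomputer/RedPajama-Data-1T-Sample",  # placeholder streaming dataset with text/meta columns
    tokenizer,
    max_tokens=2048,
    seed=42,
)
batch = next(iter(dataset))  # each example is already packed/padded to max_tokens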
@@ -11,22 +11,16 @@ import bitsandbytes as bnb
 import torch
 import transformers
 from optimum.bettertransformer import BetterTransformer
-from transformers import PreTrainedModel  # noqa: F401
-from transformers import (
+from transformers import (  # noqa: F401
     AutoConfig,
     AutoModelForCausalLM,
     AutoTokenizer,
     BitsAndBytesConfig,
     LlamaConfig,
+    PreTrainedModel,
+    PreTrainedTokenizerBase,
 )
 
-try:
-    from transformers import LlamaForCausalLM
-except ImportError:
-    logging.warning(
-        "This version of transformers does not support Llama. Consider upgrading."
-    )
-
 from axolotl.prompt_tokenizers import LLAMA_DEFAULT_PAD_TOKEN
 
 if TYPE_CHECKING:
@@ -40,15 +34,20 @@ def load_tokenizer(
     tokenizer_type,
     cfg,
 ):
+    use_fast = True  # this is the default
+    if cfg.tokenizer_use_fast is not None:
+        use_fast = cfg.tokenizer_use_fast
     if tokenizer_type:
         tokenizer = getattr(transformers, tokenizer_type).from_pretrained(
             tokenizer_config,
             trust_remote_code=cfg.trust_remote_code or False,
+            use_fast=use_fast,
         )
     else:
         tokenizer = AutoTokenizer.from_pretrained(
             tokenizer_config,
             trust_remote_code=cfg.trust_remote_code or False,
+            use_fast=use_fast,
         )
 
     logging.debug(f"EOS: {tokenizer.eos_token_id} / {tokenizer.eos_token}")
@@ -76,45 +75,58 @@ def load_tokenizer(
 
 
 def load_model(
-    base_model,
-    base_model_config,
-    model_type,
-    tokenizer,
-    cfg,
-    adapter="lora",
-    inference=False,
+    base_model, base_model_config, model_type, tokenizer, cfg, adapter="lora"
 ):
-    # type: (str, str, str, str, DictDefault, Optional[str], bool) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
+    # type: (str, str, str, PreTrainedTokenizerBase, DictDefault, Optional[str]) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
     """
     Load a model from a base model and a model type.
     """
 
     # TODO refactor as a kwarg
     load_in_8bit = cfg.load_in_8bit
-    is_llama_derived_model = "llama" in base_model or (
+    cfg.is_llama_derived_model = "llama" in base_model or (
         cfg.model_type and "llama" in cfg.model_type.lower()
     )
 
-    if is_llama_derived_model and cfg.flash_attention:
-        if cfg.device not in ["mps", "cpu"] and inference is False:
+    if cfg.is_llama_derived_model and cfg.flash_attention:
+        if cfg.device not in ["mps", "cpu"] and not cfg.inference:
             from axolotl.flash_attn import replace_llama_attn_with_flash_attn
 
             logging.info("patching with flash attention")
             replace_llama_attn_with_flash_attn()
-    elif is_llama_derived_model and cfg.xformers_attention:
+    elif cfg.is_llama_derived_model and cfg.xformers_attention:
         from axolotl.monkeypatch.llama_attn_hijack_xformers import (
             hijack_llama_attention,
         )
 
         logging.info("patching with xformers attention")
         hijack_llama_attention()
-    elif is_llama_derived_model and cfg.sdp_attention:
+    elif cfg.is_llama_derived_model and cfg.sdp_attention:
         from axolotl.monkeypatch.llama_attn_hijack_xformers import (
             hijack_llama_sdp_attention,
         )
 
         logging.info("patching with sdp attention")
         hijack_llama_sdp_attention()
+    elif cfg.is_llama_derived_model and cfg.landmark_attention:
+        from axolotl.monkeypatch.llama_landmark_attn import (
+            MEM_TOKEN,
+            patch_llama_with_landmark_attn,
+        )
+
+        logging.info("patching with landmark attention")
+        patch_llama_with_landmark_attn()
+
+        # Note: This might overwrite previous additional_special_tokens
+        tokenizer.add_special_tokens({"additional_special_tokens": [MEM_TOKEN]})
+
+    if cfg.is_llama_derived_model and cfg.xpos_rope:
+        from axolotl.monkeypatch.xpos_rope_llama_monkey_patch import (
+            replace_llama_rope_with_xpos_rope,
+        )
+
+        logging.info("patching with xpos rope")
+        replace_llama_rope_with_xpos_rope()
+
     if cfg.bf16 or cfg.bfloat16:
         torch_dtype = torch.bfloat16
@@ -129,12 +141,21 @@ def load_model(
         )
 
         replace_peft_model_with_int4_lora_model()
-        from peft import prepare_model_for_int8_training
     except Exception as err:
         logging.exception(err)
         raise err
 
+    try:
+        from peft import prepare_model_for_kbit_training
+    except ImportError:
+        # For backward compatibility
+        from peft import (
+            prepare_model_for_int8_training as prepare_model_for_kbit_training,
+        )
+
     model_kwargs = {}
+    if cfg.model_revision:
+        model_kwargs["revision"] = cfg.model_revision
     if cfg.adapter == "qlora" and cfg.load_in_4bit:
         model_kwargs["quantization_config"] = BitsAndBytesConfig(
             load_in_4bit=True,
@@ -145,7 +166,7 @@ def load_model(
             bnb_4bit_quant_type="nf4",
         )
     try:
-        if cfg.gptq and is_llama_derived_model:
+        if cfg.gptq and cfg.is_llama_derived_model:
             from alpaca_lora_4bit.autograd_4bit import load_llama_model_4bit_low_ram
             from huggingface_hub import snapshot_download
 
@@ -183,7 +204,9 @@ def load_model(
                 else True,
             )
             load_in_8bit = False
-        elif is_llama_derived_model and "LlamaForCausalLM" in globals():
+        elif cfg.is_llama_derived_model and not cfg.trust_remote_code:
+            from transformers import LlamaForCausalLM
+
             config = LlamaConfig.from_pretrained(base_model_config)
             model = LlamaForCausalLM.from_pretrained(
                 base_model,
@@ -220,7 +243,7 @@ def load_model(
             #     device=cfg.device,
             # )
             # model.train() # sets to train instead of eval mode
-        elif model_type:
+        elif model_type and not cfg.trust_remote_code:
             model = getattr(transformers, model_type).from_pretrained(
                 base_model,
                 load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
@@ -235,6 +258,22 @@ def load_model(
                 base_model,
                 trust_remote_code=cfg.trust_remote_code or False,
             )
+            # Shouldn't be a problem most of the time. will obviously error if the model doesn't support this
+            # when training starts
+            if (
+                hasattr(config, "max_seq_len")
+                and config.max_seq_len
+                and cfg.sequence_len > config.max_seq_len
+            ):
+                config.max_seq_len = cfg.sequence_len
+                logging.warning(f"increasing context length to {cfg.sequence_len}")
+            elif (
+                hasattr(config, "max_sequence_length")
+                and config.max_sequence_length
+                and cfg.sequence_len > config.max_sequence_length
+            ):
+                config.max_sequence_length = cfg.sequence_len
+                logging.warning(f"increasing context length to {cfg.sequence_len}")
             model = AutoModelForCausalLM.from_pretrained(
                 base_model,
                 config=config,
@@ -253,6 +292,7 @@ def load_model(
             model = AutoModelForCausalLM.from_pretrained(
                 base_model,
                 load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
+                load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
                 torch_dtype=torch_dtype,
                 device_map=cfg.device_map,
                 trust_remote_code=cfg.trust_remote_code or False,
@@ -262,7 +302,11 @@ def load_model(
         embeddings_len = math.ceil(len(tokenizer) / 32) * 32
         model.resize_token_embeddings(embeddings_len)
 
-        if cfg.sequence_len >= model.config.max_position_embeddings:
+        if (
+            hasattr(model.config, "max_position_embeddings")
+            and model.config.max_position_embeddings
+            and cfg.sequence_len >= model.config.max_position_embeddings
+        ):
             logging.warning(
                 f"increasing model.config.max_position_embeddings to {cfg.sequence_len}"
             )
@@ -272,8 +316,10 @@ def load_model(
         (cfg.adapter == "lora" and load_in_8bit)
         or (cfg.adapter == "qlora" and cfg.load_in_4bit)
     ):
-        logging.info("converting PEFT model w/ prepare_model_for_int8_training")
-        model = prepare_model_for_int8_training(model)
+        logging.info("converting PEFT model w/ prepare_model_for_kbit_training")
+        model = prepare_model_for_kbit_training(
+            model, use_gradient_checkpointing=cfg.gradient_checkpointing
+        )
 
     model, lora_config = load_adapter(model, cfg, adapter)
 
@@ -346,7 +392,6 @@ def load_llama_adapter(model, cfg):
         model = PeftModel.from_pretrained(
             model,
             cfg.lora_model_dir,
-            device_map=cfg.device_map,
             torch_dtype=torch.float16,
         )
     else:
@@ -408,8 +453,7 @@ def load_lora(model, cfg):
         model = PeftModel.from_pretrained(
             model,
             cfg.lora_model_dir,
-            device_map=cfg.device_map,
-            # torch_dtype=torch.float16,
+            is_trainable=not cfg.inference,
         )
     else:
         model = get_peft_model(model, lora_config)
173 src/axolotl/utils/sampler.py (new file)
@@ -0,0 +1,173 @@
+# pylint: skip-file
+
+from typing import Any, List, Optional
+
+import numba
+import numpy as np
+import torch.distributed as dist
+from torch.utils.data import Sampler
+
+
+@numba.njit
+def ffd_check(a: np.ndarray, c: int, n: int):
+    # First-fit-decreasing bin packing
+    # Check if a[] could fit in n bins with capacity c
+    # https://en.wikipedia.org/wiki/First-fit-decreasing_bin_packing
+
+    a = np.sort(a)[::-1]
+    bins = np.full((n,), c, dtype=a.dtype)
+    for size in a:
+        not_found = True
+        for idx in range(n):
+            if bins[idx] >= size:
+                bins[idx] -= size
+                not_found = False
+                break
+
+        if not_found:
+            return False
+
+    return True
+
+
+@numba.njit
+def ffd_with_result(a: np.ndarray, c: int, start_index: int):
+    # First-fit-decreasing bin packing (with result return)
+
+    indices = np.argsort(a)[::-1]
+    a = a[indices]
+
+    bins: List[int] = []
+    bins_result: List[Any] = []
+    for a_id, size in enumerate(a):
+        add_new = True
+        for idx in range(len(bins)):
+            if bins[idx] >= size:
+                bins[idx] -= size
+                bins_result[idx].append(indices[a_id] + start_index)
+                add_new = False
+                break
+
+        if add_new:
+            bins.append(c - size)
+            bins_result.append([indices[a_id] + start_index])
+
+    return bins_result
+
+
+@numba.njit
+def allocate(
+    lengths: np.ndarray, lengths_cumsum: np.ndarray, rank: int, c: int, n: int
+):
+    # Dynamic batch allocator, similar to Multifit
+    # https://en.wikipedia.org/wiki/Multifit_algorithm
+    # ~99.5% efficiency on OpenChat training set (12 * 2048 ctx len)
+
+    s = 0
+    start_index = 0
+    result = []
+
+    while True:
+        # binary search [l, r)
+        left = 1
+        right = 1 + np.searchsorted(lengths_cumsum[start_index:], s + c * n, "right")
+
+        while right - left > 1:
+            m = (left + right) // 2
+            if ffd_check(lengths[start_index : start_index + m], c, n):
+                left = m
+            else:
+                right = m
+
+        # use length l
+        batch = ffd_with_result(
+            lengths[start_index : start_index + left], c, start_index
+        )
+        assert len(batch) <= n
+        if len(batch) < n:
+            break
+
+        start_index += left
+        s = lengths_cumsum[start_index - 1]
+
+        # add local rank
+        result.append(batch[rank])
+
+    return result, s, len(result) * c * n
+
+
+class MultipackDistributedBatchSampler(Sampler):
+    """Unpadded length sampling using Multipack.
+    Approximate (at most ~1.22x) the optimal solution of the identical-machines scheduling problem, which is NP-hard.
+    """
+
+    def __init__(
+        self,
+        batch_max_length: int,
+        lengths: List[int],
+        num_replicas: Optional[int] = None,
+        rank: Optional[int] = None,
+        seed: int = 0,
+    ):
+        # Get rank
+        if num_replicas is None:
+            if not dist.is_available():
+                raise RuntimeError("Requires distributed package to be available")
+            num_replicas = dist.get_world_size()
+        if rank is None:
+            if not dist.is_available():
+                raise RuntimeError("Requires distributed package to be available")
+            rank = dist.get_rank()
+
+        self.num_replicas = num_replicas
+        self.rank = rank
+        self.seed = seed
+
+        self.batch_max_length = batch_max_length
+        self.lengths = lengths
+        assert isinstance(self.lengths, np.ndarray)
+
+        self.epoch = 0
+
+        # statistics
+        self.eff_total_used = 0
+        self.eff_total_slots = 0
+
+    def set_epoch(self, epoch: int):
+        self.epoch = epoch
+
+    def generate_batches(self, set_stats=False):
+        indices = np.random.default_rng(seed=self.seed + self.epoch).permutation(
+            len(self.lengths)
+        )
+
+        lengths = self.lengths[indices]
+        lengths_cumsum = np.cumsum(lengths)
+
+        batches, total_used, total_slots = allocate(
+            lengths=lengths,
+            lengths_cumsum=lengths_cumsum,
+            rank=self.rank,
+            c=self.batch_max_length,
+            n=self.num_replicas,
+        )
+
+        batches = [indices[batch] for batch in batches]
+
+        # statistics
+        if set_stats:
+            self.eff_total_used += total_used
+            self.eff_total_slots += total_slots
+
+        return batches
+
+    def __iter__(self):
+        batches = self.generate_batches(set_stats=True)
+        return iter(batches)
+
+    def num_batches(self):
+        batches = self.generate_batches()
+        return len(batches)
+
+    def efficiency(self):
+        return self.eff_total_used / self.eff_total_slots
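A small self-contained sketch (not part of the change set) of how the sampler above packs variable-length samples. The lengths are made up, and num_replicas/rank are passed explicitly because no torch.distributed process group is assumed to be initialized:

import numpy as np

# Ten fake sample lengths, packed into batches of at most 2048 tokens per rank.
lengths = np.array([512, 1024, 2048, 128, 256, 768, 896, 64, 1536, 480])
sampler = MultipackDistributedBatchSampler(
    batch_max_length=2048,
    lengths=lengths,
    num_replicas=1,  # single process; normally taken from dist.get_world_size()
    rank=0,
    seed=0,
)
for batch_indices in sampler:
    # each batch is an array of dataset indices whose lengths sum to <= 2048
    print(batch_indices, lengths[batch_indices].sum())
print(f"packing efficiency: {sampler.efficiency():.3f}")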
@@ -1,6 +1,9 @@
 """Module for custom LRScheduler class"""
+import math
+from functools import partial
+
-from torch.optim.lr_scheduler import LRScheduler
+from torch.optim import Optimizer
+from torch.optim.lr_scheduler import LambdaLR, LRScheduler
 
 
 class InterpolatingLogScheduler(LRScheduler):
@@ -42,3 +45,58 @@ class InterpolatingLogScheduler(LRScheduler):
             lrs = [self.max_lr for base_lr in self.base_lrs]
 
         return lrs
+
+
+def _get_cosine_schedule_with_quadratic_warmup_lr_lambda(
+    current_step: int,
+    *,
+    num_warmup_steps: int,
+    num_training_steps: int,
+    num_cycles: float
+):
+    if current_step < num_warmup_steps:
+        return (float(current_step) / float(max(1, num_warmup_steps))) ** 2
+    progress = float(current_step - num_warmup_steps) / float(
+        max(1, num_training_steps - num_warmup_steps)
+    )
+    return max(
+        0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))
+    )
+
+
+def get_cosine_schedule_with_quadratic_warmup(
+    optimizer: Optimizer,
+    num_warmup_steps: int,
+    num_training_steps: int,
+    num_cycles: float = 0.5,
+    last_epoch: int = -1,
+):
+    """
+    Create a schedule with a learning rate that decreases following the values of the cosine function between the
+    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
+    initial lr set in the optimizer.
+
+    Args:
+        optimizer ([`~torch.optim.Optimizer`]):
+            The optimizer for which to schedule the learning rate.
+        num_warmup_steps (`int`):
+            The number of steps for the warmup phase.
+        num_training_steps (`int`):
+            The total number of training steps.
+        num_cycles (`float`, *optional*, defaults to 0.5):
+            The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0
+            following a half-cosine).
+        last_epoch (`int`, *optional*, defaults to -1):
+            The index of the last epoch when resuming training.
+
+    Return:
+        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+    """
+
+    lr_lambda = partial(
+        _get_cosine_schedule_with_quadratic_warmup_lr_lambda,
+        num_warmup_steps=num_warmup_steps,
+        num_training_steps=num_training_steps,
+        num_cycles=num_cycles,
+    )
+    return LambdaLR(optimizer, lr_lambda, last_epoch)
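A quick sketch (a throwaway linear layer and SGD optimizer are assumptions, not part of the change) of what the quadratic-warmup cosine schedule produces during warmup:

import torch
from torch import nn
from torch.optim import SGD

model = nn.Linear(4, 4)
optimizer = SGD(model.parameters(), lr=1e-3)
scheduler = get_cosine_schedule_with_quadratic_warmup(
    optimizer, num_warmup_steps=10, num_training_steps=100
)

lrs = []
for _ in range(100):
    optimizer.step()
    scheduler.step()
    lrs.append(scheduler.get_last_lr()[0])
# warmup is quadratic: after 5 of 10 warmup steps the lr is (5/10)**2 = 25% of peak, not 50%
assert abs(lrs[4] / 1e-3 - (5 / 10) ** 2) < 1e-6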
@@ -34,3 +34,5 @@ def check_example_labels(example, tokenizer):
 
     logging.info(" ".join(colored_tokens))
     logging.info("\n\n\n")
+
+    return " ".join(colored_tokens)
@@ -5,25 +5,185 @@ import logging
 import math
 import os
 import sys
+from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Optional
 
 import bitsandbytes as bnb
+import numpy as np
 import torch.cuda
 import transformers
 from torch import nn
 from torch.optim.lr_scheduler import OneCycleLR
-from transformers import EarlyStoppingCallback, Trainer
+from torch.utils.data import Dataset
+from transformers import EarlyStoppingCallback, Trainer, TrainingArguments
 from transformers.trainer_pt_utils import get_parameter_names
 
 from axolotl.utils.callbacks import (
     SaveBetterTransformerModelCallback,
     SavePeftModelCallback,
 )
-from axolotl.utils.schedulers import InterpolatingLogScheduler
+from axolotl.utils.sampler import MultipackDistributedBatchSampler
+from axolotl.utils.schedulers import (
+    InterpolatingLogScheduler,
+    get_cosine_schedule_with_quadratic_warmup,
+)
+
+IGNORE_LABEL_ID = -100
 
 
-class OneCycleLRSchedulerTrainer(Trainer):
+def _find_multiple(val1, val2):
+    return (-(val1 // -val2)) * val2
+
+
+def batch_to_tensor(batch, pad_id=0, dtype=torch.long, loss_dtype=torch.bfloat16):
+    # Pad an unused item to reach multiple of 64, for faster GEMM
+    pad_cur_len = sum(list(batch["length"]))
+    pad_len = _find_multiple(pad_cur_len, 64) - pad_cur_len
+
+    if pad_len > 0:
+        assert pad_len < 64
+
+        batch["input_ids"].append([pad_id] * pad_len)
+        batch["labels"].append([pad_id] * pad_len)
+        batch["attention_mask"].append([0] * pad_len)
+        batch["length"].append(pad_len)
+
+    # seqlen
+    batch_lengths = torch.tensor(list(batch["length"]), dtype=torch.int32, device="cpu")
+
+    max_seqlen = torch.max(batch_lengths)
+    cu_seqlens = torch.nn.functional.pad(
+        batch_lengths.cumsum(-1, dtype=torch.int32), (1, 0)
+    )
+
+    # nz elements
+    nz_num = cu_seqlens[-1]
+    nz_input_ids = torch.zeros((nz_num,), dtype=dtype, pin_memory=True, device="cpu")
+    nz_position_ids = torch.zeros((nz_num,), dtype=dtype, pin_memory=True, device="cpu")
+    nz_shifted_label_ids = torch.zeros(
+        (nz_num,), dtype=dtype, pin_memory=True, device="cpu"
+    )
+    nz_shifted_loss_weights = torch.zeros(
+        (nz_num,), dtype=loss_dtype, pin_memory=True, device="cpu"
+    )
+
+    index = 0
+    for token_list, length, labels_list in zip(
+        batch["input_ids"], batch["length"], batch["labels"]
+    ):
+        tokens = torch.tensor(token_list, dtype=dtype, device="cpu")
+        position_ids = torch.arange(length, dtype=dtype, device="cpu")
+
+        # Input IDs & shifted labels
+        # shifted_label_ids = torch.where(masks, tokens, IGNORE_LABEL_ID)
+        shifted_label_ids = labels_list
+        shifted_label_ids = torch.nn.functional.pad(
+            shifted_label_ids[1:], (0, 1), "constant", IGNORE_LABEL_ID
+        )
+
+        nz_input_ids[index : index + length] = tokens
+        nz_position_ids[index : index + length] = position_ids
+        nz_shifted_label_ids[index : index + length] = shifted_label_ids
+
+        # Loss weights
+        mask_count = sum(1 for label in labels_list[1:] if label != IGNORE_LABEL_ID)
+        loss_weight = (
+            1 / mask_count if mask_count > 0 else 0
+        )  # Avoid division by zero for paddings
+
+        nz_shifted_loss_weights[index : index + length] = loss_weight
+
+        index += length
+
+    # inputs
+    return {
+        "max_seqlen": max_seqlen,
+        "cu_seqlens": cu_seqlens,
+        "nz_input_ids": nz_input_ids,
+        "nz_position_ids": nz_position_ids,
+        "nz_shifted_label_ids": nz_shifted_label_ids,
+        "nz_shifted_loss_weights": nz_shifted_loss_weights,
+    }
+
+
+@dataclass
+class AxolotlTrainingArguments(TrainingArguments):
+    """
+    Extend the base TrainingArguments for axolotl helpers
+    """
+
+    lr_quadratic_warmup: bool = field(
+        default=False,
+        metadata={"help": "Use quadratic warmup for cosine scheduling."},
+    )
+    sample_packing: bool = field(
+        default=True,
+        metadata={"help": "Use sample packing for efficient training."},
+    )
+    max_seq_length: int = field(
+        default=2048,
+        metadata={"help": "The maximum sequence length the model can handle"},
+    )
+
+
+class AxolotlTrainer(Trainer):
+    """
+    Extend the base Trainer for axolotl helpers
+    """
+
+    args = None  # type: AxolotlTrainingArguments
+
+    def create_scheduler(
+        self, num_training_steps: int, optimizer: torch.optim.Optimizer = None
+    ):
+        """
+        Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
+        passed as an argument.
+
+        Args:
+            num_training_steps (int): The number of training steps to do.
+            optimizer (torch.optim.Optimizer): The training optimizer
+        """
+
+        # fmt: off
+        if self.lr_scheduler is None:  # type: ignore # pylint: disable=access-member-before-definition
+            # fmt: on
+            if (
+                self.args.lr_scheduler_type == "cosine"
+                and self.args.lr_quadratic_warmup is True
+            ):
+                self.lr_scheduler = get_cosine_schedule_with_quadratic_warmup(  # pylint: disable=attribute-defined-outside-init
+                    optimizer,
+                    num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
+                    num_training_steps=num_training_steps,
+                )
+            else:
+                return super().create_scheduler(num_training_steps, optimizer)
+        return self.lr_scheduler
+
+    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
+        lengths = np.array([len(sample["input_ids"]) for sample in self.train_dataset])
+        return MultipackDistributedBatchSampler(
+            batch_max_length=self.args.per_device_train_batch_size
+            * self.args.max_seq_length,
+            lengths=lengths,
+            seed=self.args.seed,
+        )
+
+    def _get_eval_sampler(
+        self, eval_dataset: Dataset
+    ) -> Optional[torch.utils.data.Sampler]:
+        lengths = np.array([len(sample["input_ids"]) for sample in eval_dataset])
+        return MultipackDistributedBatchSampler(
+            batch_max_length=self.args.per_device_eval_batch_size
+            * self.args.max_seq_length,
+            lengths=lengths,
+            seed=self.args.seed,
+        )
+
+
+class OneCycleLRSchedulerTrainer(AxolotlTrainer):
     """
     Trainer subclass that uses the OneCycleLR scheduler
     """
@@ -66,8 +226,6 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
         if cfg.logging_steps is not None
         else max(min(int(0.005 * total_num_steps), 10), 1)
     )
-    save_steps = cfg.save_steps
-    eval_steps = cfg.eval_steps
 
     training_arguments_kwargs = {}
     if cfg.bf16 == "full":
@@ -78,6 +236,10 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     training_arguments_kwargs["tf32"] = cfg.tf32
     training_arguments_kwargs["warmup_steps"] = warmup_steps
     training_arguments_kwargs["logging_steps"] = logging_steps
+
+    if cfg.seed:
+        training_arguments_kwargs["seed"] = cfg.seed
+
     if cfg.gradient_checkpointing:
         if cfg.gptq:
             from alpaca_lora_4bit.gradient_checkpointing import (
@@ -101,6 +263,9 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     if cfg.fsdp_config:
         training_arguments_kwargs["fsdp_config"] = dict(cfg.fsdp_config)
 
+    if cfg.lr_quadratic_warmup is not None:
+        training_arguments_kwargs["lr_quadratic_warmup"] = cfg.lr_quadratic_warmup
+
     # deepspeed
     if (
         os.environ.get("ACCELERATE_USE_DEEPSPEED") == "true"
@@ -113,7 +278,25 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
         # TODO search Path("./") for one
         training_arguments_kwargs["deepspeed"] = "./ds_config.json"
 
-    training_args = transformers.TrainingArguments(
+    if cfg.adam_beta1:
+        training_arguments_kwargs["adam_beta1"] = cfg.adam_beta1
+    if cfg.adam_beta2:
+        training_arguments_kwargs["adam_beta2"] = cfg.adam_beta2
+    if cfg.adam_epsilon:
+        training_arguments_kwargs["adam_epsilon"] = cfg.adam_epsilon
+    if cfg.max_grad_norm:
+        training_arguments_kwargs["max_grad_norm"] = cfg.max_grad_norm
+
+    if cfg.hub_model_id:
+        training_arguments_kwargs["hub_model_id"] = cfg.hub_model_id
+        training_arguments_kwargs["push_to_hub"] = True
+        training_arguments_kwargs["hub_private_repo"] = True
+
+    if cfg.save_safetensors:
+        training_arguments_kwargs["save_safetensors"] = cfg.save_safetensors
+
+    training_args = AxolotlTrainingArguments(  # pylint: disable=unexpected-keyword-arg
+        max_steps=total_num_steps * cfg.num_epochs,
         per_device_train_batch_size=cfg.micro_batch_size,
         per_device_eval_batch_size=cfg.eval_batch_size
         if cfg.eval_batch_size is not None
@@ -123,16 +306,16 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
         num_train_epochs=cfg.num_epochs,
         learning_rate=cfg.learning_rate,
         evaluation_strategy="steps" if cfg.val_set_size > 0 else "no",
-        save_strategy="steps" if save_steps else "epoch",
-        eval_steps=eval_steps if cfg.val_set_size > 0 else None,
-        save_steps=save_steps,
+        save_strategy="steps" if cfg.save_steps else "epoch",
+        eval_steps=cfg.eval_steps if cfg.val_set_size > 0 else None,
+        save_steps=cfg.save_steps,
         output_dir=cfg.output_dir,
         save_total_limit=3,
         load_best_model_at_end=(
             cfg.load_best_model_at_end is not False
             and cfg.val_set_size > 0
-            and save_steps
-            and save_steps % eval_steps == 0
+            and cfg.save_steps
+            and cfg.save_steps % cfg.eval_steps == 0
             and cfg.load_in_8bit is not True
         )
         or False,
@@ -230,7 +413,6 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
         callbacks.append(SavePeftModelCallback)
 
     if hasattr(model, "use_bettertransformer") and model.use_bettertransformer is True:
-        logging.info("Setting up SaveBetterTransformerModelCallback.")
         callbacks.append(SaveBetterTransformerModelCallback)
 
     data_collator_kwargs = {
@@ -241,10 +423,30 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     else:
         data_collator_kwargs["pad_to_multiple_of"] = 8
 
+    if cfg.is_llama_derived_model and cfg.landmark_attention:
+        from functools import partial
+
+        from axolotl.monkeypatch.llama_landmark_attn import (
+            add_mem_tokens,
+            get_mem_id,
+            set_model_mem_id,
+        )
+
+        set_model_mem_id(model, tokenizer)
+
+        logging.info("Adding landmark attention tokens to dataset")
+
+        for dataset in [train_dataset, eval_dataset]:
+            dataset = dataset.map(
+                partial(add_mem_tokens, mem_freq=50, mem_id=get_mem_id(tokenizer)),
+                batched=False,
+                num_proc=32,
+            )
+
     trainer_cls = (
         OneCycleLRSchedulerTrainer
         if cfg.lr_scheduler == "one_cycle" and (cfg.fsdp or cfg.adapter == "qlora")
-        else transformers.Trainer
+        else AxolotlTrainer
     )
     trainer = trainer_cls(
         model=model,
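One design note worth calling out: with sample packing, the per-device "batch size" becomes a token budget rather than a sample count. A rough sketch under assumed config values (output_dir and the numbers are illustrative only; AxolotlTrainingArguments is defined in the diff above):

# Hypothetical values to show the arithmetic the packed samplers use.
args = AxolotlTrainingArguments(
    output_dir="./out",  # placeholder path
    per_device_train_batch_size=2,
    max_seq_length=2048,
    sample_packing=True,
    lr_quadratic_warmup=True,
    lr_scheduler_type="cosine",
)
# _get_train_sampler() then packs samples into 2 * 2048 = 4096-token batches;
# how many samples fit each step depends on their lengths, not on a fixed count.
print(args.per_device_train_batch_size * args.max_seq_length)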
@@ -10,6 +10,12 @@ def validate_config(cfg):
         raise ValueError(
             "please set only one of gradient_accumulation_steps or batch_size"
         )
+    if cfg.batch_size:
+        logging.warning(
+            "%s\n%s",
+            "batch_size is not recommended. Please use gradient_accumulation_steps instead.",
+            "To calculate the equivalent gradient_accumulation_steps, divide batch_size / micro_batch_size / number of gpus.",
+        )
     if cfg.load_4bit:
         raise ValueError(
             "cfg.load_4bit parameter has been deprecated and replaced by cfg.gptq"
@@ -50,6 +56,14 @@ def validate_config(cfg):
             "Require cfg.hf_use_auth_token to be True for push_dataset_to_hub"
         )
 
+    if (cfg.base_model and "falcon" in cfg.base_model.lower()) and cfg.fsdp:
+        raise ValueError("FSDP is not supported for falcon models")
+
+    if (
+        cfg.base_model and "mpt" in cfg.base_model.lower()
+    ) and cfg.gradient_checkpointing:
+        raise ValueError("gradient_checkpointing is not supported for MPT models")
+
     if cfg.flash_optimum is True:
         if cfg.adapter:
             logging.warning(
@@ -67,6 +81,22 @@ def validate_config(cfg):
             raise ValueError(
                 f"flash_optimum for BetterTransformers may not be used with {torch.__version__}"
             )
+
+    if cfg.pretraining_dataset and cfg.group_by_length:
+        logging.warning(
+            "You probably want to disable group_by_length as it will force a streamed dataset to download completely."
+        )
+
+    if any([cfg.adam_beta1, cfg.adam_beta2, cfg.adam_epsilon]) and (
+        not cfg.optimizer or "adamw" not in cfg.optimizer
+    ):
+        logging.warning("adamw hyperparameters found, but no adamw optimizer set")
+
+    if cfg.push_to_hub_model_id:
+        raise ValueError(
+            "push_to_hub_model_id is deprecated. Please use hub_model_id instead."
+        )
+
     # TODO
     # MPT 7b
     # https://github.com/facebookresearch/bitsandbytes/issues/25
@@ -15,3 +15,5 @@ def setup_wandb_env_vars(cfg):
         os.environ["WANDB_LOG_MODEL"] = cfg.wandb_log_model
     if cfg.wandb_run_id and len(cfg.wandb_run_id) > 0:
         os.environ["WANDB_RUN_ID"] = cfg.wandb_run_id
+    else:
+        os.environ["WANDB_DISABLED"] = "true"
@@ -6,8 +6,16 @@ from pathlib import Path
 
 from transformers import AutoTokenizer
 
-from axolotl.prompt_tokenizers import ShareGPTPromptTokenizingStrategy
-from axolotl.prompters import ShareGPTPrompter
+from axolotl.prompt_strategies.alpaca_chat import NoSystemPrompter
+from axolotl.prompt_strategies.alpaca_w_system import (
+    InstructionWSystemPromptTokenizingStrategy,
+    SystemDataPrompter,
+)
+from axolotl.prompt_tokenizers import (
+    AlpacaPromptTokenizingStrategy,
+    ShareGPTPromptTokenizingStrategy,
+)
+from axolotl.prompters import AlpacaPrompter, PromptStyle, ShareGPTPrompter
 
 logging.basicConfig(level="INFO")
 
@@ -29,7 +37,6 @@ class TestPromptTokenizationStrategies(unittest.TestCase):
         )
 
     def test_sharegpt_integration(self):
-        print(Path(__file__).parent)
         with open(
             Path(__file__).parent / "fixtures/conversation.json", encoding="utf-8"
         ) as fin:
@@ -53,6 +60,79 @@ class TestPromptTokenizationStrategies(unittest.TestCase):
             self.assertEqual(len(example[fields]), len(tokenized_conversation[fields]))
             self.assertEqual(example[fields], tokenized_conversation[fields])
 
+    def test_no_sys_prompt(self):
+        """
+        tests the interface between the user and assistant parts
+        """
+        prompter = NoSystemPrompter()
+        # pylint: disable=duplicate-code
+        strat = AlpacaPromptTokenizingStrategy(
+            prompter,
+            self.tokenizer,
+            False,
+            2048,
+        )
+        sample = {
+            "instruction": "hello cruel. lorem ipsum dolor sit amet.",
+            "output": "world!",
+        }
+        example = strat.tokenize_prompt(sample)
+        world_idx = example["input_ids"].index(3186)
+        assert example["labels"][world_idx] == 3186
+        assert example["labels"][world_idx - 1] == -100
+
+    def test_alpaca(self):
+        """
+        tests the interface between the user and assistant parts
+        """
+        # pylint: disable=duplicate-code
+        prompter = AlpacaPrompter()
+        strat = AlpacaPromptTokenizingStrategy(
+            prompter,
+            self.tokenizer,
+            False,
+            2048,
+        )
+        sample = {"instruction": "hello!", "output": "Hi! How can I help?"}
+        example = strat.tokenize_prompt(sample)
+        world_idx = example["input_ids"].index(6324)
+        assert example["labels"][world_idx] == 6324
+        assert example["labels"][world_idx - 1] == -100
+
+
+class InstructionWSystemPromptTokenizingStrategyTest(unittest.TestCase):
+    """
+    Test class for prompt tokenization strategies with sys prompt from the dataset
+    """
+
+    def setUp(self) -> None:
+        # pylint: disable=duplicate-code
+        self.tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
+        self.tokenizer.add_special_tokens(
+            {
+                "bos_token": "<s>",
+                "eos_token": "</s>",
+                "unk_token": "<unk>",
+            }
+        )
+
+    def test_system_alpaca(self):
+        prompter = SystemDataPrompter(PromptStyle.CHAT.value)
+        strat = InstructionWSystemPromptTokenizingStrategy(
+            prompter,
+            self.tokenizer,
+            False,
+            2048,
+        )
+        sample = {
+            "system": "use cot",
+            "instruction": "hello!",
+            "output": "Hi! How can I help?",
+        }
+        example = strat.tokenize_prompt(sample)
+        assert example["input_ids"][0:3] == [1, 671, 20118]  # <s>use cot
+        assert example["input_ids"][3] == 11889  # USER
+
 
 if __name__ == "__main__":
     unittest.main()
@@ -2,7 +2,13 @@

 import unittest

-from axolotl.prompters import AlpacaPrompter, PromptStyle
+from axolotl.prompt_strategies.alpaca_w_system import SystemDataPrompter
+from axolotl.prompters import (
+    AlpacaPrompter,
+    MultipleChoiceExplainPrompter,
+    PromptStyle,
+    UnpromptedPrompter,
+)


 class AlpacaPrompterTest(unittest.TestCase):
@@ -55,3 +61,64 @@ class AlpacaPrompterTest(unittest.TestCase):
         assert "### Response:" not in res
         assert "USER:" in res
         assert "ASSISTANT:" in res
+
+    def test_system_prompt(self):
+        prompter = SystemDataPrompter(prompt_style=PromptStyle.CHAT.value)
+        res = next(
+            prompter.build_prompt_w_system(
+                "use cot", "tell me a joke about the following", "alpacas"
+            )
+        )
+        assert "use cot" in res
+        assert res.startswith("use cot")
+        assert "### Instruction:" not in res
+        assert "### Input:" not in res
+        assert "alpacas" in res
+        assert "### Response:" not in res
+        assert "USER:" in res
+        assert "ASSISTANT:" in res
+
+
+class UnpromptedPrompterTest(unittest.TestCase):
+    """
+    Test class for UnpromptedPrompter with no system prompts
+    """
+
+    def test_prompt_style_w_none(self):
+        prompter = UnpromptedPrompter(prompt_style=None)
+        res = next(prompter.build_prompt("tell me a joke"))
+        assert "### Instruction:" in res
+        assert "tell me a joke" in res
+        assert res.startswith("###")
+
+    def test_prompt_style_w_instruct(self):
+        prompter = UnpromptedPrompter(prompt_style=PromptStyle.INSTRUCT.value)
+        res = next(
+            prompter.build_prompt("tell me a joke about the following", "alpacas")
+        )
+        assert "### Instruction:" in res
+        assert "tell me a joke" in res
+        assert res.startswith("###")
+
+    def test_prompt_style_w_chat(self):
+        prompter = UnpromptedPrompter(prompt_style=PromptStyle.CHAT.value)
+        res = next(
+            prompter.build_prompt("tell me a joke about the following", "alpacas")
+        )
+        assert "USER:" in res
+        assert "tell me a joke" in res
+        assert res.startswith("USER:")
+
+
+class MultipleChoiceExplainPrompterTest(unittest.TestCase):
+    """
+    Test class for MultipleChoiceExplainPrompter
+    """
+
+    def test_prompt_style_w_chat(self):
+        prompter = MultipleChoiceExplainPrompter(prompt_style=PromptStyle.CHAT.value)
+        res = next(prompter.build_prompt("choose one", "- A\n- B\n- C", "C"))
+        assert "USER:" in res
+        assert "choose one" in res
+        assert "Choose the answer that best answers the question." in res
+        assert "- A\n- B\n- C" in res
31 tests/test_tokenizers.py Normal file
@@ -0,0 +1,31 @@
+"""
+Test cases for the tokenizer loading
+"""
+import unittest
+
+from axolotl.utils.dict import DictDefault
+from axolotl.utils.models import load_tokenizer
+
+
+class TestTokenizers(unittest.TestCase):
+    """
+    test class for the load_tokenizer fn
+    """
+
+    def test_default_use_fast(self):
+        cfg = DictDefault({})
+        tokenizer = load_tokenizer("huggyllama/llama-7b", None, cfg)
+        assert "Fast" in tokenizer.__class__.__name__
+
+    def test_dont_use_fast(self):
+        cfg = DictDefault(
+            {
+                "tokenizer_use_fast": False,
+            }
+        )
+        tokenizer = load_tokenizer("huggyllama/llama-7b", None, cfg)
+        assert "Fast" not in tokenizer.__class__.__name__
+
+
+if __name__ == "__main__":
+    unittest.main()
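The new tokenizer tests check that the "tokenizer_use_fast" config flag controls whether load_tokenizer returns a Rust-backed "*Fast" tokenizer or the slow Python one. A minimal sketch of the equivalent behaviour using Hugging Face transformers directly, assuming only the public AutoTokenizer API rather than axolotl's implementation:

    # Sketch (assumed equivalent, not the axolotl implementation): use_fast toggles
    # between LlamaTokenizerFast (default) and the slow LlamaTokenizer.
    from transformers import AutoTokenizer

    fast = AutoTokenizer.from_pretrained("huggyllama/llama-7b")                  # default: fast
    slow = AutoTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False)  # slow

    assert "Fast" in fast.__class__.__name__
    assert "Fast" not in slow.__class__.__name__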
@@ -1,6 +1,8 @@
 """Module for testing the validation module"""

+import logging
 import unittest
+from typing import Optional

 import pytest

@@ -13,6 +15,12 @@ class ValidationTest(unittest.TestCase):
     Test the validation module
     """

+    _caplog: Optional[pytest.LogCaptureFixture] = None
+
+    @pytest.fixture(autouse=True)
+    def inject_fixtures(self, caplog):
+        self._caplog = caplog
+
     def test_load_4bit_deprecate(self):
         cfg = DictDefault(
             {
@@ -23,6 +31,17 @@ class ValidationTest(unittest.TestCase):
         with pytest.raises(ValueError):
             validate_config(cfg)

+    def test_batch_size_unused_warning(self):
+        cfg = DictDefault(
+            {
+                "batch_size": 32,
+            }
+        )
+
+        with self._caplog.at_level(logging.WARNING):
+            validate_config(cfg)
+            assert "batch_size is not recommended" in self._caplog.records[0].message
+
     def test_qlora(self):
         base_cfg = DictDefault(
             {
@@ -146,3 +165,151 @@ class ValidationTest(unittest.TestCase):
         )

         validate_config(cfg)
+
+    def test_falcon_fsdp(self):
+        regex_exp = r".*FSDP is not supported for falcon models.*"
+
+        # Check for lower-case
+        cfg = DictDefault(
+            {
+                "base_model": "tiiuae/falcon-7b",
+                "fsdp": ["full_shard", "auto_wrap"],
+            }
+        )
+
+        with pytest.raises(ValueError, match=regex_exp):
+            validate_config(cfg)
+
+        # Check for upper-case
+        cfg = DictDefault(
+            {
+                "base_model": "Falcon-7b",
+                "fsdp": ["full_shard", "auto_wrap"],
+            }
+        )
+
+        with pytest.raises(ValueError, match=regex_exp):
+            validate_config(cfg)
+
+        cfg = DictDefault(
+            {
+                "base_model": "tiiuae/falcon-7b",
+            }
+        )
+
+        validate_config(cfg)
+
+    def test_mpt_gradient_checkpointing(self):
+        regex_exp = r".*gradient_checkpointing is not supported for MPT models*"
+
+        # Check for lower-case
+        cfg = DictDefault(
+            {
+                "base_model": "mosaicml/mpt-7b",
+                "gradient_checkpointing": True,
+            }
+        )
+
+        with pytest.raises(ValueError, match=regex_exp):
+            validate_config(cfg)
+
+    def test_flash_optimum(self):
+        cfg = DictDefault(
+            {
+                "flash_optimum": True,
+                "adapter": "lora",
+            }
+        )
+
+        with self._caplog.at_level(logging.WARNING):
+            validate_config(cfg)
+            assert any(
+                "BetterTransformers probably doesn't work with PEFT adapters"
+                in record.message
+                for record in self._caplog.records
+            )
+
+        cfg = DictDefault(
+            {
+                "flash_optimum": True,
+            }
+        )
+
+        with self._caplog.at_level(logging.WARNING):
+            validate_config(cfg)
+            assert any(
+                "probably set bfloat16 or float16" in record.message
+                for record in self._caplog.records
+            )
+
+        cfg = DictDefault(
+            {
+                "flash_optimum": True,
+                "fp16": True,
+            }
+        )
+        regex_exp = r".*AMP is not supported.*"
+
+        with pytest.raises(ValueError, match=regex_exp):
+            validate_config(cfg)
+
+        cfg = DictDefault(
+            {
+                "flash_optimum": True,
+                "bf16": True,
+            }
+        )
+        regex_exp = r".*AMP is not supported.*"
+
+        with pytest.raises(ValueError, match=regex_exp):
+            validate_config(cfg)
+
+    def test_adamw_hyperparams(self):
+        cfg = DictDefault(
+            {
+                "optimizer": None,
+                "adam_epsilon": 0.0001,
+            }
+        )
+
+        with self._caplog.at_level(logging.WARNING):
+            validate_config(cfg)
+            assert any(
+                "adamw hyperparameters found, but no adamw optimizer set"
+                in record.message
+                for record in self._caplog.records
+            )
+
+        cfg = DictDefault(
+            {
+                "optimizer": "adafactor",
+                "adam_beta1": 0.0001,
+            }
+        )
+
+        with self._caplog.at_level(logging.WARNING):
+            validate_config(cfg)
+            assert any(
+                "adamw hyperparameters found, but no adamw optimizer set"
+                in record.message
+                for record in self._caplog.records
+            )
+
+        cfg = DictDefault(
+            {
+                "optimizer": "adamw_bnb_8bit",
+                "adam_beta1": 0.9,
+                "adam_beta2": 0.99,
+                "adam_epsilon": 0.0001,
+            }
+        )
+
+        validate_config(cfg)
+
+        cfg = DictDefault(
+            {
+                "optimizer": "adafactor",
+            }
+        )
+
+        validate_config(cfg)
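These validation tests exercise one function, validate_config, which raises ValueError for unsupported combinations (falcon + FSDP, MPT + gradient checkpointing, flash_optimum with fp16/bf16 AMP) and only logs warnings for soft problems such as a stray batch_size or mismatched AdamW hyperparameters. A minimal sketch of calling it directly, with the import path assumed from the tests' context rather than shown in this diff:

    # Sketch (assumed usage; import path for validate_config is an assumption).
    import pytest

    from axolotl.utils.dict import DictDefault
    from axolotl.utils.validation import validate_config

    # Passes: nothing to complain about.
    validate_config(DictDefault({"optimizer": "adafactor"}))

    # Raises: AMP is not supported together with flash_optimum (per the test above).
    with pytest.raises(ValueError):
        validate_config(DictDefault({"flash_optimum": True, "fp16": True}))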