Compare commits

2 Commits

datasets-3 ... activation

| Author | SHA1 | Date |
|---|---|---|
|  | 7610a02881 |  |
|  | b0cd54bcb9 |  |
.github/workflows/tests.yml (vendored): 108 changed lines
@@ -44,98 +44,12 @@ jobs:
     env:
       SKIP: no-commit-to-branch
 
-  preload-cache:
-    name: Preload HF cache
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        python_version: ["3.11"]
-        pytorch_version: ["2.6.0"]
-    timeout-minutes: 20
-
-    env:
-      AXOLOTL_IS_CI_CACHE_PRELOAD: "1"
-
-    steps:
-      - name: Check out repository code
-        uses: actions/checkout@v4
-
-      - name: Restore HF cache
-        id: hf-cache-restore
-        uses: actions/cache/restore@v4
-        with:
-          path: |
-            /home/runner/.cache/huggingface/hub/datasets--*
-            /home/runner/.cache/huggingface/hub/models--*
-          key: ${{ runner.os }}-hf-hub-cache-v2
-
-      - name: Setup Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: ${{ matrix.python_version }}
-          cache: 'pip' # caching pip dependencies
-
-      - name: upgrade pip
-        run: |
-          pip3 install --upgrade pip
-          pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
-
-      - name: Install PyTorch
-        run: |
-          pip3 install torch==${{ matrix.pytorch_version }}
-
-      - name: Install dependencies
-        run: |
-          pip3 show torch
-          pip3 install --no-build-isolation -U -e .
-          python scripts/unsloth_install.py | sh
-          python scripts/cutcrossentropy_install.py | sh
-          pip3 install -r requirements-dev.txt -r requirements-tests.txt
-
-      - name: Make sure PyTorch version wasn't clobbered
-        run: |
-          python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"
-
-      - name: Ensure axolotl CLI was installed
-        run: |
-          axolotl --help
-
-      - name: Pre-Download dataset fixture
-        run: |
-          huggingface-cli download --repo-type=dataset axolotl-ai-internal/axolotl-oss-dataset-fixtures
-
-      - name: Run tests
-        run: |
-          pytest -v tests/conftest.py
-
-      - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v5
-        with:
-          token: ${{ secrets.CODECOV_TOKEN }}
-          files: ./coverage.xml
-          flags: unittests,pytorch-${{ matrix.pytorch_version }}
-          fail_ci_if_error: false
-
-      - name: cleanup pip cache
-        run: |
-          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
-
-      - name: Save HF cache
-        id: hf-cache
-        uses: actions/cache/save@v4
-        with:
-          path: |
-            /home/runner/.cache/huggingface/hub/datasets--*
-            /home/runner/.cache/huggingface/hub/models--*
-          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
-
   pytest:
     name: PyTest
     runs-on: ubuntu-latest
-    needs: [preload-cache]
     strategy:
       fail-fast: false
+      max-parallel: 2
       matrix:
         python_version: ["3.11"]
         pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
@@ -207,12 +121,21 @@ jobs:
         run: |
           find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
 
+      - name: Save HF cache
+        id: hf-cache
+        uses: actions/cache/save@v4
+        with:
+          path: |
+            /home/runner/.cache/huggingface/hub/datasets--*
+            /home/runner/.cache/huggingface/hub/models--*
+          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
+
   pytest-sdist:
     name: PyTest from Source Dist
     runs-on: ubuntu-latest
-    needs: [preload-cache]
     strategy:
       fail-fast: false
+      max-parallel: 1
       matrix:
         python_version: ["3.11"]
         pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
@@ -276,6 +199,15 @@ jobs:
         run: |
           find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
 
+      - name: Save HF cache
+        id: hf-cache
+        uses: actions/cache/save@v4
+        with:
+          path: |
+            /home/runner/.cache/huggingface/hub/datasets--*
+            /home/runner/.cache/huggingface/hub/models--*
+          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
+
   docker-e2e-tests-1st:
     if: ${{ ! contains(github.event.commits[0].message, '[skip e2e]') && github.repository_owner == 'axolotl-ai-cloud' }}
     # this job needs to be run on self-hosted GPU runners...
@@ -547,7 +547,7 @@ gradient_checkpointing: false
 early_stopping_patience: 3
 
 # Specify a scheduler and kwargs to use with the optimizer
-lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | 'linear' | 'cosine_with_restarts' | 'polynomial' | 'constant' | 'constant_with_warmup' | 'inverse_sqrt' | 'reduce_lr_on_plateau' | 'cosine_with_min_lr' | 'warmup_stable_decay' | empty for cosine
+lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine
 lr_scheduler_kwargs:
 cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr
 cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)
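
For orientation, a minimal sketch of how the scheduler keys above combine in an axolotl config; the values are illustrative, not taken from this diff:

```yaml
# Illustrative only: cosine decay whose floor is 10% of the peak LR.
learning_rate: 2e-5
lr_scheduler: cosine
lr_scheduler_kwargs: {}
cosine_min_lr_ratio: 0.1  # final LR = 0.1 * learning_rate
```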
@@ -1,341 +0,0 @@

# Finetuning LLMs to output audio

In this example, we finetune canopylabs/orpheus-3b-0.1-pretrained (a LLaMA 3.2 3B model) to output audio.

With the current settings, `finetune.yml` will run on any NVIDIA GPU with 45GB of VRAM or more. If you reduce the batch size, it can easily run on GPUs with less than 24GB.

## Dataset pre-processing for pre-training

If you are adding another voice in English, skip ahead to the finetune pre-processing section.

For this to work, we need to preprocess our dataset. Since the model is expected to output audio, we need to extend the tokenizer with dedicated audio tokens.

The code below downloads the SNAC model, encodes each audio clip into the correct tokens, and uploads the final dataset.

```python
import os

import torch
import torchaudio.transforms as T
from datasets import load_dataset
from huggingface_hub import snapshot_download
from snac import SNAC
from transformers import AutoTokenizer

my_original_dataset_name = "<huggingface-id-of-dataset-that-we-want-to-preprocess>"
name_to_push_dataset_to = "<huggingface-id-of-where-to-save-dataset>"

dsn = my_original_dataset_name

snapshot_download(
    repo_id=dsn,
    repo_type="dataset",
    revision="main",
    max_workers=64,
)

ds = load_dataset(dsn, split="train")
ds_sample_rate = ds[0]["audio"]["sampling_rate"]

# use "mps" on Apple silicon; the model and the waveforms must be on the same device
device = "cuda"
model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")
model = model.to(device)


def tokenise_audio(waveform):
    waveform = torch.from_numpy(waveform).unsqueeze(0)
    waveform = waveform.to(dtype=torch.float32)
    resample_transform = T.Resample(orig_freq=ds_sample_rate, new_freq=24000)
    waveform = resample_transform(waveform)

    waveform = waveform.unsqueeze(0).to(device)

    # generate the codes from snac
    with torch.inference_mode():
        codes = model.encode(waveform)

    # flatten the three SNAC codebooks into 7 tokens per frame
    # (1 coarse + 2 medium + 4 fine), each shifted into its own
    # 4096-wide id band starting at 128266
    all_codes = []
    for i in range(codes[0].shape[1]):
        all_codes.append(codes[0][0][i].item() + 128266)
        all_codes.append(codes[1][0][2 * i].item() + 128266 + 4096)
        all_codes.append(codes[2][0][4 * i].item() + 128266 + (2 * 4096))
        all_codes.append(codes[2][0][(4 * i) + 1].item() + 128266 + (3 * 4096))
        all_codes.append(codes[1][0][(2 * i) + 1].item() + 128266 + (4 * 4096))
        all_codes.append(codes[2][0][(4 * i) + 2].item() + 128266 + (5 * 4096))
        all_codes.append(codes[2][0][(4 * i) + 3].item() + 128266 + (6 * 4096))

    return all_codes


def add_codes(example):
    # Always initialize codes_list to None
    codes_list = None

    try:
        answer_audio = example.get("audio")
        # If there's a valid audio array, tokenise it
        if answer_audio and "array" in answer_audio:
            audio_array = answer_audio["array"]
            codes_list = tokenise_audio(audio_array)
    except Exception as e:
        print(f"Skipping row due to error: {e}")
        # Keep codes_list as None if we fail
    example["codes_list"] = codes_list

    return example


ds = ds.map(add_codes, remove_columns=["audio"])

#@title Load Tokenizer
# special-token layout appended after the base LLaMA vocab of 128256 tokens
tokeniser_length = 128256
start_of_text = 128000
end_of_text = 128009

start_of_speech = tokeniser_length + 1
end_of_speech = tokeniser_length + 2

start_of_human = tokeniser_length + 3
end_of_human = tokeniser_length + 4

start_of_ai = tokeniser_length + 5
end_of_ai = tokeniser_length + 6
pad_token = tokeniser_length + 7

# audio codes occupy 7 bands of 4096 ids starting here
audio_tokens_start = tokeniser_length + 10

tokenizer_name = "canopylabs/orpheus-3b-0.1-pretrained"

tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
num_proc = os.cpu_count() - 2

ds = ds.filter(lambda x: x["codes_list"] is not None)
ds = ds.filter(lambda x: len(x["codes_list"]) > 0)


#@title Create Input Ids
def remove_duplicate_frames(example):
    vals = example["codes_list"]
    if len(vals) % 7 != 0:
        raise ValueError("Input list length must be divisible by 7")

    result = vals[:7]

    removed_frames = 0

    for i in range(7, len(vals), 7):
        current_first = vals[i]
        previous_first = result[-7]

        if current_first != previous_first:
            result.extend(vals[i : i + 7])
        else:
            removed_frames += 1

    example["codes_list"] = result

    return example


ds = ds.map(remove_duplicate_frames, num_proc=num_proc)


def create_input_ids(example):
    text_ids = tokenizer.encode(example["text"], add_special_tokens=True)
    text_ids.append(end_of_text)
    example["text_tokens"] = text_ids
    input_ids = (
        [start_of_human]
        + example["text_tokens"]
        + [end_of_human]
        + [start_of_ai]
        + [start_of_speech]
        + example["codes_list"]
        + [end_of_speech]
        + [end_of_ai]
    )
    example["input_ids"] = input_ids
    example["labels"] = input_ids
    example["attention_mask"] = [1] * len(input_ids)

    return example


ds = ds.map(create_input_ids, num_proc=num_proc, remove_columns=["text", "codes_list"])

#@title Remove unnecessary columns
columns_to_keep = ["input_ids", "labels", "attention_mask"]
columns_to_remove = [col for col in ds.column_names if col not in columns_to_keep]

ds = ds.remove_columns(columns_to_remove)

ds.push_to_hub(name_to_push_dataset_to)
```
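
A quick, hedged sanity check you could run after the script above; it relies on the variables defined there, and `frame_tokens` is just an illustrative name:

```python
# Every SNAC frame contributes exactly 7 interleaved tokens, and all audio
# tokens sit at or above audio_tokens_start (128256 + 10 = 128266), while
# the special and text tokens sit below it.
frame_tokens = 7
sample = ds[0]
audio_span = [t for t in sample["input_ids"] if t >= audio_tokens_start]
assert len(audio_span) % frame_tokens == 0
print(f"{len(audio_span) // frame_tokens} SNAC frames in the first example")
```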

## Finetune pre-processing

Use this code to add a new voice.

```python
import os

import torch
import torchaudio.transforms as T
from datasets import load_dataset
from huggingface_hub import snapshot_download
from snac import SNAC
from transformers import AutoTokenizer

my_original_dataset_name = "<huggingface-id-of-dataset-that-we-want-to-preprocess>"
name_to_push_dataset_to = "<huggingface-id-of-where-to-save-dataset>"

dsn = my_original_dataset_name

snapshot_download(
    repo_id=dsn,
    repo_type="dataset",
    revision="main",
    max_workers=64,
)

ds = load_dataset(dsn, split="train")
ds_sample_rate = ds[0]["audio"]["sampling_rate"]

# use "mps" on Apple silicon; the model and the waveforms must be on the same device
device = "cuda"
model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")
model = model.to(device)


def tokenise_audio(waveform):
    waveform = torch.from_numpy(waveform).unsqueeze(0)
    waveform = waveform.to(dtype=torch.float32)
    resample_transform = T.Resample(orig_freq=ds_sample_rate, new_freq=24000)
    waveform = resample_transform(waveform)

    waveform = waveform.unsqueeze(0).to(device)

    # generate the codes from snac
    with torch.inference_mode():
        codes = model.encode(waveform)

    # flatten the three SNAC codebooks into 7 tokens per frame, as above
    all_codes = []
    for i in range(codes[0].shape[1]):
        all_codes.append(codes[0][0][i].item() + 128266)
        all_codes.append(codes[1][0][2 * i].item() + 128266 + 4096)
        all_codes.append(codes[2][0][4 * i].item() + 128266 + (2 * 4096))
        all_codes.append(codes[2][0][(4 * i) + 1].item() + 128266 + (3 * 4096))
        all_codes.append(codes[1][0][(2 * i) + 1].item() + 128266 + (4 * 4096))
        all_codes.append(codes[2][0][(4 * i) + 2].item() + 128266 + (5 * 4096))
        all_codes.append(codes[2][0][(4 * i) + 3].item() + 128266 + (6 * 4096))

    return all_codes


def add_codes(example):
    # Always initialize codes_list to None
    codes_list = None

    try:
        answer_audio = example.get("audio")
        # If there's a valid audio array, tokenise it
        if answer_audio and "array" in answer_audio:
            audio_array = answer_audio["array"]
            codes_list = tokenise_audio(audio_array)
    except Exception as e:
        print(f"Skipping row due to error: {e}")
        # Keep codes_list as None if we fail
    example["codes_list"] = codes_list

    return example


ds = ds.map(add_codes, remove_columns=["audio"])

#@title Load Tokenizer
tokeniser_length = 128256
start_of_text = 128000
end_of_text = 128009

start_of_speech = tokeniser_length + 1
end_of_speech = tokeniser_length + 2

start_of_human = tokeniser_length + 3
end_of_human = tokeniser_length + 4

start_of_ai = tokeniser_length + 5
end_of_ai = tokeniser_length + 6
pad_token = tokeniser_length + 7

audio_tokens_start = tokeniser_length + 10

tokenizer_name = "canopylabs/orpheus-3b-0.1-pretrained"

tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
num_proc = os.cpu_count() - 2

ds = ds.filter(lambda x: x["codes_list"] is not None)
ds = ds.filter(lambda x: len(x["codes_list"]) > 0)


#@title Create Input Ids
def remove_duplicate_frames(example):
    vals = example["codes_list"]
    if len(vals) % 7 != 0:
        raise ValueError("Input list length must be divisible by 7")

    result = vals[:7]

    removed_frames = 0

    for i in range(7, len(vals), 7):
        current_first = vals[i]
        previous_first = result[-7]

        if current_first != previous_first:
            result.extend(vals[i : i + 7])
        else:
            removed_frames += 1

    example["codes_list"] = result

    return example


ds = ds.map(remove_duplicate_frames, num_proc=num_proc)

tok_info = '''*** HERE you can modify the text prompt
i.e. if you wanted a multispeaker model like canopylabs/orpheus-3b-0.1-ft, you can pass:
f"{example['source']}: {example['text']}", as is done below with the speaker_id column.
'''
print(tok_info)


def create_input_ids(example):
    text_ids = tokenizer.encode(
        f"{example['speaker_id']}: {example['text']}", add_special_tokens=True
    )
    text_ids.append(end_of_text)
    example["text_tokens"] = text_ids
    input_ids = (
        [start_of_human]
        + example["text_tokens"]
        + [end_of_human]
        + [start_of_ai]
        + [start_of_speech]
        + example["codes_list"]
        + [end_of_speech]
        + [end_of_ai]
    )
    example["input_ids"] = input_ids
    example["labels"] = input_ids
    example["attention_mask"] = [1] * len(input_ids)

    return example


ds = ds.map(create_input_ids, num_proc=num_proc, remove_columns=["text", "codes_list"])

#@title Remove unnecessary columns
columns_to_keep = ["input_ids", "labels", "attention_mask"]
columns_to_remove = [col for col in ds.column_names if col not in columns_to_keep]

ds = ds.remove_columns(columns_to_remove)

ds.push_to_hub(name_to_push_dataset_to)
```

## Training

After preprocessing is done, fill out the blanks in `finetune.yml` and simply run `axolotl train finetune.yml`.

## Inference

For inference, please refer to the original [Orpheus GitHub](https://github.com/canopyai/Orpheus-TTS/tree/main).
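
To make the token layout concrete, here is a hedged sketch of the reverse mapping used at inference time: de-interleaving the 7-token frames back into the three SNAC codebooks and decoding them to a waveform. It assumes the offsets from the preprocessing scripts above; `decode_audio_tokens` is an illustrative name, not an Orpheus API.

```python
import torch
from snac import SNAC

snac_model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").to("cuda")


def decode_audio_tokens(audio_tokens):
    """Invert the 7-way interleave: slot k of each frame was offset by 128266 + k * 4096."""
    frames = [audio_tokens[i : i + 7] for i in range(0, len(audio_tokens), 7)]
    c0, c1, c2 = [], [], []
    for f in frames:
        c0.append(f[0] - 128266)
        c1 += [f[1] - 128266 - 4096, f[4] - 128266 - 4 * 4096]
        c2 += [
            f[2] - 128266 - 2 * 4096,
            f[3] - 128266 - 3 * 4096,
            f[5] - 128266 - 5 * 4096,
            f[6] - 128266 - 6 * 4096,
        ]
    codes = [
        torch.tensor(c, dtype=torch.long, device="cuda").unsqueeze(0)
        for c in (c0, c1, c2)
    ]
    with torch.inference_mode():
        return snac_model.decode(codes)  # 24 kHz waveform tensor
```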
@@ -1,52 +0,0 @@

base_model: canopylabs/orpheus-3b-0.1-pretrained

hub_model_id: <your-hub-model-id>

plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_glu_activation: true
liger_fused_linear_cross_entropy: true

datasets:
  - path: <your-hf-dataset-id>
    type: # leave empty to load pre-tokenized
dataset_prepared_path: last_run_prepared
val_set_size: 0.01
output_dir: ./outputs/out

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 8
micro_batch_size: 4
num_epochs: 3
optimizer: adamw_torch_fused
lr_scheduler: cosine
learning_rate: 2e-5

bf16: auto
tf32: false

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
flash_attention: true

warmup_steps: 20
evals_per_epoch: 5
saves_per_epoch: 5
weight_decay: 0.05

special_tokens:
  pad_token: <custom_token_7>
@@ -15,7 +15,7 @@ peft==0.15.2
 transformers==4.51.3
 tokenizers>=0.21.1
 accelerate==1.6.0
-datasets==3.5.1
+datasets==3.5.0
 deepspeed>=0.15.4
 trl==0.17.0
 hf_xet==1.1.0
@@ -16,15 +16,8 @@ AXOLOTL_LOGO = """
 @@@@ @@@@@@@@@@@@@@@@
 """
 
-HAS_PRINTED_LOGO = False
-
-
 def print_axolotl_text_art():
     """Prints axolotl ASCII art."""
 
-    global HAS_PRINTED_LOGO  # pylint: disable=global-statement
-    if HAS_PRINTED_LOGO:
-        return
     if is_main_process():
-        HAS_PRINTED_LOGO = True
         print(AXOLOTL_LOGO)
@@ -48,7 +48,6 @@ def load_datasets(
     *,
     cfg: DictDefault,
     cli_args: PreprocessCliArgs | TrainerCliArgs | None = None,
-    debug: bool = False,
 ) -> TrainDatasetMeta:
     """
     Loads one or more training or evaluation datasets, calling
@@ -57,7 +56,6 @@ def load_datasets(
     Args:
         cfg: Dictionary mapping `axolotl` config keys to values.
         cli_args: Command-specific CLI arguments.
-        debug: Whether to print out tokenization of sample
 
     Returns:
         Dataclass with fields for training and evaluation datasets and the computed
@@ -79,25 +77,20 @@ def load_datasets(
         preprocess_iterable=preprocess_iterable,
     )
 
-    if (  # pylint: disable=too-many-boolean-expressions
-        cli_args
-        and (
-            cli_args.debug
-            or cfg.debug
-            or cli_args.debug_text_only
-            or int(cli_args.debug_num_examples) > 0
-        )
-    ) or debug:
+    if cli_args and (
+        cli_args.debug
+        or cfg.debug
+        or cli_args.debug_text_only
+        or int(cli_args.debug_num_examples) > 0
+    ):
         LOG.info("check_dataset_labels...")
 
-        num_examples = cli_args.debug_num_examples if cli_args else 1
-        text_only = cli_args.debug_text_only if cli_args else False
-        train_samples = sample_dataset(train_dataset, num_examples)
+        train_samples = sample_dataset(train_dataset, cli_args.debug_num_examples)
         check_dataset_labels(
             train_samples,
             tokenizer,
-            num_examples=num_examples,
-            text_only=text_only,
+            num_examples=cli_args.debug_num_examples,
+            text_only=cli_args.debug_text_only,
         )
 
         LOG.info("printing prompters...")
@@ -168,9 +168,6 @@ class TrainerBuilderBase(abc.ABC):
             )
         )
 
-        if self.cfg.gc_steps:
-            callbacks.append(GCCallback(gc_steps=self.cfg.gc_steps))
-
         if self.cfg.use_wandb:
             callbacks.append(
                 SaveAxolotlConfigtoWandBCallback(self.cfg.axolotl_config_path)
@@ -252,6 +249,9 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         if self.cfg.loss_watchdog_threshold is not None:
             callbacks.append(LossWatchDogCallback(self.cfg))
 
+        if self.cfg.gc_steps:
+            callbacks.append(GCCallback(gc_steps=self.cfg.gc_steps))
+
         return callbacks
 
     def get_post_trainer_create_callbacks(self, trainer):
@@ -610,3 +610,15 @@ class AxolotlTrainer(
         output_dir = os.path.join(run_dir, checkpoint_folder)
         os.makedirs(output_dir, exist_ok=True)
         return super()._save_checkpoint(model, trial, **kwargs)
+
+    def compute_loss_context_manager(self):
+        from contextlib import ExitStack
+
+        from torchtune.training import OffloadActivations
+
+        stack = ExitStack()
+
+        stack.enter_context(super().compute_loss_context_manager())
+        stack.enter_context(OffloadActivations())
+
+        return stack
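
The new `compute_loss_context_manager` stacks torchtune's activation offloading on top of the trainer's existing loss context. For orientation, a hedged, standalone sketch of what that context does, assuming torchtune's documented behavior (tensors saved for backward are parked in host memory during the forward pass and fetched back on demand):

```python
import torch
from torchtune.training import OffloadActivations

model = torch.nn.Sequential(
    torch.nn.Linear(1024, 1024), torch.nn.ReLU(), torch.nn.Linear(1024, 1024)
).cuda()
x = torch.randn(8, 1024, device="cuda", requires_grad=True)

# Activations saved by autograd inside this context are offloaded to CPU,
# trading some speed for GPU memory headroom.
with OffloadActivations():
    loss = model(x).sum()
loss.backward()
```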
@@ -247,9 +247,7 @@ class AxolotlDPOTrainer(RngLoaderMixin, SchedulerMixin, DPOTrainer):
         )
 
         # Base evaluation
-        initial_output = super(  # pylint: disable=bad-super-call
-            DPOTrainer, self
-        ).evaluation_loop(
+        initial_output = super().evaluation_loop(
             dataloader,
             description,
             prediction_loss_only,
@@ -21,7 +21,6 @@ from transformers import PreTrainedModel, PreTrainedTokenizer, ProcessorMixin
 from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
 from transformers.trainer import Trainer
 
-from axolotl.cli.art import print_axolotl_text_art
 from axolotl.common.datasets import TrainDatasetMeta
 from axolotl.contribs.lgpl import (  # pylint: disable = no-name-in-module
     fix_untrained_tokens,
@@ -517,8 +516,6 @@ def train(
     Returns:
         Tuple of (model, tokenizer) after training
     """
-    print_axolotl_text_art()
-
     # Setup model, tokenizer, (causal or RLHF) trainer, etc.
     (
         trainer,
@@ -2,6 +2,13 @@
 
 from functools import partial
 
+import torch
+from torch.utils.checkpoint import (
+    CheckpointPolicy,
+    checkpoint,
+    create_selective_checkpoint_contexts,
+)
+
 from axolotl.utils.gradient_checkpointing.unsloth import (
     Unsloth_Offloaded_Gradient_Checkpointer,
 )
@@ -18,3 +25,32 @@ def hf_grad_checkpoint_offload_wrapper(
         ),
         *args,
     )
+
+
+aten = torch.ops.aten
+compute_intensive_ops = [
+    aten.mm.default,
+    aten.bmm.default,
+    aten.addmm.default,
+]
+
+
+def policy_fn(ctx, op, *args, **kwargs):
+    if op in compute_intensive_ops:
+        return CheckpointPolicy.MUST_SAVE
+    else:
+        return CheckpointPolicy.PREFER_RECOMPUTE
+
+
+context_fn = partial(create_selective_checkpoint_contexts, policy_fn)
+
+
+def checkpoint_w_policy(
+    decoder_layer, *args, use_reentrant=None
+):  # pylint: disable=unused-argument
+    return checkpoint(
+        decoder_layer,
+        *args,
+        use_reentrant=use_reentrant,
+        context_fn=context_fn,
+    )
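
For orientation, a hedged usage sketch of the selective-checkpointing pattern added above, applied to a toy module (PyTorch 2.4+ `torch.utils.checkpoint` APIs; the toy layer is illustrative): matmul-like ops are saved during the forward pass, everything else is recomputed in backward.

```python
from functools import partial

import torch
from torch.utils.checkpoint import (
    CheckpointPolicy,
    checkpoint,
    create_selective_checkpoint_contexts,
)

aten = torch.ops.aten


def policy_fn(ctx, op, *args, **kwargs):
    # Save the outputs of compute-intensive matmuls; recompute the rest.
    if op in (aten.mm.default, aten.bmm.default, aten.addmm.default):
        return CheckpointPolicy.MUST_SAVE
    return CheckpointPolicy.PREFER_RECOMPUTE


context_fn = partial(create_selective_checkpoint_contexts, policy_fn)

layer = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.GELU())
x = torch.randn(4, 64, requires_grad=True)
out = checkpoint(layer, x, use_reentrant=False, context_fn=context_fn)
out.sum().backward()
```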
@@ -4,7 +4,6 @@ shared pytest fixtures
 
 import functools
 import importlib
-import os
 import shutil
 import sys
 import tempfile
@@ -530,32 +529,31 @@ def dataset_fozziethebeat_alpaca_messages_2k_dpo_test_rev_ea82cff(
 
 
 # # pylint: disable=redefined-outer-name,unused-argument
-@pytest.mark.skipif(
-    os.environ.get("AXOLOTL_IS_CI_CACHE_PRELOAD", "-1") != "1",
-    reason="Not running in CI cache preload",
-)
-def test_load_fixtures(
-    download_smollm2_135m_model,
-    download_qwen_2_5_half_billion_model,
-    download_tatsu_lab_alpaca_dataset,
-    download_mhenrichsen_alpaca_2k_dataset,
-    download_mhenrichsen_alpaca_2k_w_revision_dataset,
-    download_mlabonne_finetome_100k_dataset,
-    download_argilla_distilabel_capybara_dpo_7k_binarized_dataset,
-    download_arcee_ai_distilabel_intel_orca_dpo_pairs_dataset,
-    download_argilla_dpo_pairs_dataset,
-    download_tiny_shakespeare_dataset,
-    download_deepseek_model_fixture,
-    download_huggyllama_model_fixture,
-    download_llama_1b_model_fixture,
-    download_llama3_8b_model_fixture,
-    download_llama3_8b_instruct_model_fixture,
-    download_phi_35_mini_model_fixture,
-    download_phi_3_medium_model_fixture,
-    download_mistral_7b_model_fixture,
-    download_gemma_2b_model_fixture,
-    download_gemma2_9b_model_fixture,
-    download_mlx_mistral_7b_model_fixture,
-    download_llama2_model_fixture,
-):
-    pass
+# def test_load_fixtures(
+#     download_smollm2_135m_model,
+#     download_llama_68m_random_model,
+#     download_qwen_2_5_half_billion_model,
+#     download_tatsu_lab_alpaca_dataset,
+#     download_mhenrichsen_alpaca_2k_dataset,
+#     download_mhenrichsen_alpaca_2k_w_revision_dataset,
+#     download_mlabonne_finetome_100k_dataset,
+#     download_argilla_distilabel_capybara_dpo_7k_binarized_dataset,
+#     download_argilla_ultrafeedback_binarized_preferences_cleaned_dataset,
+#     download_fozzie_alpaca_dpo_dataset,
+#     download_arcee_ai_distilabel_intel_orca_dpo_pairs_dataset,
+#     download_argilla_dpo_pairs_dataset,
+#     download_tiny_shakespeare_dataset,
+#     download_deepseek_model_fixture,
+#     download_huggyllama_model_fixture,
+#     download_llama_1b_model_fixture,
+#     download_llama3_8b_model_fixture,
+#     download_llama3_8b_instruct_model_fixture,
+#     download_phi_35_mini_model_fixture,
+#     download_phi_3_medium_model_fixture,
+#     download_mistral_7b_model_fixture,
+#     download_gemma_2b_model_fixture,
+#     download_gemma2_9b_model_fixture,
+#     download_mlx_mistral_7b_model_fixture,
+#     download_llama2_model_fixture,
+# ):
+#     pass