Compare commits
10 Commits
fix/vllm-v... → datasets-3...
| Author | SHA1 | Date |
|---|---|---|
| | d790371b64 | |
| | a6cac5dd32 | |
| | b71c0e3447 | |
| | ddaebf8309 | |
| | 679743087a | |
| | f720b6e72d | |
| | a980618fd0 | |
| | 54960d4de0 | |
| | ed922796b7 | |
| | 3dd9c3bf3f | |
112 changes: .github/workflows/tests.yml (vendored)

@@ -44,12 +44,98 @@ jobs:
     env:
       SKIP: no-commit-to-branch

-  pytest:
-    name: PyTest
+  preload-cache:
+    name: Preload HF cache
     runs-on: ubuntu-latest
     strategy:
       fail-fast: false
-      max-parallel: 2
+      matrix:
+        python_version: ["3.11"]
+        pytorch_version: ["2.6.0"]
+    timeout-minutes: 20
+
+    env:
+      AXOLOTL_IS_CI_CACHE_PRELOAD: "1"
+
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v4
+
+      - name: Restore HF cache
+        id: hf-cache-restore
+        uses: actions/cache/restore@v4
+        with:
+          path: |
+            /home/runner/.cache/huggingface/hub/datasets--*
+            /home/runner/.cache/huggingface/hub/models--*
+          key: ${{ runner.os }}-hf-hub-cache-v2
+
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python_version }}
+          cache: 'pip' # caching pip dependencies
+
+      - name: upgrade pip
+        run: |
+          pip3 install --upgrade pip
+          pip3 install --upgrade packaging==23.2 setuptools==75.8.0 wheel
+
+      - name: Install PyTorch
+        run: |
+          pip3 install torch==${{ matrix.pytorch_version }}
+
+      - name: Install dependencies
+        run: |
+          pip3 show torch
+          pip3 install --no-build-isolation -U -e .
+          python scripts/unsloth_install.py | sh
+          python scripts/cutcrossentropy_install.py | sh
+          pip3 install -r requirements-dev.txt -r requirements-tests.txt
+
+      - name: Make sure PyTorch version wasn't clobbered
+        run: |
+          python -c "import torch; assert '${{ matrix.pytorch_version }}' in torch.__version__"
+
+      - name: Ensure axolotl CLI was installed
+        run: |
+          axolotl --help
+
+      - name: Pre-Download dataset fixture
+        run: |
+          huggingface-cli download --repo-type=dataset axolotl-ai-internal/axolotl-oss-dataset-fixtures
+
+      - name: Run tests
+        run: |
+          pytest -v tests/conftest.py
+
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v5
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ./coverage.xml
+          flags: unittests,pytorch-${{ matrix.pytorch_version }}
+          fail_ci_if_error: false
+
+      - name: cleanup pip cache
+        run: |
+          find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;
+
+      - name: Save HF cache
+        id: hf-cache
+        uses: actions/cache/save@v4
+        with:
+          path: |
+            /home/runner/.cache/huggingface/hub/datasets--*
+            /home/runner/.cache/huggingface/hub/models--*
+          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
+
+  pytest:
+    name: PyTest
+    runs-on: ubuntu-latest
+    needs: [preload-cache]
+    strategy:
+      fail-fast: false
       matrix:
         python_version: ["3.11"]
         pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
@@ -121,21 +207,12 @@ jobs:
         run: |
           find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;

-      - name: Save HF cache
-        id: hf-cache
-        uses: actions/cache/save@v4
-        with:
-          path: |
-            /home/runner/.cache/huggingface/hub/datasets--*
-            /home/runner/.cache/huggingface/hub/models--*
-          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
-
   pytest-sdist:
     name: PyTest from Source Dist
     runs-on: ubuntu-latest
+    needs: [preload-cache]
     strategy:
       fail-fast: false
-      max-parallel: 1
       matrix:
         python_version: ["3.11"]
         pytorch_version: ["2.5.1", "2.6.0", "2.7.0"]
@@ -199,15 +276,6 @@ jobs:
         run: |
           find "$(pip cache dir)/http-v2" -type f -mtime +14 -exec rm {} \;

-      - name: Save HF cache
-        id: hf-cache
-        uses: actions/cache/save@v4
-        with:
-          path: |
-            /home/runner/.cache/huggingface/hub/datasets--*
-            /home/runner/.cache/huggingface/hub/models--*
-          key: ${{ steps.hf-cache-restore.outputs.cache-primary-key }}
-
   docker-e2e-tests-1st:
     if: ${{ ! contains(github.event.commits[0].message, '[skip e2e]') && github.repository_owner == 'axolotl-ai-cloud' }}
     # this job needs to be run on self-hosted GPU runners...
@@ -547,7 +547,7 @@ gradient_checkpointing: false
 early_stopping_patience: 3

 # Specify a scheduler and kwargs to use with the optimizer
-lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | empty for cosine
+lr_scheduler: # 'one_cycle' | 'rex' | 'log_sweep' | 'linear' | 'cosine_with_restarts' | 'polynomial' | 'constant' | 'constant_with_warmup' | 'inverse_sqrt' | 'reduce_lr_on_plateau' | 'cosine_with_min_lr' | 'warmup_stable_decay' | empty for cosine
 lr_scheduler_kwargs:
 cosine_min_lr_ratio: # decay lr to some percentage of the peak lr, e.g. cosine_min_lr_ratio=0.1 for 10% of peak lr
 cosine_constant_lr_ratio: # freeze lr at some percentage of the step, e.g. cosine_constant_lr_ratio=0.8 means start cosine_min_lr at 80% of training step (https://arxiv.org/pdf/2308.04014.pdf)
341 changes: examples/orpheus/README.md (new file)

@@ -0,0 +1,341 @@
# Finetuning LLMs to output audio

In this example, we finetune canopylabs/orpheus-3b-0.1-pretrained (a LLaMA 3.2 3B model) to output audio.

The `finetune.yml` with the current settings will run on any Nvidia GPU with 45GB of VRAM or more. If you lower the batch size, it can easily run on GPUs with less than 24GB of VRAM.
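To shrink the footprint, reduce `micro_batch_size` and raise `gradient_accumulation_steps` in `finetune.yml` so the effective batch size stays put: the example config uses 4 × 8 = 32 per GPU, and 1 × 32 keeps the same effective batch at a fraction of the per-step memory.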
## Dataset pre-processing for pre-training

If you are adding another voice in English, please jump ahead to the finetuning pre-processing section.

For this to work, we need to preprocess our dataset. Since we expect the model to output audio, we need to add tokens for it to the tokenizer.

The code below downloads the SNAC model, encodes each audio sample into the corresponding tokens, and uploads the final dataset.
```python
import os

import torch
import torchaudio.transforms as T
from datasets import load_dataset
from huggingface_hub import snapshot_download
from snac import SNAC
from transformers import AutoTokenizer

my_original_dataset_name = "<huggingface-id-of-dataset-that-we-want-to-preprocess>"
name_to_push_dataset_to = "<huggingface-id-of-where-to-save-dataset>"

dsn = my_original_dataset_name

snapshot_download(
    repo_id=dsn,
    repo_type="dataset",
    revision="main",
    max_workers=64,
)

ds = load_dataset(dsn, split="train")
ds_sample_rate = ds[0]["audio"]["sampling_rate"]

model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")
model = model.to("cuda")  # keep the model on the same device the waveforms are moved to below


def tokenise_audio(waveform):
    waveform = torch.from_numpy(waveform).unsqueeze(0)
    waveform = waveform.to(dtype=torch.float32)
    resample_transform = T.Resample(orig_freq=ds_sample_rate, new_freq=24000)
    waveform = resample_transform(waveform)

    waveform = waveform.unsqueeze(0).to("cuda")

    # generate the codes from snac
    with torch.inference_mode():
        codes = model.encode(waveform)

    # flatten the three codebooks into 7 tokens per frame, each position
    # offset into its own 4096-wide id range above the text vocabulary
    all_codes = []
    for i in range(codes[0].shape[1]):
        all_codes.append(codes[0][0][i].item()+128266)
        all_codes.append(codes[1][0][2*i].item()+128266+4096)
        all_codes.append(codes[2][0][4*i].item()+128266+(2*4096))
        all_codes.append(codes[2][0][(4*i)+1].item()+128266+(3*4096))
        all_codes.append(codes[1][0][(2*i)+1].item()+128266+(4*4096))
        all_codes.append(codes[2][0][(4*i)+2].item()+128266+(5*4096))
        all_codes.append(codes[2][0][(4*i)+3].item()+128266+(6*4096))

    return all_codes


def add_codes(example):
    # Always initialize codes_list to None
    codes_list = None

    try:
        answer_audio = example.get("audio")
        # If there's a valid audio array, tokenise it
        if answer_audio and "array" in answer_audio:
            audio_array = answer_audio["array"]
            codes_list = tokenise_audio(audio_array)
    except Exception as e:
        print(f"Skipping row due to error: {e}")
        # Keep codes_list as None if we fail
    example["codes_list"] = codes_list

    return example


ds = ds.map(add_codes, remove_columns=["audio"])

#@title Load Tokenizer
tokeniser_length = 128256
start_of_text = 128000
end_of_text = 128009

start_of_speech = tokeniser_length + 1
end_of_speech = tokeniser_length + 2

start_of_human = tokeniser_length + 3
end_of_human = tokeniser_length + 4

start_of_ai = tokeniser_length + 5
end_of_ai = tokeniser_length + 6
pad_token = tokeniser_length + 7

audio_tokens_start = tokeniser_length + 10

tokenizer_name = "canopylabs/orpheus-3b-0.1-pretrained"

tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
num_proc = os.cpu_count() - 2

ds = ds.filter(lambda x: x["codes_list"] is not None)
ds = ds.filter(lambda x: len(x["codes_list"]) > 0)


#@title Create Input Ids
def remove_duplicate_frames(example):
    vals = example["codes_list"]
    if len(vals) % 7 != 0:
        raise ValueError("Input list length must be divisible by 7")

    result = vals[:7]

    removed_frames = 0

    for i in range(7, len(vals), 7):
        current_first = vals[i]
        previous_first = result[-7]

        if current_first != previous_first:
            result.extend(vals[i:i+7])
        else:
            removed_frames += 1

    example["codes_list"] = result

    return example


ds = ds.map(remove_duplicate_frames, num_proc=num_proc)


def create_input_ids(example):
    text_ids = tokenizer.encode(example["text"], add_special_tokens=True)
    text_ids.append(end_of_text)
    example["text_tokens"] = text_ids
    input_ids = (
        [start_of_human]
        + example["text_tokens"]
        + [end_of_human]
        + [start_of_ai]
        + [start_of_speech]
        + example["codes_list"]
        + [end_of_speech]
        + [end_of_ai]
    )
    example["input_ids"] = input_ids
    example["labels"] = input_ids
    example["attention_mask"] = [1] * len(input_ids)

    return example


ds = ds.map(create_input_ids, num_proc=num_proc, remove_columns=["text", "codes_list"])

#@title Remove unnecessary columns
columns_to_keep = ["input_ids", "labels", "attention_mask"]
columns_to_remove = [col for col in ds.column_names if col not in columns_to_keep]

ds = ds.remove_columns(columns_to_remove)

ds.push_to_hub(name_to_push_dataset_to)
```
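Each SNAC frame becomes seven tokens: one from codebook 0, two from codebook 1, and four from codebook 2, with position k in the frame offset by 128266 + k × 4096. As a sanity check, here is a hypothetical helper (not part of the script above) that inverts the interleave back into the three codebooks:

```python
def deinterleave(all_codes):
    """Undo the 7-tokens-per-frame flattening produced by tokenise_audio."""
    assert len(all_codes) % 7 == 0, "stream must contain whole frames"
    c0, c1, c2 = [], [], []
    for f in range(0, len(all_codes), 7):
        # position k in the frame was offset by 128266 + k * 4096
        frame = [t - 128266 - k * 4096 for k, t in enumerate(all_codes[f:f + 7])]
        c0.append(frame[0])
        c1.extend([frame[1], frame[4]])  # codebook 1: indices 2i, 2i+1
        c2.extend([frame[2], frame[3], frame[5], frame[6]])  # codebook 2: 4i..4i+3
    return c0, c1, c2
```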
## Finetune pre-processing

Use this code to add a new voice.
```python
import os

import torch
import torchaudio.transforms as T
from datasets import load_dataset
from huggingface_hub import snapshot_download
from snac import SNAC
from transformers import AutoTokenizer

my_original_dataset_name = "<huggingface-id-of-dataset-that-we-want-to-preprocess>"
name_to_push_dataset_to = "<huggingface-id-of-where-to-save-dataset>"

dsn = my_original_dataset_name

snapshot_download(
    repo_id=dsn,
    repo_type="dataset",
    revision="main",
    max_workers=64,
)

ds = load_dataset(dsn, split="train")
ds_sample_rate = ds[0]["audio"]["sampling_rate"]

model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")
model = model.to("cuda")  # keep the model on the same device the waveforms are moved to below


def tokenise_audio(waveform):
    waveform = torch.from_numpy(waveform).unsqueeze(0)
    waveform = waveform.to(dtype=torch.float32)
    resample_transform = T.Resample(orig_freq=ds_sample_rate, new_freq=24000)
    waveform = resample_transform(waveform)

    waveform = waveform.unsqueeze(0).to("cuda")

    # generate the codes from snac
    with torch.inference_mode():
        codes = model.encode(waveform)

    # flatten the three codebooks into 7 tokens per frame, each position
    # offset into its own 4096-wide id range above the text vocabulary
    all_codes = []
    for i in range(codes[0].shape[1]):
        all_codes.append(codes[0][0][i].item()+128266)
        all_codes.append(codes[1][0][2*i].item()+128266+4096)
        all_codes.append(codes[2][0][4*i].item()+128266+(2*4096))
        all_codes.append(codes[2][0][(4*i)+1].item()+128266+(3*4096))
        all_codes.append(codes[1][0][(2*i)+1].item()+128266+(4*4096))
        all_codes.append(codes[2][0][(4*i)+2].item()+128266+(5*4096))
        all_codes.append(codes[2][0][(4*i)+3].item()+128266+(6*4096))

    return all_codes


def add_codes(example):
    # Always initialize codes_list to None
    codes_list = None

    try:
        answer_audio = example.get("audio")
        # If there's a valid audio array, tokenise it
        if answer_audio and "array" in answer_audio:
            audio_array = answer_audio["array"]
            codes_list = tokenise_audio(audio_array)
    except Exception as e:
        print(f"Skipping row due to error: {e}")
        # Keep codes_list as None if we fail
    example["codes_list"] = codes_list

    return example


ds = ds.map(add_codes, remove_columns=["audio"])

#@title Load Tokenizer
tokeniser_length = 128256
start_of_text = 128000
end_of_text = 128009

start_of_speech = tokeniser_length + 1
end_of_speech = tokeniser_length + 2

start_of_human = tokeniser_length + 3
end_of_human = tokeniser_length + 4

start_of_ai = tokeniser_length + 5
end_of_ai = tokeniser_length + 6
pad_token = tokeniser_length + 7

audio_tokens_start = tokeniser_length + 10

tokenizer_name = "canopylabs/orpheus-3b-0.1-pretrained"

tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
num_proc = os.cpu_count() - 2

ds = ds.filter(lambda x: x["codes_list"] is not None)
ds = ds.filter(lambda x: len(x["codes_list"]) > 0)


#@title Create Input Ids
def remove_duplicate_frames(example):
    vals = example["codes_list"]
    if len(vals) % 7 != 0:
        raise ValueError("Input list length must be divisible by 7")

    result = vals[:7]

    removed_frames = 0

    for i in range(7, len(vals), 7):
        current_first = vals[i]
        previous_first = result[-7]

        if current_first != previous_first:
            result.extend(vals[i:i+7])
        else:
            removed_frames += 1

    example["codes_list"] = result

    return example


ds = ds.map(remove_duplicate_frames, num_proc=num_proc)

tok_info = '''*** HERE you can modify the text prompt
i.e. if you wanted a multispeaker model like canopylabs/orpheus-3b-0.1-ft, you can pass:
f"{example['source']}: {example['text']}", as is passed.
'''
print(tok_info)


def create_input_ids(example):
    text_ids = tokenizer.encode(
        f"{example['speaker_id']}: {example['text']}", add_special_tokens=True
    )
    text_ids.append(end_of_text)
    example["text_tokens"] = text_ids
    input_ids = (
        [start_of_human]
        + example["text_tokens"]
        + [end_of_human]
        + [start_of_ai]
        + [start_of_speech]
        + example["codes_list"]
        + [end_of_speech]
        + [end_of_ai]
    )
    example["input_ids"] = input_ids
    example["labels"] = input_ids
    example["attention_mask"] = [1] * len(input_ids)

    return example


ds = ds.map(create_input_ids, num_proc=num_proc, remove_columns=["text", "codes_list"])

#@title Remove unnecessary columns
columns_to_keep = ["input_ids", "labels", "attention_mask"]
columns_to_remove = [col for col in ds.column_names if col not in columns_to_keep]

ds = ds.remove_columns(columns_to_remove)

ds.push_to_hub(name_to_push_dataset_to)
```
## Training

After preprocessing is done, fill out the blanks in `finetune.yml` and simply run `axolotl train finetune.yml`.

## Inference

For inference, please refer to the original [Orpheus GitHub](https://github.com/canopyai/Orpheus-TTS/tree/main).
52 changes: examples/orpheus/finetune.yml (new file)

@@ -0,0 +1,52 @@
base_model: canopylabs/orpheus-3b-0.1-pretrained

hub_model_id: <your-hub-model-id>

plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_glu_activation: true
liger_fused_linear_cross_entropy: true

datasets:
  - path: <your-hf-dataset-id>
    type: # leave empty to load pre-tokenized
dataset_prepared_path: last_run_prepared
val_set_size: 0.01
output_dir: ./outputs/out

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 8
micro_batch_size: 4
num_epochs: 3
optimizer: adamw_torch_fused
lr_scheduler: cosine
learning_rate: 2e-5

bf16: auto
tf32: false

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
resume_from_checkpoint:
logging_steps: 1
flash_attention: true

warmup_steps: 20
evals_per_epoch: 5
saves_per_epoch: 5
weight_decay: 0.05

special_tokens:
  pad_token: <custom_token_7>
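The `pad_token: <custom_token_7>` entry lines up with `pad_token = tokeniser_length + 7` in the preprocessing scripts above; presumably `<custom_token_7>` is the string form of that same reserved id (128256 + 7 = 128263).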
@@ -15,7 +15,7 @@ peft==0.15.2
 transformers==4.51.3
 tokenizers>=0.21.1
 accelerate==1.6.0
-datasets==3.5.0
+datasets==3.5.1
 deepspeed>=0.15.4
 trl==0.17.0
 hf_xet==1.1.0
@@ -16,8 +16,15 @@ AXOLOTL_LOGO = """
 @@@@ @@@@@@@@@@@@@@@@
 """

+HAS_PRINTED_LOGO = False
+
+
 def print_axolotl_text_art():
     """Prints axolotl ASCII art."""
+    global HAS_PRINTED_LOGO  # pylint: disable=global-statement
+    if HAS_PRINTED_LOGO:
+        return
     if is_main_process():
+        HAS_PRINTED_LOGO = True
         print(AXOLOTL_LOGO)
@@ -15,7 +15,7 @@ from axolotl.cli.checks import check_accelerate_default_config, check_user_token
 from axolotl.cli.config import load_cfg
 from axolotl.common.datasets import load_datasets, load_preference_datasets
 from axolotl.evaluate import evaluate
-from axolotl.utils import set_pytorch_cuda_alloc_conf
+from axolotl.utils import patch_optimized_env
 from axolotl.utils.dict import DictDefault

 LOG = logging.getLogger(__name__)
@@ -32,7 +32,7 @@ def do_evaluate(cfg: DictDefault, cli_args: TrainerCliArgs) -> None:
         cli_args: CLI arguments.
     """
     # Enable expandable segments for cuda allocation to improve VRAM usage
-    set_pytorch_cuda_alloc_conf()
+    patch_optimized_env()

     # pylint: disable=duplicate-code
     print_axolotl_text_art()
@@ -29,7 +29,7 @@ from axolotl.cli.utils import (
     filter_none_kwargs,
 )
 from axolotl.integrations.lm_eval.cli import lm_eval
-from axolotl.utils import set_pytorch_cuda_alloc_conf
+from axolotl.utils import patch_optimized_env
 from axolotl.utils.schemas.config import AxolotlInputConfig
@@ -55,6 +55,8 @@ def preprocess(config: str, cloud: Optional[str] = None, **kwargs) -> None:
         kwargs: Additional keyword arguments which correspond to CLI args or `axolotl`
             config options.
     """
+    patch_optimized_env()
+
     if cloud:
         from axolotl.cli.cloud import do_cli_preprocess

@@ -100,7 +102,7 @@ def train(
             config options.
     """
     # Enable expandable segments for cuda allocation to improve VRAM usage
-    set_pytorch_cuda_alloc_conf()
+    patch_optimized_env()

     if "use_ray" in kwargs and kwargs["use_ray"]:
         accelerate = False
@@ -18,7 +18,7 @@ from axolotl.cli.config import load_cfg
 from axolotl.common.datasets import load_datasets, load_preference_datasets
 from axolotl.integrations.base import PluginManager
 from axolotl.train import train
-from axolotl.utils import set_pytorch_cuda_alloc_conf
+from axolotl.utils import patch_optimized_env
 from axolotl.utils.config import normalize_config, resolve_dtype
 from axolotl.utils.dict import DictDefault

@@ -36,7 +36,7 @@ def do_train(cfg: DictDefault, cli_args: TrainerCliArgs):
     cli_args: Training-specific CLI arguments.
     """
     # Enable expandable segments for cuda allocation to improve VRAM usage
-    set_pytorch_cuda_alloc_conf()
+    patch_optimized_env()

     print_axolotl_text_art()
     check_accelerate_default_config()
@@ -48,6 +48,7 @@ def load_datasets(
     *,
     cfg: DictDefault,
     cli_args: PreprocessCliArgs | TrainerCliArgs | None = None,
+    debug: bool = False,
 ) -> TrainDatasetMeta:
     """
     Loads one or more training or evaluation datasets, calling
@@ -56,6 +57,7 @@ def load_datasets(
     Args:
         cfg: Dictionary mapping `axolotl` config keys to values.
         cli_args: Command-specific CLI arguments.
+        debug: Whether to print out tokenization of sample

     Returns:
         Dataclass with fields for training and evaluation datasets and the computed
@@ -77,20 +79,25 @@ def load_datasets(
         preprocess_iterable=preprocess_iterable,
     )

-    if cli_args and (
-        cli_args.debug
-        or cfg.debug
-        or cli_args.debug_text_only
-        or int(cli_args.debug_num_examples) > 0
-    ):
+    if (  # pylint: disable=too-many-boolean-expressions
+        cli_args
+        and (
+            cli_args.debug
+            or cfg.debug
+            or cli_args.debug_text_only
+            or int(cli_args.debug_num_examples) > 0
+        )
+    ) or debug:
         LOG.info("check_dataset_labels...")

-        train_samples = sample_dataset(train_dataset, cli_args.debug_num_examples)
+        num_examples = cli_args.debug_num_examples if cli_args else 1
+        text_only = cli_args.debug_text_only if cli_args else False
+        train_samples = sample_dataset(train_dataset, num_examples)
         check_dataset_labels(
             train_samples,
             tokenizer,
-            num_examples=cli_args.debug_num_examples,
-            text_only=cli_args.debug_text_only,
+            num_examples=num_examples,
+            text_only=text_only,
         )

         LOG.info("printing prompters...")
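With this change, programmatic callers that have no CLI args can still force the label check; a minimal sketch, assuming a loaded `cfg`:

```python
from axolotl.common.datasets import load_datasets

# debug=True triggers check_dataset_labels even when cli_args is None;
# the code then falls back to num_examples=1 and text_only=False.
dataset_meta = load_datasets(cfg=cfg, debug=True)
```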
@@ -168,6 +168,9 @@ class TrainerBuilderBase(abc.ABC):
                 )
             )

+        if self.cfg.gc_steps:
+            callbacks.append(GCCallback(gc_steps=self.cfg.gc_steps))
+
         if self.cfg.use_wandb:
             callbacks.append(
                 SaveAxolotlConfigtoWandBCallback(self.cfg.axolotl_config_path)
@@ -249,9 +252,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         if self.cfg.loss_watchdog_threshold is not None:
             callbacks.append(LossWatchDogCallback(self.cfg))

-        if self.cfg.gc_steps:
-            callbacks.append(GCCallback(gc_steps=self.cfg.gc_steps))
-
         return callbacks

     def get_post_trainer_create_callbacks(self, trainer):
@@ -247,7 +247,9 @@ class AxolotlDPOTrainer(RngLoaderMixin, SchedulerMixin, DPOTrainer):
         )

         # Base evaluation
-        initial_output = super().evaluation_loop(
+        initial_output = super(  # pylint: disable=bad-super-call
+            DPOTrainer, self
+        ).evaluation_loop(
             dataloader,
             description,
             prediction_loss_only,
@@ -18,6 +18,8 @@ SUPPORTED_MULTIPACK_MODEL_TYPES = [
     "mixtral",
     "qwen2",
     "qwen2_moe",
+    "qwen3",
+    "qwen3_moe",
     "falcon",
     "phi",
     "phi3",
@@ -21,6 +21,7 @@ from transformers import PreTrainedModel, PreTrainedTokenizer, ProcessorMixin
 from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
 from transformers.trainer import Trainer

+from axolotl.cli.art import print_axolotl_text_art
 from axolotl.common.datasets import TrainDatasetMeta
 from axolotl.contribs.lgpl import (  # pylint: disable = no-name-in-module
     fix_untrained_tokens,
@@ -516,6 +517,8 @@ def train(
     Returns:
         Tuple of (model, tokenizer) after training
     """
+    print_axolotl_text_art()
+
     # Setup model, tokenizer, (causal or RLHF) trainer, etc.
     (
         trainer,
@@ -43,3 +43,12 @@ def set_pytorch_cuda_alloc_conf():
     os.environ["PYTORCH_CUDA_ALLOC_CONF"] = (
         "expandable_segments:True,roundup_power2_divisions:16"
     )
+
+
+def patch_optimized_env():
+    """
+    Patch environment variables to improve VRAM usage and increase download speed
+    """
+    if os.getenv("HF_HUB_ENABLE_HF_TRANSFER") is None:
+        os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+    set_pytorch_cuda_alloc_conf()
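A minimal sketch of the intended behavior: the function only sets `HF_HUB_ENABLE_HF_TRANSFER` when the user has not, then delegates to `set_pytorch_cuda_alloc_conf()`:

```python
import os

from axolotl.utils import patch_optimized_env

os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "0"  # an explicit user setting...
patch_optimized_env()
assert os.environ["HF_HUB_ENABLE_HF_TRANSFER"] == "0"  # ...is left untouched
```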
@@ -59,7 +59,7 @@ def choose_device(cfg):


 def resolve_dtype(cfg):
     if (
-        cfg.bf16 == "auto" and not cfg.use_ray
+        not cfg.fp16 and cfg.bf16 == "auto" and not cfg.use_ray
     ):  # if we use ray we want to defer this check to the worker node
         if is_torch_bf16_gpu_available():
             LOG.debug("bf16 support detected, enabling for this configuration.")
@@ -190,7 +190,7 @@ class MultipackBatchSampler(BatchSampler):
         self.len_across_ranks = None

         if self.sequential and not isinstance(sampler, SequentialSampler):
-            LOG.warn(
+            LOG.warning(
                 "using sequential sample packing with non-sequential sampler, did you want to also enable curriculum_sampling?"
             )
@@ -4,6 +4,7 @@ shared pytest fixtures

 import functools
 import importlib
+import os
 import shutil
 import sys
 import tempfile
@@ -529,31 +530,32 @@ def dataset_fozziethebeat_alpaca_messages_2k_dpo_test_rev_ea82cff(


-# # pylint: disable=redefined-outer-name,unused-argument
-# def test_load_fixtures(
-#     download_smollm2_135m_model,
-#     download_llama_68m_random_model,
-#     download_qwen_2_5_half_billion_model,
-#     download_tatsu_lab_alpaca_dataset,
-#     download_mhenrichsen_alpaca_2k_dataset,
-#     download_mhenrichsen_alpaca_2k_w_revision_dataset,
-#     download_mlabonne_finetome_100k_dataset,
-#     download_argilla_distilabel_capybara_dpo_7k_binarized_dataset,
-#     download_argilla_ultrafeedback_binarized_preferences_cleaned_dataset,
-#     download_fozzie_alpaca_dpo_dataset,
-#     download_arcee_ai_distilabel_intel_orca_dpo_pairs_dataset,
-#     download_argilla_dpo_pairs_dataset,
-#     download_tiny_shakespeare_dataset,
-#     download_deepseek_model_fixture,
-#     download_huggyllama_model_fixture,
-#     download_llama_1b_model_fixture,
-#     download_llama3_8b_model_fixture,
-#     download_llama3_8b_instruct_model_fixture,
-#     download_phi_35_mini_model_fixture,
-#     download_phi_3_medium_model_fixture,
-#     download_mistral_7b_model_fixture,
-#     download_gemma_2b_model_fixture,
-#     download_gemma2_9b_model_fixture,
-#     download_mlx_mistral_7b_model_fixture,
-#     download_llama2_model_fixture,
-# ):
-#     pass
+# pylint: disable=redefined-outer-name,unused-argument
+@pytest.mark.skipif(
+    os.environ.get("AXOLOTL_IS_CI_CACHE_PRELOAD", "-1") != "1",
+    reason="Not running in CI cache preload",
+)
+def test_load_fixtures(
+    download_smollm2_135m_model,
+    download_qwen_2_5_half_billion_model,
+    download_tatsu_lab_alpaca_dataset,
+    download_mhenrichsen_alpaca_2k_dataset,
+    download_mhenrichsen_alpaca_2k_w_revision_dataset,
+    download_mlabonne_finetome_100k_dataset,
+    download_argilla_distilabel_capybara_dpo_7k_binarized_dataset,
+    download_arcee_ai_distilabel_intel_orca_dpo_pairs_dataset,
+    download_argilla_dpo_pairs_dataset,
+    download_tiny_shakespeare_dataset,
+    download_deepseek_model_fixture,
+    download_huggyllama_model_fixture,
+    download_llama_1b_model_fixture,
+    download_llama3_8b_model_fixture,
+    download_llama3_8b_instruct_model_fixture,
+    download_phi_35_mini_model_fixture,
+    download_phi_3_medium_model_fixture,
+    download_mistral_7b_model_fixture,
+    download_gemma_2b_model_fixture,
+    download_gemma2_9b_model_fixture,
+    download_mlx_mistral_7b_model_fixture,
+    download_llama2_model_fixture,
+):
+    pass