"""
|
|
E2E tests for lora llama
|
|
"""

import unittest

from axolotl.common.datasets import load_datasets
from axolotl.train import train
from axolotl.utils.config import normalize_config, validate_config
from axolotl.utils.dict import DictDefault

from ..utils import (
    check_model_output_exists,
    check_tensorboard_loss_decreased,
    require_torch_2_6_0,
    with_temp_dir,
)


class TestMistral(unittest.TestCase):
    """
    Test case for LoRA and full fine-tuning of Mistral models
    """

    @require_torch_2_6_0
    @with_temp_dir
    def test_lora_packing(self, temp_dir):
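        # LoRA fine-tune of a small pretrained Mistral model with sample
        # packing; capped at 50 steps to keep the run CI-friendly.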
        cfg = DictDefault(
            {
                "base_model": "axolotl-ai-co/tiny-mistral-25m",
                "flash_attention": True,
                "sample_packing": True,
                "sequence_len": 1024,
                "load_in_8bit": True,
                "adapter": "lora",
                "lora_r": 32,
                "lora_alpha": 64,
                "lora_dropout": 0.05,
                "lora_target_linear": True,
                "val_set_size": 0.05,
                "special_tokens": {
                    "unk_token": "<unk>",
                    "bos_token": "<s>",
                    "eos_token": "</s>",
                },
                "datasets": [
                    {
                        "path": "mhenrichsen/alpaca_2k_test",
                        "type": "alpaca",
                    },
                ],
                "num_epochs": 2,
                "micro_batch_size": 4,
                "gradient_accumulation_steps": 1,
                "output_dir": temp_dir,
                "learning_rate": 2e-4,
                "optimizer": "adamw_torch_fused",
                "lr_scheduler": "cosine",
                "max_steps": 50,
                "logging_steps": 1,
                "save_steps": 50,
                "eval_steps": 50,
                "bf16": "auto",
                "save_first_step": False,
                "use_tensorboard": True,
                "seed": 42,
            }
        )
        cfg = validate_config(cfg)
        normalize_config(cfg)
        dataset_meta = load_datasets(cfg=cfg)

        train(cfg=cfg, dataset_meta=dataset_meta)
        check_model_output_exists(temp_dir, cfg)
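        # Sanity-check convergence from the TensorBoard logs: the first and
        # last five logged loss values are compared against the
        # max_initial / max_final thresholds.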
        check_tensorboard_loss_decreased(
            temp_dir + "/runs",
            initial_window=5,
            final_window=5,
            max_initial=5.5,
            max_final=4.3,
        )

    @with_temp_dir
    def test_ft_packing(self, temp_dir):
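        # Full fine-tune (no adapter) of the same tiny Mistral model with
        # sample packing, using the same short CI-sized training run.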
        cfg = DictDefault(
            {
                "base_model": "axolotl-ai-co/tiny-mistral-25m",
                "flash_attention": True,
                "sample_packing": True,
                "sequence_len": 1024,
                "val_set_size": 0.05,
                "special_tokens": {
                    "unk_token": "<unk>",
                    "bos_token": "<s>",
                    "eos_token": "</s>",
                },
                "datasets": [
                    {
                        "path": "mhenrichsen/alpaca_2k_test",
                        "type": "alpaca",
                    },
                ],
                "num_epochs": 2,
                "micro_batch_size": 4,
                "gradient_accumulation_steps": 1,
                "output_dir": temp_dir,
                "learning_rate": 2e-4,
                "optimizer": "adamw_torch_fused",
                "lr_scheduler": "cosine",
                "max_steps": 50,
                "logging_steps": 1,
                "save_steps": 50,
                "eval_steps": 50,
                "bf16": "auto",
                "save_first_step": False,
                "use_tensorboard": True,
                "seed": 42,
            }
        )
        cfg = validate_config(cfg)
        normalize_config(cfg)
        dataset_meta = load_datasets(cfg=cfg)

        train(cfg=cfg, dataset_meta=dataset_meta)
        check_model_output_exists(temp_dir, cfg)
        check_tensorboard_loss_decreased(
            temp_dir + "/runs",
            initial_window=5,
            final_window=5,
            max_initial=5.5,
            max_final=4.3,
        )