Compare commits
5 commits: activeblue...smol-ci

| Author | SHA1 | Date |
|---|---|---|
| | 993db05b3a | |
| | 1b9520cc8b | |
| | f77408a3d0 | |
| | 5db4272f69 | |
| | 431888c1de | |
```diff
@@ -119,15 +119,49 @@ def download_smollm2_135m_gptq_model():
 
 
 @pytest.fixture(scope="session", autouse=True)
-def download_qwen_2_5_half_billion_model():
-    # download the model
-    snapshot_download_w_retry("Qwen/Qwen2.5-0.5B", repo_type="model")
+def download_qwen3_half_billion_model():
+    # download the model (still used as the KD teacher in tests/e2e/integrations/test_kd.py)
+    snapshot_download_w_retry("Qwen/Qwen3-0.6B", repo_type="model")
 
 
 @pytest.fixture(scope="session", autouse=True)
-def download_qwen3_half_billion_model():
-    # download the model
-    snapshot_download_w_retry("Qwen/Qwen3-0.6B", repo_type="model")
+def download_tiny_llama_model():
+    snapshot_download_w_retry("axolotl-ai-co/tiny-llama-50m", repo_type="model")
+
+
+@pytest.fixture(scope="session", autouse=True)
+def download_tiny_mistral_model():
+    snapshot_download_w_retry("axolotl-ai-co/tiny-mistral-25m", repo_type="model")
+
+
+@pytest.fixture(scope="session", autouse=True)
+def download_tiny_mixtral_model():
+    snapshot_download_w_retry("axolotl-ai-co/tiny-mixtral-30m", repo_type="model")
+
+
+@pytest.fixture(scope="session", autouse=True)
+def download_tiny_phi_model():
+    snapshot_download_w_retry("axolotl-ai-co/tiny-phi-64m", repo_type="model")
+
+
+@pytest.fixture(scope="session", autouse=True)
+def download_tiny_falcon_model():
+    snapshot_download_w_retry("axolotl-ai-co/tiny-falcon-42m", repo_type="model")
+
+
+@pytest.fixture(scope="session", autouse=True)
+def download_tiny_qwen2_model():
+    snapshot_download_w_retry("axolotl-ai-co/tiny-qwen2-129m", repo_type="model")
+
+
+@pytest.fixture(scope="session", autouse=True)
+def download_tiny_qwen3_model():
+    snapshot_download_w_retry("axolotl-ai-co/tiny-qwen3-129m", repo_type="model")
+
+
+@pytest.fixture(scope="session", autouse=True)
+def download_tiny_gemma2_model():
+    snapshot_download_w_retry("axolotl-ai-co/tiny-gemma2-137m", repo_type="model")
 
 
 @pytest.fixture(scope="session", autouse=True)
@@ -620,7 +654,15 @@ def fixture_min_base_cfg():
 )
 def test_load_fixtures(
     download_smollm2_135m_model,
-    download_qwen_2_5_half_billion_model,
+    download_qwen3_half_billion_model,
+    download_tiny_llama_model,
+    download_tiny_mistral_model,
+    download_tiny_mixtral_model,
+    download_tiny_phi_model,
+    download_tiny_falcon_model,
+    download_tiny_qwen2_model,
+    download_tiny_qwen3_model,
+    download_tiny_gemma2_model,
     download_tatsu_lab_alpaca_dataset,
     download_mhenrichsen_alpaca_2k_dataset,
     download_mhenrichsen_alpaca_2k_w_revision_dataset,
```
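All of the new fixtures above funnel through `snapshot_download_w_retry`, which lives elsewhere in the test suite and is not part of this diff. A minimal sketch of what such a helper can look like, assuming it simply wraps `huggingface_hub.snapshot_download` in a bounded retry loop (the retry count and backoff below are illustrative, not the project's actual values):

```python
import time

from huggingface_hub import snapshot_download


def snapshot_download_w_retry(repo_id: str, repo_type: str = "model", retries: int = 3, **kwargs):
    """Download a HF Hub snapshot, retrying transient failures (sketch)."""
    for attempt in range(retries):
        try:
            # snapshot_download caches locally, so repeated fixture runs are cheap
            return snapshot_download(repo_id, repo_type=repo_type, **kwargs)
        except Exception:
            if attempt == retries - 1:
                raise
            time.sleep(2**attempt)  # simple exponential backoff between attempts
```

Because the fixtures are session-scoped and autouse, every tiny checkpoint is prefetched once per test session before any e2e test runs.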
```diff
@@ -10,7 +10,10 @@ from axolotl.utils import get_pytorch_version
 from axolotl.utils.config import normalize_config, prepare_plugins, validate_config
 from axolotl.utils.dict import DictDefault
 
-from tests.e2e.utils import check_model_output_exists
+from tests.e2e.utils import (
+    check_model_output_exists,
+    check_tensorboard_loss_decreased,
+)
 
 
 @pytest.fixture()
@@ -35,13 +38,16 @@ def min_cfg(temp_dir):
         "num_epochs": 1,
         "micro_batch_size": 8,
         "gradient_accumulation_steps": 1,
-        "learning_rate": 0.00001,
+        "learning_rate": 5e-4,
         "optimizer": "adamw_torch_fused",
         "output_dir": temp_dir,
         "lr_scheduler": "cosine",
-        "max_steps": 10,
+        "max_steps": 40,
+        "warmup_steps": 5,
         "bf16": "auto",
         "save_first_step": False,
+        "use_tensorboard": True,
+        "seed": 42,
     }
 
 
```
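The new `use_tensorboard: True` plus a fixed `seed` make these short runs stable enough for loss assertions: the trainer writes event files under `<output_dir>/runs/...`, which is exactly the path the hand-rolled check removed later in this PR walked with `most_recent_subdir` and `os.listdir`. A quick, illustrative way to look at those files by hand (paths are placeholders, not values from the diff):

```python
from pathlib import Path

output_dir = Path("/tmp/axolotl-test-output")  # placeholder for the test's temp_dir
runs_dir = output_dir / "runs"

# each training run gets its own subdirectory of TensorBoard event files
for run in sorted(runs_dir.iterdir()):
    for event_file in run.glob("events.out.tfevents.*"):
        print(run.name, event_file.name)
```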
```diff
@@ -64,11 +70,18 @@ class TestCutCrossEntropyIntegration:
         else:
             train(cfg=cfg, dataset_meta=dataset_meta)
             check_model_output_exists(temp_dir, cfg)
+            check_tensorboard_loss_decreased(
+                temp_dir + "/runs",
+                initial_window=5,
+                final_window=5,
+                max_initial=2.2,
+                max_final=2.0,
+            )
 
     def test_qwen2_w_cce(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "Qwen/Qwen2.5-0.5B",
+                "base_model": "axolotl-ai-co/tiny-qwen2-129m",
                 "plugins": [
                     "axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin",
                 ],
```
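`check_tensorboard_loss_decreased` comes from `tests/e2e/utils` and is not shown in this diff. A plausible sketch of its behavior, pieced together from the keyword arguments used above and from the inline TensorBoard-reading code this PR deletes in the multi-GPU tests (tbparse's `SummaryReader` over the `train/train_loss` tag); the real implementation may differ in details:

```python
import os

from tbparse import SummaryReader


def check_tensorboard_loss_decreased(
    runs_dir,
    initial_window=5,
    final_window=5,
    max_initial=None,
    max_final=None,
):
    """Assert train loss starts below max_initial and ends below max_final (sketch)."""
    # pick the most recent run and its first event file, as the removed inline code did
    run_dir = os.path.join(runs_dir, sorted(os.listdir(runs_dir))[-1])
    event_file = os.path.join(run_dir, sorted(os.listdir(run_dir))[0])

    scalars = SummaryReader(event_file).scalars
    losses = scalars[scalars.tag == "train/train_loss"].value.values

    initial = losses[:initial_window].mean()
    final = losses[-final_window:].mean()
    if max_initial is not None:
        assert initial <= max_initial, f"initial loss {initial:.3f} > {max_initial}"
    if max_final is not None:
        assert final <= max_final, f"final loss {final:.3f} > {max_final}"
    assert final < initial, "train loss did not decrease over the run"
```

The bounds differ per test because each tiny model starts from a different pretraining loss, which is why this call uses 2.2/2.0 while the qwen2 test below uses 5.0/4.7.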
```diff
@@ -87,13 +100,15 @@ class TestCutCrossEntropyIntegration:
                 "num_epochs": 1,
                 "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
-                "learning_rate": 0.00001,
+                "learning_rate": 2e-4,
                 "optimizer": "adamw_torch_fused",
                 "output_dir": temp_dir,
                 "lr_scheduler": "cosine",
-                "max_steps": 10,
+                "max_steps": 50,
                 "bf16": "auto",
                 "save_first_step": False,
+                "use_tensorboard": True,
+                "seed": 42,
             }
         )
         cfg = validate_config(cfg)
@@ -108,6 +123,13 @@ class TestCutCrossEntropyIntegration:
         else:
             train(cfg=cfg, dataset_meta=dataset_meta)
             check_model_output_exists(temp_dir, cfg)
+            check_tensorboard_loss_decreased(
+                temp_dir + "/runs",
+                initial_window=5,
+                final_window=5,
+                max_initial=5.0,
+                max_final=4.7,
+            )
 
     @pytest.mark.parametrize(
         "attention_type",
@@ -136,3 +158,10 @@ class TestCutCrossEntropyIntegration:
         else:
             train(cfg=cfg, dataset_meta=dataset_meta)
             check_model_output_exists(temp_dir, cfg)
+            check_tensorboard_loss_decreased(
+                temp_dir + "/runs",
+                initial_window=5,
+                final_window=5,
+                max_initial=2.2,
+                max_final=2.0,
+            )
```
```diff
@@ -24,7 +24,7 @@ from axolotl.monkeypatch.lora_kernels import (
 )
 from axolotl.utils.dict import DictDefault
 
-MODEL_NAME = "Qwen/Qwen3-0.6B"
+MODEL_NAME = "axolotl-ai-co/tiny-qwen3-129m"
 DEVICE = "cuda"
 DTYPE = torch.bfloat16
 
```
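Switching `MODEL_NAME` to the axolotl-ai-co tiny Qwen3 checkpoint keeps the LoRA-kernel tests on the same architecture while making downloads and loads far cheaper. Loading it follows the usual transformers pattern, mirroring how other tests in this PR load their tiny models (illustrative only; this module's own loading code is outside the hunk):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "axolotl-ai-co/tiny-qwen3-129m"
DEVICE = "cuda"
DTYPE = torch.bfloat16

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, dtype=DTYPE).to(DEVICE)
```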
```diff
@@ -1,23 +1,22 @@
 """Test module for DistMuon optimizer with FSDP2 multi-GPU functionality."""
 
-import os
 from pathlib import Path
 
-import torch
 import yaml
 from accelerate.test_utils import execute_subprocess_async
-from tbparse import SummaryReader
 from transformers.testing_utils import get_torch_dist_unique_port
 
 from axolotl.utils.dict import DictDefault
 
-from tests.e2e.utils import most_recent_subdir, require_torch_2_7_0
+from tests.e2e.utils import check_tensorboard_loss_decreased, require_torch_2_7_0
 
 AXOLOTL_ROOT = Path(__file__).parent.parent.parent.parent
 
 
 def verify_training_success(temp_dir):
-    """Verify that training completed successfully by checking artifacts and loss."""
+    """Verify that training completed successfully — artifacts, no-NaN, loss
+    stayed in qwen2-pretraining scale (tiny-qwen2-129m final pretrain CE ~3.92).
+    """
     output_path = Path(temp_dir)
 
     model_files = list(output_path.glob("*.bin")) + list(
@@ -30,19 +29,13 @@ def verify_training_success(temp_dir):
         "No checkpoint files found - training may have failed"
     )
 
-    tb_log_path = most_recent_subdir(temp_dir + "/runs")
-    if tb_log_path:
-        event_files = sorted(os.listdir(tb_log_path))
-        if event_files:
-            event_file = os.path.join(tb_log_path, event_files[0])
-            reader = SummaryReader(event_file)
-            df = reader.scalars
-            train_loss_df = df[df.tag == "train/train_loss"]
-            if len(train_loss_df) > 0:
-                final_loss = train_loss_df.value.values[-1]
-                assert not torch.isnan(torch.tensor(final_loss)), (
-                    f"Training loss is NaN: {final_loss}"
-                )
+    check_tensorboard_loss_decreased(
+        temp_dir + "/runs",
+        initial_window=10,
+        final_window=10,
+        max_initial=5.0,
+        max_final=4.7,
+    )
 
 
 class TestDistMuon:
```
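The deleted block leaned on `most_recent_subdir` from `tests/e2e/utils` to locate the newest TensorBoard run directory before reading it. Something along these lines (a sketch of the helper's likely shape, not its verbatim source):

```python
import os


def most_recent_subdir(path):
    """Return the most recently modified subdirectory of `path`, or None (sketch)."""
    subdirs = [
        os.path.join(path, d)
        for d in os.listdir(path)
        if os.path.isdir(os.path.join(path, d))
    ]
    return max(subdirs, key=os.path.getmtime) if subdirs else None
```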
```diff
@@ -52,7 +45,7 @@ class TestDistMuon:
     def test_fft_sft(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "Qwen/Qwen2.5-0.5B",
+                "base_model": "axolotl-ai-co/tiny-qwen2-129m",
                 "sequence_len": 2048,
                 "val_set_size": 0.01,
                 "datasets": [
@@ -63,11 +56,12 @@ class TestDistMuon:
                     },
                 ],
                 "num_epochs": 1,
-                "max_steps": 2,
+                "max_steps": 80,
+                "warmup_steps": 5,
                 "micro_batch_size": 2,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.02,
+                "learning_rate": 2e-3,
                 "optimizer": "muon",
                 "weight_decay": 0.01,
                 "lr_scheduler": "cosine",
@@ -82,6 +76,9 @@ class TestDistMuon:
                     "reshard_after_forward": True,
                 },
                 "use_tensorboard": True,
+                "seed": 42,
+                "sample_packing": True,
+                "pad_to_sequence_len": True,
                 "bf16": True,
             }
         )
@@ -109,7 +106,7 @@ class TestDistMuon:
     def test_lora_sft(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "Qwen/Qwen2.5-0.5B",
+                "base_model": "axolotl-ai-co/tiny-qwen2-129m",
                 "sequence_len": 2048,
                 "val_set_size": 0.01,
                 "datasets": [
@@ -122,14 +119,15 @@ class TestDistMuon:
                 "adapter": "lora",
                 "lora_r": 8,
                 "lora_alpha": 16,
-                "lora_dropout": 0.05,
+                "lora_dropout": 0.0,
                 "lora_target_linear": True,
                 "num_epochs": 1,
-                "max_steps": 2,
+                "max_steps": 80,
+                "warmup_steps": 5,
                 "micro_batch_size": 2,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.02,
+                "learning_rate": 2e-3,
                 "optimizer": "muon",
                 "weight_decay": 0.01,
                 "lr_scheduler": "cosine",
@@ -144,6 +142,9 @@ class TestDistMuon:
                     "reshard_after_forward": True,
                 },
                 "use_tensorboard": True,
+                "seed": 42,
+                "sample_packing": True,
+                "pad_to_sequence_len": True,
                 "bf16": True,
             }
         )
```
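The docstring's "qwen2-pretraining scale (final pretrain CE ~3.92)" note is the anchor for the `max_final=4.7` bound. When a run drifts past it, the underlying TensorBoard scalars can be inspected directly with tbparse, the same library the removed inline check used (sketch; the path is a placeholder):

```python
from tbparse import SummaryReader

# SummaryReader accepts a directory and aggregates all event files beneath it
reader = SummaryReader("/tmp/axolotl-test-output/runs")  # placeholder output_dir/runs
scalars = reader.scalars
train_loss = scalars[scalars.tag == "train/train_loss"]
print(train_loss[["step", "value"]].tail(10))
```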
@@ -1,24 +1,23 @@
|
|||||||
"""Test module for FSDP1 multi-GPU functionality."""
|
"""Test module for FSDP1 multi-GPU functionality."""
|
||||||
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
import torch
|
|
||||||
import yaml
|
import yaml
|
||||||
from accelerate.test_utils import execute_subprocess_async
|
from accelerate.test_utils import execute_subprocess_async
|
||||||
from tbparse import SummaryReader
|
|
||||||
from transformers.testing_utils import get_torch_dist_unique_port
|
from transformers.testing_utils import get_torch_dist_unique_port
|
||||||
|
|
||||||
from axolotl.utils.dict import DictDefault
|
from axolotl.utils.dict import DictDefault
|
||||||
|
|
||||||
from tests.e2e.utils import most_recent_subdir
|
from tests.e2e.utils import check_tensorboard_loss_decreased
|
||||||
|
|
||||||
AXOLOTL_ROOT = Path(__file__).parent.parent.parent.parent
|
AXOLOTL_ROOT = Path(__file__).parent.parent.parent.parent
|
||||||
|
|
||||||
|
|
||||||
def verify_training_success(temp_dir):
|
def verify_training_success(temp_dir):
|
||||||
"""Verify that training completed successfully by checking artifacts and loss."""
|
"""Verify that training completed successfully — artifacts, no-NaN, loss
|
||||||
|
stayed in qwen2-pretraining scale (tiny-qwen2-129m final pretrain CE ~3.92).
|
||||||
|
"""
|
||||||
output_path = Path(temp_dir)
|
output_path = Path(temp_dir)
|
||||||
|
|
||||||
model_files = list(output_path.glob("*.bin")) + list(
|
model_files = list(output_path.glob("*.bin")) + list(
|
||||||
@@ -31,19 +30,13 @@ def verify_training_success(temp_dir):
|
|||||||
"No checkpoint files found - training may have failed"
|
"No checkpoint files found - training may have failed"
|
||||||
)
|
)
|
||||||
|
|
||||||
tb_log_path = most_recent_subdir(temp_dir + "/runs")
|
check_tensorboard_loss_decreased(
|
||||||
if tb_log_path:
|
temp_dir + "/runs",
|
||||||
event_files = sorted(os.listdir(tb_log_path))
|
initial_window=10,
|
||||||
if event_files:
|
final_window=10,
|
||||||
event_file = os.path.join(tb_log_path, event_files[0])
|
max_initial=5.0,
|
||||||
reader = SummaryReader(event_file)
|
max_final=4.7,
|
||||||
df = reader.scalars
|
)
|
||||||
train_loss_df = df[df.tag == "train/train_loss"]
|
|
||||||
if len(train_loss_df) > 0:
|
|
||||||
final_loss = train_loss_df.value.values[-1]
|
|
||||||
assert not torch.isnan(torch.tensor(final_loss)), (
|
|
||||||
f"Training loss is NaN: {final_loss}"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class TestFSDP1:
|
class TestFSDP1:
|
||||||
@@ -56,7 +49,7 @@ class TestFSDP1:
|
|||||||
def test_fft_sft(self, temp_dir, fsdp_cpu_ram_efficient_loading):
|
def test_fft_sft(self, temp_dir, fsdp_cpu_ram_efficient_loading):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "Qwen/Qwen2.5-0.5B",
|
"base_model": "axolotl-ai-co/tiny-qwen2-129m",
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
"val_set_size": 0.01,
|
"val_set_size": 0.01,
|
||||||
"datasets": [
|
"datasets": [
|
||||||
@@ -67,11 +60,12 @@ class TestFSDP1:
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
"num_epochs": 1,
|
"num_epochs": 1,
|
||||||
"max_steps": 2,
|
"max_steps": 80,
|
||||||
|
"warmup_steps": 5,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 2,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 2e-4,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
@@ -87,6 +81,9 @@ class TestFSDP1:
|
|||||||
"fsdp_use_orig_params": False,
|
"fsdp_use_orig_params": False,
|
||||||
},
|
},
|
||||||
"use_tensorboard": True,
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
|
"sample_packing": True,
|
||||||
|
"pad_to_sequence_len": True,
|
||||||
"bf16": True,
|
"bf16": True,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@@ -126,7 +123,7 @@ class TestFSDP1:
|
|||||||
def test_lora_sft(self, temp_dir, adapter_config):
|
def test_lora_sft(self, temp_dir, adapter_config):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "Qwen/Qwen2.5-0.5B",
|
"base_model": "axolotl-ai-co/tiny-qwen2-129m",
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
"val_set_size": 0.01,
|
"val_set_size": 0.01,
|
||||||
"datasets": [
|
"datasets": [
|
||||||
@@ -140,14 +137,15 @@ class TestFSDP1:
|
|||||||
"load_in_4bit": adapter_config["load_in_4bit"],
|
"load_in_4bit": adapter_config["load_in_4bit"],
|
||||||
"lora_r": 8,
|
"lora_r": 8,
|
||||||
"lora_alpha": 16,
|
"lora_alpha": 16,
|
||||||
"lora_dropout": 0.05,
|
"lora_dropout": 0.0,
|
||||||
"lora_target_linear": True,
|
"lora_target_linear": True,
|
||||||
"num_epochs": 1,
|
"num_epochs": 1,
|
||||||
"max_steps": 2,
|
"max_steps": 80,
|
||||||
|
"warmup_steps": 5,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 2,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 1e-3,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
@@ -163,6 +161,9 @@ class TestFSDP1:
|
|||||||
"fsdp_use_orig_params": False,
|
"fsdp_use_orig_params": False,
|
||||||
},
|
},
|
||||||
"use_tensorboard": True,
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
|
"sample_packing": True,
|
||||||
|
"pad_to_sequence_len": True,
|
||||||
"bf16": True,
|
"bf16": True,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@@ -190,7 +191,7 @@ class TestFSDP1:
|
|||||||
def test_dpo_fft(self, temp_dir):
|
def test_dpo_fft(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "Qwen/Qwen2.5-0.5B",
|
"base_model": "axolotl-ai-co/tiny-qwen2-129m",
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
"val_set_size": 0.01,
|
"val_set_size": 0.01,
|
||||||
"rl": "dpo",
|
"rl": "dpo",
|
||||||
@@ -203,11 +204,11 @@ class TestFSDP1:
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
"num_epochs": 1,
|
"num_epochs": 1,
|
||||||
"max_steps": 2,
|
"max_steps": 20,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 2,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 2e-4,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
@@ -223,6 +224,9 @@ class TestFSDP1:
|
|||||||
"fsdp_use_orig_params": False,
|
"fsdp_use_orig_params": False,
|
||||||
},
|
},
|
||||||
"use_tensorboard": True,
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
|
"sample_packing": True,
|
||||||
|
"pad_to_sequence_len": True,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -262,7 +266,7 @@ class TestFSDP1:
|
|||||||
def test_dpo_lora(self, temp_dir, adapter_config):
|
def test_dpo_lora(self, temp_dir, adapter_config):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "Qwen/Qwen2.5-0.5B",
|
"base_model": "axolotl-ai-co/tiny-qwen2-129m",
|
||||||
"load_in_4bit": adapter_config["load_in_4bit"],
|
"load_in_4bit": adapter_config["load_in_4bit"],
|
||||||
"rl": "dpo",
|
"rl": "dpo",
|
||||||
"chat_template": "chatml",
|
"chat_template": "chatml",
|
||||||
@@ -281,11 +285,11 @@ class TestFSDP1:
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
"num_epochs": 1,
|
"num_epochs": 1,
|
||||||
"max_steps": 2,
|
"max_steps": 20,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 2,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 1e-3,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
@@ -301,6 +305,9 @@ class TestFSDP1:
|
|||||||
"fsdp_use_orig_params": False,
|
"fsdp_use_orig_params": False,
|
||||||
},
|
},
|
||||||
"use_tensorboard": True,
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
|
"sample_packing": True,
|
||||||
|
"pad_to_sequence_len": True,
|
||||||
"bf16": "auto",
|
"bf16": "auto",
|
||||||
"tf32": True,
|
"tf32": True,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,24 +1,23 @@
|
|||||||
"""Test module for FSDP2 multi-GPU functionality."""
|
"""Test module for FSDP2 multi-GPU functionality."""
|
||||||
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
import torch
|
|
||||||
import yaml
|
import yaml
|
||||||
from accelerate.test_utils import execute_subprocess_async
|
from accelerate.test_utils import execute_subprocess_async
|
||||||
from tbparse import SummaryReader
|
|
||||||
from transformers.testing_utils import get_torch_dist_unique_port
|
from transformers.testing_utils import get_torch_dist_unique_port
|
||||||
|
|
||||||
from axolotl.utils.dict import DictDefault
|
from axolotl.utils.dict import DictDefault
|
||||||
|
|
||||||
from tests.e2e.utils import most_recent_subdir, require_torch_2_7_0
|
from tests.e2e.utils import check_tensorboard_loss_decreased, require_torch_2_7_0
|
||||||
|
|
||||||
AXOLOTL_ROOT = Path(__file__).parent.parent.parent.parent
|
AXOLOTL_ROOT = Path(__file__).parent.parent.parent.parent
|
||||||
|
|
||||||
|
|
||||||
def verify_training_success(temp_dir):
|
def verify_training_success(temp_dir):
|
||||||
"""Verify that training completed successfully by checking artifacts and loss."""
|
"""Verify that training completed successfully — artifacts, no-NaN, loss
|
||||||
|
stayed in qwen2-pretraining scale (tiny-qwen2-129m final pretrain CE ~3.92).
|
||||||
|
"""
|
||||||
output_path = Path(temp_dir)
|
output_path = Path(temp_dir)
|
||||||
|
|
||||||
model_files = list(output_path.glob("*.bin")) + list(
|
model_files = list(output_path.glob("*.bin")) + list(
|
||||||
@@ -31,19 +30,13 @@ def verify_training_success(temp_dir):
|
|||||||
"No checkpoint files found - training may have failed"
|
"No checkpoint files found - training may have failed"
|
||||||
)
|
)
|
||||||
|
|
||||||
tb_log_path = most_recent_subdir(temp_dir + "/runs")
|
check_tensorboard_loss_decreased(
|
||||||
if tb_log_path:
|
temp_dir + "/runs",
|
||||||
event_files = sorted(os.listdir(tb_log_path))
|
initial_window=10,
|
||||||
if event_files:
|
final_window=10,
|
||||||
event_file = os.path.join(tb_log_path, event_files[0])
|
max_initial=5.0,
|
||||||
reader = SummaryReader(event_file)
|
max_final=4.7,
|
||||||
df = reader.scalars
|
)
|
||||||
train_loss_df = df[df.tag == "train/train_loss"]
|
|
||||||
if len(train_loss_df) > 0:
|
|
||||||
final_loss = train_loss_df.value.values[-1]
|
|
||||||
assert not torch.isnan(torch.tensor(final_loss)), (
|
|
||||||
f"Training loss is NaN: {final_loss}"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class TestFSDP2:
|
class TestFSDP2:
|
||||||
@@ -57,7 +50,7 @@ class TestFSDP2:
|
|||||||
def test_fft_sft(self, temp_dir, fsdp_cpu_ram_efficient_loading):
|
def test_fft_sft(self, temp_dir, fsdp_cpu_ram_efficient_loading):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "Qwen/Qwen2.5-0.5B",
|
"base_model": "axolotl-ai-co/tiny-qwen2-129m",
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
"val_set_size": 0.01,
|
"val_set_size": 0.01,
|
||||||
"datasets": [
|
"datasets": [
|
||||||
@@ -68,11 +61,12 @@ class TestFSDP2:
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
"num_epochs": 1,
|
"num_epochs": 1,
|
||||||
"max_steps": 2,
|
"max_steps": 80,
|
||||||
|
"warmup_steps": 5,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 2,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 2e-4,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
@@ -86,6 +80,9 @@ class TestFSDP2:
|
|||||||
"reshard_after_forward": True,
|
"reshard_after_forward": True,
|
||||||
},
|
},
|
||||||
"use_tensorboard": True,
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
|
"sample_packing": True,
|
||||||
|
"pad_to_sequence_len": True,
|
||||||
"bf16": True,
|
"bf16": True,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@@ -114,7 +111,7 @@ class TestFSDP2:
|
|||||||
def test_lora_sft(self, temp_dir, peft_use_dora):
|
def test_lora_sft(self, temp_dir, peft_use_dora):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "Qwen/Qwen2.5-0.5B",
|
"base_model": "axolotl-ai-co/tiny-qwen2-129m",
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
"val_set_size": 0.01,
|
"val_set_size": 0.01,
|
||||||
"datasets": [
|
"datasets": [
|
||||||
@@ -128,14 +125,15 @@ class TestFSDP2:
|
|||||||
"adapter": "lora",
|
"adapter": "lora",
|
||||||
"lora_r": 8,
|
"lora_r": 8,
|
||||||
"lora_alpha": 16,
|
"lora_alpha": 16,
|
||||||
"lora_dropout": 0.05,
|
"lora_dropout": 0.0,
|
||||||
"lora_target_linear": True,
|
"lora_target_linear": True,
|
||||||
"num_epochs": 1,
|
"num_epochs": 1,
|
||||||
"max_steps": 2,
|
"max_steps": 80,
|
||||||
|
"warmup_steps": 5,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 2,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 1e-3,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
@@ -149,6 +147,9 @@ class TestFSDP2:
|
|||||||
"reshard_after_forward": True,
|
"reshard_after_forward": True,
|
||||||
},
|
},
|
||||||
"use_tensorboard": True,
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
|
"sample_packing": True,
|
||||||
|
"pad_to_sequence_len": True,
|
||||||
"bf16": True,
|
"bf16": True,
|
||||||
# explicitly disable LORA kernels, as they may be auto-enabled
|
# explicitly disable LORA kernels, as they may be auto-enabled
|
||||||
"lora_mlp_kernel": False,
|
"lora_mlp_kernel": False,
|
||||||
@@ -180,7 +181,7 @@ class TestFSDP2:
|
|||||||
def test_lora_sft_kernels(self, temp_dir):
|
def test_lora_sft_kernels(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "Qwen/Qwen2.5-0.5B",
|
"base_model": "axolotl-ai-co/tiny-qwen2-129m",
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
"val_set_size": 0.01,
|
"val_set_size": 0.01,
|
||||||
"datasets": [
|
"datasets": [
|
||||||
@@ -195,11 +196,12 @@ class TestFSDP2:
|
|||||||
"lora_alpha": 16,
|
"lora_alpha": 16,
|
||||||
"lora_target_linear": True,
|
"lora_target_linear": True,
|
||||||
"num_epochs": 1,
|
"num_epochs": 1,
|
||||||
"max_steps": 2,
|
"max_steps": 80,
|
||||||
|
"warmup_steps": 5,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 2,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 1e-3,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
@@ -213,6 +215,9 @@ class TestFSDP2:
|
|||||||
"reshard_after_forward": True,
|
"reshard_after_forward": True,
|
||||||
},
|
},
|
||||||
"use_tensorboard": True,
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
|
"sample_packing": True,
|
||||||
|
"pad_to_sequence_len": True,
|
||||||
"bf16": True,
|
"bf16": True,
|
||||||
"lora_mlp_kernel": True,
|
"lora_mlp_kernel": True,
|
||||||
"lora_qkv_kernel": True,
|
"lora_qkv_kernel": True,
|
||||||
@@ -243,7 +248,7 @@ class TestFSDP2:
|
|||||||
def test_qlora_sft(self, temp_dir):
|
def test_qlora_sft(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "Qwen/Qwen2.5-0.5B",
|
"base_model": "axolotl-ai-co/tiny-qwen2-129m",
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
"val_set_size": 0.01,
|
"val_set_size": 0.01,
|
||||||
"datasets": [
|
"datasets": [
|
||||||
@@ -257,14 +262,15 @@ class TestFSDP2:
|
|||||||
"adapter": "qlora",
|
"adapter": "qlora",
|
||||||
"lora_r": 8,
|
"lora_r": 8,
|
||||||
"lora_alpha": 16,
|
"lora_alpha": 16,
|
||||||
"lora_dropout": 0.05,
|
"lora_dropout": 0.0,
|
||||||
"lora_target_linear": True,
|
"lora_target_linear": True,
|
||||||
"num_epochs": 1,
|
"num_epochs": 1,
|
||||||
"max_steps": 2,
|
"max_steps": 80,
|
||||||
|
"warmup_steps": 5,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 2,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 1e-3,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
@@ -278,6 +284,9 @@ class TestFSDP2:
|
|||||||
"reshard_after_forward": True,
|
"reshard_after_forward": True,
|
||||||
},
|
},
|
||||||
"use_tensorboard": True,
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
|
"sample_packing": True,
|
||||||
|
"pad_to_sequence_len": True,
|
||||||
"bf16": True,
|
"bf16": True,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@@ -305,7 +314,7 @@ class TestFSDP2:
|
|||||||
def test_qlora_sft_kernels(self, temp_dir):
|
def test_qlora_sft_kernels(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "Qwen/Qwen2.5-0.5B",
|
"base_model": "axolotl-ai-co/tiny-qwen2-129m",
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
"val_set_size": 0.01,
|
"val_set_size": 0.01,
|
||||||
"datasets": [
|
"datasets": [
|
||||||
@@ -321,11 +330,12 @@ class TestFSDP2:
|
|||||||
"lora_alpha": 16,
|
"lora_alpha": 16,
|
||||||
"lora_target_linear": True,
|
"lora_target_linear": True,
|
||||||
"num_epochs": 1,
|
"num_epochs": 1,
|
||||||
"max_steps": 2,
|
"max_steps": 80,
|
||||||
|
"warmup_steps": 5,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 2,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 1e-3,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
@@ -339,6 +349,9 @@ class TestFSDP2:
|
|||||||
"reshard_after_forward": True,
|
"reshard_after_forward": True,
|
||||||
},
|
},
|
||||||
"use_tensorboard": True,
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
|
"sample_packing": True,
|
||||||
|
"pad_to_sequence_len": True,
|
||||||
"bf16": True,
|
"bf16": True,
|
||||||
"lora_mlp_kernel": True,
|
"lora_mlp_kernel": True,
|
||||||
"lora_qkv_kernel": True,
|
"lora_qkv_kernel": True,
|
||||||
@@ -370,7 +383,7 @@ class TestFSDP2:
|
|||||||
def test_dpo_fft(self, temp_dir):
|
def test_dpo_fft(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "Qwen/Qwen2.5-0.5B",
|
"base_model": "axolotl-ai-co/tiny-qwen2-129m",
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
"val_set_size": 0.01,
|
"val_set_size": 0.01,
|
||||||
"rl": "dpo",
|
"rl": "dpo",
|
||||||
@@ -383,11 +396,11 @@ class TestFSDP2:
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
"num_epochs": 1,
|
"num_epochs": 1,
|
||||||
"max_steps": 2,
|
"max_steps": 20,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 2,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 2e-4,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
@@ -401,6 +414,9 @@ class TestFSDP2:
|
|||||||
"reshard_after_forward": True,
|
"reshard_after_forward": True,
|
||||||
},
|
},
|
||||||
"use_tensorboard": True,
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
|
"sample_packing": True,
|
||||||
|
"pad_to_sequence_len": True,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -428,7 +444,7 @@ class TestFSDP2:
|
|||||||
def test_dpo_lora(self, temp_dir):
|
def test_dpo_lora(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "Qwen/Qwen2.5-0.5B",
|
"base_model": "axolotl-ai-co/tiny-qwen2-129m",
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
"rl": "dpo",
|
"rl": "dpo",
|
||||||
"chat_template": "chatml",
|
"chat_template": "chatml",
|
||||||
@@ -445,11 +461,11 @@ class TestFSDP2:
|
|||||||
"lora_dropout": 0.05,
|
"lora_dropout": 0.05,
|
||||||
"lora_target_linear": True,
|
"lora_target_linear": True,
|
||||||
"num_epochs": 1,
|
"num_epochs": 1,
|
||||||
"max_steps": 2,
|
"max_steps": 20,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 2,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 1e-3,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
@@ -463,6 +479,9 @@ class TestFSDP2:
|
|||||||
"reshard_after_forward": True,
|
"reshard_after_forward": True,
|
||||||
},
|
},
|
||||||
"use_tensorboard": True,
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
|
"sample_packing": True,
|
||||||
|
"pad_to_sequence_len": True,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ def _run_training(temp_dir, cfg):
|
|||||||
def _base_lora_fsdp2_config(temp_dir, **overrides):
|
def _base_lora_fsdp2_config(temp_dir, **overrides):
|
||||||
"""Base config for LoRA + FSDP2 + kernel tests."""
|
"""Base config for LoRA + FSDP2 + kernel tests."""
|
||||||
cfg = {
|
cfg = {
|
||||||
"base_model": "Qwen/Qwen3-0.6B",
|
"base_model": "axolotl-ai-co/tiny-qwen3-129m",
|
||||||
"sequence_len": 512,
|
"sequence_len": 512,
|
||||||
"val_set_size": 0.0,
|
"val_set_size": 0.0,
|
||||||
"datasets": [
|
"datasets": [
|
||||||
|
|||||||
```diff
@@ -8,7 +8,7 @@ from accelerate.test_utils import execute_subprocess_async, get_torch_dist_unique_port
 
 from axolotl.utils.dict import DictDefault
 
-from tests.e2e.utils import check_tensorboard, require_torch_2_7_0
+from tests.e2e.utils import check_tensorboard_loss_decreased, require_torch_2_7_0
 
 
 class TestTensorParallel:
@@ -21,7 +21,7 @@ class TestTensorParallel:
     def test_fft_sft(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "Qwen/Qwen2.5-0.5B",
+                "base_model": "axolotl-ai-co/tiny-qwen2-129m",
                 "sequence_len": 2048,
                 "val_set_size": 0.01,
                 "datasets": [
@@ -63,6 +63,6 @@ class TestTensorParallel:
             ]
         )
 
-        check_tensorboard(
-            temp_dir + "/runs", "train/train_loss", 1.0, "Train Loss (%s) is too high"
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs", max_initial=5.0, max_final=4.7
         )
```
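This tensor-parallel test is where the older `check_tensorboard` helper was still in use; the swap changes the assertion style from a single absolute threshold on one scalar tag to start/end bounds on the run. Side by side, both calls taken from this diff:

```python
# before: assert against one absolute threshold on the train/train_loss tag,
# with a printf-style failure message
check_tensorboard(
    temp_dir + "/runs", "train/train_loss", 1.0, "Train Loss (%s) is too high"
)

# after: bound where the loss starts and where it ends (windowed semantics are
# sketched earlier in this document)
check_tensorboard_loss_decreased(temp_dir + "/runs", max_initial=5.0, max_final=4.7)
```

Relaxing 1.0 to 4.7 is consistent with moving from the 0.5B Qwen2.5 base model to the far less-trained tiny-qwen2-129m checkpoint.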
```diff
@@ -32,12 +32,12 @@ from axolotl.utils.dict import DictDefault
 
 MODEL_CONFIGS = [
     {
-        "name": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
+        "name": "axolotl-ai-co/tiny-mistral-25m",
         "expected_activation": apply_lora_mlp_swiglu,
         "dtype": torch.float16,
     },
     {
-        "name": "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5",
+        "name": "axolotl-ai-co/tiny-qwen2-129m",
         "expected_activation": apply_lora_mlp_swiglu,
         "dtype": torch.float16,
     },
@@ -47,7 +47,7 @@ MODEL_CONFIGS = [
         "dtype": torch.float32,
     },
     {
-        "name": "trl-internal-testing/tiny-Gemma2ForCausalLM",
+        "name": "axolotl-ai-co/tiny-gemma2-137m",
         "expected_activation": apply_lora_mlp_geglu,
         "dtype": torch.float16,
     },
@@ -159,7 +159,7 @@ def test_swiglu_mlp_integration(small_llama_model):
 def test_geglu_model_integration():
     """Test GeGLU activation with Gemma model."""
     model = AutoModelForCausalLM.from_pretrained(
-        "trl-internal-testing/tiny-Gemma2ForCausalLM",
+        "axolotl-ai-co/tiny-gemma2-137m",
         dtype=torch.float16,
         device_map="cuda:0",
     )
```
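`MODEL_CONFIGS` feeds parametrized kernel-patch tests, so each entry still exercises the expected activation patch for its architecture, just against smaller checkpoints. A hedged sketch of how a table like this is typically consumed, assuming it runs in the same module where `MODEL_CONFIGS` is defined (the real test bodies are outside these hunks):

```python
import pytest
from transformers import AutoModelForCausalLM


@pytest.mark.parametrize("model_config", MODEL_CONFIGS, ids=lambda c: c["name"])
def test_expected_activation_patch(model_config):
    # load the tiny checkpoint in the dtype the config asks for
    model = AutoModelForCausalLM.from_pretrained(
        model_config["name"],
        dtype=model_config["dtype"],
        device_map="cuda:0",
    )
    # the real tests apply the LoRA kernel patches here and then verify that the
    # MLP forward was swapped for model_config["expected_activation"]
    assert model is not None
```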
```diff
@@ -4,14 +4,16 @@ E2E tests for falcon
 
 import unittest
 
-import pytest
-
 from axolotl.common.datasets import load_datasets
 from axolotl.train import train
 from axolotl.utils.config import normalize_config, validate_config
 from axolotl.utils.dict import DictDefault
 
-from ..utils import check_model_output_exists, with_temp_dir
+from ..utils import (
+    check_model_output_exists,
+    check_tensorboard_loss_decreased,
+    with_temp_dir,
+)
 
 
 class TestFalconPatched(unittest.TestCase):
```
@@ -19,13 +21,12 @@ class TestFalconPatched(unittest.TestCase):
|
|||||||
Test case for Falcon models
|
Test case for Falcon models
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@pytest.mark.skip(reason="no tiny models for testing with safetensors")
|
|
||||||
@with_temp_dir
|
@with_temp_dir
|
||||||
def test_qlora(self, temp_dir):
|
def test_qlora(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "illuin/tiny-random-FalconForCausalLM",
|
"base_model": "axolotl-ai-co/tiny-falcon-42m",
|
||||||
"flash_attention": True,
|
"flash_attention": False,
|
||||||
"sample_packing": True,
|
"sample_packing": True,
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
"load_in_4bit": True,
|
"load_in_4bit": True,
|
||||||
@@ -47,17 +48,20 @@ class TestFalconPatched(unittest.TestCase):
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
"num_epochs": 2,
|
"num_epochs": 2,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 4,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 2e-4,
|
||||||
"optimizer": "adamw_bnb_8bit",
|
"optimizer": "adamw_bnb_8bit",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"max_steps": 20,
|
"max_steps": 50,
|
||||||
"save_steps": 10,
|
"logging_steps": 1,
|
||||||
"eval_steps": 10,
|
"save_steps": 50,
|
||||||
|
"eval_steps": 50,
|
||||||
"bf16": "auto",
|
"bf16": "auto",
|
||||||
"save_first_step": False,
|
"save_first_step": False,
|
||||||
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
cfg = validate_config(cfg)
|
cfg = validate_config(cfg)
|
||||||
@@ -66,14 +70,20 @@ class TestFalconPatched(unittest.TestCase):
|
|||||||
|
|
||||||
train(cfg=cfg, dataset_meta=dataset_meta)
|
train(cfg=cfg, dataset_meta=dataset_meta)
|
||||||
check_model_output_exists(temp_dir, cfg)
|
check_model_output_exists(temp_dir, cfg)
|
||||||
|
check_tensorboard_loss_decreased(
|
||||||
|
temp_dir + "/runs",
|
||||||
|
initial_window=5,
|
||||||
|
final_window=5,
|
||||||
|
max_initial=6.0,
|
||||||
|
max_final=4.7,
|
||||||
|
)
|
||||||
|
|
||||||
@pytest.mark.skip(reason="no tiny models for testing with safetensors")
|
|
||||||
@with_temp_dir
|
@with_temp_dir
|
||||||
def test_ft(self, temp_dir):
|
def test_ft(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "illuin/tiny-random-FalconForCausalLM",
|
"base_model": "axolotl-ai-co/tiny-falcon-42m",
|
||||||
"flash_attention": True,
|
"flash_attention": False,
|
||||||
"sample_packing": True,
|
"sample_packing": True,
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
"val_set_size": 0.05,
|
"val_set_size": 0.05,
|
||||||
@@ -88,17 +98,20 @@ class TestFalconPatched(unittest.TestCase):
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
"num_epochs": 2,
|
"num_epochs": 2,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 4,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 2e-4,
|
||||||
"optimizer": "adamw_bnb_8bit",
|
"optimizer": "adamw_bnb_8bit",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"max_steps": 20,
|
"max_steps": 50,
|
||||||
"save_steps": 10,
|
"logging_steps": 1,
|
||||||
"eval_steps": 10,
|
"save_steps": 50,
|
||||||
|
"eval_steps": 50,
|
||||||
"bf16": "auto",
|
"bf16": "auto",
|
||||||
"save_first_step": False,
|
"save_first_step": False,
|
||||||
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
cfg = validate_config(cfg)
|
cfg = validate_config(cfg)
|
||||||
@@ -107,3 +120,10 @@ class TestFalconPatched(unittest.TestCase):
|
|||||||
|
|
||||||
train(cfg=cfg, dataset_meta=dataset_meta)
|
train(cfg=cfg, dataset_meta=dataset_meta)
|
||||||
check_model_output_exists(temp_dir, cfg)
|
check_model_output_exists(temp_dir, cfg)
|
||||||
|
check_tensorboard_loss_decreased(
|
||||||
|
temp_dir + "/runs",
|
||||||
|
initial_window=5,
|
||||||
|
final_window=5,
|
||||||
|
max_initial=6.0,
|
||||||
|
max_final=4.7,
|
||||||
|
)
|
||||||
|
|||||||
@@ -9,7 +9,12 @@ from axolotl.train import train
|
|||||||
from axolotl.utils.config import normalize_config, validate_config
|
from axolotl.utils.config import normalize_config, validate_config
|
||||||
from axolotl.utils.dict import DictDefault
|
from axolotl.utils.dict import DictDefault
|
||||||
|
|
||||||
from ..utils import check_model_output_exists, require_torch_2_6_0, with_temp_dir
|
from ..utils import (
|
||||||
|
check_model_output_exists,
|
||||||
|
check_tensorboard_loss_decreased,
|
||||||
|
require_torch_2_6_0,
|
||||||
|
with_temp_dir,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class TestMistral(unittest.TestCase):
|
class TestMistral(unittest.TestCase):
|
||||||
@@ -22,7 +27,7 @@ class TestMistral(unittest.TestCase):
|
|||||||
def test_lora_packing(self, temp_dir):
|
def test_lora_packing(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
|
"base_model": "axolotl-ai-co/tiny-mistral-25m",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
"sample_packing": True,
|
"sample_packing": True,
|
||||||
"sequence_len": 1024,
|
"sequence_len": 1024,
|
||||||
@@ -45,17 +50,20 @@ class TestMistral(unittest.TestCase):
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
"num_epochs": 2,
|
"num_epochs": 2,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 4,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 2e-4,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"max_steps": 5,
|
"max_steps": 50,
|
||||||
"save_steps": 3,
|
"logging_steps": 1,
|
||||||
"eval_steps": 4,
|
"save_steps": 50,
|
||||||
|
"eval_steps": 50,
|
||||||
"bf16": "auto",
|
"bf16": "auto",
|
||||||
"save_first_step": False,
|
"save_first_step": False,
|
||||||
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
cfg = validate_config(cfg)
|
cfg = validate_config(cfg)
|
||||||
@@ -64,12 +72,19 @@ class TestMistral(unittest.TestCase):
|
|||||||
|
|
||||||
train(cfg=cfg, dataset_meta=dataset_meta)
|
train(cfg=cfg, dataset_meta=dataset_meta)
|
||||||
check_model_output_exists(temp_dir, cfg)
|
check_model_output_exists(temp_dir, cfg)
|
||||||
|
check_tensorboard_loss_decreased(
|
||||||
|
temp_dir + "/runs",
|
||||||
|
initial_window=5,
|
||||||
|
final_window=5,
|
||||||
|
max_initial=5.5,
|
||||||
|
max_final=4.3,
|
||||||
|
)
|
||||||
|
|
||||||
@with_temp_dir
|
@with_temp_dir
|
||||||
def test_ft_packing(self, temp_dir):
|
def test_ft_packing(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
|
"base_model": "axolotl-ai-co/tiny-mistral-25m",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
"sample_packing": True,
|
"sample_packing": True,
|
||||||
"sequence_len": 1024,
|
"sequence_len": 1024,
|
||||||
@@ -86,17 +101,20 @@ class TestMistral(unittest.TestCase):
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
"num_epochs": 2,
|
"num_epochs": 2,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 4,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 2e-4,
|
||||||
"optimizer": "adamw_torch_fused",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"max_steps": 5,
|
"max_steps": 50,
|
||||||
"save_steps": 3,
|
"logging_steps": 1,
|
||||||
"eval_steps": 4,
|
"save_steps": 50,
|
||||||
|
"eval_steps": 50,
|
||||||
"bf16": "auto",
|
"bf16": "auto",
|
||||||
"save_first_step": False,
|
"save_first_step": False,
|
||||||
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
cfg = validate_config(cfg)
|
cfg = validate_config(cfg)
|
||||||
@@ -105,3 +123,10 @@ class TestMistral(unittest.TestCase):
|
|||||||
|
|
||||||
train(cfg=cfg, dataset_meta=dataset_meta)
|
train(cfg=cfg, dataset_meta=dataset_meta)
|
||||||
check_model_output_exists(temp_dir, cfg)
|
check_model_output_exists(temp_dir, cfg)
|
||||||
|
check_tensorboard_loss_decreased(
|
||||||
|
temp_dir + "/runs",
|
||||||
|
initial_window=5,
|
||||||
|
final_window=5,
|
||||||
|
max_initial=5.5,
|
||||||
|
max_final=4.3,
|
||||||
|
)
|
||||||
|
|||||||
@@ -9,7 +9,11 @@ from axolotl.train import train
|
|||||||
from axolotl.utils.config import normalize_config, validate_config
|
from axolotl.utils.config import normalize_config, validate_config
|
||||||
from axolotl.utils.dict import DictDefault
|
from axolotl.utils.dict import DictDefault
|
||||||
|
|
||||||
from ..utils import check_model_output_exists, with_temp_dir
|
from ..utils import (
|
||||||
|
check_model_output_exists,
|
||||||
|
check_tensorboard_loss_decreased,
|
||||||
|
with_temp_dir,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class TestMixtral(unittest.TestCase):
|
class TestMixtral(unittest.TestCase):
|
||||||
@@ -21,8 +25,7 @@ class TestMixtral(unittest.TestCase):
|
|||||||
def test_qlora(self, temp_dir):
|
def test_qlora(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "hf-internal-testing/Mixtral-tiny",
|
"base_model": "axolotl-ai-co/tiny-mixtral-30m",
|
||||||
"tokenizer_config": "LoneStriker/Mixtral-8x7B-v0.1-HF",
|
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
"sample_packing": True,
|
"sample_packing": True,
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
@@ -30,7 +33,7 @@ class TestMixtral(unittest.TestCase):
|
|||||||
"adapter": "qlora",
|
"adapter": "qlora",
|
||||||
"lora_r": 16,
|
"lora_r": 16,
|
||||||
"lora_alpha": 32,
|
"lora_alpha": 32,
|
||||||
"lora_dropout": 0.1,
|
"lora_dropout": 0.0,
|
||||||
"lora_target_linear": True,
|
"lora_target_linear": True,
|
||||||
"val_set_size": 0.05,
|
"val_set_size": 0.05,
|
||||||
"special_tokens": {},
|
"special_tokens": {},
|
||||||
@@ -41,17 +44,21 @@ class TestMixtral(unittest.TestCase):
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
"num_epochs": 2,
|
"num_epochs": 2,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 4,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 3e-3,
|
||||||
"optimizer": "adamw_bnb_8bit",
|
"optimizer": "adamw_bnb_8bit",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"max_steps": 5,
|
"max_steps": 80,
|
||||||
"save_steps": 3,
|
"warmup_steps": 5,
|
||||||
"eval_steps": 4,
|
"logging_steps": 1,
|
||||||
|
"save_steps": 80,
|
||||||
|
"eval_steps": 80,
|
||||||
"bf16": "auto",
|
"bf16": "auto",
|
||||||
"save_first_step": False,
|
"save_first_step": False,
|
||||||
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
cfg = validate_config(cfg)
|
cfg = validate_config(cfg)
|
||||||
@@ -60,13 +67,19 @@ class TestMixtral(unittest.TestCase):
|
|||||||
|
|
||||||
train(cfg=cfg, dataset_meta=dataset_meta)
|
train(cfg=cfg, dataset_meta=dataset_meta)
|
||||||
check_model_output_exists(temp_dir, cfg)
|
check_model_output_exists(temp_dir, cfg)
|
||||||
|
check_tensorboard_loss_decreased(
|
||||||
|
temp_dir + "/runs",
|
||||||
|
initial_window=10,
|
||||||
|
final_window=10,
|
||||||
|
max_initial=6.0,
|
||||||
|
max_final=4.7,
|
||||||
|
)
|
||||||
|
|
||||||
@with_temp_dir
|
@with_temp_dir
|
||||||
def test_ft(self, temp_dir):
|
def test_ft(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "hf-internal-testing/Mixtral-tiny",
|
"base_model": "axolotl-ai-co/tiny-mixtral-30m",
|
||||||
"tokenizer_config": "LoneStriker/Mixtral-8x7B-v0.1-HF",
|
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
"sample_packing": True,
|
"sample_packing": True,
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
@@ -79,17 +92,21 @@ class TestMixtral(unittest.TestCase):
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
"num_epochs": 2,
|
"num_epochs": 2,
|
||||||
"micro_batch_size": 2,
|
"micro_batch_size": 4,
|
||||||
"gradient_accumulation_steps": 1,
|
"gradient_accumulation_steps": 1,
|
||||||
"output_dir": temp_dir,
|
"output_dir": temp_dir,
|
||||||
"learning_rate": 0.00001,
|
"learning_rate": 5e-4,
|
||||||
"optimizer": "adamw_bnb_8bit",
|
"optimizer": "adamw_torch_fused",
|
||||||
"lr_scheduler": "cosine",
|
"lr_scheduler": "cosine",
|
||||||
"max_steps": 5,
|
"max_steps": 80,
|
||||||
"save_steps": 3,
|
"warmup_steps": 5,
|
||||||
"eval_steps": 4,
|
"logging_steps": 1,
|
||||||
|
"save_steps": 80,
|
||||||
|
"eval_steps": 80,
|
||||||
"bf16": "auto",
|
"bf16": "auto",
|
||||||
"save_first_step": False,
|
"save_first_step": False,
|
||||||
|
"use_tensorboard": True,
|
||||||
|
"seed": 42,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
cfg = validate_config(cfg)
|
cfg = validate_config(cfg)
|
||||||
@@ -98,3 +115,10 @@ class TestMixtral(unittest.TestCase):
|
|||||||
|
|
||||||
train(cfg=cfg, dataset_meta=dataset_meta)
|
train(cfg=cfg, dataset_meta=dataset_meta)
|
||||||
check_model_output_exists(temp_dir, cfg)
|
check_model_output_exists(temp_dir, cfg)
|
||||||
|
check_tensorboard_loss_decreased(
|
||||||
|
temp_dir + "/runs",
|
||||||
|
initial_window=5,
|
||||||
|
final_window=5,
|
||||||
|
max_initial=6.0,
|
||||||
|
max_final=4.7,
|
||||||
|
)
|
||||||
|
|||||||
@@ -22,8 +22,7 @@ class TestModelPatches(unittest.TestCase):
|
|||||||
def test_mixtral_multipack(self, temp_dir):
|
def test_mixtral_multipack(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "hf-internal-testing/Mixtral-tiny",
|
"base_model": "axolotl-ai-co/tiny-mixtral-30m",
|
||||||
"tokenizer_config": "LoneStriker/Mixtral-8x7B-v0.1-HF",
|
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
"sample_packing": True,
|
"sample_packing": True,
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
@@ -57,7 +56,7 @@ class TestModelPatches(unittest.TestCase):
|
|||||||
def test_mistral_multipack(self, temp_dir):
|
def test_mistral_multipack(self, temp_dir):
|
||||||
cfg = DictDefault(
|
cfg = DictDefault(
|
||||||
{
|
{
|
||||||
"base_model": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
|
"base_model": "axolotl-ai-co/tiny-mistral-25m",
|
||||||
"flash_attention": True,
|
"flash_attention": True,
|
||||||
"sample_packing": True,
|
"sample_packing": True,
|
||||||
"sequence_len": 2048,
|
"sequence_len": 2048,
|
||||||
|
|||||||
@@ -9,7 +9,11 @@ from axolotl.train import train
 from axolotl.utils.config import normalize_config, validate_config
 from axolotl.utils.dict import DictDefault
 
-from ..utils import check_model_output_exists, with_temp_dir
+from ..utils import (
+    check_model_output_exists,
+    check_tensorboard_loss_decreased,
+    with_temp_dir,
+)
 
 
 class TestPhiMultipack(unittest.TestCase):
@@ -21,7 +25,7 @@ class TestPhiMultipack(unittest.TestCase):
     def test_ft_packed(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "microsoft/phi-1_5",
+                "base_model": "axolotl-ai-co/tiny-phi-64m",
                 "model_type": "PhiForCausalLM",
                 "tokenizer_type": "AutoTokenizer",
                 "sequence_len": 1024,
@@ -43,17 +47,20 @@ class TestPhiMultipack(unittest.TestCase):
                 "dataset_shard_num": 10,
                 "dataset_shard_idx": 0,
                 "num_epochs": 1,
-                "micro_batch_size": 1,
+                "micro_batch_size": 2,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
-                "optimizer": "adamw_bnb_8bit",
+                "learning_rate": 2e-4,
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
-                "max_steps": 5,
-                "eval_steps": 3,
-                "save_steps": 4,
+                "max_steps": 50,
+                "logging_steps": 1,
+                "eval_steps": 50,
+                "save_steps": 50,
                 "bf16": "auto",
                 "save_first_step": False,
+                "use_tensorboard": True,
+                "seed": 42,
             }
         )
 
@@ -63,12 +70,19 @@ class TestPhiMultipack(unittest.TestCase):
 
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=6.0,
+            max_final=4.7,
+        )
 
     @with_temp_dir
     def test_qlora_packed(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "microsoft/phi-1_5",
+                "base_model": "axolotl-ai-co/tiny-phi-64m",
                 "model_type": "PhiForCausalLM",
                 "tokenizer_type": "AutoTokenizer",
                 "sequence_len": 1024,
@@ -94,17 +108,20 @@ class TestPhiMultipack(unittest.TestCase):
                 "dataset_shard_num": 10,
                 "dataset_shard_idx": 0,
                 "num_epochs": 1,
-                "micro_batch_size": 1,
+                "micro_batch_size": 2,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 2e-4,
                 "optimizer": "adamw_bnb_8bit",
                 "lr_scheduler": "cosine",
-                "max_steps": 5,
-                "eval_steps": 3,
-                "save_steps": 4,
+                "max_steps": 50,
+                "logging_steps": 1,
+                "eval_steps": 50,
+                "save_steps": 50,
                 "bf16": "auto",
                 "save_first_step": False,
+                "use_tensorboard": True,
+                "seed": 42,
             }
         )
 
@@ -114,3 +131,10 @@ class TestPhiMultipack(unittest.TestCase):
 
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=6.0,
+            max_final=4.7,
+        )

@@ -18,7 +18,7 @@ from transformers import AutoModelForCausalLM
 # Import the actual trainer methods we want to test
 from axolotl.core.trainers.grpo.async_trainer import AsyncGRPOTrainer
 
-MODEL_NAME = "Qwen/Qwen3-0.6B"
+MODEL_NAME = "axolotl-ai-co/tiny-qwen3-129m"
 
 
 def _fix_patched_attention(model):

@@ -4,14 +4,16 @@ E2E tests for falcon
 
 import unittest
 
-import pytest
-
 from axolotl.common.datasets import load_datasets
 from axolotl.train import train
 from axolotl.utils.config import normalize_config, validate_config
 from axolotl.utils.dict import DictDefault
 
-from .utils import check_model_output_exists, with_temp_dir
+from .utils import (
+    check_model_output_exists,
+    check_tensorboard_loss_decreased,
+    with_temp_dir,
+)
 
 
 class TestFalcon(unittest.TestCase):
@@ -19,13 +21,12 @@ class TestFalcon(unittest.TestCase):
     Test case for falcon
     """
 
-    @pytest.mark.skip(reason="no tiny models for testing with safetensors")
     @with_temp_dir
     def test_lora(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "illuin/tiny-random-FalconForCausalLM",
-                "flash_attention": True,
+                "base_model": "axolotl-ai-co/tiny-falcon-42m",
+                "flash_attention": False,
                 "sequence_len": 1024,
                 "load_in_8bit": True,
                 "adapter": "lora",
@@ -49,17 +50,21 @@ class TestFalcon(unittest.TestCase):
                     },
                 ],
                 "num_epochs": 2,
-                "micro_batch_size": 2,
+                "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 2e-4,
                 "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 50,
+                "warmup_steps": 5,
+                "logging_steps": 1,
+                "save_steps": 50,
+                "eval_steps": 50,
                 "bf16": "auto",
                 "save_first_step": False,
+                "use_tensorboard": True,
+                "seed": 42,
             }
         )
 
@@ -69,14 +74,20 @@ class TestFalcon(unittest.TestCase):
 
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=5.0,
+            max_final=4.7,
+        )
 
-    @pytest.mark.skip(reason="no tiny models for testing with safetensors")
     @with_temp_dir
     def test_lora_added_vocab(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "illuin/tiny-random-FalconForCausalLM",
-                "flash_attention": True,
+                "base_model": "axolotl-ai-co/tiny-falcon-42m",
+                "flash_attention": False,
                 "sequence_len": 1024,
                 "load_in_8bit": True,
                 "adapter": "lora",
@@ -104,17 +115,21 @@ class TestFalcon(unittest.TestCase):
                     },
                 ],
                 "num_epochs": 2,
-                "micro_batch_size": 2,
+                "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 2e-4,
                 "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 50,
+                "warmup_steps": 5,
+                "logging_steps": 1,
+                "save_steps": 50,
+                "eval_steps": 50,
                 "bf16": "auto",
                 "save_first_step": False,
+                "use_tensorboard": True,
+                "seed": 42,
             }
         )
 
@@ -124,14 +139,20 @@ class TestFalcon(unittest.TestCase):
 
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=5.0,
+            max_final=4.7,
+        )
 
-    @pytest.mark.skip(reason="no tiny models for testing with safetensors")
     @with_temp_dir
     def test_ft(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "illuin/tiny-random-FalconForCausalLM",
-                "flash_attention": True,
+                "base_model": "axolotl-ai-co/tiny-falcon-42m",
+                "flash_attention": False,
                 "sequence_len": 1024,
                 "val_set_size": 0.02,
                 "special_tokens": {
@@ -145,17 +166,23 @@ class TestFalcon(unittest.TestCase):
                     },
                 ],
                 "num_epochs": 2,
-                "micro_batch_size": 2,
+                "sample_packing": True,
+                "pad_to_sequence_len": True,
+                "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 5e-4,
                 "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 80,
+                "warmup_steps": 5,
+                "logging_steps": 1,
+                "save_steps": 80,
+                "eval_steps": 80,
                 "bf16": "auto",
                 "save_first_step": False,
+                "use_tensorboard": True,
+                "seed": 42,
             }
         )
 
@@ -165,3 +192,10 @@ class TestFalcon(unittest.TestCase):
 
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=10,
+            final_window=10,
+            max_initial=5.0,
+            max_final=4.7,
+        )

@@ -11,7 +11,11 @@ from axolotl.train import train
 from axolotl.utils.config import normalize_config, validate_config
 from axolotl.utils.dict import DictDefault
 
-from .utils import check_model_output_exists, with_temp_dir
+from .utils import (
+    check_model_output_exists,
+    check_tensorboard_loss_decreased,
+    with_temp_dir,
+)
 
 
 class TestMistral(unittest.TestCase):
@@ -23,7 +27,7 @@ class TestMistral(unittest.TestCase):
     def test_lora(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
+                "base_model": "axolotl-ai-co/tiny-mistral-25m",
                 "flash_attention": True,
                 "sequence_len": 1024,
                 "load_in_8bit": True,
@@ -45,16 +49,18 @@ class TestMistral(unittest.TestCase):
                     },
                 ],
                 "num_epochs": 2,
-                "micro_batch_size": 2,
+                "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 2e-4,
                 "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 50,
+                "logging_steps": 1,
+                "save_steps": 50,
+                "eval_steps": 50,
                 "save_first_step": False,
+                "use_tensorboard": True,
             }
         )
 
@@ -64,12 +70,19 @@ class TestMistral(unittest.TestCase):
 
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=4.5,
+            max_final=4.3,
+        )
 
     @with_temp_dir
     def test_ft(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "trl-internal-testing/tiny-MistralForCausalLM-0.2",
+                "base_model": "axolotl-ai-co/tiny-mistral-25m",
                 "flash_attention": True,
                 "sequence_len": 1024,
                 "val_set_size": 0.02,
@@ -85,16 +98,18 @@ class TestMistral(unittest.TestCase):
                     },
                 ],
                 "num_epochs": 2,
-                "micro_batch_size": 2,
+                "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 2e-4,
                 "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 50,
+                "logging_steps": 1,
+                "save_steps": 50,
+                "eval_steps": 50,
                 "save_first_step": False,
+                "use_tensorboard": True,
             }
         )
         if is_torch_bf16_gpu_available():
@@ -108,3 +123,10 @@ class TestMistral(unittest.TestCase):
 
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=4.5,
+            max_final=4.3,
+        )

@@ -12,7 +12,11 @@ from axolotl.train import train
 from axolotl.utils.config import normalize_config, validate_config
 from axolotl.utils.dict import DictDefault
 
-from .utils import check_model_output_exists, with_temp_dir
+from .utils import (
+    check_model_output_exists,
+    check_tensorboard_loss_decreased,
+    with_temp_dir,
+)
 
 
 class TestMixtral(unittest.TestCase):
@@ -24,8 +28,7 @@ class TestMixtral(unittest.TestCase):
     def test_qlora_w_fa2(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "hf-internal-testing/Mixtral-tiny",
-                "tokenizer_config": "LoneStriker/Mixtral-8x7B-v0.1-HF",
+                "base_model": "axolotl-ai-co/tiny-mixtral-30m",
                 "flash_attention": True,
                 "sequence_len": 1024,
                 "load_in_4bit": True,
@@ -51,16 +54,18 @@ class TestMixtral(unittest.TestCase):
                     },
                 ],
                 "num_epochs": 2,
-                "micro_batch_size": 2,
+                "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 2e-4,
                 "optimizer": "adamw_bnb_8bit",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 50,
+                "logging_steps": 1,
+                "save_steps": 50,
+                "eval_steps": 50,
                 "save_first_step": False,
+                "use_tensorboard": True,
             }
         )
 
@@ -74,13 +79,19 @@ class TestMixtral(unittest.TestCase):
                 == torch.float32
             )
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=5.0,
+            max_final=4.7,
+        )
 
     @with_temp_dir
     def test_qlora_wo_fa2(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "hf-internal-testing/Mixtral-tiny",
-                "tokenizer_config": "LoneStriker/Mixtral-8x7B-v0.1-HF",
+                "base_model": "axolotl-ai-co/tiny-mixtral-30m",
                 "flash_attention": False,
                 "sequence_len": 1024,
                 "load_in_4bit": True,
@@ -106,16 +117,18 @@ class TestMixtral(unittest.TestCase):
                     },
                 ],
                 "num_epochs": 2,
-                "micro_batch_size": 2,
+                "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 2e-4,
                 "optimizer": "adamw_bnb_8bit",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 50,
+                "logging_steps": 1,
+                "save_steps": 50,
+                "eval_steps": 50,
                 "save_first_step": False,
+                "use_tensorboard": True,
             }
         )
 
@@ -129,13 +142,19 @@ class TestMixtral(unittest.TestCase):
                 == torch.float32
             )
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=5.0,
+            max_final=4.7,
+        )
 
     @with_temp_dir
     def test_16bit_lora_w_fa2(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "hf-internal-testing/Mixtral-tiny",
-                "tokenizer_config": "LoneStriker/Mixtral-8x7B-v0.1-HF",
+                "base_model": "axolotl-ai-co/tiny-mixtral-30m",
                 "flash_attention": True,
                 "sequence_len": 1024,
                 "adapter": "lora",
@@ -160,16 +179,18 @@ class TestMixtral(unittest.TestCase):
                     },
                 ],
                 "num_epochs": 2,
-                "micro_batch_size": 2,
+                "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 2e-4,
                 "optimizer": "adamw_bnb_8bit",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 50,
+                "logging_steps": 1,
+                "save_steps": 50,
+                "eval_steps": 50,
                 "save_first_step": False,
+                "use_tensorboard": True,
             }
         )
         if is_torch_bf16_gpu_available():
@@ -187,13 +208,19 @@ class TestMixtral(unittest.TestCase):
                 == torch.float32
             )
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=5.0,
+            max_final=4.7,
+        )
 
     @with_temp_dir
     def test_16bit_lora_wo_fa2(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "hf-internal-testing/Mixtral-tiny",
-                "tokenizer_config": "LoneStriker/Mixtral-8x7B-v0.1-HF",
+                "base_model": "axolotl-ai-co/tiny-mixtral-30m",
                 "flash_attention": False,
                 "sequence_len": 1024,
                 "adapter": "lora",
@@ -218,16 +245,18 @@ class TestMixtral(unittest.TestCase):
                     },
                 ],
                 "num_epochs": 2,
-                "micro_batch_size": 2,
+                "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 2e-4,
                 "optimizer": "adamw_bnb_8bit",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 50,
+                "logging_steps": 1,
+                "save_steps": 50,
+                "eval_steps": 50,
                 "save_first_step": False,
+                "use_tensorboard": True,
             }
         )
 
@@ -245,13 +274,19 @@ class TestMixtral(unittest.TestCase):
                 == torch.float32
            )
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=5.0,
+            max_final=4.7,
+        )
 
     @with_temp_dir
     def test_ft(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "hf-internal-testing/Mixtral-tiny",
-                "tokenizer_config": "LoneStriker/Mixtral-8x7B-v0.1-HF",
+                "base_model": "axolotl-ai-co/tiny-mixtral-30m",
                 "flash_attention": True,
                 "sequence_len": 1024,
                 "val_set_size": 0.02,
@@ -263,16 +298,18 @@ class TestMixtral(unittest.TestCase):
                     },
                 ],
                 "num_epochs": 2,
-                "micro_batch_size": 2,
+                "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 2e-4,
                 "optimizer": "adamw_bnb_8bit",
                 "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 50,
+                "logging_steps": 1,
+                "save_steps": 50,
+                "eval_steps": 50,
                 "save_first_step": False,
+                "use_tensorboard": True,
             }
         )
         if is_torch_bf16_gpu_available():
@@ -286,3 +323,10 @@ class TestMixtral(unittest.TestCase):
 
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=5.0,
+            max_final=4.7,
+        )

@@ -13,6 +13,7 @@ from axolotl.utils.dict import DictDefault
 
 from .utils import (
     check_model_output_exists,
+    check_tensorboard_loss_decreased,
     require_torch_2_5_1,
     require_torch_2_6_0,
     require_torch_2_7_0,
@@ -243,20 +244,18 @@ class TestCustomOptimizers(unittest.TestCase):
     def test_came_pytorch(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "JackFram/llama-68m",
-                "tokenizer_type": "LlamaTokenizer",
+                "base_model": "axolotl-ai-co/tiny-llama-50m",
+                "tokenizer_type": "AutoTokenizer",
                 "sequence_len": 1024,
                 "load_in_8bit": True,
                 "adapter": "lora",
                 "lora_r": 8,
                 "lora_alpha": 16,
-                "lora_dropout": 0.05,
+                "lora_dropout": 0.0,
                 "lora_target_linear": True,
                 "val_set_size": 0.1,
                 "special_tokens": {
-                    "unk_token": "<unk>",
-                    "bos_token": "<s>",
-                    "eos_token": "</s>",
+                    "pad_token": "<|endoftext|>",
                 },
                 "datasets": [
                     {
@@ -265,16 +264,22 @@ class TestCustomOptimizers(unittest.TestCase):
                     },
                 ],
                 "num_epochs": 1,
+                "sample_packing": True,
+                "pad_to_sequence_len": True,
                 "micro_batch_size": 8,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 1e-4,
                 "optimizer": "came_pytorch",
                 "adam_beta3": 0.9999,
                 "adam_epsilon2": 1e-16,
-                "max_steps": 5,
+                "max_steps": 80,
+                "warmup_steps": 5,
+                "logging_steps": 1,
                 "lr_scheduler": "cosine",
                 "save_first_step": False,
+                "use_tensorboard": True,
+                "seed": 42,
             }
         )
 
@@ -284,6 +289,13 @@ class TestCustomOptimizers(unittest.TestCase):
 
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=10,
+            final_window=10,
+            max_initial=4.0,
+            max_final=3.0,
+        )
 
 
     @require_torch_2_7_0

@@ -9,7 +9,11 @@ from axolotl.train import train
 from axolotl.utils.config import normalize_config, validate_config
 from axolotl.utils.dict import DictDefault
 
-from .utils import check_model_output_exists, with_temp_dir
+from .utils import (
+    check_model_output_exists,
+    check_tensorboard_loss_decreased,
+    with_temp_dir,
+)
 
 
 class TestPhi(unittest.TestCase):
@@ -21,7 +25,7 @@ class TestPhi(unittest.TestCase):
     def test_phi_ft(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "microsoft/phi-1_5",
+                "base_model": "axolotl-ai-co/tiny-phi-64m",
                 "model_type": "AutoModelForCausalLM",
                 "tokenizer_type": "AutoTokenizer",
                 "sequence_len": 2048,
@@ -41,18 +45,22 @@ class TestPhi(unittest.TestCase):
                 "dataset_shard_num": 10,
                 "dataset_shard_idx": 0,
                 "num_epochs": 1,
-                "micro_batch_size": 1,
+                "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
-                "optimizer": "paged_adamw_8bit",
+                "learning_rate": 2e-4,
+                "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
-                "max_steps": 10,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 50,
+                "warmup_steps": 5,
+                "logging_steps": 1,
+                "save_steps": 50,
+                "eval_steps": 50,
                 "bf16": "auto",
                 "save_first_step": False,
+                "use_tensorboard": True,
+                "seed": 42,
             }
         )
         cfg = validate_config(cfg)
@@ -61,12 +69,19 @@ class TestPhi(unittest.TestCase):
 
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=5.0,
+            max_final=4.7,
+        )
 
     @with_temp_dir
     def test_phi_qlora(self, temp_dir):
         cfg = DictDefault(
             {
-                "base_model": "microsoft/phi-1_5",
+                "base_model": "axolotl-ai-co/tiny-phi-64m",
                 "model_type": "AutoModelForCausalLM",
                 "tokenizer_type": "AutoTokenizer",
                 "sequence_len": 2048,
@@ -90,18 +105,22 @@ class TestPhi(unittest.TestCase):
                 "dataset_shard_num": 10,
                 "dataset_shard_idx": 0,
                 "num_epochs": 1,
-                "micro_batch_size": 1,
+                "micro_batch_size": 4,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
-                "learning_rate": 0.00001,
+                "learning_rate": 2e-4,
                 "optimizer": "paged_adamw_8bit",
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
-                "max_steps": 10,
-                "save_steps": 10,
-                "eval_steps": 10,
+                "max_steps": 50,
+                "warmup_steps": 5,
+                "logging_steps": 1,
+                "save_steps": 50,
+                "eval_steps": 50,
                 "bf16": "auto",
                 "save_first_step": False,
+                "use_tensorboard": True,
+                "seed": 42,
             }
         )
         cfg = validate_config(cfg)
@@ -110,3 +129,10 @@ class TestPhi(unittest.TestCase):
 
         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+        check_tensorboard_loss_decreased(
+            temp_dir + "/runs",
+            initial_window=5,
+            final_window=5,
+            max_initial=5.0,
+            max_final=4.7,
+        )

@@ -18,7 +18,7 @@ class TestPreprocess:
 
         cfg = DictDefault(
             {
-                "base_model": "Qwen/Qwen2.5-0.5B",
+                "base_model": "axolotl-ai-co/tiny-qwen2-129m",
                 "sequence_len": 2048,
                 "val_set_size": 0.01,
                 "datasets": [

@@ -45,7 +45,7 @@ def _get_fake_quant_config_dtype(config):
 @pytest.fixture()
 def model():
     dummy_model = AutoModelForCausalLM.from_pretrained(
-        "Qwen/Qwen2-0.5B",
+        "axolotl-ai-co/tiny-qwen2-129m",
         device_map="auto",
         dtype=torch.bfloat16,
     )

@@ -17,7 +17,7 @@ class TestE2eQwen:
     Test cases for qwen models
     """
 
-    @pytest.mark.parametrize("base_model", ["Qwen/Qwen2-0.5B", "Qwen/Qwen2.5-0.5B"])
+    @pytest.mark.parametrize("base_model", ["axolotl-ai-co/tiny-qwen2-129m"])
    def test_dpo(self, base_model, temp_dir):
         cfg = DictDefault(
             {

@@ -199,6 +199,106 @@ def check_tensorboard(
     assert df.value.values[-1] > 1e-5, "Expected loss to be greater than zero"
 
 
+def check_tensorboard_loss_decreased(
+    temp_run_dir: str,
+    tag: str | None = None,
+    initial_window: int = 1,
+    final_window: int = 1,
+    min_delta: float | None = None,
+    max_initial: float | None = None,
+    max_final: float | None = None,
+    max_loss_ratio: float = 0.95,
+) -> None:
+    """Check that training actually learned — loss went down and stayed in
+    a sensible range.
+
+    Used with the tiny ``axolotl-ai-co/tiny-*`` CI models, where pretraining
+    was brief enough that final loss won't clear the absolute thresholds used
+    for 135M+ models — but the training pipeline should still behave.
+
+    ``train/train_loss`` is only logged once (end-of-training aggregate). The
+    per-step tag is ``train/loss`` for SFT/LM trainers and may vary across
+    trainers (e.g. DPO). When ``tag`` is None we try common per-step tags in
+    order and use the first with enough samples.
+
+    Two kinds of regression we guard against:
+
+    1. **Loss blew up.** A silent bug (e.g. broken label masking) can start
+       training at an absurdly high loss. ``max_initial`` / ``max_final``
+       assert the measured means stay at-or-below bounds measured from a
+       known-good run. Both are optional but strongly encouraged — loss
+       going *down* from a bad starting scale still looks like "learning."
+
+    2. **Loss didn't go down enough.** ``max_loss_ratio`` (default 0.95)
+       requires ``final <= initial * ratio``. A default below 1.0 means the
+       final window mean must sit at least 5% below the initial window mean
+       — real learning, not noise that happened to land below start. Only
+       raise this for configs where a smaller drop is expected *and*
+       documented (e.g. DPO with near-trivial pairs); in that case you are
+       intentionally weakening the test.
+
+    ``min_delta`` is optional; when set, additionally requires
+    ``final + min_delta <= initial`` — use for configs with enough signal
+    to demand a specific minimum absolute drop.
+    """
+    tb_log_path = most_recent_subdir(temp_run_dir)
+    event_file = os.path.join(tb_log_path, sorted(os.listdir(tb_log_path))[0])
+    reader = SummaryReader(event_file)
+    df = reader.scalars
+
+    if tag is None:
+        candidates = ["train/loss", "train/train_loss"]
+    else:
+        candidates = [tag]
+
+    required = initial_window + final_window
+    chosen_tag, values = None, None
+    for candidate in candidates:
+        sub = df[df.tag == candidate]
+        if len(sub) >= required:
+            chosen_tag = candidate
+            values = sub.value.values
+            break
+
+    available = sorted({t for t in df.tag.unique() if "loss" in t.lower()})
+    assert values is not None, (
+        f"None of the tags {candidates} had ≥{required} logged steps. "
+        f"Loss tags present: {available}"
+    )
+
+    initial = float(values[:initial_window].mean())
+    final = float(values[-final_window:].mean())
+    print(
+        f"[check_tensorboard_loss_decreased] tag={chosen_tag} n={len(values)} "
+        f"initial_mean{initial_window}={initial:.4f} final_mean{final_window}={final:.4f}"
+    )
+    assert final > 1e-5, "Expected loss to be greater than zero"
+    assert final <= initial * max_loss_ratio, (
+        f"Loss did not decrease for {chosen_tag}: "
+        f"initial(mean of first {initial_window})={initial:.4f}, "
+        f"final(mean of last {final_window})={final:.4f}, "
+        f"ratio={final / initial:.4f} (max allowed {max_loss_ratio}). "
+        f"Expected final <= initial — training did not learn."
+    )
+    if min_delta is not None:
+        assert final + min_delta <= initial, (
+            f"Expected loss to decrease by at least {min_delta} for {chosen_tag}: "
+            f"initial={initial:.4f}, final={final:.4f}, delta={initial - final:.4f}"
+        )
+    if max_initial is not None:
+        assert initial <= max_initial, (
+            f"Initial loss {initial:.4f} is above the expected max {max_initial}. "
+            f"Absolute scale is wrong — probably a silent regression "
+            f"(e.g. bad label masking) that bumped the starting point."
+        )
+    if max_final is not None:
+        assert final <= max_final, (
+            f"Final loss {final:.4f} is above the expected max {max_final}. "
+            f"Absolute scale is wrong — probably a silent regression "
+            f"(e.g. bad label masking) that bumped the endpoint."
+        )
+
+
 def check_model_output_exists(temp_dir: str, cfg: DictDefault) -> None:
     """
     helper function to check if a model output file exists after training
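For reference, the gate that `check_tensorboard_loss_decreased` applies to the TensorBoard scalars reduces to a comparison of two windowed means plus the optional absolute bounds. The sketch below re-implements only that arithmetic on an invented loss curve; the window sizes, the 0.95 ratio, and the 6.0/4.7 bounds mirror values visible in the diff, while the loss numbers themselves are purely illustrative and not taken from any real run.

```python
# Minimal sketch of the windowed-mean check, assuming a made-up per-step loss curve.
losses = [5.8, 5.5, 5.2, 4.9, 4.7, 4.6, 4.5, 4.4, 4.3, 4.2]

initial_window, final_window = 5, 5
max_loss_ratio = 0.95              # default ratio gate in the new helper
max_initial, max_final = 6.0, 4.7  # absolute bounds, as passed by several tests above

initial = sum(losses[:initial_window]) / initial_window  # mean of the first steps
final = sum(losses[-final_window:]) / final_window       # mean of the last steps

assert final > 1e-5                       # loss is not degenerate / zeroed out
assert final <= initial * max_loss_ratio  # final mean sits at least ~5% below the start
assert initial <= max_initial             # training started at a sane absolute scale
assert final <= max_final                 # and ended at a sane absolute scale
```

Keeping both the relative gate and the absolute bounds is what lets the tiny-model tests catch a "loss went down, but from the wrong scale" regression as well as a flat loss curve.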