"""
|
|
E2E tests for resuming training
|
|
"""
|
|
|
|
import os
|
|
import re
|
|
import subprocess
|
|
|
|
from transformers.utils import is_torch_bf16_gpu_available
|
|
|
|
from axolotl.common.datasets import load_datasets
|
|
from axolotl.train import train
|
|
from axolotl.utils.callbacks.tokens_per_second import TOKENS_STATE_FILE
|
|
from axolotl.utils.config import normalize_config, validate_config
|
|
from axolotl.utils.dict import DictDefault
|
|
|
|
from ..utils import check_model_output_exists, most_recent_subdir, require_torch_2_6_0
|
|
|
|
|
|
class TestResumeLlama:
    """
    Test case for resuming training of llama models
    """

    @require_torch_2_6_0
    def test_resume_lora_packed(self, temp_dir):
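        # Small LoRA run with frequent checkpointing (save_steps=3, max_steps=15)
        # so that an intermediate checkpoint exists to resume from.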
        cfg = DictDefault(
            {
                "base_model": "HuggingFaceTB/SmolLM2-135M",
                "sequence_len": 1024,
                "sample_packing": True,
                "flash_attention": True,
                "load_in_8bit": True,
                "adapter": "lora",
                "lora_r": 8,
                "lora_alpha": 16,
                "lora_dropout": 0.05,
                "lora_target_linear": True,
                "val_set_size": 0.001,
                "special_tokens": {
                    "pad_token": "<|endoftext|>",
                },
                "datasets": [
                    {
                        "path": "tatsu-lab/alpaca",
                        "type": "alpaca",
                        "split": "train[:10%]",
                    },
                ],
                "num_epochs": 2,
                "micro_batch_size": 1,
                "gradient_accumulation_steps": 1,
                "output_dir": temp_dir,
                "learning_rate": 0.00001,
                "optimizer": "adamw_8bit",
                "lr_scheduler": "cosine",
                "save_steps": 3,
                "save_total_limit": 5,
                "max_steps": 15,
                "use_tensorboard": True,
                "save_first_step": False,
                "include_tkps": True,
            }
        )
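        # Prefer bf16 where the GPU supports it; otherwise fall back to fp16.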
        if is_torch_bf16_gpu_available():
            cfg.bf16 = True
        else:
            cfg.fp16 = True
        cfg = validate_config(cfg)
        normalize_config(cfg)
        dataset_meta = load_datasets(cfg=cfg)

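        # load_datasets computes total_num_tokens; capture it so we can verify
        # the resume path reuses the cached value instead of recomputing it.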
        initial_total_num_tokens = cfg.total_num_tokens
        assert initial_total_num_tokens is not None, (
            "total_num_tokens should be calculated during load_datasets"
        )

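        # First run: trains to max_steps=15, saving a checkpoint every 3 steps.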
        train(cfg=cfg, dataset_meta=dataset_meta)

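        # The tokens-per-second callback (enabled via include_tkps) should
        # persist its state file into each checkpoint so counts survive a resume.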
        checkpoint_path = f"{temp_dir}/checkpoint-9"
        tokens_state_path = os.path.join(checkpoint_path, TOKENS_STATE_FILE)
        assert os.path.isfile(tokens_state_path), (
            f"{TOKENS_STATE_FILE} should exist in checkpoint at {tokens_state_path}"
        )

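        # Build a resume config pointing at the mid-run checkpoint (step 9 of 15).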
        resume_cfg = cfg | DictDefault(
            {
                "resume_from_checkpoint": f"{temp_dir}/checkpoint-9/",
            }
        )
        normalize_config(resume_cfg)

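        # Re-normalizing the config must not clobber the cached token count.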
        assert resume_cfg.total_num_tokens == initial_total_num_tokens, (
            f"total_num_tokens should be preserved on resume. "
            f"Expected {initial_total_num_tokens}, got {resume_cfg.total_num_tokens}"
        )

        resume_dataset_meta = load_datasets(cfg=resume_cfg)

        assert resume_cfg.total_num_tokens == initial_total_num_tokens, (
            f"total_num_tokens should not be recalculated when resuming. "
            f"Expected {initial_total_num_tokens}, got {resume_cfg.total_num_tokens}"
        )

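        # Second run: resumes from checkpoint-9 and trains the remaining steps.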
        train(cfg=resume_cfg, dataset_meta=resume_dataset_meta)

        assert resume_cfg.total_num_tokens == initial_total_num_tokens, (
            f"total_num_tokens should remain unchanged after resume training. "
            f"Expected {initial_total_num_tokens}, got {resume_cfg.total_num_tokens}"
        )
        check_model_output_exists(temp_dir, cfg)

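        # Inspect the TensorBoard event log: a run resumed from checkpoint-9
        # should report its first logged step as 10.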
        tb_log_path_1 = most_recent_subdir(temp_dir + "/runs")
        cmd = f"tensorboard --inspect --logdir {tb_log_path_1}"
        res = subprocess.run(
            cmd, shell=True, text=True, capture_output=True, check=True
        )
        pattern = r"first_step\s+(\d+)"
        first_steps = int(re.findall(pattern, res.stdout)[0])
        assert first_steps == 10