"""
|
|
e2e tests for kd trainer support in Axolotl
|
|
"""
|
|
|
|
from pathlib import Path
|
|
|
|
import pytest
|
|
import yaml
|
|
from accelerate.test_utils import execute_subprocess_async, get_torch_dist_unique_port
|
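# Assumption about the accelerate helpers: execute_subprocess_async raises if
# the command exits nonzero, and get_torch_dist_unique_port yields a distinct
# torch.distributed rendezvous port per test worker, avoiding port clashes.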

from axolotl.utils.dict import DictDefault

from tests.e2e.utils import check_tensorboard, require_torch_2_5_1


@pytest.fixture(name="kd_min_cfg")
def min_cfg(temp_dir):
    return {
        "base_model": "Qwen/Qwen3-0.6B",
        "tokenizer_config": "winglian/qwen3-14b-math",
        "plugins": [
            "axolotl.integrations.kd.KDPlugin",
            "axolotl.integrations.liger.LigerPlugin",
        ],
        "liger_rms_norm": True,
        "liger_glu_activation": True,
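        # Liger's fused RMSNorm / GLU kernels reduce training memory and
        # latency; presumably part of fitting this run into CI VRAM.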
"torch_compile": True,
|
|
"chat_template": "qwen3",
|
|
"kd_trainer": True,
|
|
"kd_ce_alpha": 0.1,
|
|
"kd_alpha": 0.9,
|
|
"kd_temperature": 1.0,
|
|
"kd_beta": 0.0,
|
|
"kd_normalize_topk": True,
|
|
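        # My reading of these knobs from their names, not verified against the
        # plugin source: roughly
        #   loss ~= kd_ce_alpha * ce(student, labels)
        #           + kd_alpha * kl(student / T, teacher / T),  T = kd_temperature
        # so at 0.1 / 0.9 the student trains mostly against the teacher's top-k
        # distribution, with kd_normalize_topk renormalizing the truncated
        # teacher probabilities to sum to 1.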
"dataloader_prefetch_factor": 8,
|
|
"dataloader_num_workers": 4,
|
|
"dataloader_pin_memory": True,
|
|
"datasets": [
|
|
{
|
|
"path": "winglian/OpenThoughts-114k-math-correct-qwen3-14b-math-prepared-topk128-normalized",
|
|
"type": "chat_template",
|
|
"split": "train",
|
|
"split_thinking": True,
|
|
"eot_tokens": ["<|im_end|>"],
|
|
"data_files": ["train/batch-000000.parquet"],
|
|
},
|
|
],
|
|
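        # Assumption from the dataset name: each row is already tokenized and
        # carries the teacher's top-128 normalized logprobs, which is why
        # dataset preparation is skipped below.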
"skip_prepare_dataset": True,
|
|
"val_set_size": 0.0,
|
|
"sequence_len": 2048,
|
|
"sample_packing": True,
|
|
"pad_to_sequence_len": True,
|
|
"gradient_accumulation_steps": 2,
|
|
"micro_batch_size": 1,
|
|
"num_epochs": 1,
|
|
"optimizer": "adamw_8bit",
|
|
"lr_scheduler": "cosine",
|
|
"learning_rate": 0.00001,
|
|
"bf16": "auto",
|
|
"gradient_checkpointing": True,
|
|
"flash_attention": True,
|
|
"special_tokens": {
|
|
"pad_token": "<|end_of_text|>",
|
|
"eos_token": "<|eot_id|>",
|
|
},
|
|
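        # NB: these look like Llama-3-style tokens despite the Qwen3 base model
        # and chat template; they may be defined by the tokenizer_config above.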
"max_steps": 5,
|
|
"output_dir": temp_dir,
|
|
"use_tensorboard": True,
|
|
"save_first_step": False,
|
|
}
|
|
|
|
|
|


class TestKnowledgeDistillation:
    """
    Test case for Knowledge Distillation
    """

    # While this will run on torch 2.4.x without torch_compile enabled,
    # the VRAM requirement is higher than what is available in CI.
    @require_torch_2_5_1
    def test_llama_kd(self, temp_dir, kd_min_cfg):
        cfg = DictDefault(kd_min_cfg)

        # write cfg to yaml file
        Path(temp_dir).mkdir(parents=True, exist_ok=True)
        with open(Path(temp_dir) / "config.yaml", "w", encoding="utf-8") as fout:
            fout.write(yaml.dump(cfg.to_dict(), Dumper=yaml.Dumper))

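        # Run training end-to-end through the axolotl CLI in a subprocess,
        # mirroring how a user would invoke it, rather than in-process.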
        execute_subprocess_async(
            [
                "axolotl",
                "train",
                str(Path(temp_dir) / "config.yaml"),
                "--num-processes",
                "1",
                "--main-process-port",
                f"{get_torch_dist_unique_port()}",
            ]
        )

        assert (Path(temp_dir) / "model.safetensors").exists()
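        # 1.4 is an empirical ceiling for this 5-step smoke run; as I understand
        # check_tensorboard, it reads the train/loss scalar from the event files
        # under runs/ and fails with the formatted message if the loss is higher.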
        check_tensorboard(
            temp_dir + "/runs", "train/loss", 1.4, "Train Loss (%s) is too high"
        )

    @pytest.mark.parametrize(
        "load_in_8bit",
        [True, False],
    )
    def test_llama_lora_kd(self, temp_dir, kd_min_cfg, load_in_8bit):
        # dict union keeps the right-hand operand's value on duplicate keys, so
        # the shared kd_min_cfg goes on the left and the LoRA-specific
        # overrides (notably torch_compile=False) take precedence on the right
        cfg = DictDefault(
            kd_min_cfg
            | {
                "load_in_8bit": load_in_8bit,
                "torch_compile": False,
                "adapter": "lora",
                "peft_use_dora": True,
                "lora_target_linear": True,
                "lora_r": 16,
                "lora_alpha": 32,
                "lora_dropout": 0.0,
                "lora_modules_to_save": ["embed_tokens", "lm_head"],
                "lora_mlp_kernel": False,
                "lora_qkv_kernel": False,
                "lora_o_kernel": False,
            }
        )

        # write cfg to yaml file
        Path(temp_dir).mkdir(parents=True, exist_ok=True)
        with open(Path(temp_dir) / "config.yaml", "w", encoding="utf-8") as fout:
            fout.write(yaml.dump(cfg.to_dict(), Dumper=yaml.Dumper))

        execute_subprocess_async(
            [
                "axolotl",
                "train",
                str(Path(temp_dir) / "config.yaml"),
                "--num-processes",
                "1",
                "--main-process-port",
                f"{get_torch_dist_unique_port()}",
            ]
        )
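
        # A LoRA run saves only the adapter weights, hence
        # adapter_model.safetensors rather than a full model.safetensors.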
        assert (Path(temp_dir) / "adapter_model.safetensors").exists()
        check_tensorboard(
            temp_dir + "/runs", "train/loss", 1.2, "Train Loss (%s) is too high"
        )