"""
|
|
E2E tests for llama
|
|
"""
|
|
|
|
from axolotl.common.datasets import load_datasets
|
|
from axolotl.train import train
|
|
from axolotl.utils.config import normalize_config, validate_config
|
|
from axolotl.utils.dict import DictDefault
|
|
|
|
from tests.e2e.utils import check_model_output_exists
|
|
|
|
|
|
class TestLlama:
    """
    Test case for Llama models
    """

    def test_fft_trust_remote_code(self, temp_dir):
        """Full fine-tune of SmolLM2-135M with trust_remote_code enabled."""
        cfg = DictDefault(
            {
                "base_model": "HuggingFaceTB/SmolLM2-135M",
                "tokenizer_type": "AutoTokenizer",
                "trust_remote_code": True,
                "sequence_len": 512,
                "val_set_size": 0.02,
                "special_tokens": {
                    "pad_token": "<|endoftext|>",
                },
                "datasets": [
                    {
                        "path": "mhenrichsen/alpaca_2k_test",
                        "type": "alpaca",
                    },
                ],
                "num_epochs": 1,
                "max_steps": 5,
                "micro_batch_size": 2,
                "gradient_accumulation_steps": 1,
                "output_dir": temp_dir,
                "learning_rate": 0.00001,
                "optimizer": "adamw_bnb_8bit",
                "lr_scheduler": "cosine",
                "flash_attention": True,
                "sample_packing": True,
                "bf16": True,
                "save_first_step": False,
            }
        )

        cfg = validate_config(cfg)
        normalize_config(cfg)
        dataset_meta = load_datasets(cfg=cfg)

        train(cfg=cfg, dataset_meta=dataset_meta)
        check_model_output_exists(temp_dir, cfg)

    def test_fix_untrained_tokens(self, temp_dir):
        """fix_untrained_tokens with custom bos/eos tokens supplied via a Jinja chat template."""
        cfg = DictDefault(
            {
                "base_model": "HuggingFaceTB/SmolLM2-135M",
                "fix_untrained_tokens": True,
                "sequence_len": 512,
                "val_set_size": 0.0,
                "special_tokens": {
                    "pad_token": "<|endoftext|>",
                    "bos_token": "<|custom_im_start|>",
                    "eos_token": "<|custom_im_end|>",
                },
                "datasets": [
                    {
                        "chat_template": "jinja",
                        "chat_template_jinja": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|custom_im_start|>' + message['role'] + '\n' + message['content'] + '<|custom_im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|custom_im_start|>assistant\n' }}{% endif %}",
                        "path": "mlabonne/FineTome-100k",
                        "type": "chat_template",
                        "split": "train[:10%]",
                        "field_messages": "conversations",
                        "message_field_role": "from",
                        "message_field_content": "value",
                    },
                ],
                "num_epochs": 1,
                "max_steps": 5,
                "micro_batch_size": 1,
                "gradient_accumulation_steps": 1,
                "output_dir": temp_dir,
                "learning_rate": 0.00001,
                "optimizer": "adamw_8bit",
                "lr_scheduler": "cosine",
                "flash_attention": True,
                "sample_packing": True,
                "bf16": True,
                "save_first_step": False,
            }
        )

        cfg = validate_config(cfg)
        normalize_config(cfg)
        dataset_meta = load_datasets(cfg=cfg)

        train(cfg=cfg, dataset_meta=dataset_meta)
        check_model_output_exists(temp_dir, cfg)

    def test_fix_untrained_tokens_already_trained(self, temp_dir):
        """fix_untrained_tokens when the special tokens are already part of the trained vocabulary."""
        cfg = DictDefault(
            {
                "base_model": "HuggingFaceTB/SmolLM2-135M",
                "fix_untrained_tokens": True,
                "sequence_len": 512,
                "val_set_size": 0.0,
                "special_tokens": {
                    "pad_token": "<|endoftext|>",
                },
                "chat_template": "chatml",
                "datasets": [
                    {
                        "path": "mlabonne/FineTome-100k",
                        "type": "chat_template",
                        "split": "train[:10%]",
                        "field_messages": "conversations",
                        "message_field_role": "from",
                        "message_field_content": "value",
                    },
                ],
                "num_epochs": 1,
                "max_steps": 5,
                "micro_batch_size": 1,
                "gradient_accumulation_steps": 1,
                "output_dir": temp_dir,
                "learning_rate": 0.00001,
                "optimizer": "adamw_8bit",
                "lr_scheduler": "cosine",
                "flash_attention": True,
                "sample_packing": True,
                "bf16": True,
                "save_first_step": False,
            }
        )

        cfg = validate_config(cfg)
        normalize_config(cfg)
        dataset_meta = load_datasets(cfg=cfg)

        train(cfg=cfg, dataset_meta=dataset_meta)
        check_model_output_exists(temp_dir, cfg)

    def test_batch_flattening(self, temp_dir):
        """Training with batch_flattening enabled and sample_packing disabled."""
        cfg = DictDefault(
            {
                "base_model": "HuggingFaceTB/SmolLM2-135M",
                "trust_remote_code": True,
                "sequence_len": 512,
                "val_set_size": 0.01,
                "special_tokens": {
                    "pad_token": "<|endoftext|>",
                },
                "datasets": [
                    {
                        "path": "mhenrichsen/alpaca_2k_test",
                        "type": "alpaca",
                    },
                ],
                "num_epochs": 1,
                "max_steps": 5,
                "micro_batch_size": 4,
                "gradient_accumulation_steps": 1,
                "output_dir": temp_dir,
                "learning_rate": 0.00001,
                "optimizer": "adamw_8bit",
                "lr_scheduler": "cosine",
                "flash_attention": True,
                "sample_packing": False,
                "batch_flattening": True,
                "bf16": True,
                "save_first_step": False,
            }
        )

        cfg = validate_config(cfg)
        normalize_config(cfg)
        dataset_meta = load_datasets(cfg=cfg)

        train(cfg=cfg, dataset_meta=dataset_meta)
        check_model_output_exists(temp_dir, cfg)