"""
|
|
Test cases for the tokenizer loading
|
|
"""
|
|
|
|
import unittest
|
|
|
|
import pytest
|
|
|
|
from axolotl.loaders import load_tokenizer
|
|
from axolotl.utils.dict import DictDefault
|
|
|
|
from tests.hf_offline_utils import enable_hf_offline
|
|
|
|
|
|
class TestTokenizers:
    """
    Test class for the load_tokenizer fn.
    """

    @pytest.mark.skip("LlamaTokenizer no longer has a Fast/Slow tokenizer")
    @enable_hf_offline
    def test_default_use_fast(self):
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
            }
        )
        tokenizer = load_tokenizer(cfg)
        assert "Fast" in tokenizer.__class__.__name__

    @pytest.mark.skip("LlamaTokenizer no longer has a Fast/Slow tokenizer")
    @enable_hf_offline
    def test_dont_use_fast(self):
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "tokenizer_use_fast": False,
            }
        )
        tokenizer = load_tokenizer(cfg)
        assert "Fast" not in tokenizer.__class__.__name__

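    # Changing a special token rewrites entries in the embedding table, which a
    # LoRA adapter alone does not train; load_tokenizer is expected to raise and
    # ask for lora_modules_to_save in that case (see the match pattern below).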
    @enable_hf_offline
    def test_special_tokens_modules_to_save(self):
        # setting special_tokens to a new token
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "adapter": "lora",
                "special_tokens": {"bos_token": "[INST]"},
            }
        )
        with pytest.raises(
            ValueError,
            match=r".*Please set lora_modules_to_save.*",
        ):
            load_tokenizer(cfg)

        # setting special_tokens but not changing from the default
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "adapter": "lora",
                "special_tokens": {"bos_token": "<s>"},
            }
        )
        load_tokenizer(cfg)

        # non-adapter setting of special_tokens
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "special_tokens": {"bos_token": "[INST]"},
            }
        )
        load_tokenizer(cfg)

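    # llama-7b's base vocab spans ids 0..31999, so the single added special token
    # below should land at id 32000 and grow len(tokenizer) to 32001.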
    @enable_hf_offline
    def test_add_additional_special_tokens(self):
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "special_tokens": {"additional_special_tokens": ["<|im_start|>"]},
            }
        )
        tokenizer = load_tokenizer(cfg)
        assert tokenizer("<|im_start|>user")["input_ids"] == [1, 32000, 1404]
        assert len(tokenizer) == 32001

        # ensure reloading the tokenizer again from cfg results in the same vocab length
        tokenizer = load_tokenizer(cfg)
        assert len(tokenizer) == 32001

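    # added_tokens_overrides remaps existing reserved token ids to new surface
    # strings in place: the vocab should not grow, and the overridden ids should
    # round-trip through encode/decode as asserted below.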
    @enable_hf_offline
    def test_added_tokens_overrides(self, temp_dir):
        cfg = DictDefault(
            {
                # use with a tokenizer that has reserved_tokens in added_tokens
                "tokenizer_config": "NousResearch/Llama-3.2-1B",
                "added_tokens_overrides": {
                    128041: "RANDOM_OVERRIDE_1",
                    128042: "RANDOM_OVERRIDE_2",
                },
                "output_dir": temp_dir,
            }
        )

        tokenizer = load_tokenizer(cfg)
        assert tokenizer.encode("RANDOM_OVERRIDE_1", add_special_tokens=False) == [
            128041
        ]
        assert tokenizer.encode("RANDOM_OVERRIDE_2", add_special_tokens=False) == [
            128042
        ]
        assert (
            tokenizer.decode([128041, 128042]) == "RANDOM_OVERRIDE_1RANDOM_OVERRIDE_2"
        )

    @enable_hf_offline
    def test_added_tokens_overrides_gemma3(self, temp_dir):
        cfg = DictDefault(
            {
                # use with a tokenizer that has reserved_tokens in added_tokens
                "tokenizer_config": "mlx-community/gemma-3-4b-it-8bit",
                "added_tokens_overrides": {
                    256001: "RANDOM_OVERRIDE_1",
                    256002: "RANDOM_OVERRIDE_2",
                },
                "output_dir": temp_dir,
            }
        )

        tokenizer = load_tokenizer(cfg)
        assert tokenizer.encode("RANDOM_OVERRIDE_1", add_special_tokens=False) == [
            256001
        ]
        assert tokenizer.encode("RANDOM_OVERRIDE_2", add_special_tokens=False) == [
            256002
        ]
        assert (
            tokenizer.decode([256001, 256002]) == "RANDOM_OVERRIDE_1RANDOM_OVERRIDE_2"
        )

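    # Overrides are only valid for ids that already exist in the tokenizer's
    # added_tokens map; an out-of-range id should raise a ValueError, as below.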
    @enable_hf_offline
    def test_added_tokens_overrides_with_toolargeid(self, temp_dir):
        cfg = DictDefault(
            {
                # use with a tokenizer that has reserved_tokens in added_tokens
                "tokenizer_config": "HuggingFaceTB/SmolLM2-135M",
                "added_tokens_overrides": {1000000: "BROKEN_RANDOM_OVERRIDE_1"},
                "output_dir": temp_dir,
            }
        )

        with pytest.raises(
            ValueError, match=r".*Token ID 1000000 not found in added_tokens.*"
        ):
            load_tokenizer(cfg)

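
# Note: TestTokenizers is pytest-style (it does not subclass unittest.TestCase),
# so unittest.main() below will not collect these tests; run this file with
# pytest instead.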
if __name__ == "__main__":
    unittest.main()