# NOTE(review): removed a scraped web-page banner (PR changelog plus
# "165 lines / 5.1 KiB / Python" file metadata) that was pasted above the
# module docstring; it was not part of the source and broke the file's syntax.
"""
|
|
Test cases for the tokenizer loading
|
|
"""

import unittest

import pytest

from axolotl.loaders import load_tokenizer
from axolotl.utils.dict import DictDefault

from tests.hf_offline_utils import enable_hf_offline

class TestTokenizers:
    """
    Test class for the load_tokenizer fn.
    """

    @pytest.mark.skip("LlamaTokenizer no longer has a Fast/Slow tokenizer")
    @enable_hf_offline
    def test_default_use_fast(self):
        """With no explicit setting, the fast (Rust) tokenizer is selected."""
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
            }
        )
        tokenizer = load_tokenizer(cfg)
        assert "Fast" in tokenizer.__class__.__name__

    @pytest.mark.skip("LlamaTokenizer no longer has a Fast/Slow tokenizer")
    @enable_hf_offline
    def test_dont_use_fast(self):
        """tokenizer_use_fast=False must yield the slow (Python) tokenizer."""
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "tokenizer_use_fast": False,
            }
        )
        tokenizer = load_tokenizer(cfg)
        assert "Fast" not in tokenizer.__class__.__name__

    @enable_hf_offline
    def test_special_tokens_modules_to_save(self):
        """Changing special tokens with LoRA requires lora_modules_to_save."""
        # setting special_tokens to new token
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "adapter": "lora",
                "special_tokens": {"bos_token": "[INST]"},
            }
        )
        with pytest.raises(
            ValueError,
            # fix: the original pattern ended in "save*", which applied the
            # quantifier to the trailing "e" (also matching "…to_sav");
            # the intent is the literal phrase anywhere in the message.
            match=r".*Please set lora_modules_to_save.*",
        ):
            load_tokenizer(cfg)

        # setting special_tokens but not changing from default
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "adapter": "lora",
                "special_tokens": {"bos_token": "<s>"},
            }
        )
        load_tokenizer(cfg)

        # non-adapter setting special_tokens
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "special_tokens": {"bos_token": "[INST]"},
            }
        )
        load_tokenizer(cfg)

    @enable_hf_offline
    def test_add_additional_special_tokens(self):
        """Added special tokens grow the vocab and tokenize as single ids."""
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "special_tokens": {"additional_special_tokens": ["<|im_start|>"]},
            }
        )
        tokenizer = load_tokenizer(cfg)
        assert "LlamaTokenizer" in tokenizer.__class__.__name__
        assert tokenizer("<|im_start|>user")["input_ids"] == [1, 32000, 1792]
        assert len(tokenizer) == 32001

        # ensure reloading the tokenizer again from cfg results in same vocab length
        tokenizer = load_tokenizer(cfg)
        assert len(tokenizer) == 32001

    @enable_hf_offline
    def test_added_tokens_overrides(self, temp_dir):
        """Overriding reserved added_tokens remaps both encode and decode."""
        cfg = DictDefault(
            {
                # use with tokenizer that has reserved_tokens in added_tokens
                "tokenizer_config": "NousResearch/Llama-3.2-1B",
                "added_tokens_overrides": {
                    128041: "RANDOM_OVERRIDE_1",
                    128042: "RANDOM_OVERRIDE_2",
                },
                "output_dir": temp_dir,
            }
        )

        tokenizer = load_tokenizer(cfg)
        assert tokenizer.encode("RANDOM_OVERRIDE_1", add_special_tokens=False) == [
            128041
        ]
        assert tokenizer.encode("RANDOM_OVERRIDE_2", add_special_tokens=False) == [
            128042
        ]
        assert (
            tokenizer.decode([128041, 128042]) == "RANDOM_OVERRIDE_1RANDOM_OVERRIDE_2"
        )

    @pytest.mark.skip("FIXME slow test sdist py3.11 + torch2.8.0")
    @enable_hf_offline
    def test_added_tokens_overrides_gemma3(self, temp_dir):
        """Same override behavior against a gemma-3 tokenizer's unused ids."""
        cfg = DictDefault(
            {
                # use with tokenizer that has reserved_tokens in added_tokens
                "tokenizer_config": "mlx-community/gemma-3-4b-it-8bit",
                "added_tokens_overrides": {
                    256001: "RANDOM_OVERRIDE_1",
                    256002: "RANDOM_OVERRIDE_2",
                },
                "output_dir": temp_dir,
            }
        )

        tokenizer = load_tokenizer(cfg)
        assert tokenizer.encode("RANDOM_OVERRIDE_1", add_special_tokens=False) == [
            256001
        ]
        assert tokenizer.encode("RANDOM_OVERRIDE_2", add_special_tokens=False) == [
            256002
        ]
        assert (
            tokenizer.decode([256001, 256002]) == "RANDOM_OVERRIDE_1RANDOM_OVERRIDE_2"
        )

    @enable_hf_offline
    def test_added_tokens_overrides_with_toolargeid(self, temp_dir):
        """An override id outside added_tokens must raise a ValueError."""
        cfg = DictDefault(
            {
                # use with tokenizer that has reserved_tokens in added_tokens
                "tokenizer_config": "HuggingFaceTB/SmolLM2-135M",
                "added_tokens_overrides": {1000000: "BROKEN_RANDOM_OVERRIDE_1"},
                "output_dir": temp_dir,
            }
        )

        with pytest.raises(
            ValueError, match=r".*Token ID 1000000 not found in added_tokens.*"
        ):
            load_tokenizer(cfg)
if __name__ == "__main__":
|
|
unittest.main()
|