"""
|
|
tests for pydantic fsdp validation
|
|
"""
|
|
|
|
import pytest
|
|
|
|
from axolotl.utils.config import validate_config
|
|
from axolotl.utils.dict import DictDefault
|
|
|
|
|
|
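
# As exercised by the tests below, `validate_config` normalizes FSDP settings:
# `fsdp_version` may be given at the top level or as `version` inside
# `fsdp_config`, and ends up mirrored to both places after validation, while
# legacy `fsdp_`-prefixed keys inside `fsdp_config` are stripped to their bare
# names. The remaining tests cover optimizer/offload compatibility checks that
# differ between FSDP1 and FSDP2.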


class TestFSDPValidation:
    """
    Test class for Pydantic FSDP validation.
    """

    def test_fsdp_version_from_fsdp_config(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            fsdp_config={
                "version": 2,
            },
        )
        cfg = validate_config(
            cfg,
        )
        assert cfg.fsdp_version == 2

    def test_fsdp_version_in_fsdp_config(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            fsdp_version=2,
            fsdp_config={
                "reshard_after_forward": True,
            },
        )
        cfg = validate_config(
            cfg,
        )
        assert cfg.fsdp_version == 2
        assert cfg.fsdp_config.fsdp_version == 2

    def test_fsdp_offload_w_8bit_optim(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            fsdp_config={
                "offload_params": True,
            },
            optimizer="adamw_8bit",
            fsdp_version=1,
        )
        with pytest.raises(
            ValueError, match="FSDP Offload not compatible with adamw_8bit"
        ):
            validate_config(cfg)

    def test_fsdp2_w_8bit_optim(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            fsdp_config={
                "offload_params": True,
            },
            optimizer="adamw_8bit",
            fsdp_version=2,
        )
        with pytest.raises(
            ValueError,
            match="FSDP2 not compatible with adamw_8bit, use `adamw_torch_8bit` instead",
        ):
            validate_config(cfg)

    def test_fsdp2_w_cpu_ram_efficient_loading(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            load_in_8bit=True,
            adapter="lora",
            fsdp_config={
                "cpu_ram_efficient_loading": True,
            },
            fsdp_version=2,
        )
        validated_cfg = validate_config(cfg)
        assert validated_cfg.fsdp_version == 2
        assert validated_cfg.fsdp_config.cpu_ram_efficient_loading is True

    def test_fsdp2_cpu_offload_pin_memory_requires_offload_params(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            fsdp_config={
                "cpu_offload_pin_memory": False,
                "offload_params": False,
            },
            fsdp_version=2,
        )
        with pytest.raises(
            ValueError,
            match="disabling cpu_offload_pin_memory requires enabling offload_params",
        ):
            validate_config(cfg)

    def test_fsdp1_cpu_offload_pin_memory_not_supported(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            fsdp_config={
                "cpu_offload_pin_memory": False,
                "offload_params": True,
            },
            fsdp_version=1,
        )
        with pytest.raises(
            ValueError,
            match="FSDP1 does not support disabling cpu_offload_pin_memory, please set `fsdp_version` to 2",
        ):
            validate_config(cfg)

    def test_fsdp2_cpu_offload_pin_memory_w_offload_params(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            fsdp_config={
                "cpu_offload_pin_memory": False,
                "offload_params": True,
            },
            fsdp_version=2,
        )
        validated_cfg = validate_config(cfg)
        assert validated_cfg.fsdp_config.cpu_offload_pin_memory is False
        assert validated_cfg.fsdp_config.offload_params is True

    def test_fsdp_prefixes_removed(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            fsdp_config={
                "fsdp_version": 2,
                "fsdp_auto_wrap_policy": "TRANSFORMER_BASED_WRAP",
                "fsdp_transformer_layer_cls_to_wrap": "LlamaDecoderLayer",
                "fsdp_reshard_after_forward": True,
            }
        )
        cfg = validate_config(cfg)
        assert cfg.fsdp_version == 2
        assert cfg.fsdp_config.fsdp_version == 2
        for key in cfg.fsdp_config.keys():
            if key != "fsdp_version":
                assert not key.startswith("fsdp_")
        assert cfg.fsdp_config.auto_wrap_policy == "TRANSFORMER_BASED_WRAP"
        assert cfg.fsdp_config.transformer_layer_cls_to_wrap == "LlamaDecoderLayer"
        assert cfg.fsdp_config.reshard_after_forward is True

    def test_muon_fsdp1_rejected(self, min_base_cfg):
        cfg = min_base_cfg | DictDefault(
            optimizer="muon",
            fsdp_version=1,
            fsdp_config={"reshard_after_forward": True},
        )
        with pytest.raises(
            ValueError, match="Muon optimizer is only compatible with FSDP2"
        ):
            validate_config(cfg)

    @pytest.mark.parametrize(
        "rl",
        [
            "dpo",
            "kto",
            "orpo",
            "ipo",
        ],
    )
    def test_fsdp2_dpo(self, min_base_cfg, rl):
        cfg = min_base_cfg | DictDefault(
            fsdp_version=2,
            fsdp_config={
                "reshard_after_forward": True,
            },
            rl=rl,
            load_in_8bit=True,
            adapter="lora",
            remove_unused_columns=False,
        )
        with pytest.raises(
            ValueError,
            match="FSDP2 does not support load_in_8bit or load_in_4bit with ",
        ):
            validate_config(cfg)
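

# Hedged sketch (not executed as a test): combining the constraints asserted
# above into a config that should pass validation. `min_base_cfg` is the shared
# pytest fixture supplying the minimal required base fields; exactly which
# fields it sets is an assumption here and is not reproduced.
#
#   cfg = min_base_cfg | DictDefault(
#       fsdp_version=2,
#       fsdp_config={
#           "reshard_after_forward": True,
#           "offload_params": True,
#           "cpu_offload_pin_memory": False,  # legal on FSDP2 when offloading
#       },
#       optimizer="adamw_torch_8bit",  # adamw_8bit is rejected under FSDP2
#   )
#   cfg = validate_config(cfg)
#   assert cfg.fsdp_config.fsdp_version == 2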