axolotl/tests/e2e/test_qat.py
1eaf4d7418 add: support mxfp4 axo (#3375)
* mxfp4 axo

* import lint

* test for qat mxfp4

* config for mxfp4

* add qat:

* pass base config

* MXFakeQuantizeConfig

* lint

* tune config so it fits in 32GB VRAM

---------

Co-authored-by: Wing Lian <wing@axolotl.ai>
2026-03-05 13:40:45 -05:00
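
In short, this commit threads a new mxfp4 weight dtype through axolotl's QAT
schema. A minimal sketch of that surface, using only names the tests below
exercise (mxfp4 is a 32-element block format, so group_size must be 32; other
sizes pass schema validation and are only rejected later, at quantization time):

    from axolotl.utils.schemas.enums import TorchAOQuantDType
    from axolotl.utils.schemas.quantization import QATConfig

    # Schema-level construction only; group_size compatibility is checked
    # later in get_quantization_config, not here.
    qat = QATConfig(weight_dtype="mxfp4", group_size=32, quantize_embedding=False)
    assert qat.weight_dtype == TorchAOQuantDType.mxfp4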


"""
E2E tests for QAT
"""

from pathlib import Path

from axolotl.common.datasets import load_datasets, load_preference_datasets
from axolotl.train import train
from axolotl.utils.config import normalize_config, validate_config
from axolotl.utils.dict import DictDefault
from axolotl.utils.schemas.enums import TorchAOQuantDType
from axolotl.utils.schemas.quantization import QATConfig, validate_ao_dtype

from .utils import check_model_output_exists, check_tensorboard


class TestQATLlama:
"""
Test case for QAT Llama models
"""

    def test_qat(self, temp_dir):
cfg = DictDefault(
{
"base_model": "HuggingFaceTB/SmolLM2-135M",
"tokenizer_type": "AutoTokenizer",
"sequence_len": 1024,
"special_tokens": {
"pad_token": "<|endoftext|>",
},
"datasets": [
{
"path": "mlabonne/FineTome-100k",
"type": "chat_template",
"field_messages": "conversations",
"message_property_mappings": {
"role": "from",
"content": "value",
},
"drop_system_message": True,
"split": "train[:1%]",
},
],
"chat_template": "chatml",
"qat": {
"quantize_embedding": True,
"activation_dtype": "int8",
"weight_dtype": "int4",
"group_size": 8,
},
"num_epochs": 1,
"micro_batch_size": 1,
"gradient_accumulation_steps": 2,
"output_dir": temp_dir,
"learning_rate": 0.00001,
"optimizer": "adamw_bnb_8bit",
"lr_scheduler": "cosine",
"max_steps": 5,
"bf16": True,
"save_first_step": False,
}
)
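
        # Validate and normalize the config, then run a short 5-step training
        # job and confirm a checkpoint was written.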
cfg = validate_config(cfg)
normalize_config(cfg)
dataset_meta = load_datasets(cfg=cfg)
train(cfg=cfg, dataset_meta=dataset_meta)
check_model_output_exists(Path(temp_dir) / "checkpoint-5", cfg)

    def test_qat_dpo(self, temp_dir):
cfg = DictDefault(
{
"base_model": "HuggingFaceTB/SmolLM2-135M",
"sequence_len": 2048,
"sample_packing": False,
"eval_sample_packing": False,
"pad_to_sequence_len": True,
"val_set_size": 0.01,
"special_tokens": {
"pad_token": "<|endoftext|>",
},
"rl": "dpo",
"chat_template": "chatml",
"datasets": [
{
"path": "fozziethebeat/alpaca_messages_2k_dpo_test",
"type": "chat_template.default",
"field_messages": "conversation",
"field_chosen": "chosen",
"field_rejected": "rejected",
"message_field_role": "role",
"message_field_content": "content",
"roles": {
"system": ["system"],
"user": ["user"],
"assistant": ["assistant"],
},
},
],
"num_epochs": 1,
"max_steps": 5,
"micro_batch_size": 2,
"gradient_accumulation_steps": 2,
"output_dir": temp_dir,
"warmup_steps": 0,
"learning_rate": 0.00001,
"optimizer": "adamw_torch_fused",
"lr_scheduler": "cosine",
"flash_attention": True,
"use_tensorboard": True,
"bf16": True,
"qat": {
"quantize_embedding": True,
"activation_dtype": "int8",
"weight_dtype": "int4",
"group_size": 8,
},
"save_first_step": False,
}
)
cfg = validate_config(cfg)
normalize_config(cfg)
dataset_meta = load_preference_datasets(cfg=cfg)
train(cfg=cfg, dataset_meta=dataset_meta)
check_model_output_exists(Path(temp_dir) / "checkpoint-5", cfg)
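
        # Loose upper bound on the final logged train loss, read back from the
        # TensorBoard run directory written under output_dir.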
loss_threshold = 2.3
check_tensorboard(
temp_dir + "/runs",
"train/train_loss",
loss_threshold,
"Train Loss (%s) is too high",
)


class TestMXFP4Schema:
    """Test MXFP4 schema validation"""

    def test_validate_mxfp4_dtype(self):
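        """validate_ao_dtype maps the "mxfp4" string to the TorchAOQuantDType enum"""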
result = validate_ao_dtype("mxfp4")
assert result == TorchAOQuantDType.mxfp4

    def test_qat_config_with_mxfp4(self):
"""Test QATConfig accepts mxfp4 weight_dtype"""
config = QATConfig(
weight_dtype="mxfp4",
group_size=32,
quantize_embedding=False,
)
assert config.weight_dtype == TorchAOQuantDType.mxfp4
assert config.group_size == 32

    def test_qat_config_mxfp4_invalid_group_size(self):
"""Test that invalid group_size raises appropriate error during quantization"""
# Note: Schema validation doesn't check group_size compatibility,
# that happens in get_quantization_config
config = QATConfig(
weight_dtype="mxfp4",
group_size=16, # Invalid for mxfp4, but schema allows it
)
assert config.group_size == 16 # Schema accepts it
# Actual validation happens at runtime in get_quantization_config
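
As a postscript to that note: a hedged sketch of what the runtime-level check
might look like as a test. The module path, signature, and exception type for
get_quantization_config are assumptions here; none of them appear in this file.

    import pytest

    from axolotl.utils.schemas.quantization import QATConfig

    def test_mxfp4_group_size_rejected_at_runtime():
        # Assumed import path; get_quantization_config is only referenced by
        # name in the file's comments.
        from axolotl.utils.quantization import get_quantization_config

        config = QATConfig(weight_dtype="mxfp4", group_size=16)
        with pytest.raises(Exception):  # exact exception type is an assumption
            get_quantization_config(config)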