fix tests

Author: Wing Lian
Date: 2026-04-23 23:47:28 +00:00
parent 5db4272f69
commit f77408a3d0
7 changed files with 42 additions and 20 deletions


@@ -38,14 +38,16 @@ def min_cfg(temp_dir):
             "num_epochs": 1,
             "micro_batch_size": 8,
             "gradient_accumulation_steps": 1,
-            "learning_rate": 0.00001,
+            "learning_rate": 5e-4,
             "optimizer": "adamw_torch_fused",
             "output_dir": temp_dir,
             "lr_scheduler": "cosine",
-            "max_steps": 10,
+            "max_steps": 40,
             "warmup_steps": 5,
             "bf16": "auto",
             "save_first_step": False,
+            "use_tensorboard": True,
+            "seed": 42,
         }
@@ -72,8 +74,8 @@ class TestCutCrossEntropyIntegration:
             temp_dir + "/runs",
             initial_window=5,
             final_window=5,
-            max_initial=5.0,
-            max_final=4.7,
+            max_initial=2.2,
+            max_final=2.0,
         )

     def test_qwen2_w_cce(self, temp_dir):
@@ -106,6 +108,7 @@ class TestCutCrossEntropyIntegration:
                 "bf16": "auto",
                 "save_first_step": False,
                 "use_tensorboard": True,
+                "seed": 42,
             }
         )
         cfg = validate_config(cfg)
@@ -159,6 +162,6 @@ class TestCutCrossEntropyIntegration:
             temp_dir + "/runs",
             initial_window=5,
             final_window=5,
-            max_initial=5.0,
-            max_final=4.7,
+            max_initial=2.2,
+            max_final=2.0,
         )
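
For reference, a quick sketch of the arithmetic these tightened thresholds encode, combined with the helper's new 0.95 max_loss_ratio default (thresholds are the ones in the hunks above; the window means are hypothetical):

# Hypothetical window means for a tiny-model CCE run; thresholds from above.
initial = 2.1  # mean of the first 5 logged train losses
final = 1.9    # mean of the last 5

max_initial, max_final, max_loss_ratio = 2.2, 2.0, 0.95

assert initial <= max_initial             # 2.1 <= 2.2: sane starting scale
assert final <= max_final                 # 1.9 <= 2.0: absolute final bound
assert final <= initial * max_loss_ratio  # 1.9 <= 1.995: at least a 5% drop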


@@ -56,7 +56,8 @@ class TestDistMuon:
                     },
                 ],
                 "num_epochs": 1,
-                "max_steps": 20,
+                "max_steps": 30,
+                "warmup_steps": 3,
                 "micro_batch_size": 2,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,
@@ -118,7 +119,8 @@ class TestDistMuon:
                 "lora_dropout": 0.05,
                 "lora_target_linear": True,
                 "num_epochs": 1,
-                "max_steps": 20,
+                "max_steps": 30,
+                "warmup_steps": 3,
                 "micro_batch_size": 2,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,


@@ -133,10 +133,11 @@ class TestFSDP1:
                 "load_in_4bit": adapter_config["load_in_4bit"],
                 "lora_r": 8,
                 "lora_alpha": 16,
-                "lora_dropout": 0.05,
+                "lora_dropout": 0.0,
                 "lora_target_linear": True,
                 "num_epochs": 1,
-                "max_steps": 20,
+                "max_steps": 30,
+                "warmup_steps": 3,
                 "micro_batch_size": 2,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,


@@ -314,7 +314,8 @@ class TestFSDP2:
                 "lora_alpha": 16,
                 "lora_target_linear": True,
                 "num_epochs": 1,
-                "max_steps": 20,
+                "max_steps": 30,
+                "warmup_steps": 3,
                 "micro_batch_size": 2,
                 "gradient_accumulation_steps": 1,
                 "output_dir": temp_dir,


@@ -57,12 +57,14 @@ class TestFalcon(unittest.TestCase):
                 "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 50,
+                "warmup_steps": 5,
                 "logging_steps": 1,
                 "save_steps": 50,
                 "eval_steps": 50,
                 "bf16": "auto",
                 "save_first_step": False,
                 "use_tensorboard": True,
+                "seed": 42,
             }
         )
@@ -120,12 +122,14 @@ class TestFalcon(unittest.TestCase):
                 "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 50,
+                "warmup_steps": 5,
                 "logging_steps": 1,
                 "save_steps": 50,
                 "eval_steps": 50,
                 "bf16": "auto",
                 "save_first_step": False,
                 "use_tensorboard": True,
+                "seed": 42,
             }
         )
@@ -169,12 +173,14 @@ class TestFalcon(unittest.TestCase):
                 "optimizer": "adamw_torch_fused",
                 "lr_scheduler": "cosine",
                 "max_steps": 50,
+                "warmup_steps": 5,
                 "logging_steps": 1,
                 "save_steps": 50,
                 "eval_steps": 50,
                 "bf16": "auto",
                 "save_first_step": False,
                 "use_tensorboard": True,
+                "seed": 42,
             }
         )


@@ -53,12 +53,14 @@ class TestPhi(unittest.TestCase):
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
                 "max_steps": 50,
+                "warmup_steps": 5,
                 "logging_steps": 1,
                 "save_steps": 50,
                 "eval_steps": 50,
                 "bf16": "auto",
                 "save_first_step": False,
                 "use_tensorboard": True,
+                "seed": 42,
             }
         )
         cfg = validate_config(cfg)
@@ -111,12 +113,14 @@ class TestPhi(unittest.TestCase):
                 "lr_scheduler": "cosine",
                 "flash_attention": True,
                 "max_steps": 50,
+                "warmup_steps": 5,
                 "logging_steps": 1,
                 "save_steps": 50,
                 "eval_steps": 50,
                 "bf16": "auto",
                 "save_first_step": False,
                 "use_tensorboard": True,
+                "seed": 42,
             }
         )
         cfg = validate_config(cfg)


@@ -207,9 +207,10 @@ def check_tensorboard_loss_decreased(
     min_delta: float | None = None,
     max_initial: float | None = None,
     max_final: float | None = None,
-    max_loss_ratio: float = 1.10,
+    max_loss_ratio: float = 0.95,
 ) -> None:
-    """Check that training didn't regress — loss stayed in a sensible range.
+    """Check that training actually learned — loss went down and stayed in
+    a sensible range.

     Used with the tiny ``axolotl-ai-co/tiny-*`` CI models, where pretraining
     was brief enough that final loss won't clear the absolute thresholds used
@@ -228,14 +229,17 @@
        known-good run. Both are optional but strongly encouraged — loss
        going *down* from a bad starting scale still looks like "learning."

-    2. **Training diverged.** ``max_loss_ratio`` (default 1.10) requires
-       ``final <= initial * ratio``. Allows small noise in flat-loss cases
-       (common with tiny pretrained models that start near optimum), but
-       a final loss 10%+ above initial flags instability / NaNs / drift.
+    2. **Loss didn't go down enough.** ``max_loss_ratio`` (default 0.95)
+       requires ``final <= initial * ratio``. A default below 1.0 means the
+       final window mean must sit at least 5% below the initial window mean
+       — real learning, not noise that happened to land below start. Only
+       raise this for configs where a smaller drop is expected *and*
+       documented (e.g. DPO with near-trivial pairs); in that case you are
+       intentionally weakening the test.

     ``min_delta`` is optional; when set, additionally requires
     ``final + min_delta <= initial`` — use for configs with enough signal
-    to demand a strict decrease.
+    to demand a specific minimum absolute drop.
     """
     tb_log_path = most_recent_subdir(temp_run_dir)
     event_file = os.path.join(tb_log_path, sorted(os.listdir(tb_log_path))[0])
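
As a usage sketch, this is how the CCE tests above invoke the helper after this change (pattern taken from the call sites in this commit; temp_dir is assumed to be the pytest temp dir the trainer writes tensorboard events under):

check_tensorboard_loss_decreased(
    temp_dir + "/runs",
    initial_window=5,  # mean over the first 5 logged train losses
    final_window=5,    # mean over the last 5
    max_initial=2.2,   # absolute bound on the starting scale
    max_final=2.0,     # absolute bound on the final mean
    # max_loss_ratio is left at its new 0.95 default: the final window
    # must also land at least 5% below the initial window.
)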
@@ -270,10 +274,11 @@
         )
     assert final > 1e-5, "Expected loss to be greater than zero"
     assert final <= initial * max_loss_ratio, (
-        f"Loss regressed for {chosen_tag}: "
+        f"Loss did not decrease for {chosen_tag}: "
         f"initial(mean of first {initial_window})={initial:.4f}, "
         f"final(mean of last {final_window})={final:.4f}, "
-        f"ratio={final / initial:.4f} (max allowed {max_loss_ratio})"
+        f"ratio={final / initial:.4f} (max allowed {max_loss_ratio}). "
+        f"Expected final <= initial — training did not learn."
     )
     if min_delta is not None:
         assert final + min_delta <= initial, (
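
One worked example of why the separate min_delta gate exists: at small loss scales a 5% relative drop can be negligible in absolute terms. The numbers below are hypothetical:

initial, final = 0.50, 0.45
min_delta, max_loss_ratio = 0.10, 0.95

ratio_ok = final <= initial * max_loss_ratio  # 0.45 <= 0.475 -> True
delta_ok = final + min_delta <= initial       # 0.55 <= 0.50  -> False
# The ratio check passes but the min_delta check fails: for configs that
# set it, min_delta is the stricter gate at low loss scales.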