add output for train loss in assertion error
This commit is contained in:
@@ -90,7 +90,7 @@ class TestKnowledgeDistillation:
|
||||
train(cfg=cfg, dataset_meta=dataset_meta)
|
||||
assert (Path(temp_dir) / "model.safetensors").exists()
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/loss", 1.0, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/loss", 1.0, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -121,5 +121,5 @@ class TestKnowledgeDistillation:
|
||||
train(cfg=cfg, dataset_meta=dataset_meta)
|
||||
assert (Path(temp_dir) / "adapter_model.safetensors").exists()
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/loss", 1.0, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/loss", 1.0, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@@ -89,5 +89,5 @@ class TestPackedFlex:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.0, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@@ -96,5 +96,5 @@ class TestMultiGPUGemma3:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 1.8, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 1.8, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@@ -94,7 +94,7 @@ class TestMultiGPULlama:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -159,7 +159,7 @@ class TestMultiGPULlama:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
def test_dpo_lora_ddp(self, temp_dir):
|
||||
@@ -385,7 +385,7 @@ class TestMultiGPULlama:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -457,7 +457,7 @@ class TestMultiGPULlama:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@require_torch_2_6_0
|
||||
@@ -538,7 +538,7 @@ class TestMultiGPULlama:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.1, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.1, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
def test_fsdp_qlora_prequant_packed(self, temp_dir):
|
||||
@@ -618,7 +618,7 @@ class TestMultiGPULlama:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -702,7 +702,7 @@ class TestMultiGPULlama:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -776,7 +776,7 @@ class TestMultiGPULlama:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -850,7 +850,7 @@ class TestMultiGPULlama:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@pytest.mark.skip(
|
||||
@@ -917,5 +917,5 @@ class TestMultiGPULlama:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 4.0, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 4.0, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@@ -80,7 +80,7 @@ class TestMultiGPURay:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@require_torch_lt_2_6_0
|
||||
@@ -138,5 +138,5 @@ class TestMultiGPURay:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.3, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@@ -93,7 +93,7 @@ class TestSequenceParallelism:
|
||||
)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.6, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.6, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
|
||||
@@ -86,5 +86,5 @@ class TestFAXentropyLlama:
|
||||
check_model_output_exists(temp_dir, cfg)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 1.5, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 1.5, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@@ -80,7 +80,7 @@ class TestUnslothQLoRA:
|
||||
check_model_output_exists(temp_dir, cfg)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.0, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
def test_unsloth_llama_qlora_unpacked(self, temp_dir):
|
||||
@@ -130,7 +130,7 @@ class TestUnslothQLoRA:
|
||||
check_model_output_exists(temp_dir, cfg)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.0, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -185,5 +185,5 @@ class TestUnslothQLoRA:
|
||||
check_model_output_exists(temp_dir, cfg)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.0, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@@ -69,5 +69,5 @@ class TestPackedFlex(unittest.TestCase):
|
||||
train(cfg=cfg, dataset_meta=dataset_meta)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.0, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@@ -84,5 +84,5 @@ class TestPretrainLlama:
|
||||
temp_dir + "/runs",
|
||||
"train/train_loss",
|
||||
loss_threshold,
|
||||
"Train Loss is too high",
|
||||
"Train Loss (%s) is too high",
|
||||
)
|
||||
|
||||
@@ -68,5 +68,5 @@ class TestPackedLlama(unittest.TestCase):
|
||||
train(cfg=cfg, dataset_meta=dataset_meta)
|
||||
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.0, "Train loss (%s) is too high"
|
||||
)
|
||||
|
||||
@@ -73,6 +73,6 @@ class TestRewardModelLoraSmolLM2(unittest.TestCase):
|
||||
|
||||
train(cfg=cfg, dataset_meta=dataset_meta)
|
||||
check_tensorboard(
|
||||
temp_dir + "/runs", "train/train_loss", 2.5, "Train Loss is too high"
|
||||
temp_dir + "/runs", "train/train_loss", 2.5, "Train loss (%s) is too high"
|
||||
)
|
||||
check_model_output_exists(temp_dir, cfg)
|
||||
|
||||
Reference in New Issue
Block a user