* bump transformers and trl * fix: update trainer.log signature * fix trl trainer.log interfaces * broken 🦥 with latest transformers * skip parent, call grandparent - yeah, super janky * update HF HUB env var and fix reward trainer log since it doesn't directly override log * also bump accelerate * patches for llama ga * detab the code to check * fix whitespace for patch check * play nicely with CI tests since we patch everytime * fix pop default in case it doesn't exist * more tweaks to make patches nicer in CI * fix detab for when there are possibly multiple patches --------- Co-authored-by: NanoCode012 <nano@axolotl.ai>
26 lines
954 B
Python
26 lines
954 B
Python
""""Test module for checking whether the Hugging Face Transformers is working as expected."""
|
|
import unittest
|
|
|
|
from axolotl.monkeypatch.trainer_grad_accum import (
|
|
check_forward_is_patchable,
|
|
check_training_step_is_patchable,
|
|
)
|
|
|
|
|
|
class TestTrainerGAIntegration(unittest.TestCase):
    """llama monkeypatch integration tests."""

    def test_train_step_patchable(self):
        # Drift guard: the monkeypatch only applies cleanly while the
        # installed transformers Trainer.training_step source still matches
        # the code our patch was written against.
        failure_message = (
            "HF transformers Trainer.training_step has changed and isn't patchable"
        )
        self.assertTrue(check_training_step_is_patchable(), failure_message)

    def test_model_forward_patchable(self):
        # Same drift guard, this time for LlamaForCausalLM.forward.
        failure_message = (
            "HF transformers LlamaForCausalLM.forward has changed and isn't patchable"
        )
        self.assertTrue(check_forward_is_patchable(), failure_message)
|