first pass at modifying tests to use llama-7m
This commit is contained in:
@@ -496,6 +496,12 @@ def dataset_fozziethebeat_alpaca_messages_2k_dpo_test_rev_ea82cff(
|
||||
return datasets.load_from_disk(ds_path)["train"]
|
||||
|
||||
|
||||
@pytest.fixture(scope="session", autouse=True)
def download_tiny_llama_7m_model():
    """Pre-fetch the tiny llama-7m test model once per test session.

    Runs automatically (autouse) so every test in the session can rely on
    the snapshot being cached locally.  Returns the local path reported by
    the retry-wrapped snapshot download helper.
    """
    repo_id = "axolotl-ai-internal/llama-7m"
    # download the model snapshot (retried on transient hub failures)
    return snapshot_download_w_retry(repo_id, repo_type="model")
|
||||
|
||||
|
||||
# # pylint: disable=redefined-outer-name,unused-argument
|
||||
# def test_load_fixtures(
|
||||
# download_smollm2_135m_model,
|
||||
|
||||
@@ -8,7 +8,7 @@ from transformers.models.auto.tokenization_auto import AutoTokenizer
|
||||
|
||||
from axolotl.utils.callbacks.perplexity import Perplexity
|
||||
|
||||
# Tiny model used by the perplexity-callback tests.  (Previously
# "HuggingFaceTB/SmolLM2-135M"; that assignment was a dead store
# immediately overwritten, so only the effective value is kept.)
MODEL_NAME = "axolotl-ai-internal/llama-7m"
|
||||
|
||||
|
||||
@fixture()
|
||||
|
||||
Reference in New Issue
Block a user