bump accelerate to 0.34.2 (#1901)
* bump accelerate
* add fixture to pre-download the test model
* change fixture
This commit is contained in:
@@ -10,6 +10,7 @@ from pathlib import Path
|
||||
import pytest
|
||||
import yaml
|
||||
from accelerate.test_utils import execute_subprocess_async
|
||||
from huggingface_hub import snapshot_download
|
||||
|
||||
from axolotl.utils.dict import DictDefault
|
||||
|
||||
@@ -19,6 +20,12 @@ LOG = logging.getLogger("axolotl.tests.e2e.multigpu")
|
||||
os.environ["WANDB_DISABLED"] = "true"
|
||||
|
||||
|
||||
@pytest.fixture(scope="session", autouse=True)
def download_model():
    """Pre-download the TinyLlama test model once per pytest session.

    Session-scoped and autouse, so the snapshot is fetched (or found in the
    local Hugging Face cache) before any test runs, avoiding per-test
    download latency in the multi-GPU e2e suite.
    """
    # download the model
    snapshot_download("TinyLlama/TinyLlama_v1.1")
|
||||
|
||||
|
||||
class TestMultiGPULlama(unittest.TestCase):
|
||||
"""
|
||||
Test case for Llama models using LoRA
|
||||
|
||||
Reference in New Issue
Block a user