bump accelerate to 0.34.2 (#1901)

* bump accelerate

* add fixture to predownload the test model

* change fixture
This commit is contained in:
Wing Lian
2024-09-07 14:39:31 -04:00
committed by GitHub
parent 6e354682e3
commit 3853ab7ae9
3 changed files with 11 additions and 1 deletion

View File

@@ -1,6 +1,9 @@
# Multi-GPU e2e test workflow: runs on PRs that touch the multi-GPU tests,
# on manual dispatch, and on a twice-weekly schedule.
name: docker-multigpu-tests-biweekly

on:
  pull_request:
    paths:
      - 'tests/e2e/multigpu/*.py'
  workflow_dispatch:
  schedule:
    - cron: '0 0 * * 1,4' # 00:00 UTC every Monday & Thursday

View File

@@ -4,7 +4,7 @@ peft==0.12.0
transformers==4.44.2
tokenizers>=0.19.1
bitsandbytes==0.43.3
accelerate==0.34.2
datasets==2.20.0
deepspeed==0.14.4
pydantic==2.6.3

View File

@@ -10,6 +10,7 @@ from pathlib import Path
import pytest
import yaml
from accelerate.test_utils import execute_subprocess_async
from huggingface_hub import snapshot_download
from axolotl.utils.dict import DictDefault
@@ -19,6 +20,12 @@ LOG = logging.getLogger("axolotl.tests.e2e.multigpu")
# Keep Weights & Biases from activating during test runs (wandb honors this env var).
os.environ["WANDB_DISABLED"] = "true"
@pytest.fixture(scope="session", autouse=True)
def download_model():
    """Pre-fetch the TinyLlama test model once per test session.

    Session-scoped and autouse, so the snapshot is pulled into the local
    Hugging Face cache before any multi-GPU test launches subprocesses —
    avoiding each subprocess racing to download the same model.
    """
    snapshot_download("TinyLlama/TinyLlama_v1.1")
class TestMultiGPULlama(unittest.TestCase):
"""
Test case for Llama models using LoRA