* Prepare for transformers v5 upgrade
* fix hf cli
* update for hf hub changes
* fix tokenizer apply_chat_template args
* remap include_tokens_per_second
* fix tps
* handle migration for warmup
* use latest hf hub
* Fix scan -> ls
* fix import
* fix for renaming of mistral common tokenizer -> backend
* update for fixed tokenization for llama
* Skip phi35 tests for now
* remove mistral patch fixed upstream in huggingface/transformers#41439
* use namespacing for patch
* don't rely on sdist for e2e tests for now
* run modal ci without waiting too
* Fix dep for ci
* fix imports
* Fix fp8 check
* fsdp2 fixes
* fix version handling
* update fsdp version tests for new v5 behavior
* Fail multigpu tests after 3 failures
* skip known v5 broken tests for now and cleanup
* bump deps
* unmark skipped test
* re-enable test_fsdp_qlora_prequant_packed test
* increase multigpu ci timeout
* skip broken gemma3 test
* reduce timeout back to original 120min now that the hanging test is skipped
* fix for unnecessary collator for pretraining with bsz=1
* fix: safe_serialization deprecated in transformers v5 rc01 (#3318)
* torch_dtype deprecated
* load model in float32 for consistency with tests
* revert some test fixtures back
* use hf cache ls instead of scan
* don't strip fsdp_version
  more fsdp_version fixes for v5
  fix version in fsdp_config
  fix aliasing
  fix fsdp_version check
  check fsdp_version is 2 in both places
* Transformers v5 rc2 (#3347)
* bump dep
* use latest fbgemm, grab model config as part of fixture, un-skip test
* import AutoConfig
* don't need more problematic autoconfig when specifying config.json manually
* add fixtures for argilla ultrafeedback datasets
* download phi4-reasoning
* fix arg
* update tests for phi fast tokenizer changes
* use explicit model types for gemma3

---------

Co-authored-by: Wing Lian <wing@axolotl.ai>

* fix: AutoModelForVision2Seq -> AutoModelForImageTextToText
* chore: remove duplicate
* fix: attempt fix gemma3 text mode
* chore: lint
* ga release of v5
* need property setter for name_or_path for mistral tokenizer
* vllm not compatible with transformers v5
* setter for chat_template w mistral too

---------

Co-authored-by: NanoCode012 <nano@axolotl.ai>
Co-authored-by: salman <salman.mohammadi@outlook.com>
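Two of the renames called out above (`torch_dtype` -> `dtype` in `from_pretrained`, and `AutoModelForVision2Seq` -> `AutoModelForImageTextToText`) are shown in a minimal sketch below. The model id is a placeholder chosen for illustration, not the checkpoint used in axolotl's code or tests.

```python
# Minimal sketch of two transformers v5 renames referenced in the commit message above.
# The model id is a placeholder for illustration only.
import torch
from transformers import AutoModelForImageTextToText

# pre-v5: AutoModelForVision2Seq.from_pretrained(..., torch_dtype=torch.bfloat16)
# v5:     the auto class is AutoModelForImageTextToText and the kwarg is `dtype`
model = AutoModelForImageTextToText.from_pretrained(
    "HuggingFaceTB/SmolVLM-Instruct",  # placeholder model id
    dtype=torch.bfloat16,
)
```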
name: docker-multigpu-tests-biweekly

on:
  pull_request:
    paths:
      - 'tests/e2e/multigpu/**.py'
      - 'requirements.txt'
      - 'setup.py'
      - 'pyproject.toml'
      - '.github/workflows/multi-gpu-e2e.yml'
      - 'src/axolotl/core/trainers/mixins/sequence_parallel.py'
      - 'src/axolotl/utils/distributed.py'
  workflow_dispatch:
  schedule:
    - cron: '0 0 * * 1,4' # Runs at 00:00 UTC every monday & thursday

# Cancel jobs on the same ref if a new one is triggered
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

env:
  MODAL_IMAGE_BUILDER_VERSION: "2025.06"

jobs:
  test-axolotl-multigpu:
    if: ${{ ! contains(github.event.commits[0].message, '[skip e2e]') && github.repository_owner == 'axolotl-ai-cloud' && (github.event_name != 'pull_request' || !github.event.pull_request.draft) }}
    strategy:
      fail-fast: false
      matrix:
        include:
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.11"
            pytorch: 2.8.0
            axolotl_extras: fbgemm-gpu
            num_gpus: 2
          - cuda: 128
            cuda_version: 12.8.1
            python_version: "3.11"
            pytorch: 2.9.1
            axolotl_extras: "fbgemm-gpu"
            num_gpus: 2
          - cuda: 129
            cuda_version: 12.9.1
            python_version: "3.12"
            pytorch: 2.9.1
            axolotl_extras: "fbgemm-gpu"
            num_gpus: 2
            dockerfile: "Dockerfile-uv.jinja"
          - cuda: 130
            cuda_version: 13.0.0
            python_version: "3.11"
            axolotl_extras:
            # axolotl_extras: fbgemm-gpu
            pytorch: 2.9.1
            num_gpus: 2
    runs-on: [self-hosted, modal]
    timeout-minutes: 120
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - name: Install Modal
        run: |
          python -m pip install --upgrade pip
          pip install modal==1.3.0.post1 jinja2
      - name: Update env vars
        run: |
          echo "BASE_TAG=main-base-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}" >> $GITHUB_ENV
          echo "PYTORCH_VERSION=${{ matrix.pytorch }}" >> $GITHUB_ENV
          echo "AXOLOTL_ARGS=${{ matrix.axolotl_args }}" >> $GITHUB_ENV
          echo "AXOLOTL_EXTRAS=${{ matrix.axolotl_extras }}" >> $GITHUB_ENV
          echo "CUDA=${{ matrix.cuda }}" >> $GITHUB_ENV
          echo "N_GPUS=${{ matrix.num_gpus }}" >> $GITHUB_ENV
          echo "CODECOV_TOKEN=${{ secrets.CODECOV_TOKEN }}" >> $GITHUB_ENV
          echo "E2E_DOCKERFILE=${{ matrix.dockerfile || 'Dockerfile.jinja' }}" >> $GITHUB_ENV
      - name: Run tests job on Modal
        run: |
          modal run -m cicd.multigpu