* upgrade transformers==5.3.0 trl==0.29.0 kernels * use latest deepspeed fixes * use correct image for cleanup * fix test outputs for tokenizer fixes upstream * fix imports * keep trl at 0.28.0 * handle updated API * use latest trl since 0.28.0 doesn't work with latest transformers * use trl experimental for pad to length * monkeypatch trl with ORPOTrainer so liger doesn't croak * upgrade accelerate * more fixes * move patch for orpotrainer * load the imports later * remove use_logits_to_keep * fix loss_type arg as a list * fetch hf cache from s3 * just manually download the missing model for now * lint for pre-commit update * a few more missing models on disk * fix: loss_type internally now list * fix: remove deprecated code and raise deprecation * fix: remove unneeded blocklist * fix: remove reliance on transformers api to find package available * chore: refactor shim for less side effects * fix: silence trl experimental warning --------- Co-authored-by: NanoCode012 <nano@axolotl.ai>
62 lines
1.8 KiB
Bash
Executable File
62 lines
1.8 KiB
Bash
Executable File
#!/bin/bash

# CI test runner: verify the installed torch build, pre-fetch model weights,
# then run the test suite in stages with cumulative coverage.
#
# Required env vars:
#   PYTORCH_VERSION - expected torch version substring (e.g. "2.5.1")

# -e: abort on first failing command; pipefail: a pipeline fails if any
# stage fails (matters if the commented-out curl|tar cache fetch is revived).
set -eo pipefail

# Fail fast with a clear message if the version to check against is missing.
: "${PYTORCH_VERSION:?PYTORCH_VERSION must be set}"

# Read the env var inside python rather than shell-interpolating it into the
# -c string, so unusual characters in the value cannot break the snippet.
python -c "import os, torch; assert os.environ['PYTORCH_VERSION'] in torch.__version__"
|
|
|
|
# curl -L https://axolotl-ci.b-cdn.net/hf-cache.tar.zst | tar -xpf - -C "${HF_HOME}/hub/" --use-compress-program unzstd --strip-components=1

# Pre-fetch the model weights the tests depend on into the local HF cache,
# one at a time, in the same order as before.
models=(
  "NousResearch/Meta-Llama-3-8B"
  "NousResearch/Meta-Llama-3-8B-Instruct"
  "microsoft/Phi-4-reasoning"
  "microsoft/Phi-3.5-mini-instruct"
)
for model in "${models[@]}"; do
  hf download "$model"
done
|
|
|
|
# Run unit tests with initial coverage report.
# -n8 runs eight xdist workers; e2e/patched/cli suites are run separately below.
pytest -v --durations=10 -n8 \
  --cov=axolotl \
  --ignore=tests/e2e/ \
  --ignore=tests/patched/ \
  --ignore=tests/cli \
  /workspace/axolotl/tests/
|
|
|
|
# Run lora kernels tests, appending to the existing coverage data.
pytest -v --durations=10 \
  --cov=axolotl --cov-append \
  /workspace/axolotl/tests/e2e/patched/lora_kernels
|
|
|
|
# Run patched tests (excluding lora kernels, already covered above) with
# coverage append. --full-trace/-vvv keep maximum detail for CI debugging.
pytest --full-trace -vvv --durations=10 \
  --cov=axolotl --cov-append \
  --ignore=tests/e2e/patched/lora_kernels \
  /workspace/axolotl/tests/e2e/patched
|
|
|
|
# Run solo tests with coverage append; -n1 forces a single xdist worker so
# these tests do not share a process with each other.
pytest -v --durations=10 -n1 \
  --cov=axolotl --cov-append \
  /workspace/axolotl/tests/e2e/solo/
|
|
|
|
# Run integration tests with coverage append.
pytest -v --durations=10 \
  --cov=axolotl --cov-append \
  /workspace/axolotl/tests/e2e/integrations/
|
|
|
|
# Run CLI tests with coverage append.
pytest -v --durations=10 \
  --cov=axolotl --cov-append \
  /workspace/axolotl/tests/cli
|
|
|
|
# Run the remaining e2e tests (everything not covered by the staged runs
# above; multigpu is skipped here entirely), append coverage, and write the
# final combined report to e2e-coverage.xml for upload.
pytest -v --durations=10 \
  --cov=axolotl --cov-append \
  --cov-report=xml:e2e-coverage.xml \
  --ignore=tests/e2e/solo/ \
  --ignore=tests/e2e/patched/ \
  --ignore=tests/e2e/multigpu/ \
  --ignore=tests/e2e/integrations/ \
  --ignore=tests/cli \
  /workspace/axolotl/tests/e2e/
|
|
|
|
# Upload the combined e2e coverage report. Quote the token and flag values
# (SC2086) so empty/odd values don't word-split; '|| true' keeps a codecov
# outage from failing the whole CI run (intentional best-effort).
codecov upload-process -t "$CODECOV_TOKEN" -f e2e-coverage.xml -F "e2e,pytorch-${PYTORCH_VERSION}" || true
|