default to qlora support, make gptq-specific image

This commit is contained in:
Wing Lian
2023-05-29 20:34:41 -04:00
parent e43bcc6c4f
commit 6ef96f569b
5 changed files with 25 additions and 49 deletions

View File

@@ -18,10 +18,22 @@ jobs:
cuda_version: 11.8.0
cuda_version_bnb: "118"
pytorch: 2.0.0
axolotl_extras:
- cuda: cu117
cuda_version: 11.7.0
cuda_version_bnb: "117"
pytorch: 1.13.1
axolotl_extras:
- cuda: cu118
cuda_version: 11.8.0
cuda_version_bnb: "118"
pytorch: 2.0.0
axolotl_extras: gptq
- cuda: cu117
cuda_version: 11.7.0
cuda_version_bnb: "117"
pytorch: 1.13.1
axolotl_extras: gptq
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -43,7 +55,7 @@ jobs:
context: .
file: ./docker/Dockerfile-base
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.metadata.outputs.tags }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
tags: ${{ steps.metadata.outputs.tags }}-${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
labels: ${{ steps.metadata.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
@@ -52,3 +64,4 @@ jobs:
CUDA_VERSION_BNB=${{ matrix.cuda_version_bnb }}
CUDA=${{ matrix.cuda }}
PYTORCH_VERSION=${{ matrix.pytorch }}
AXOLOTL_EXTRAS=${{ matrix.axolotl_extras }}

View File

@@ -30,7 +30,7 @@
```bash
git clone https://github.com/OpenAccess-AI-Collective/axolotl
pip3 install -e .[int4]
pip3 install -e .[gptq]
accelerate config
@@ -57,7 +57,7 @@ accelerate launch scripts/finetune.py examples/lora-openllama-3b/config.yml \
1. Install python **3.9**
2. Install python dependencies with ONE of the following:
- `pip3 install -e .[int4]` (recommended)
- `pip3 install -e .[gptq]` (recommended)
- `pip3 install -e .[int4_triton]`
- `pip3 install -e .`

View File

@@ -2,6 +2,7 @@ ARG BASE_TAG=main-base
FROM winglian/axolotl-base:$BASE_TAG
ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
ARG AXOLOTL_EXTRAS=""
RUN apt-get update && \
apt-get install -y vim curl
@@ -13,8 +14,13 @@ RUN python3 -m pip install -U --no-cache-dir pydantic
RUN mkdir axolotl
COPY . axolotl/
# If AXOLOTL_EXTRAS is set, append it in brackets
RUN cd axolotl && \
pip install -e .[int4]
if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
pip install -e .[$AXOLOTL_EXTRAS]; \
else \
pip install -e .; \
fi
# helper for huggingface-login cli
RUN git config --global credential.helper store

View File

@@ -1,43 +0,0 @@
#!/bin/bash
export WANDB_MODE=offline
export WANDB_CACHE_DIR=/workspace/data/wandb-cache
mkdir -p $WANDB_CACHE_DIR
mkdir -p /workspace/data/huggingface-cache/{hub,datasets}
export HF_DATASETS_CACHE="/workspace/data/huggingface-cache/datasets"
export HUGGINGFACE_HUB_CACHE="/workspace/data/huggingface-cache/hub"
export TRANSFORMERS_CACHE="/workspace/data/huggingface-cache/hub"
export NCCL_P2P_DISABLE=1
nvidia-smi
num_gpus=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
gpu_indices=$(seq 0 $((num_gpus - 1)) | paste -sd "," -)
export CUDA_VISIBLE_DEVICES=$gpu_indices
echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
apt-get update
apt-get install -y build-essential ninja-build vim git-lfs
git lfs install
pip3 install --force-reinstall https://download.pytorch.org/whl/nightly/cu117/torch-2.0.0.dev20230301%2Bcu117-cp38-cp38-linux_x86_64.whl --index-url https://download.pytorch.org/whl/nightly/cu117
if [ -z "${TORCH_CUDA_ARCH_LIST}" ]; then # only set this if not set yet
# this covers most common GPUs that the installed version of pytorch supports
# python -c "import torch; print(torch.cuda.get_arch_list())"
export TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
fi
# install flash-attn and deepspeed from pre-built wheels for this specific container b/c these take forever to install
mkdir -p /workspace/wheels
cd /workspace/wheels
curl -L -O https://github.com/OpenAccess-AI-Collective/axolotl/raw/wheels/wheels/deepspeed-0.9.2%2B7ddc3b01-cp38-cp38-linux_x86_64.whl
curl -L -O https://github.com/OpenAccess-AI-Collective/axolotl/raw/wheels/wheels/flash_attn-1.0.4-cp38-cp38-linux_x86_64.whl
pip install deepspeed-0.9.2%2B7ddc3b01-cp38-cp38-linux_x86_64.whl
pip install flash_attn-1.0.4-cp38-cp38-linux_x86_64.whl
pip install "peft @ git+https://github.com/huggingface/peft.git@main" --force-reinstall --no-dependencies
cd /workspace/
git clone https://github.com/OpenAccess-AI-Collective/axolotl.git
cd axolotl
pip install -e .[int4]
mkdir -p ~/.cache/huggingface/accelerate/
cp configs/accelerate/default_config.yaml ~/.cache/huggingface/accelerate/default_config.yaml

View File

@@ -17,10 +17,10 @@ setup(
packages=find_packages(),
install_requires=install_requires,
extras_require={
"int4": [
"gptq": [
"alpaca_lora_4bit @ git+https://github.com/winglian/alpaca_lora_4bit.git@setup_pip",
],
"int4_triton": [
"gptq_triton": [
"alpaca_lora_4bit[triton] @ git+https://github.com/winglian/alpaca_lora_4bit.git@setup_pip",
],
"extras": [