quickstart instructions for starting from runpod (#5)

Author: Wing Lian
Date: 2023-04-18 19:22:25 -04:00
Committed by: GitHub
Parent: 5cb7ea49a6
Commit: 0a472e1e08
10 changed files with 332 additions and 21 deletions

scripts/finetune.py

@@ -225,7 +225,14 @@ def train(
     )
     logging.info("Starting trainer...")
-    trainer.train(resume_from_checkpoint=cfg.resume_from_checkpoint)
+    resume_from_checkpoint = cfg.resume_from_checkpoint
+    if cfg.resume_from_checkpoint is None and cfg.auto_resume_from_checkpoints:
+        possible_checkpoints = [str(cp) for cp in Path(cfg.output_dir).glob("checkpoint-*")]
+        if len(possible_checkpoints) > 0:
+            sorted_paths = sorted(possible_checkpoints, key=lambda path: int(path.split('-')[-1]))
+            resume_from_checkpoint = sorted_paths[-1]
+            logging.info(f"Using Auto-resume functionality to start with checkpoint at {resume_from_checkpoint}")
+    trainer.train(resume_from_checkpoint=resume_from_checkpoint)

     if cfg.local_rank == 0:
         # TODO do we need this fix? https://huggingface.co/docs/accelerate/usage_guides/fsdp#saving-and-loading
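
The hunk above gates auto-resume behind a new auto_resume_from_checkpoints config flag and, crucially, orders the checkpoint directories by their integer suffix rather than lexicographically, so checkpoint-1000 is correctly treated as newer than checkpoint-999. A minimal standalone sketch of that ordering (the directory names are made up for illustration):

# made-up paths standing in for what Path(cfg.output_dir).glob("checkpoint-*") might return
possible_checkpoints = ["out/checkpoint-500", "out/checkpoint-1000", "out/checkpoint-1500"]

# a plain lexicographic sort gets the order wrong: "checkpoint-500" sorts after "checkpoint-1500"
print(sorted(possible_checkpoints)[-1])  # out/checkpoint-500

# sorting on the integer after the last "-" (as the hunk does) picks the true latest checkpoint
print(sorted(possible_checkpoints, key=lambda p: int(p.split("-")[-1]))[-1])  # out/checkpoint-1500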

scripts/setup-runpod.sh (new file, 34 lines)

@@ -0,0 +1,34 @@
#!/bin/bash
# run Weights & Biases offline and keep its cache on the persistent /workspace volume
export WANDB_MODE=offline
export WANDB_CACHE_DIR=/workspace/data/wandb-cache
mkdir -p $WANDB_CACHE_DIR

# keep the Hugging Face hub and datasets caches on the persistent volume as well
mkdir -p /workspace/data/huggingface-cache/{hub,datasets}
export HF_DATASETS_CACHE="/workspace/data/huggingface-cache/datasets"
export HUGGINGFACE_HUB_CACHE="/workspace/data/huggingface-cache/hub"
export TRANSFORMERS_CACHE="/workspace/data/huggingface-cache/hub"

# disable NCCL peer-to-peer transport (often needed on pods whose GPUs lack proper P2P support)
export NCCL_P2P_DISABLE=1
# show the available GPUs, then expose all of them via CUDA_VISIBLE_DEVICES (e.g. "0,1,2,3")
nvidia-smi
num_gpus=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
gpu_indices=$(seq 0 $((num_gpus - 1)) | paste -sd "," -)
export CUDA_VISIBLE_DEVICES=$gpu_indices
echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
# build tools for compiling CUDA/C++ extensions, plus vim and git-lfs for LFS-hosted weights
apt-get update
apt-get install -y build-essential ninja-build vim git-lfs
git lfs install

# pin a specific PyTorch 2.0 nightly wheel (CUDA 11.7, Python 3.8)
pip3 install --force-reinstall https://download.pytorch.org/whl/nightly/cu117/torch-2.0.0.dev20230301%2Bcu117-cp38-cp38-linux_x86_64.whl --index-url https://download.pytorch.org/whl/nightly/cu117
if [ -z "${TORCH_CUDA_ARCH_LIST}" ]; then # only set this if not set yet
# this covers most common GPUs that the installed version of pytorch supports
# python -c "import torch; print(torch.cuda.get_arch_list())"
export TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
fi
# clone axolotl into the workspace and install it in editable mode with the int4 extra
cd /workspace/
git clone https://github.com/winglian/axolotl.git
cd axolotl
pip install -e .[int4]

# make the repo's default accelerate config the user-level default
mkdir -p ~/.cache/huggingface/accelerate/
cp configs/accelerate/default_config.yml ~/.cache/huggingface/accelerate/default_config.yml
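
Once this script has run, the pod has the pinned PyTorch build, an editable install of axolotl, and the repo's default accelerate config in place; launching a training run from one of the configs in the repository is a separate step and is not handled by this script.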