update readme to point to direct link to runpod template, cleanup install instructions (#532)

* update readme to point to direct link to runpod template, cleanup install instructions

* default install flash-attn and auto-gptq now too

* update readme w flash-attn extra

* fix version in setup
This commit is contained in:
Wing Lian
2023-09-08 11:58:54 -04:00
committed by GitHub
parent 5e2d8a42d9
commit 34c0a86a11
5 changed files with 11 additions and 28 deletions

View File

@@ -24,8 +24,8 @@ jobs:
- name: Install dependencies - name: Install dependencies
run: | run: |
pip install -e . pip3 install -e .
pip install -r requirements-tests.txt pip3 install -r requirements-tests.txt
- name: Run tests - name: Run tests
run: | run: |

View File

@@ -90,8 +90,7 @@ accelerate launch scripts/finetune.py examples/openllama-3b/lora.yml \
```bash ```bash
docker run --gpus '"all"' --rm -it winglian/axolotl:main-py3.10-cu118-2.0.1 docker run --gpus '"all"' --rm -it winglian/axolotl:main-py3.10-cu118-2.0.1
``` ```
- `winglian/axolotl-runpod:main-py3.10-cu118-2.0.1`: for runpod - `winglian/axolotl-runpod:main-latest`: for runpod or use this [direct link](https://runpod.io/gsc?template=v2ickqhz9s&ref=6i7fkpdz)
- `winglian/axolotl-runpod:main-py3.9-cu118-2.0.1-gptq`: for gptq
Or run on the current files for development: Or run on the current files for development:
@@ -104,19 +103,9 @@ accelerate launch scripts/finetune.py examples/openllama-3b/lora.yml \
2. Install pytorch stable https://pytorch.org/get-started/locally/ 2. Install pytorch stable https://pytorch.org/get-started/locally/
3. Install python dependencies with ONE of the following: 3. Install axolotl along with python dependencies
- Recommended, supports QLoRA, NO gptq/int4 support
```bash ```bash
pip3 install -e . pip3 install -e .[flash-attn]
pip3 install -U git+https://github.com/huggingface/peft.git
```
- gptq/int4 support, NO QLoRA
```bash
pip3 install -e .[gptq]
```
- same as above but not recommended
```bash
pip3 install -e .[gptq_triton]
``` ```
- LambdaLabs - LambdaLabs
@@ -151,10 +140,9 @@ accelerate launch scripts/finetune.py examples/openllama-3b/lora.yml \
git clone https://github.com/OpenAccess-AI-Collective/axolotl git clone https://github.com/OpenAccess-AI-Collective/axolotl
cd axolotl cd axolotl
pip3 install -e . # change depend on needs pip3 install -e .
pip3 install protobuf==3.20.3 pip3 install protobuf==3.20.3
pip3 install -U --ignore-installed requests Pillow psutil scipy pip3 install -U --ignore-installed requests Pillow psutil scipy
pip3 install git+https://github.com/huggingface/peft.git # not for gptq
``` ```
5. Set path 5. Set path

View File

@@ -15,9 +15,9 @@ RUN git clone --depth=1 https://github.com/OpenAccess-AI-Collective/axolotl.git
# If AXOLOTL_EXTRAS is set, append it in brackets # If AXOLOTL_EXTRAS is set, append it in brackets
RUN cd axolotl && \ RUN cd axolotl && \
if [ "$AXOLOTL_EXTRAS" != "" ] ; then \ if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
pip install -e .[flash-attn,gptq,$AXOLOTL_EXTRAS]; \ pip install -e .[flash-attn,$AXOLOTL_EXTRAS]; \
else \ else \
pip install -e .[flash-attn,gptq]; \ pip install -e .[flash-attn]; \
fi fi
# fix so that git fetch/pull from remote works # fix so that git fetch/pull from remote works

View File

@@ -12,7 +12,7 @@ evaluate
fire fire
PyYAML>=6.0 PyYAML>=6.0
datasets datasets
flash-attn>=2.0.8 flash-attn>=2.2.1
sentencepiece sentencepiece
wandb wandb
einops einops

View File

@@ -7,9 +7,7 @@ def parse_requirements():
_install_requires = [] _install_requires = []
_dependency_links = [] _dependency_links = []
with open("./requirements.txt", encoding="utf-8") as requirements_file: with open("./requirements.txt", encoding="utf-8") as requirements_file:
lines = [ lines = [r.strip() for r in requirements_file.readlines()]
r.strip() for r in requirements_file.readlines() if "auto-gptq" not in r
]
for line in lines: for line in lines:
if line.startswith("--extra-index-url"): if line.startswith("--extra-index-url"):
# Handle custom index URLs # Handle custom index URLs
@@ -33,11 +31,8 @@ setup(
install_requires=install_requires, install_requires=install_requires,
dependency_links=dependency_links, dependency_links=dependency_links,
extras_require={ extras_require={
"gptq": [
"auto-gptq",
],
"flash-attn": [ "flash-attn": [
"flash-attn==2.0.8", "flash-attn>=2.2.1",
], ],
"extras": [ "extras": [
"deepspeed", "deepspeed",