fix(readme): update cuda instructions during preprocess (#2114) [skip ci]

This commit is contained in:
NanoCode012
2024-12-03 20:58:03 +07:00
committed by GitHub
parent bd8436bc6e
commit 81ef3e45f7

View File

@@ -147,7 +147,7 @@ pip3 install -e '.[flash-attn,deepspeed]'
### Usage
```bash
# preprocess datasets - optional but recommended
- CUDA_VISIBLE_DEVICES="" python -m axolotl.cli.preprocess examples/openllama-3b/lora.yml
+ CUDA_VISIBLE_DEVICES="0" python -m axolotl.cli.preprocess examples/openllama-3b/lora.yml
# finetune lora
accelerate launch -m axolotl.cli.train examples/openllama-3b/lora.yml