From cf5ae6b6496e1043c1a1ba2a116336a2d365bbc5 Mon Sep 17 00:00:00 2001
From: NanoCode012
Date: Sun, 16 Jul 2023 01:07:27 +0900
Subject: [PATCH] Feat(readme): improve docs on multi-gpu

---
 README.md | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index ca36acbcb..efd64d8d2 100644
--- a/README.md
+++ b/README.md
@@ -36,8 +36,6 @@ git clone https://github.com/OpenAccess-AI-Collective/axolotl
 pip3 install -e .
 pip3 install -U git+https://github.com/huggingface/peft.git
 
-accelerate config
-
 # finetune lora
 accelerate launch scripts/finetune.py examples/openllama-3b/lora.yml
 
@@ -525,6 +523,21 @@ Run
 accelerate launch scripts/finetune.py configs/your_config.yml
 ```
 
+#### Multi-GPU Config
+
+- llama FSDP
+```yaml
+fsdp:
+  - full_shard
+  - auto_wrap
+fsdp_config:
+  fsdp_offload_params: true
+  fsdp_state_dict_type: FULL_STATE_DICT
+  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
+```
+
+- llama DeepSpeed: prepend `ACCELERATE_USE_DEEPSPEED=true` to the finetune command
+
 ### Inference
 
 Pass the appropriate flag to the train command:
@@ -575,6 +588,10 @@ Try set `fp16: true`
 
 Try to turn off xformers.
 
+> Message about accelerate config missing
+
+It's safe to ignore it.
+
 ## Need help? 🙋‍♂️
 
 Join our [Discord server](https://discord.gg/HhrNrHJPRb) where we can help you
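For concreteness, the two multi-GPU routes added in the second hunk reduce to launch commands along these lines. This is a sketch, not part of the patch: `configs/your_config.yml` is the placeholder config path already used in the README's "Run" section, and a DeepSpeed run may still need its own accelerate/DeepSpeed configuration beyond the environment variable.

```bash
# FSDP: merge the fsdp/fsdp_config keys shown in the patch into your
# axolotl config, then launch as usual
accelerate launch scripts/finetune.py configs/your_config.yml

# DeepSpeed: the same command, with the env var prepended
ACCELERATE_USE_DEEPSPEED=true accelerate launch scripts/finetune.py configs/your_config.yml
```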