From 8afb0fbaba8ae42238f8c0c239000afb56b310e6 Mon Sep 17 00:00:00 2001
From: Utensil
Date: Wed, 31 May 2023 23:58:40 +0800
Subject: [PATCH 1/7] Axolotl supports falcon + qlora

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index fd5a60947..a5f8e3ff8 100644
--- a/README.md
+++ b/README.md
@@ -22,7 +22,7 @@
 | Pythia | ✅ | ✅ | ❓ | ❌ | ❌ | ❌ | ❓ |
 | cerebras | ✅ | ✅ | ❓ | ❌ | ❌ | ❌ | ❓ |
 | mpt | ✅ | ❌ | ❓ | ❌ | ❌ | ❌ | ❓ |
-| falcon | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❓ |
+| falcon | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ | ❓ |
 
 ## Quickstart ⚡
 

From 72bf8aafb67bed35c985e86610c3acd94ff37b1b Mon Sep 17 00:00:00 2001
From: Utensil
Date: Thu, 1 Jun 2023 00:00:37 +0800
Subject: [PATCH 2/7] Create config-7b-qlora.yml

---
 examples/falcon/config-7b-qlora.yml | 68 +++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)
 create mode 100644 examples/falcon/config-7b-qlora.yml

diff --git a/examples/falcon/config-7b-qlora.yml b/examples/falcon/config-7b-qlora.yml
new file mode 100644
index 000000000..c36fe9bed
--- /dev/null
+++ b/examples/falcon/config-7b-qlora.yml
@@ -0,0 +1,68 @@
+base_model: tiiuae/falcon-7b
+base_model_config: tiiuae/falcon-7b
+trust_remote_code: true
+model_type: AutoModelForCausalLM
+tokenizer_type: AutoTokenizer
+load_in_8bit: false
+load_in_4bit: true
+gptq: false
+strict: false
+push_dataset_to_hub:
+datasets:
+  - path: QingyiSi/Alpaca-CoT
+    data_files:
+      - Chain-of-Thought/formatted_cot_data/gsm8k_train.json
+    type: "alpaca:chat"
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.01
+adapter: qlora
+lora_model_dir:
+sequence_len: 2048
+max_packed_sequence_len: 2048
+lora_r: 64
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_modules:
+lora_target_linear: true
+lora_fan_in_fan_out:
+wandb_project: falcon-qlora
+wandb_watch:
+wandb_run_id:
+wandb_log_model:
+output_dir: ./qlora-out
+batch_size: 8
+micro_batch_size: 4
+num_epochs: 3
+optimizer: paged_adamw_32bit
+torchdistx_path:
+lr_scheduler: cosine
+learning_rate: 0.0002
+train_on_inputs: false
+group_by_length: false
+bf16: true
+fp16: false
+tf32: true
+gradient_checkpointing: true
+# stop training after this many evaluation losses have increased in a row
+# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback
+early_stopping_patience: 3
+resume_from_checkpoint:
+auto_resume_from_checkpoints: true
+local_rank:
+logging_steps: 1
+xformers_attention: false
+flash_attention:
+gptq_groupsize:
+gptq_model_v1:
+warmup_steps: 10
+eval_steps: 5
+save_steps: 10
+debug:
+deepspeed:
+weight_decay: 0.000001
+fsdp:
+fsdp_config:
+special_tokens:
+  pad_token: "<|endoftext|>"
+  bos_token: ">>ABSTRACT<<"
+  eos_token: "<|endoftext|>"

From fb3d40f197471f275c6c1ecfae2761189bfb36ea Mon Sep 17 00:00:00 2001
From: Utensil
Date: Thu, 1 Jun 2023 18:29:20 +0800
Subject: [PATCH 3/7] falcon + qlora + xformer mbs 40 gas 2 on A6000

---
 examples/falcon/config-7b-qlora.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/falcon/config-7b-qlora.yml b/examples/falcon/config-7b-qlora.yml
index c36fe9bed..f15847f5c 100644
--- a/examples/falcon/config-7b-qlora.yml
+++ b/examples/falcon/config-7b-qlora.yml
@@ -18,7 +18,7 @@ val_set_size: 0.01
 adapter: qlora
 lora_model_dir:
 sequence_len: 2048
-max_packed_sequence_len: 2048
+max_packed_sequence_len:
 lora_r: 64
 lora_alpha: 16
 lora_dropout: 0.05
@@ -30,8 +30,8 @@ wandb_watch:
 wandb_run_id:
 wandb_log_model:
 output_dir: ./qlora-out
-batch_size: 8
-micro_batch_size: 4
+micro_batch_size: 40
+gradient_accumulation_steps: 2
 num_epochs: 3
 optimizer: paged_adamw_32bit
 torchdistx_path:
@@ -50,7 +50,7 @@ resume_from_checkpoint:
 auto_resume_from_checkpoints: true
 local_rank:
 logging_steps: 1
-xformers_attention: false
+xformers_attention: true
 flash_attention:
 gptq_groupsize:
 gptq_model_v1:

From ca11ae9689d220e24bda57cc0bd1b7fcf89ae290 Mon Sep 17 00:00:00 2001
From: Utensil
Date: Sat, 3 Jun 2023 15:04:02 +0800
Subject: [PATCH 4/7] Add comments/alternatives for falcon-qlora configs

---
 examples/falcon/config-7b-qlora.yml | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/examples/falcon/config-7b-qlora.yml b/examples/falcon/config-7b-qlora.yml
index f15847f5c..a3845d92d 100644
--- a/examples/falcon/config-7b-qlora.yml
+++ b/examples/falcon/config-7b-qlora.yml
@@ -1,9 +1,13 @@
+# 1b: tiiuae/falcon-rw-1b
+# 40b: tiiuae/falcon-40b
 base_model: tiiuae/falcon-7b
 base_model_config: tiiuae/falcon-7b
+# required by falcon custom model code: https://huggingface.co/tiiuae/falcon-7b/tree/main
 trust_remote_code: true
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
 load_in_8bit: false
+# enable 4bit for QLoRA
 load_in_4bit: true
 gptq: false
 strict: false
@@ -15,27 +19,47 @@ datasets:
     type: "alpaca:chat"
 dataset_prepared_path: last_run_prepared
 val_set_size: 0.01
+# enable QLoRA
 adapter: qlora
 lora_model_dir:
 sequence_len: 2048
 max_packed_sequence_len:
+
+# hyperparameters from QLoRA paper Appendix B.2
+# "We find hyperparameters to be largely robust across datasets"
 lora_r: 64
 lora_alpha: 16
+# 0.1 for models up to 13B
+# 0.05 for 33B and 65B models
 lora_dropout: 0.05
+# add LoRA modules on all linear layers of the base model
 lora_target_modules:
 lora_target_linear: true
 lora_fan_in_fan_out:
+
 wandb_project: falcon-qlora
 wandb_watch:
 wandb_run_id:
 wandb_log_model:
 output_dir: ./qlora-out
-micro_batch_size: 40
+
+# QLoRA paper Table 9
+# - 16 for 7b & 13b
+# - 32 for 33b, 64 for 65b
+# Max size tested on A6000
+# - 7b: 40
+# - 40b: 4
+# decrease if OOM, increase for max VRAM utilization
+micro_batch_size: 30
 gradient_accumulation_steps: 2
 num_epochs: 3
+# Optimizer for QLoRA
 optimizer: paged_adamw_32bit
 torchdistx_path:
 lr_scheduler: cosine
+# QLoRA paper Table 9
+# - 2e-4 for 7b & 13b
+# - 1e-4 for 33b & 65b
 learning_rate: 0.0002
 train_on_inputs: false
 group_by_length: false

From c9c050316febb964b2c9956a1ea430083d6a0bce Mon Sep 17 00:00:00 2001
From: Utensil
Date: Sat, 3 Jun 2023 17:26:33 +0800
Subject: [PATCH 5/7] Default micro_batch_size to 1 for a safer start

---
 examples/falcon/config-7b-qlora.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/falcon/config-7b-qlora.yml b/examples/falcon/config-7b-qlora.yml
index a3845d92d..2f2920e98 100644
--- a/examples/falcon/config-7b-qlora.yml
+++ b/examples/falcon/config-7b-qlora.yml
@@ -50,7 +50,7 @@ output_dir: ./qlora-out
 # - 7b: 40
 # - 40b: 4
 # decrease if OOM, increase for max VRAM utilization
-micro_batch_size: 30
+micro_batch_size: 1
 gradient_accumulation_steps: 2
 num_epochs: 3
 # Optimizer for QLoRA

From a52f4816b04199f2aa2d97154e562309f423f97a Mon Sep 17 00:00:00 2001
From: Utensil
Date: Thu, 8 Jun 2023 23:04:19 +0800
Subject: [PATCH 6/7] Default `wandb_project` to empty as suggested

Co-authored-by: NanoCode012
---
 examples/falcon/config-7b-qlora.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/falcon/config-7b-qlora.yml b/examples/falcon/config-7b-qlora.yml
index 2f2920e98..3e24d5567 100644
--- a/examples/falcon/config-7b-qlora.yml
+++ b/examples/falcon/config-7b-qlora.yml
@@ -37,7 +37,7 @@ lora_target_modules:
 lora_target_linear: true
 lora_fan_in_fan_out:
 
-wandb_project: falcon-qlora
+wandb_project:
 wandb_watch:
 wandb_run_id:
 wandb_log_model:

From 79a8f52181110f7f0646e80ed1c88d57fe157d6a Mon Sep 17 00:00:00 2001
From: Utensil
Date: Thu, 8 Jun 2023 23:48:57 +0800
Subject: [PATCH 7/7] Trim trailing whitespace

---
 examples/falcon/config-7b-qlora.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/falcon/config-7b-qlora.yml b/examples/falcon/config-7b-qlora.yml
index 3e24d5567..6168ff2d5 100644
--- a/examples/falcon/config-7b-qlora.yml
+++ b/examples/falcon/config-7b-qlora.yml
@@ -2,7 +2,7 @@
 # 40b: tiiuae/falcon-40b
 base_model: tiiuae/falcon-7b
 base_model_config: tiiuae/falcon-7b
-# required by falcon custom model code: https://huggingface.co/tiiuae/falcon-7b/tree/main 
+# required by falcon custom model code: https://huggingface.co/tiiuae/falcon-7b/tree/main
 trust_remote_code: true
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
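
After the series is applied and a training run has finished, one quick way to sanity-check the result is to load the trained adapter with plain transformers + peft. The sketch below is illustrative and not part of the patches: it assumes the final PEFT adapter ends up in the config's output_dir (./qlora-out) and mirrors the config's 4-bit and bf16 settings; adjust the adapter path if axolotl writes it to a checkpoint subdirectory instead.

# Hypothetical smoke test for an adapter trained with examples/falcon/config-7b-qlora.yml.
# Assumes the final PEFT adapter was saved to ./qlora-out (the config's output_dir).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-7b",
    trust_remote_code=True,  # falcon ships custom modeling code, as in the config
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,                      # mirrors load_in_4bit: true
        bnb_4bit_compute_dtype=torch.bfloat16,  # mirrors bf16: true
    ),
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b", trust_remote_code=True)
model = PeftModel.from_pretrained(base, "./qlora-out")  # attach the trained QLoRA adapter

# A gsm8k-style question, matching the dataset used in the config.
prompt = ("Natalia sold clips to 48 of her friends in April, and then she sold half as many "
          "clips in May. How many clips did Natalia sell altogether in April and May?")
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))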