From c75f91674566594c1ef68a922368eeeaac3bdf07 Mon Sep 17 00:00:00 2001
From: Tim Dolan <40906019+tdolan21@users.noreply.github.com>
Date: Tue, 2 Jan 2024 20:00:37 -0500
Subject: [PATCH] added tiny llama examples for lora and qlora (#1027)
* added tiny llama examples for lora and qlora
* corrected yml files and removed tiny-llama.yml from llama-2 example
---
examples/tiny-llama/README.md | 17 +++++
.../tiny-llama.yml => tiny-llama/lora.yml} | 9 +--
examples/tiny-llama/qlora.yml | 67 +++++++++++++++++++
3 files changed, 87 insertions(+), 6 deletions(-)
create mode 100644 examples/tiny-llama/README.md
rename examples/{llama-2/tiny-llama.yml => tiny-llama/lora.yml} (87%)
create mode 100644 examples/tiny-llama/qlora.yml
diff --git a/examples/tiny-llama/README.md b/examples/tiny-llama/README.md
new file mode 100644
index 000000000..467c06ec8
--- /dev/null
+++ b/examples/tiny-llama/README.md
@@ -0,0 +1,17 @@
+# Overview
+
+This is a simple example of how to finetune TinyLlama 1.1B using either LoRA or QLoRA:
+
+LoRA:
+
+```
+accelerate launch -m axolotl.cli.train examples/tiny-llama/lora.yml
+```
+
+QLoRA:
+
+```
+accelerate launch -m axolotl.cli.train examples/tiny-llama/qlora.yml
+```
+
+Both runs take about 10 minutes to complete on a single RTX 4090.
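Once training finishes, the adapter in `./lora-out` (or `./qlora-out`) can be folded back into the base weights with axolotl's merge entry point. A usage sketch, assuming the `axolotl.cli.merge_lora` module documented in the project's main README:

```
python3 -m axolotl.cli.merge_lora examples/tiny-llama/lora.yml --lora_model_dir="./lora-out"
```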
diff --git a/examples/llama-2/tiny-llama.yml b/examples/tiny-llama/lora.yml
similarity index 87%
rename from examples/llama-2/tiny-llama.yml
rename to examples/tiny-llama/lora.yml
index c72db4e5b..d72ce8eb4 100644
--- a/examples/llama-2/tiny-llama.yml
+++ b/examples/tiny-llama/lora.yml
@@ -1,5 +1,4 @@
-base_model: PY007/TinyLlama-1.1B-intermediate-step-715k-1.5T
-
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
is_llama_derived_model: true
@@ -17,6 +16,7 @@ output_dir: ./lora-out
sequence_len: 4096
sample_packing: true
+pad_to_sequence_len: true
adapter: lora
lora_model_dir:
@@ -55,7 +55,6 @@ flash_attention: true
warmup_steps: 10
evals_per_epoch: 4
-eval_table_size:
saves_per_epoch: 1
debug:
deepspeed:
@@ -63,6 +62,4 @@ weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
-  bos_token: "<s>"
-  eos_token: "</s>"
-  unk_token: "<unk>"
+
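For a quick smoke test of the adapter trained by `lora.yml`, a minimal sketch along these lines should work; it assumes `transformers` and `peft` are installed, and the alpaca-style prompt is an illustrative guess matching the `type: alpaca` dataset used in these configs:

```
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base, torch_dtype=torch.bfloat16)
# Attach the LoRA weights produced by the run above (path from output_dir).
model = PeftModel.from_pretrained(model, "./lora-out")

prompt = "### Instruction:\nName three primary colors.\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```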
diff --git a/examples/tiny-llama/qlora.yml b/examples/tiny-llama/qlora.yml
new file mode 100644
index 000000000..02af851ad
--- /dev/null
+++ b/examples/tiny-llama/qlora.yml
@@ -0,0 +1,67 @@
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+model_type: LlamaForCausalLM
+tokenizer_type: LlamaTokenizer
+is_llama_derived_model: true
+
+load_in_8bit: false
+load_in_4bit: true
+strict: false
+
+datasets:
+ - path: mhenrichsen/alpaca_2k_test
+ type: alpaca
+dataset_prepared_path:
+val_set_size: 0.05
+output_dir: ./qlora-out
+
+adapter: qlora
+lora_model_dir:
+
+sequence_len: 4096
+sample_packing: true
+pad_to_sequence_len: true
+
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_modules:
+lora_target_linear: true
+lora_fan_in_fan_out:
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 2
+num_epochs: 4
+optimizer: paged_adamw_32bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: false
+bf16: true
+fp16: false
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+warmup_steps: 10
+evals_per_epoch: 4
+saves_per_epoch: 1
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
+
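For reference, `load_in_4bit: true` together with `adapter: qlora` corresponds roughly to the 4-bit loading below via `transformers` and `bitsandbytes`. This is an illustrative sketch of typical QLoRA-style settings (NF4, double quantization, bf16 compute), not code from this patch:

```
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # quantize the frozen base weights to 4-bit
    bnb_4bit_quant_type="nf4",              # NormalFloat4, the QLoRA paper's data type
    bnb_4bit_use_double_quant=True,         # also quantize the quantization constants
    bnb_4bit_compute_dtype=torch.bfloat16,  # matches bf16: true in the config
)

model = AutoModelForCausalLM.from_pretrained(
    "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
    quantization_config=bnb_config,
)
```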