From c04df54b4bc41b1fb4dee2ac552cd1f4fc2e3535 Mon Sep 17 00:00:00 2001
From: Mads Henrichsen
Date: Sat, 30 Dec 2023 21:36:01 +0100
Subject: [PATCH] new lr

---
 examples/yayi2-30b/qlora.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/yayi2-30b/qlora.yml b/examples/yayi2-30b/qlora.yml
index 3d4cd0a50..4f8ebb24c 100644
--- a/examples/yayi2-30b/qlora.yml
+++ b/examples/yayi2-30b/qlora.yml
@@ -3,7 +3,7 @@ model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
 is_mistral_derived_model: false
 trust_remote_code: true
-
+model_revision: refs/pr/5
 load_in_8bit: false
 load_in_4bit: true
 
@@ -19,7 +19,7 @@ output_dir: ./qlora-out
 adapter: qlora
 lora_model_dir:
 
-sequence_len: 8192
+sequence_len: 2048 # Fits in 40GB VRAM. Can easily do 4096 on an A100 80GB or an A6000.
 sample_packing: false
 pad_to_sequence_len: true
 
@@ -37,7 +37,7 @@ lora_target_modules:
 - k_proj
 - o_proj
 
-wandb_project:
+wandb_project: yayi2
 wandb_entity:
 wandb_watch:
 wandb_name:
@@ -48,7 +48,7 @@ micro_batch_size: 1
 num_epochs: 1
 optimizer: adamw_bnb_8bit
 lr_scheduler: cosine
-learning_rate: 0.0002
+learning_rate: 0.00002
 train_on_inputs: false
 group_by_length: false
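
The patched file is an axolotl example config, so the result is consumed by axolotl's
CLI rather than edited further. As a minimal sketch of how a run with the patched
config would typically be launched (assuming an axolotl checkout with accelerate
already configured; the commands below are illustrative and are not part of this
patch):

    # optional: tokenize and cache the datasets ahead of training
    python -m axolotl.cli.preprocess examples/yayi2-30b/qlora.yml

    # launch the QLoRA fine-tune with the patched example config
    accelerate launch -m axolotl.cli.train examples/yayi2-30b/qlora.yml

Net effect of the patch: learning_rate drops tenfold (0.0002 to 0.00002) under the
existing cosine scheduler, sequence_len drops from 8192 to 2048 so the run fits in
40GB of VRAM, wandb_project is set to yayi2, and model_revision pins the base model
to the Hugging Face Hub PR branch refs/pr/5.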