new lr

@@ -3,7 +3,7 @@ model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
 is_mistral_derived_model: false
 trust_remote_code: true
-
+model_revision: refs/pr/5
 
 load_in_8bit: false
 load_in_4bit: true
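
The first hunk pins model_revision to refs/pr/5, a pull-request ref on the Hugging Face Hub, so training pulls that PR's files instead of the repo's main branch. Below is a minimal sketch of the equivalent load in transformers; the repo id is an assumption (the config's base_model line is outside this hunk, and yayi2 is only inferred from the wandb project name set further down):

from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repo id; the actual base_model is not part of this diff.
repo = "wenge-research/yayi2-30b"

# revision accepts Hub PR refs like refs/pr/5, matching model_revision.
model = AutoModelForCausalLM.from_pretrained(
    repo,
    revision="refs/pr/5",
    trust_remote_code=True,  # matches trust_remote_code: true
)
tokenizer = AutoTokenizer.from_pretrained(
    repo, revision="refs/pr/5", trust_remote_code=True
)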

@@ -19,7 +19,7 @@ output_dir: ./qlora-out
 adapter: qlora
 lora_model_dir:
 
-sequence_len: 8192
+sequence_len: 2048 # Fits in 40GB VRAM. Can easily do 4096 on an A100 80GB or an A6000.
 sample_packing: false
 pad_to_sequence_len: true
 
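
The sequence_len drop from 8192 to 2048 is where the VRAM saving in the comment comes from: with sample_packing off, activation memory grows at least linearly in sequence length, and naive attention scores grow quadratically. A back-of-envelope sketch (the head count is illustrative, not taken from this diff; flash attention avoids materializing these score matrices entirely):

# Rough size of naive fp16 attention-score matrices per layer.
heads, bytes_per_score = 64, 2  # illustrative values, not from the config
for seq in (2048, 4096, 8192):
    gib = heads * seq * seq * bytes_per_score / 2**30
    print(f"seq {seq}: ~{gib:.1f} GiB per layer")
# seq 2048: ~0.5 GiB, seq 4096: ~2.0 GiB, seq 8192: ~8.0 GiB

That scaling is consistent with the new inline comment: 2048 fits a 40GB card, while 4096 wants an A100 80GB or an A6000-class GPU.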
@@ -37,7 +37,7 @@ lora_target_modules:
 - k_proj
 - o_proj
 
-wandb_project:
+wandb_project: yayi2
 wandb_entity:
 wandb_watch:
 wandb_name:
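
Setting wandb_project is what switches Weights & Biases logging on for the run; entity, watch, and name stay empty and fall back to wandb defaults. Roughly equivalent to the following (a sketch of the effect, not axolotl's actual internals):

import wandb

# Project only; entity and run name unset, as in the config.
wandb.init(project="yayi2")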

@@ -48,7 +48,7 @@ micro_batch_size: 1
 num_epochs: 1
 optimizer: adamw_bnb_8bit
 lr_scheduler: cosine
-learning_rate: 0.0002
+learning_rate: 0.00002
 
 train_on_inputs: false
 group_by_length: false
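
The headline change, and the reason for the "new lr" commit message: the learning rate drops 10x, from 2e-4 to 2e-5, under the existing cosine scheduler. A minimal sketch of the resulting decay shape, assuming an illustrative step count and ignoring axolotl's warmup handling:

import math

peak_lr, total_steps = 2e-5, 1000  # total_steps is illustrative

def cosine_lr(step: int) -> float:
    # Cosine decay from peak_lr down to 0, as with lr_scheduler: cosine.
    return peak_lr * 0.5 * (1 + math.cos(math.pi * step / total_steps))

for step in (0, 250, 500, 750, 1000):
    print(f"step {step:>4}: lr = {cosine_lr(step):.2e}")
# step 0: 2.00e-05 ... step 500: 1.00e-05 ... step 1000: 0.00e+00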