diff --git a/examples/gemma/qlora.yml b/examples/gemma/qlora.yml
new file mode 100644
index 000000000..abbd2f034
--- /dev/null
+++ b/examples/gemma/qlora.yml
@@ -0,0 +1,65 @@
+# use google/gemma-7b if you have access
+base_model: mhenrichsen/gemma-7b
+model_type: AutoModelForCausalLM
+tokenizer_type: AutoTokenizer
+
+load_in_8bit: false
+load_in_4bit: true
+strict: false
+
+# huggingface repo
+datasets:
+  - path: mhenrichsen/alpaca_2k_test
+    type: alpaca
+val_set_size: 0.1
+output_dir: ./out
+
+adapter: qlora
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+
+sequence_len: 4096
+sample_packing: false
+pad_to_sequence_len: false
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+
+gradient_accumulation_steps: 3
+micro_batch_size: 2
+num_epochs: 4
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: false
+bf16: auto
+fp16:
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+warmup_ratio: 0.1
+evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
+saves_per_epoch: 1
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
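
For reference, a sketch of how a config like this is typically launched with axolotl's CLI. The command below assumes the file lives at the path added by this diff and that accelerate is already configured for your hardware; adjust both for your environment.

    # fine-tune using the QLoRA config added above (assumes accelerate is set up)
    accelerate launch -m axolotl.cli.train examples/gemma/qlora.yml

Note that with load_in_4bit: true and adapter: qlora, the 7B base model is quantized to 4-bit NF4 and only the LoRA adapter weights (rank 32 over all linear layers, per lora_target_linear: true) are trained, which is what keeps this runnable on a single consumer GPU.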