base_model: google/gemma-2-2b
# optionally set model_type and/or tokenizer_type
model_type: AutoModelForSequenceClassification
num_labels: 1  # single scalar reward output per sequence
tokenizer_type: AutoTokenizer

# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name

load_in_8bit: false
load_in_4bit: false
strict: false

# Train a sequence-classification reward model on chosen/rejected
# preference pairs, scored with a Bradley-Terry objective over the
# chat-templated conversations.
reward_model: true
chat_template: gemma
datasets:
  - path: argilla/distilabel-intel-orca-dpo-pairs
    type: bradley_terry.chat_template
val_set_size: 0.0
output_dir: ./outputs/out
remove_unused_columns: false

sequence_len: 2048
sample_packing: false
eval_sample_packing: false
pad_to_sequence_len: true

# wandb logging is disabled while these are left empty
wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: true
fp16:
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch:
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
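
# Launch sketch (not part of the config itself; the config path below is a
# placeholder -- substitute wherever this file is saved). With axolotl
# installed, a config like this is typically run through its CLI:
#   accelerate launch -m axolotl.cli.train path/to/this-config.yaml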