diff --git a/examples/gemma3/270m-qlora.yml b/examples/gemma3/270m-qlora.yml
new file mode 100644
index 000000000..8744fad26
--- /dev/null
+++ b/examples/gemma3/270m-qlora.yml
@@ -0,0 +1,68 @@
+base_model: google/gemma-3-270m-it
+# optionally might have model_type or tokenizer_type
+model_type: AutoModelForCausalLM
+tokenizer_type: AutoTokenizer
+# Automatically upload checkpoint and final model to HF
+# hub_model_id: username/custom_model_name
+
+# gemma3 doesn't seem to play nice with ddp
+ddp_find_unused_parameters: true
+
+load_in_8bit: false
+load_in_4bit: true
+
+# huggingface repo
+chat_template: gemma3
+eot_tokens:
+  - <end_of_turn>
+datasets:
+  - path: cgato/SlimOrcaDedupCleaned
+    type: chat_template
+    field_messages: conversations
+    message_property_mappings:
+      role: from
+      content: value
+
+val_set_size: 0.0
+output_dir: ./outputs/out
+
+adapter: qlora
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+
+sequence_len: 2048
+sample_packing: true
+eval_sample_packing: false
+
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+
+gradient_accumulation_steps: 4
+micro_batch_size: 1
+num_epochs: 1
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+bf16: auto
+tf32: true
+
+gradient_checkpointing: true
+gradient_checkpointing_kwargs:
+  use_reentrant: false
+resume_from_checkpoint:
+logging_steps: 1
+flash_attention: true
+
+warmup_ratio: 0.1
+evals_per_epoch:
+saves_per_epoch: 1
+weight_decay: 0.0
+special_tokens:
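
For readers less familiar with axolotl's config keys, the sketch below shows roughly what the quantization and adapter settings in this file map to in plain transformers/peft code. This is illustrative only: axolotl wires all of this up internally from the YAML, and the "all-linear" module targeting is an assumption standing in for lora_target_linear: true.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# load_in_4bit: true -- quantize the frozen base weights to 4-bit NF4,
# computing in bf16 (bf16: auto in the YAML)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-3-270m-it",
    quantization_config=bnb_config,
)
# make the quantized model safe for gradient checkpointing + training
model = prepare_model_for_kbit_training(model)

# adapter: qlora -- train low-rank adapters (r=32, alpha=16, dropout=0.05)
# on top of the frozen 4-bit base; "all-linear" approximates
# lora_target_linear: true
lora_config = LoraConfig(
    r=32,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules="all-linear",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the LoRA weights are trainable

Training itself goes through the usual CLI, e.g. axolotl train examples/gemma3/270m-qlora.yml (or accelerate launch -m axolotl.cli.train on older versions).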