diff --git a/examples/openllama-3b/relora.yml b/examples/openllama-3b/relora.yml
deleted file mode 100644
index 2d1e5a971..000000000
--- a/examples/openllama-3b/relora.yml
+++ /dev/null
@@ -1,65 +0,0 @@
-base_model: /home/charles/.cache/huggingface/hub/models--openlm-research--open_llama_3b/snapshots/8fcddba529aef0eda7293cc9a4171a3994648d2e/
-base_model_config: openlm-research/open_llama_3b
-model_type: LlamaForCausalLM
-tokenizer_type: LlamaTokenizer
-load_in_8bit: false
-load_in_4bit: false
-strict: false
-push_dataset_to_hub:
-datasets:
-  - path: teknium/GPT4-LLM-Cleaned
-    type: alpaca
-prompt_format: alpaca
-dataset_prepared_path: last_run_prepared
-val_set_size: 0.005
-adapter: lora
-lora_model_dir:
-sequence_len: 512
-max_packed_sequence_len: 512
-lora_r: 8
-lora_alpha: 16
-lora_dropout: 0.001
-lora_target_modules:
-lora_target_linear: true
-lora_fan_in_fan_out:
-relora_steps: 20
-relora_warmup_steps: 10
-wandb_project: relora
-wandb_watch:
-wandb_run_id:
-wandb_log_model:
-output_dir: ./lora-out
-gradient_accumulation_steps: 4
-micro_batch_size: 2
-num_epochs: 3
-optimizer: adamw_bnb_8bit
-torchdistx_path:
-lr_scheduler: cosine
-learning_rate: 0.0002
-train_on_inputs: false
-group_by_length: false
-bf16: false
-fp16: true
-tf32: false
-gradient_checkpointing: false
-early_stopping_patience:
-resume_from_checkpoint:
-local_rank:
-logging_steps: 1
-xformers_attention:
-sdp_attention: true
-flash_attention:
-gptq_groupsize:
-gptq_model_v1:
-warmup_steps: 10
-eval_steps: 20
-save_steps: 50
-debug:
-deepspeed:
-weight_decay: 0.0
-fsdp:
-fsdp_config:
-special_tokens:
-  bos_token: "<s>"
-  eos_token: "</s>"
-  unk_token: "<unk>"
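
Note: in the removed config, `relora_steps: 20` paired with `relora_warmup_steps: 10` means the trainer merges the LoRA weights back into the base model roughly every 20 optimizer steps and then re-warms the learning rate for 10 steps after each reset (my reading of axolotl's ReLoRA schedule at the time; exact restart behavior may vary by version). For reference, a minimal sketch of just the ReLoRA-relevant keys, assuming the same adapter settings as the deleted file:

    adapter: lora
    lora_r: 8
    lora_alpha: 16
    lora_target_linear: true
    relora_steps: 20          # merge adapter into base weights every 20 steps, then reset
    relora_warmup_steps: 10   # LR re-warmup steps after each merge/reset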