diff --git a/examples/4bit-lora-7b/README.md b/examples/4bit-lora-7b/README.md
new file mode 100644
index 000000000..eefe98d3f
--- /dev/null
+++ b/examples/4bit-lora-7b/README.md
@@ -0,0 +1,13 @@
+# LLaMA 7B using LoRA
+
+This is a good place to start for beginners: it fine-tunes a GPTQ 4-bit quantized LLaMA 7B with LoRA adapters and will run on a single NVIDIA RTX 4090 with no other changes needed.
+
+```shell
+accelerate launch scripts/finetune.py examples/4bit-lora-7b/config.yml
+```
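+
+If 🤗 Accelerate has not been configured on this machine yet, running the interactive setup once before launching is usually enough; for a single RTX 4090, a single-process, single-GPU, bf16 answer set is a reasonable starting point (adjust to your hardware):
+
+```shell
+accelerate config
+```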
diff --git a/examples/4bit-lora-7b/config.yml b/examples/4bit-lora-7b/config.yml
new file mode 100644
index 000000000..f027880f6
--- /dev/null
+++ b/examples/4bit-lora-7b/config.yml
@@ -0,0 +1,62 @@
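+# LoRA fine-tune of a GPTQ 4-bit quantized LLaMA-7B base (see the README in this folder)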
+base_model: Neko-Institute-of-Science/LLaMA-7B-4bit-128g
+base_model_config: Neko-Institute-of-Science/LLaMA-7B-4bit-128g
+model_type: LlamaForCausalLM
+tokenizer_type: LlamaTokenizer
+trust_remote_code:
+load_in_8bit: false
+load_4bit: true
+datasets:
+ - path: vicgalle/alpaca-gpt4
+ type: alpaca
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.02
+adapter: lora
+lora_model_dir:
+sequence_len: 2048
+max_packed_sequence_len:
+lora_r: 8
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_modules:
+ - q_proj
+ - v_proj
+lora_fan_in_fan_out: false
+wandb_project: llama-7b-lora-int4
+wandb_watch:
+wandb_run_id:
+wandb_log_model: checkpoint
+output_dir: ./llama-7b-lora-int4
+batch_size: 1
+micro_batch_size: 1
+num_epochs: 3
+optimizer: adamw_bnb_8bit
+torchdistx_path:
+lr_scheduler: cosine
+learning_rate: 0.0002
+train_on_inputs: false
+group_by_length: false
+bf16: true
+tf32: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 5
+xformers_attention:
+flash_attention:
+gradient_checkpointing: true
+gptq_groupsize: 128  # matches the 128g groupsize of the quantized base checkpoint
+gptq_model_v1: false
+warmup_steps: 20
+eval_steps: 110
+save_steps: 660
+debug:
+deepspeed:
+weight_decay: 0.0001
+fsdp:
+fsdp_config:
+special_tokens:
+ pad_token: "[PAD]"
+ bos_token: "<s>"
+ eos_token: "</s>"
+ unk_token: "<unk>"