Experimental ReLoRA (+qlora) implementation

This commit is contained in:
Charles Goddard
2023-07-24 09:53:27 -07:00
committed by Wing Lian
parent 918f1b0dfb
commit b57238ecec
6 changed files with 375 additions and 1 deletion

@@ -0,0 +1,65 @@
base_model: /home/charles/.cache/huggingface/hub/models--openlm-research--open_llama_3b/snapshots/8fcddba529aef0eda7293cc9a4171a3994648d2e/
base_model_config: openlm-research/open_llama_3b
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: false
load_in_4bit: false
strict: false
push_dataset_to_hub:
datasets:
- path: teknium/GPT4-LLM-Cleaned
type: alpaca
prompt_format: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.005
adapter: lora
lora_model_dir:
sequence_len: 512
max_packed_sequence_len: 512
lora_r: 8
lora_alpha: 16
lora_dropout: 0.001
lora_target_modules:
lora_target_linear: true
lora_fan_in_fan_out:
relora_steps: 20
relora_warmup_steps: 10
wandb_project: relora
wandb_watch:
wandb_run_id:
wandb_log_model:
output_dir: ./lora-out
gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 3
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: false
fp16: true
tf32: false
gradient_checkpointing: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
sdp_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 10
eval_steps: 20
save_steps: 50
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
bos_token: "<s>"
eos_token: "</s>"
unk_token: "<unk>"
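
For context on the two new keys: relora_steps: 20 and relora_warmup_steps: 10 drive ReLoRA's merge-and-reset cycle. Every 20 optimizer steps the LoRA deltas are folded into the frozen base weights, the adapters are re-initialized, stale optimizer state is dropped, and the learning rate is re-warmed over 10 steps. The Python below is a minimal self-contained sketch of that cycle under those assumptions, not the axolotl implementation from this commit; LoRALinear, merge_and_reset, and relora_lr_scale are hypothetical names used only for illustration.

    import torch
    import torch.nn as nn

    class LoRALinear(nn.Module):
        """Frozen base linear plus a trainable low-rank delta (hypothetical)."""
        def __init__(self, in_f, out_f, r=8, alpha=16):
            super().__init__()
            self.base = nn.Linear(in_f, out_f, bias=False)
            self.base.weight.requires_grad_(False)           # base stays frozen
            self.lora_a = nn.Parameter(torch.randn(r, in_f) * 0.01)
            self.lora_b = nn.Parameter(torch.zeros(out_f, r))
            self.scaling = alpha / r                         # lora_alpha / lora_r

        def forward(self, x):
            return self.base(x) + (x @ self.lora_a.T @ self.lora_b.T) * self.scaling

        def merge_and_reset(self):
            # ReLoRA core step: fold the adapter delta into the frozen weight,
            # then re-initialize the adapter so the next cycle learns a new delta.
            with torch.no_grad():
                self.base.weight += (self.lora_b @ self.lora_a) * self.scaling
                nn.init.normal_(self.lora_a, std=0.01)
                nn.init.zeros_(self.lora_b)

    def relora_lr_scale(step, relora_steps=20, warmup=10):
        # Linear LR re-warmup over the first `warmup` steps of each cycle,
        # mirroring relora_warmup_steps above.
        return min(1.0, ((step % relora_steps) + 1) / warmup)

    layer = LoRALinear(64, 64)
    opt = torch.optim.AdamW([layer.lora_a, layer.lora_b], lr=2e-4)
    for step in range(60):
        loss = layer(torch.randn(4, 64)).pow(2).mean()
        loss.backward()
        for group in opt.param_groups:
            group["lr"] = 2e-4 * relora_lr_scale(step)
        opt.step()
        opt.zero_grad()
        if (step + 1) % 20 == 0:        # every relora_steps optimizer steps
            layer.merge_and_reset()
            opt.state.clear()           # drop stale Adam moments after the merge

The real implementation also has to prune optimizer state per adapter parameter and cooperate with the cosine lr_scheduler configured above (and, per the commit title, with 4-bit qlora base weights, where merging is more involved); this sketch collapses those details.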