add llama 7b config and fix lora_fan_in_fan_out for llama (copy pasta bug)
@@ -22,7 +22,7 @@ lora_dropout: 0.05
 lora_target_modules:
   - q_proj
   - w_proj
-lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
+lora_fan_in_fan_out: false
 wandb_project: llama-65b-lora
 wandb_watch:
 wandb_run_id:
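Why the flag flips to false above: in PEFT's LoraConfig, fan_in_fan_out should only be true when the wrapped layer stores its weight as (fan_in, fan_out), e.g. GPT-2-style Conv1D blocks; LLaMA's attention projections are plain nn.Linear, so false is the correct value and the old setting was a leftover from the config it was copied from. A minimal sketch of how these YAML keys could map onto a peft LoraConfig (values are restated from the config above; the repo's actual loader may wire this up differently):

from peft import LoraConfig

# Values restated from the YAML config; in a real trainer they would come from the parsed config dict.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "w_proj"],
    fan_in_fan_out=False,  # LLaMA projections are nn.Linear, not GPT-2-style Conv1D
    bias="none",
    task_type="CAUSAL_LM",
)
# peft.get_peft_model(model, lora_config) would then wrap the base model with LoRA adapters.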
configs/llama_7B_alpaca.yml (new file, 41 lines)
@@ -0,0 +1,41 @@
+base_model: huggyllama/llama-7b
+model_type: LlamaForCausalLM
+tokenizer_type: LlamaTokenizer
+load_in_8bit: true
+datasets:
+  - path: data/alpaca_data_gpt4.jsonl
+    type: alpaca
+  - path: data/vicuna_cleaned.jsonl
+    type: sharegpt
+  - path: data/gpt4-instruct-similarity-0.6-dataset.jsonl
+    type: gpteacher
+  - path: data/roleplay-similarity_0.6-instruct-dataset.jsonl
+    type: gpteacher
+dataset_prepared_path: data/last_run
+val_set_size: 0.04
+adapter: lora
+lora_model_dir:
+sequence_len: 2048
+lora_r: 8
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_modules:
+  - q_proj
+  - w_proj
+lora_fan_in_fan_out: false
+wandb_project: llama-7b-lora
+wandb_watch:
+wandb_run_id:
+wandb_log_model: checkpoint
+output_dir: ./lora-llama-alpaca
+batch_size: 128
+micro_batch_size: 16
+num_epochs: 5
+learning_rate: 0.00003
+train_on_inputs: false
+group_by_length: false
+bf16: true
+tf32: true
+resume_from_checkpoint:
+local_rank:
+deepspeed:
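One piece of arithmetic implied by the new config: with batch_size 128 and micro_batch_size 16, the trainer accumulates 128 / 16 = 8 micro-batches per optimizer step (per device, before any data parallelism). A small sketch of loading the file and deriving that number, assuming PyYAML is available; the path matches the file added in this commit, but the repo's own entrypoint may compute it differently:

import yaml

# Path of the config file added in this commit.
with open("configs/llama_7B_alpaca.yml") as f:
    cfg = yaml.safe_load(f)

# Gradient accumulation: how many micro-batches are summed before each optimizer update.
grad_accum_steps = cfg["batch_size"] // cfg["micro_batch_size"]
print(grad_accum_steps)  # 128 // 16 == 8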