Merge pull request #105 from viktoriussuwandi/viktoriussuwandi-patch
Viktoriussuwandi patch
This commit is contained in:
@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project: pythia-1.4b-lora
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./lora-alpaca
|
||||
batch_size: 32
|
||||
micro_batch_size: 4
|
||||
|
||||
@@ -21,7 +21,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project:
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./lora-llama-alpaca
|
||||
batch_size: 32
|
||||
micro_batch_size: 16
|
||||
|
||||
@@ -23,7 +23,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
|
||||
wandb_project: gpt4all-neox-20b
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./gpt4all-neox-20b
|
||||
batch_size: 48
|
||||
micro_batch_size: 4
|
||||
|
||||
@@ -21,7 +21,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project:
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./llama-13b-sharegpt
|
||||
batch_size: 64
|
||||
micro_batch_size: 2
|
||||
|
||||
@@ -27,7 +27,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project: llama-65b-lora
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./lora-llama-alpaca
|
||||
batch_size: 128
|
||||
micro_batch_size: 16
|
||||
|
||||
@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project:
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./lora-test
|
||||
batch_size: 8
|
||||
micro_batch_size: 2
|
||||
|
||||
@@ -26,7 +26,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project: llama-7b-lora
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./lora-llama-alpaca
|
||||
batch_size: 128
|
||||
micro_batch_size: 16
|
||||
|
||||
@@ -22,7 +22,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project: jeopardy-bot-7b
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./jeopardy-bot-7b
|
||||
batch_size: 4
|
||||
micro_batch_size: 1
|
||||
|
||||
@@ -26,7 +26,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
|
||||
wandb_project: pythia-1.4b-lora
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./lora-alpaca
|
||||
batch_size: 48
|
||||
micro_batch_size: 4
|
||||
|
||||
@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project:
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./lora-test
|
||||
batch_size: 4
|
||||
micro_batch_size: 1
|
||||
|
||||
@@ -49,7 +49,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project:
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
# where to save the finished model to
|
||||
output_dir: ./completed-model
|
||||
# training hyperparameters
|
||||
|
||||
@@ -20,7 +20,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project: stable-alpaca-3b
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./stable-alpaca-3b
|
||||
batch_size: 2
|
||||
micro_batch_size: 1
|
||||
|
||||
@@ -28,7 +28,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project:
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./lora-reflect
|
||||
batch_size: 8
|
||||
micro_batch_size: 2
|
||||
|
||||
@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project: llama-7b-lora-int4
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./llama-7b-lora-int4
|
||||
batch_size: 1
|
||||
micro_batch_size: 1
|
||||
|
||||
@@ -22,7 +22,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project: mpt-alpaca-7b
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./mpt-alpaca-7b
|
||||
batch_size: 1
|
||||
micro_batch_size: 1
|
||||
|
||||
@@ -23,7 +23,7 @@ lora_fan_in_fan_out: false
|
||||
wandb_project: redpajama-alpaca-3b
|
||||
wandb_watch:
|
||||
wandb_run_id:
|
||||
wandb_log_model: checkpoint
|
||||
wandb_log_model:
|
||||
output_dir: ./redpajama-alpaca-3b
|
||||
batch_size: 4
|
||||
micro_batch_size: 1
|
||||
|
||||
Reference in New Issue
Block a user