Merge pull request #105 from viktoriussuwandi/viktoriussuwandi-patch

Viktoriussuwandi patch
Wing Lian committed 2023-05-30 15:05:23 -04:00 (committed via GitHub)
16 changed files with 16 additions and 16 deletions
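
Every hunk below makes the same one-line edit: the example configs previously set wandb_log_model: checkpoint, and the patch blanks the value out. When this key has a value, it is typically exported as the WANDB_LOG_MODEL environment variable, which the Hugging Face transformers WandbCallback reads to decide whether to upload saved checkpoints to Weights & Biases as artifacts; leaving it empty means the examples no longer push checkpoint weights to wandb by default. A minimal annotated sketch of the affected block, assuming that env-var wiring (the comments are added context, not part of the diff, and the project name is hypothetical):

wandb_project: my-project       # hypothetical project name
wandb_watch:                    # empty: do not call wandb.watch on the model
wandb_run_id:                   # empty: let wandb generate a run id
wandb_log_model:                # empty: checkpoints are not uploaded to wandb
#wandb_log_model: checkpoint    # old example default: upload every saved checkpoint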

@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
 wandb_project: pythia-1.4b-lora
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-alpaca
 batch_size: 32
 micro_batch_size: 4

@@ -21,7 +21,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-llama-alpaca
 batch_size: 32
 micro_batch_size: 16

@@ -23,7 +23,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
 wandb_project: gpt4all-neox-20b
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./gpt4all-neox-20b
 batch_size: 48
 micro_batch_size: 4

@@ -21,7 +21,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./llama-13b-sharegpt
 batch_size: 64
 micro_batch_size: 2

@@ -27,7 +27,7 @@ lora_fan_in_fan_out: false
 wandb_project: llama-65b-lora
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-llama-alpaca
 batch_size: 128
 micro_batch_size: 16

@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-test
 batch_size: 8
 micro_batch_size: 2

@@ -26,7 +26,7 @@ lora_fan_in_fan_out: false
 wandb_project: llama-7b-lora
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-llama-alpaca
 batch_size: 128
 micro_batch_size: 16

@@ -22,7 +22,7 @@ lora_fan_in_fan_out: false
 wandb_project: jeopardy-bot-7b
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./jeopardy-bot-7b
 batch_size: 4
 micro_batch_size: 1

@@ -26,7 +26,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
 wandb_project: pythia-1.4b-lora
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-alpaca
 batch_size: 48
 micro_batch_size: 4

@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-test
 batch_size: 4
 micro_batch_size: 1

@@ -49,7 +49,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 # where to save the finished model to
 output_dir: ./completed-model
 # training hyperparameters

@@ -20,7 +20,7 @@ lora_fan_in_fan_out: false
 wandb_project: stable-alpaca-3b
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./stable-alpaca-3b
 batch_size: 2
 micro_batch_size: 1

@@ -28,7 +28,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./lora-reflect
 batch_size: 8
 micro_batch_size: 2

@@ -24,7 +24,7 @@ lora_fan_in_fan_out: false
 wandb_project: llama-7b-lora-int4
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./llama-7b-lora-int4
 batch_size: 1
 micro_batch_size: 1

@@ -22,7 +22,7 @@ lora_fan_in_fan_out: false
 wandb_project: mpt-alpaca-7b
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./mpt-alpaca-7b
 batch_size: 1
 micro_batch_size: 1

@@ -23,7 +23,7 @@ lora_fan_in_fan_out: false
 wandb_project: redpajama-alpaca-3b
 wandb_watch:
 wandb_run_id:
-wandb_log_model: checkpoint
+wandb_log_model:
 output_dir: ./redpajama-alpaca-3b
 batch_size: 4
 micro_batch_size: 1
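
For users who do want checkpoint artifacts in wandb, the setting can be restored per config. The value semantics below follow the transformers WandbCallback's documented handling of WANDB_LOG_MODEL and are an assumption about the downstream wiring, not something this diff changes:

wandb_log_model: checkpoint   # upload each saved checkpoint as a wandb artifact
#wandb_log_model: end         # alternative: upload only the final model after training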