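# LoRA fine-tune of TinyLlama-1.1B on an Alpaca-format dataset, with the base model loaded in 8-bit.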
base_model: PY007/TinyLlama-1.1B-intermediate-step-715k-1.5T

model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
is_llama_derived_model: true

load_in_8bit: true
load_in_4bit: false
strict: false

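# dataset, held-out validation split (5%), and output directory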
datasets:
  - path: mhenrichsen/alpaca_2k_test
    type: alpaca
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./lora-out

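# context length and sample packing (multiple short examples are packed into each 4096-token sequence)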
sequence_len: 4096
sample_packing: true

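# LoRA adapter hyperparameters; lora_target_linear: true applies LoRA to all linear layers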
adapter: lora
lora_model_dir:
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:

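# Weights & Biases logging (all fields left unset here)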
wandb_project:
wandb_entity:
wandb_watch:
wandb_run_id:
wandb_log_model:

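# optimizer and learning-rate schedule; effective batch size per device = micro_batch_size * gradient_accumulation_steps = 8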
gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

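# train_on_inputs: false masks prompt tokens from the loss; bf16 mixed precision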
train_on_inputs: false
group_by_length: false
bf16: true
fp16: false
tf32: false

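# gradient checkpointing for lower memory use; flash attention enabled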
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

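# warmup, evaluation and checkpoint cadence, and tokenizer special tokens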
warmup_steps: 10
eval_steps: 0.05
eval_table_size:
save_steps:
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
  bos_token: "<s>"
  eos_token: "</s>"
  unk_token: "<unk>"

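# Usage sketch (assumes a working axolotl install; the config path below is a placeholder
# for wherever this file is saved):
#   accelerate launch -m axolotl.cli.train path/to/this-config.yml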