simplify the example configs to be more minimal and less daunting (#2486) [skip ci]

* simplify the example configs to be more minimal and less daunting

* drop empty s2_attention from example yamls
Author:    Wing Lian
Date:      2025-04-04 13:47:26 -04:00
Committer: GitHub
Parent:    dd66fb163c
Commit:    9f824ef76a
101 changed files with 14 additions and 1140 deletions
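
For context, the deletions are mostly empty placeholder keys and values that restate defaults. A hedged sketch of the pattern, with illustrative keys drawn from this commit rather than any one exact file:

    # dropped: empty keys that configure nothing
    s2_attention:
    lora_fan_in_fan_out:
    # dropped: explicit values that appear to restate axolotl defaults
    load_in_8bit: false
    load_in_4bit: false
    # kept: settings that actually shape the run
    learning_rate: 0.000003
    flash_attention: true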


@@ -4,9 +4,6 @@ model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
 
 # Automatically upload checkpoint and final model to HF
 # hub_model_id: username/custom_model_name
-load_in_8bit: false
-load_in_4bit: false
-strict: false
 
 datasets:
@@ -27,7 +24,6 @@ lora_r:
 lora_alpha:
 lora_dropout:
 lora_target_linear:
-lora_fan_in_fan_out:
 
 wandb_project:
 wandb_entity:
@@ -45,30 +41,20 @@ max_grad_norm: 1.0
 lr_scheduler: cosine
 learning_rate: 0.000003
 
 train_on_inputs: false
-group_by_length: false
 bf16: auto
-fp16:
 tf32: true
 
 gradient_checkpointing: true
 gradient_checkpointing_kwargs:
   use_reentrant: True
-early_stopping_patience:
-resume_from_checkpoint:
-local_rank:
 logging_steps: 1
-xformers_attention:
 flash_attention: true
 
 warmup_steps: 100
 evals_per_epoch: 4
 saves_per_epoch: 1
-debug:
-deepspeed:
 weight_decay: 0.1
-fsdp:
-fsdp_config:
 resize_token_embeddings_to_32x: true
 special_tokens:
   pad_token: "<|endoftext|>"
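
Stitching the surviving context lines together, and assuming the untouched sections between the hunks are unchanged, the tail of this simplified example config reduces to roughly the following sketch:

    lr_scheduler: cosine
    learning_rate: 0.000003

    train_on_inputs: false
    bf16: auto
    tf32: true

    gradient_checkpointing: true
    gradient_checkpointing_kwargs:
      use_reentrant: True
    logging_steps: 1
    flash_attention: true

    warmup_steps: 100
    evals_per_epoch: 4
    saves_per_epoch: 1
    weight_decay: 0.1
    resize_token_embeddings_to_32x: true
    special_tokens:
      pad_token: "<|endoftext|>"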