# the original mxfp4 quantized model is not supported with FSDP cpu_ram_efficient_loading
# FSDP cpu_ram_efficient_loading is used to reduce the initial CPU memory usage when loading the model
base_model: axolotl-ai-co/gpt-oss-120b-dequantized

use_kernels: false

dp_shard_size: 16 # requires 2x8xH100 nodes

plugins:
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
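  # Cut Cross-Entropy computes the loss without materializing the full logits
  # tensor, which saves substantial memory at gpt-oss's large vocabulary size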
experimental_skip_move_to_device: true # prevent OOM by NOT moving the model to the GPU before sharding

datasets:
  - path: HuggingFaceH4/Multilingual-Thinking
    type: chat_template
    field_thinking: thinking
    template_thinking_key: thinking
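    # the two thinking keys map the dataset's "thinking" column into the chat
    # template's reasoning slot, so the traces are part of the rendered training text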
dataset_prepared_path: last_run_prepared
val_set_size: 0
output_dir: ./outputs/gpt-oss-out/
save_total_limit: 2 # the 120B model can use up to 720GB of disk space per checkpoint, so let's only keep the last 2

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true
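# sample_packing packs multiple short examples into each 4096-token sequence to raise
# throughput; pad_to_sequence_len keeps every batch the same shape for steady memory use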
wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 2
micro_batch_size: 1
num_epochs: 1
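# effective global batch: micro_batch_size (1) x gradient_accumulation_steps (2)
# x 16 GPUs = 32 packed sequences per optimizer step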
optimizer: adamw_torch_fused # 8bit optimizers do not work with FSDP2 offload
lr_scheduler: constant_with_warmup
learning_rate: 2e-5

bf16: true
tf32: true
flash_attention: true
attn_implementation: kernels-community/vllm-flash-attn3 # this is not needed if using flash_attn >= 2.8.3

gradient_checkpointing: true
activation_offloading: true
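# activation_offloading moves checkpointed activations to CPU RAM during the forward
# pass and restores them for backward, trading transfer time for GPU memory headroom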
logging_steps: 1
saves_per_epoch: 1

warmup_ratio: 0.03

special_tokens:
eot_tokens:
  - "<|end|>"
fsdp_version: 2
fsdp_config:
  offload_params: true
  state_dict_type: SHARDED_STATE_DICT
  auto_wrap_policy: TRANSFORMER_BASED_WRAP
  transformer_layer_cls_to_wrap: GptOssDecoderLayer
  reshard_after_forward: true
  cpu_ram_efficient_loading: true
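# a sketch of a two-node launch, assuming this config is saved as gpt-oss-120b-fsdp2.yaml
# (hypothetical filename); run on each node with your cluster's rendezvous flags:
#   torchrun --nnodes 2 --nproc_per_node 8 <rendezvous flags> \
#     -m axolotl.cli.train gpt-oss-120b-fsdp2.yaml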