base_model: zai-org/GLM-4.7-Flash

plugins:
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

load_in_8bit: true
quantize_moe_experts: true

datasets:
  - path: fozziethebeat/alpaca_messages_2k_test
    type: chat_template
dataset_prepared_path: last_run_prepared
val_set_size: 0.1
output_dir: ./outputs/glm4.7-flash-lora-8bit-fsdp-out

adapter: lora
lora_model_dir:

sequence_len: 2048
sample_packing: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0
lora_target_modules:
  - q_proj
  - v_proj
  - k_proj
  - o_proj
# Uncomment to also target MoE expert weights:
# lora_target_parameters:
#   - mlp.experts.gate_up_proj
#   - mlp.experts.down_proj

# LoRA kernels are incompatible with DSA attention
lora_mlp_kernel: false
lora_qkv_kernel: false
lora_o_kernel: false

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 1
optimizer: adamw_torch_8bit
lr_scheduler: cosine
learning_rate: 0.0002

bf16: auto
tf32: false

resume_from_checkpoint:
logging_steps: 1
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch: 1
saves_per_epoch: 1

fsdp_config:
  fsdp_version: 2
  offload_params: false
  cpu_ram_efficient_loading: false
  auto_wrap_policy: TRANSFORMER_BASED_WRAP
  transformer_layer_cls_to_wrap: Glm4MoeLiteDecoderLayer
  state_dict_type: FULL_STATE_DICT
  sharding_strategy: FULL_SHARD
  reshard_after_forward: true
  activation_checkpointing: true
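
# Launch sketch (kept as comments so the file stays valid YAML): the config
# filename below is an assumption; substitute the path where you save this file.
#   axolotl train glm4.7-flash-lora-8bit-fsdp.yaml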