From 9a8e3e9c7bdb01e3730438c49b4cf5d4c4403c87 Mon Sep 17 00:00:00 2001
From: NanoCode012
Date: Fri, 11 Apr 2025 20:52:23 +0700
Subject: [PATCH] Feat(examples): add deepcogito (#2516) [skip ci]

* feat: add examples for deepcogito

* fix: reduce num evals per epoch

* fix: reduce num epochs
---
 .../cogito-v1-preview-llama-3B-lora.yml       | 58 +++++++++++++++++++
 .../cogito-v1-preview-qwen-14B-lora.yml       | 58 +++++++++++++++++++
 2 files changed, 116 insertions(+)
 create mode 100644 examples/deepcogito/cogito-v1-preview-llama-3B-lora.yml
 create mode 100644 examples/deepcogito/cogito-v1-preview-qwen-14B-lora.yml

diff --git a/examples/deepcogito/cogito-v1-preview-llama-3B-lora.yml b/examples/deepcogito/cogito-v1-preview-llama-3B-lora.yml
new file mode 100644
index 000000000..2c0495ced
--- /dev/null
+++ b/examples/deepcogito/cogito-v1-preview-llama-3B-lora.yml
@@ -0,0 +1,58 @@
+base_model: deepcogito/cogito-v1-preview-llama-3B
+# Automatically upload checkpoint and final model to HF
+# hub_model_id: username/custom_model_name
+
+load_in_8bit: true
+load_in_4bit: false
+strict: false
+
+datasets:
+  - path: fozziethebeat/alpaca_messages_2k_test
+    type: chat_template
+    field_messages: messages
+    message_property_mappings:
+      role: role
+      content: content
+
+dataset_prepared_path:
+val_set_size: 0.05
+output_dir: ./outputs/lora-out
+
+sequence_len: 4096
+sample_packing: true
+eval_sample_packing: false
+pad_to_sequence_len: true
+
+adapter: lora
+lora_model_dir:
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 2
+micro_batch_size: 2
+num_epochs: 1
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+bf16: auto
+tf32: true
+
+gradient_checkpointing: true
+resume_from_checkpoint:
+logging_steps: 1
+flash_attention: true
+
+warmup_steps: 10
+evals_per_epoch: 1
+saves_per_epoch: 1
+weight_decay: 0.0
+special_tokens:
diff --git a/examples/deepcogito/cogito-v1-preview-qwen-14B-lora.yml b/examples/deepcogito/cogito-v1-preview-qwen-14B-lora.yml
new file mode 100644
index 000000000..de9c956e0
--- /dev/null
+++ b/examples/deepcogito/cogito-v1-preview-qwen-14B-lora.yml
@@ -0,0 +1,58 @@
+base_model: deepcogito/cogito-v1-preview-qwen-14B
+# Automatically upload checkpoint and final model to HF
+# hub_model_id: username/custom_model_name
+
+load_in_8bit: true
+load_in_4bit: false
+strict: false
+
+datasets:
+  - path: fozziethebeat/alpaca_messages_2k_test
+    type: chat_template
+    field_messages: messages
+    message_property_mappings:
+      role: role
+      content: content
+
+dataset_prepared_path:
+val_set_size: 0.05
+output_dir: ./outputs/lora-out
+
+sequence_len: 4096
+sample_packing: true
+eval_sample_packing: false
+pad_to_sequence_len: true
+
+adapter: lora
+lora_model_dir:
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 2
+micro_batch_size: 2
+num_epochs: 1
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+bf16: auto
+tf32: true
+
+gradient_checkpointing: true
+resume_from_checkpoint:
+logging_steps: 1
+flash_attention: true
+
+warmup_steps: 10
+evals_per_epoch: 1
+saves_per_epoch: 1
+weight_decay: 0.0
+special_tokens:
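
Usage: a minimal sketch of launching training with one of these example configs, assuming axolotl and accelerate are installed and the repo root is the working directory (swap in the qwen-14B config as needed):

    accelerate launch -m axolotl.cli.train examples/deepcogito/cogito-v1-preview-llama-3B-lora.yml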