base_model: CohereForAI/c4ai-command-r7b-12-2024
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: true
strict: false

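# load_in_4bit: true quantizes the frozen base weights to 4-bit (the "Q" in
# QLoRA); only the LoRA adapter weights configured below are trained.
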
# huggingface repo
chat_template: cohere
datasets:
  - path: cgato/SlimOrcaDedupCleaned
    type: chat_template
    field_messages: conversations
    message_property_mappings:
      role: from
      content: value

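# Note: cgato/SlimOrcaDedupCleaned stores ShareGPT-style turns in a
# `conversations` list keyed by `from`/`value`; the mapping above renames
# them to the `role`/`content` fields the chat template expects.
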
val_set_size: 0.0
output_dir: ./outputs/out

adapter: qlora
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true

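# lora_target_linear: true attaches adapters to all linear projections;
# the effective LoRA scaling is lora_alpha / lora_r = 16 / 32 = 0.5.
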
sequence_len: 2048
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

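# sample_packing concatenates multiple short examples into each
# 2048-token sequence to cut padding waste during training.
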
wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 4
micro_batch_size: 1
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

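# Effective batch size = micro_batch_size x gradient_accumulation_steps x
# number of GPUs, i.e. 4 sequences per optimizer step on a single GPU.
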
train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_ratio: 0.1
evals_per_epoch:
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
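
# A typical way to launch training with this config (the file path is
# illustrative; adjust it to wherever you save this file, and note the
# short `axolotl` entry point requires a recent axolotl version):
#   axolotl train qlora.yaml
# or, equivalently:
#   accelerate launch -m axolotl.cli.train qlora.yaml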