Compare commits

tinyllama-... ... unsloth_mo...

23 Commits (SHA1):
4f9b172c47
8671ed5a0c
538c004080
add3b139ed
a581e9f8f6
992e742cdc
a1da39cd48
58ec8b1113
476a205cea
3e3229e2d9
1d21aa6b0a
71b7ea3c05
a48dbf6561
6a4562ac08
1115c501b8
7ee3c4cacb
fb12895a17
9fc29e082b
575a082aae
ddf815022a
9bf854e59c
797f3dd1de
0de1457189
.github/workflows/tests.yml (vendored, 1 change)

@@ -71,6 +71,7 @@ jobs:
       - name: Install dependencies
         run: |
+          pip3 install --extra-index-url https://download.pytorch.org/whl/cu118 -U torch==2.0.1
           pip3 uninstall -y transformers accelerate
           pip3 install -U -e .[flash-attn]
           pip3 install -r requirements-tests.txt
README.md (28 changes)

@@ -77,6 +77,7 @@ Features:
 | XGen | ✅ | ❓ | ✅ | ❓ | ❓ | ❓ | ✅ |
 | phi | ✅ | ✅ | ✅ | ❓ | ❓ | ❓ | ❓ |
 | RWKV | ✅ | ❓ | ❓ | ❓ | ❓ | ❓ | ❓ |
+| Qwen | ✅ | ✅ | ✅ | ❓ | ❓ | ❓ | ❓ |


 ## Quickstart ⚡

@@ -85,14 +86,19 @@ Get started with Axolotl in just a few steps! This quickstart guide will walk yo

 **Requirements**: Python >=3.9 and Pytorch >=2.0.

+`pip3 install "axolotl[flash-attn,deepspeed] @ git+https://github.com/OpenAccess-AI-Collective/axolotl"`
+
+### For developers
 ```bash
 git clone https://github.com/OpenAccess-AI-Collective/axolotl
 cd axolotl

 pip3 install packaging
 pip3 install -e '.[flash-attn,deepspeed]'
-pip3 install -U git+https://github.com/huggingface/peft.git
+```
+
+### Usage
+```bash
 # finetune lora
 accelerate launch -m axolotl.cli.train examples/openllama-3b/lora.yml

@@ -494,6 +500,7 @@ is_falcon_derived_model:
 is_llama_derived_model:
 # Please note that if you set this to true, `padding_side` will be set to "left" by default
 is_mistral_derived_model:
+is_qwen_derived_model:

 # optional overrides to the base model configuration
 model_config:

@@ -538,6 +545,8 @@ datasets:

 # Optional[str] fastchat conversation type, only used with type: sharegpt
 conversation: # Options (see Conversation 'name'): https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
+field_human: # Optional[str]. Human key to use for conversation.
+field_model: # Optional[str]. Assistant key to use for conversation.

 # Custom user prompt
 - path: repo

@@ -603,6 +612,12 @@ eval_sample_packing:
 sample_packing_eff_est:
 total_num_tokens:

+# Passed through to transformers when loading the model when launched without accelerate
+# Use `sequential` when training w/ model parallelism to limit memory
+device_map:
+# Defines the max memory usage per gpu on the system. Passed through to transformers when loading the model.
+max_memory:
+
 # If you want to use 'lora' or 'qlora' or leave blank to train all parameters in original model
 adapter: lora
 # If you already have a lora model trained that you want to load, put that here.

@@ -650,7 +665,8 @@ wandb_mode: # "offline" to save run metadata locally and not sync to the server,
 wandb_project: # Your wandb project name
 wandb_entity: # A wandb Team name if using a Team
 wandb_watch:
-wandb_run_id: # Set the name of your wandb run
+wandb_name: # Set the name of your wandb run
+wandb_run_id: # Set the ID of your wandb run
 wandb_log_model: # "checkpoint" to log model to wandb Artifacts every `save_steps` or "end" to log only at the end of training

 # Where to save the full-finetuned model to

@@ -668,7 +684,8 @@ gradient_accumulation_steps: 1
 micro_batch_size: 2
 eval_batch_size:
 num_epochs: 4
-warmup_steps: 100
+warmup_steps: 100 # cannot use with warmup_ratio
+warmup_ratio: 0.05 # cannot use with warmup_steps
 learning_rate: 0.00003
 lr_quadratic_warmup:
 logging_steps:

@@ -684,6 +701,9 @@ max_steps:
 eval_table_size: # Approximate number of predictions sent to wandb depending on batch size. Enabled above 0. Default is 0
 eval_table_max_new_tokens: # Total number of tokens generated for predictions sent to wandb. Default is 128

+loss_watchdog_threshold: # High loss value, indicating the learning has broken down (a good estimate is ~2 times the loss at the start of training)
+loss_watchdog_patience: # Number of high-loss steps in a row before the trainer aborts (default: 3)
+
 # Save model as safetensors (require safetensors package)
 save_safetensors:

@@ -942,7 +962,7 @@ wandb_mode:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 ```

@@ -24,16 +24,6 @@
         "weight_decay": "auto"
       }
     },
-    "scheduler": {
-        "type": "WarmupDecayLR",
-        "params": {
-            "warmup_min_lr": "auto",
-            "warmup_max_lr": "auto",
-            "warmup_num_steps": "auto",
-            "warmup_type": "linear",
-            "total_num_steps": "auto"
-        }
-    },
     "gradient_accumulation_steps": "auto",
     "train_batch_size": "auto",
     "train_micro_batch_size_per_gpu": "auto",

@@ -28,16 +28,6 @@
         "weight_decay": "auto"
       }
     },
-    "scheduler": {
-        "type": "WarmupDecayLR",
-        "params": {
-            "warmup_min_lr": "auto",
-            "warmup_max_lr": "auto",
-            "warmup_num_steps": "auto",
-            "warmup_type": "linear",
-            "total_num_steps": "auto"
-        }
-    },
     "gradient_accumulation_steps": "auto",
     "train_batch_size": "auto",
     "train_micro_batch_size_per_gpu": "auto",

@@ -32,16 +32,6 @@
         "weight_decay": "auto"
      }
     },
-    "scheduler": {
-        "type": "WarmupDecayLR",
-        "params": {
-            "warmup_min_lr": "auto",
-            "warmup_max_lr": "auto",
-            "warmup_num_steps": "auto",
-            "warmup_type": "linear",
-            "total_num_steps": "auto"
-        }
-    },
     "gradient_accumulation_steps": "auto",
     "train_batch_size": "auto",
     "train_micro_batch_size_per_gpu": "auto",
@@ -35,7 +35,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 output_dir: btlm-out

@@ -24,7 +24,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./qlora-out
 batch_size: 4

@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -31,7 +31,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -31,7 +31,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -31,7 +31,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -26,7 +26,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./falcon-7b
 batch_size: 2

@@ -40,7 +40,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./qlora-out

@@ -26,7 +26,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./falcon-7b
 batch_size: 2

@@ -21,7 +21,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./qlora-out
 gradient_accumulation_steps: 2

@@ -19,7 +19,7 @@ lora_fan_in_fan_out: false
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./jeopardy-bot-7b
 gradient_accumulation_steps: 1

@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 1

@@ -32,7 +32,7 @@ lora_target_linear:
 lora_fan_in_fan_out:
 wandb_project:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./model-out
 gradient_accumulation_steps: 1

@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -31,7 +31,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -35,7 +35,7 @@ relora_cpu_offload: false
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -21,7 +21,7 @@ pad_to_sequence_len: true
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -38,7 +38,7 @@ lora_target_modules:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 4

@@ -62,6 +62,9 @@ logging_steps: 1
 xformers_attention:
 flash_attention: true

+loss_watchdog_threshold: 5.0
+loss_watchdog_patience: 3
+
 warmup_steps: 10
 eval_steps: 0.05
 eval_table_size:

@@ -21,7 +21,7 @@ lora_fan_in_fan_out: false
 wandb_project: mpt-alpaca-7b
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./mpt-alpaca-7b
 gradient_accumulation_steps: 1

@@ -23,7 +23,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./openllama-out
 gradient_accumulation_steps: 1

@@ -29,7 +29,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./lora-out
 gradient_accumulation_steps: 1

@@ -23,7 +23,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./qlora-out
 gradient_accumulation_steps: 1

@@ -1,5 +1,5 @@
 base_model: microsoft/phi-1_5
-model_type: MixFormerSequentialForCausalLM
+model_type: PhiForCausalLM
 tokenizer_type: AutoTokenizer
 is_llama_derived_model: false
 trust_remote_code: true

@@ -31,7 +31,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 1

@@ -31,7 +31,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:

 gradient_accumulation_steps: 1

@@ -24,7 +24,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./pythia-12b
 gradient_accumulation_steps: 1

@@ -18,7 +18,7 @@ lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./lora-alpaca-pythia
 gradient_accumulation_steps: 1
examples/qwen/lora.yml (new file, 68 lines)

@@ -0,0 +1,68 @@
+base_model: Qwen/Qwen-7B
+model_type: AutoModelForCausalLM
+tokenizer_type: AutoTokenizer
+
+is_qwen_derived_model: true
+trust_remote_code: true
+
+load_in_8bit: true
+load_in_4bit: false
+strict: false
+
+datasets:
+  - path: mhenrichsen/alpaca_2k_test
+    type: alpaca
+dataset_prepared_path:
+val_set_size: 0.05
+output_dir: ./lora-out
+
+sequence_len: 2048 # supports up to 8192
+sample_packing: false
+pad_to_sequence_len:
+
+adapter: lora
+lora_model_dir:
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+lora_fan_in_fan_out:
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 2
+num_epochs: 4
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: false
+bf16: true
+fp16: false
+tf32: false
+
+gradient_checkpointing: false
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention:
+
+warmup_steps: 10
+eval_steps: 0.05
+eval_table_size:
+eval_table_max_new_tokens: 128
+save_steps:
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
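As a quick sanity check of the new example, here is a minimal sketch (not part of the patch) that loads the config and prints the settings most relevant to Qwen support; it assumes PyYAML is installed and the repository is checked out so the file path exists.

```python
# Minimal sketch: load the new Qwen LoRA example and print its key settings.
import yaml

with open("examples/qwen/lora.yml", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

print(cfg["base_model"])             # Qwen/Qwen-7B
print(cfg["is_qwen_derived_model"])  # True
print(cfg["adapter"], cfg["load_in_8bit"], cfg["sequence_len"])
```

The qlora.yml variant below differs only in `load_in_8bit: false`, `load_in_4bit: true`, and `adapter: qlora`.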
examples/qwen/qlora.yml (new file, 68 lines)

@@ -0,0 +1,68 @@
+base_model: Qwen/Qwen-7B
+model_type: AutoModelForCausalLM
+tokenizer_type: AutoTokenizer
+
+is_qwen_derived_model: true
+trust_remote_code: true
+
+load_in_8bit: false
+load_in_4bit: true
+strict: false
+
+datasets:
+  - path: mhenrichsen/alpaca_2k_test
+    type: alpaca
+dataset_prepared_path:
+val_set_size: 0.05
+output_dir: ./lora-out
+
+sequence_len: 2048 # supports up to 8192
+sample_packing: false
+pad_to_sequence_len:
+
+adapter: qlora
+lora_model_dir:
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+lora_fan_in_fan_out:
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 2
+num_epochs: 4
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: false
+bf16: true
+fp16: false
+tf32: false
+
+gradient_checkpointing: false
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention:
+
+warmup_steps: 10
+eval_steps: 0.05
+eval_table_size:
+eval_table_max_new_tokens: 128
+save_steps:
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:

@@ -22,7 +22,7 @@ lora_fan_in_fan_out: false
 wandb_project: redpajama-alpaca-3b
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./redpajama-alpaca-3b
 batch_size: 4

@@ -21,7 +21,7 @@ lora_fan_in_fan_out:
 wandb_project: lora-replit
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./lora-replit
 batch_size: 8

@@ -38,7 +38,7 @@ lora_fan_in_fan_out:
 wandb_project:
 wandb_entity:
 wandb_watch:
-wandb_run_id:
+wandb_name:
 wandb_log_model:
 output_dir: ./qlora-out
@@ -1,22 +1,21 @@
---extra-index-url https://download.pytorch.org/whl/cu118
 --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/
-torch==2.0.1
-auto-gptq==0.4.2
+auto-gptq==0.5.1
 packaging
 peft==0.6.0
-transformers @ git+https://github.com/huggingface/transformers.git@acc394c4f5e1283c19783581790b3dc3105a3697
+transformers==4.35.2
+tokenizers==0.15.0
 bitsandbytes>=0.41.1
-accelerate @ git+https://github.com/huggingface/accelerate@80da9cfb09bb3cc9f1b385cb55d6b90d025a5fd9
+accelerate==0.24.1
 deepspeed
 addict
 fire
 PyYAML>=6.0
-datasets>=2.14.0
+datasets>=2.15.0
-flash-attn>=2.3.0
+flash-attn==2.3.3
 sentencepiece
 wandb
 einops
-xformers>=0.0.22
+xformers==0.0.22
 optimum==1.13.2
 hf_transfer
 colorama

@@ -31,7 +30,7 @@ scikit-learn==1.2.2
 pynvml
 art
 fschat==0.2.29
-gradio
+gradio==3.50.2
 tensorboard

 # remote filesystems
@@ -29,6 +29,7 @@ from axolotl.utils.dict import DictDefault
 from axolotl.utils.distributed import is_main_process
 from axolotl.utils.models import load_tokenizer
 from axolotl.utils.tokenization import check_dataset_labels
+from axolotl.utils.trainer import prepare_optim_env
 from axolotl.utils.wandb_ import setup_wandb_env_vars

 project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

@@ -71,7 +72,7 @@ def do_merge_lora(

     LOG.info("running merge of LoRA with base model")
     model = model.merge_and_unload()
-    model.to(dtype=torch.float16)
+    model.to(dtype=cfg.torch_dtype)

     if cfg.local_rank == 0:
         LOG.info(f"saving merged model to: {str(Path(cfg.output_dir) / 'merged')}")

@@ -296,6 +297,8 @@ def load_cfg(config: Path = Path("examples/"), **kwargs):

     validate_config(cfg)

+    prepare_optim_env(cfg)
+
     normalize_config(cfg)

     setup_wandb_env_vars(cfg)
@@ -25,6 +25,7 @@ from axolotl.monkeypatch.relora import ReLoRACallback, ReLoRAScheduler
 from axolotl.utils.callbacks import (
     EvalFirstStepCallback,
     GPUStatsCallback,
+    LossWatchDogCallback,
     SaveAxolotlConfigtoWandBCallback,
     SaveBetterTransformerModelCallback,
     bench_eval_callback_factory,

@@ -430,6 +431,9 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
                 SaveAxolotlConfigtoWandBCallback(self.cfg.axolotl_config_path)
             )

+        if self.cfg.loss_watchdog_threshold is not None:
+            callbacks.append(LossWatchDogCallback(self.cfg))
+
         return callbacks

     def get_post_trainer_create_callbacks(self, trainer):
@@ -461,11 +465,14 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         return AxolotlTrainer

     def build(self, total_num_steps):
-        warmup_steps = (
-            self.cfg.warmup_steps
-            if self.cfg.warmup_steps is not None
-            else min(int(0.03 * total_num_steps), 100)
-        )
+        warmup_steps = None
+        if self.cfg.warmup_steps is not None:
+            warmup_steps = self.cfg.warmup_steps
+        elif self.cfg.warmup_ratio is not None:
+            warmup_steps = max(int(self.cfg.warmup_ratio * total_num_steps), 0)
+        else:
+            warmup_steps = min(int(0.03 * total_num_steps), 100)

         logging_steps = (
             self.cfg.logging_steps
             if self.cfg.logging_steps is not None
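For reference, the new precedence is: explicit `warmup_steps` wins, then `warmup_ratio` scaled by the total step count, and otherwise the previous 3 %-capped-at-100 default. The following is a standalone sketch of that selection, not the trainer code itself (the helper name is hypothetical):

```python
# Standalone sketch of the warmup-step resolution introduced in the hunk above.
def resolve_warmup_steps(total_num_steps, warmup_steps=None, warmup_ratio=None):
    if warmup_steps is not None:
        return warmup_steps
    if warmup_ratio is not None:
        return max(int(warmup_ratio * total_num_steps), 0)
    return min(int(0.03 * total_num_steps), 100)

print(resolve_warmup_steps(1000))                     # 30  (default 3%, capped at 100)
print(resolve_warmup_steps(1000, warmup_ratio=0.05))  # 50
print(resolve_warmup_steps(1000, warmup_steps=100))   # 100
```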
@@ -640,7 +647,7 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         training_arguments_kwargs["group_by_length"] = self.cfg.group_by_length
         training_arguments_kwargs["report_to"] = "wandb" if self.cfg.use_wandb else None
         training_arguments_kwargs["run_name"] = (
-            self.cfg.wandb_run_id if self.cfg.use_wandb else None
+            self.cfg.wandb_name if self.cfg.use_wandb else None
         )
         training_arguments_kwargs["optim"] = (
             self.cfg.optimizer if self.cfg.optimizer else "adamw_hf"

@@ -658,7 +665,9 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             self.cfg.sample_packing if self.cfg.sample_packing else False
         )
         training_arguments_kwargs["eval_sample_packing"] = (
-            self.cfg.sample_packing if self.cfg.sample_packing else False
+            self.cfg.sample_packing
+            if self.cfg.eval_sample_packing is not False
+            else False
         )
         training_arguments_kwargs[
             "sample_packing_seq_len_multiplier"
@@ -3,4 +3,6 @@ MixFormers model architecture used for phi models
 """

 from .configuration_mixformer_sequential import MixFormerSequentialConfig  # noqa
+from .configuration_phi import PhiConfig  # noqa
 from .modeling_mixformer_sequential import MixFormerSequentialForCausalLM  # noqa
+from .modeling_phi import PhiForCausalLM  # noqa
src/axolotl/models/phi/configuration_phi.py (new file, 65 lines)

@@ -0,0 +1,65 @@
+# pylint: skip-file
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+import math
+from typing import Optional
+
+from transformers import PretrainedConfig
+
+
+class PhiConfig(PretrainedConfig):
+    """Phi configuration."""
+
+    model_type = "phi"
+    attribute_map = {
+        "max_position_embeddings": "n_positions",
+        "hidden_size": "n_embd",
+        "num_attention_heads": "n_head",
+        "num_hidden_layers": "n_layer",
+    }
+
+    def __init__(
+        self,
+        vocab_size: int = 50304,
+        n_positions: int = 2048,
+        n_embd: int = 1024,
+        n_layer: int = 20,
+        n_inner: Optional[int] = None,
+        n_head: int = 16,
+        n_head_kv: Optional[int] = None,
+        rotary_dim: Optional[int] = 32,
+        activation_function: Optional[str] = "gelu_new",
+        flash_attn: bool = False,
+        flash_rotary: bool = False,
+        fused_dense: bool = False,
+        attn_pdrop: float = 0.0,
+        embd_pdrop: float = 0.0,
+        resid_pdrop: float = 0.0,
+        layer_norm_epsilon: float = 1e-5,
+        initializer_range: float = 0.02,
+        tie_word_embeddings: bool = False,
+        pad_vocab_size_multiple: int = 64,
+        **kwargs
+    ) -> None:
+        self.vocab_size = int(
+            math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple
+        )
+        self.n_positions = n_positions
+        self.n_embd = n_embd
+        self.n_layer = n_layer
+        self.n_inner = n_inner
+        self.n_head = n_head
+        self.n_head_kv = n_head_kv
+        self.rotary_dim = min(rotary_dim, n_embd // n_head)
+        self.activation_function = activation_function
+        self.flash_attn = flash_attn
+        self.flash_rotary = flash_rotary
+        self.fused_dense = fused_dense
+        self.attn_pdrop = attn_pdrop
+        self.embd_pdrop = embd_pdrop
+        self.resid_pdrop = resid_pdrop
+        self.layer_norm_epsilon = layer_norm_epsilon
+        self.initializer_range = initializer_range
+
+        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
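A small hedged sketch of how the new config class might be instantiated (the import path assumes this branch is installed; the printed values follow from the defaults above, where the vocab size is rounded up to a multiple of `pad_vocab_size_multiple` and `rotary_dim` is clamped to `n_embd // n_head`):

```python
# Sketch only: instantiate PhiConfig with its defaults and inspect derived fields.
from axolotl.models.phi.configuration_phi import PhiConfig  # path assumes this branch

cfg = PhiConfig()
print(cfg.vocab_size)  # 50304 (already a multiple of 64, so unchanged)
print(cfg.rotary_dim)  # 32 (min(32, 1024 // 16) == 32)
print(cfg.model_type)  # "phi"
```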
src/axolotl/models/phi/modeling_phi.py (new file, 1063 lines) — file diff suppressed because it is too large.
src/axolotl/monkeypatch/cross_entropy.py (new file, 168 lines)

@@ -0,0 +1,168 @@
+# Adapted from Unsloth
+# https://github.com/unslothai/unsloth/blob/4b97a810b509c93f44be4c037c7aa18fb8922884/unsloth/kernels/cross_entropy_loss.py
+
+import triton
+import triton.language as tl
+import torch
+
+MAX_FUSED_SIZE = 65536
+
+def calculate_settings(n):
+    BLOCK_SIZE = triton.next_power_of_2(n)
+    # CUDA only supports 65536 - 2^16 threads per block
+    if BLOCK_SIZE > MAX_FUSED_SIZE:
+        raise RuntimeError(f"Cannot launch Triton kernel since n = {n} exceeds "\
+                           f"the maximum CUDA blocksize = {MAX_FUSED_SIZE}.")
+    num_warps = 4
+    if BLOCK_SIZE >= 32768: num_warps = 32
+    elif BLOCK_SIZE >= 8192: num_warps = 16
+    elif BLOCK_SIZE >= 2048: num_warps = 8
+    return BLOCK_SIZE, num_warps
+pass
+
+@triton.jit
+def _cross_entropy_forward(logits_ptr, logits_row_stride,
+                           loss_ptr,
+                           lse_ptr,
+                           labels_ptr,
+                           n_cols,
+                           BLOCK_SIZE: tl.constexpr,):
+    """
+        Cross Entropy Loss = 1/n sum [ -yi log(Pi) ]
+        Pi = exp(xi) / sum(exp(xi))
+        CE_i = -y log(p) = -y log[ exp(x) / sum(exp(x)) ]
+             = -y [ x - log[sum(exp(x))] ]
+             = y * (log[sum(exp(x))] - x)
+        If y == 0: CE_i = 0
+        If y == 1: CE_i = logsumexp - x
+    """
+    row_idx = tl.program_id(0)
+    logits_ptr += row_idx * logits_row_stride
+    loss_ptr += row_idx
+    lse_ptr += row_idx
+    labels_ptr += row_idx
+
+    col_offsets = tl.arange(0, BLOCK_SIZE)
+    mask = col_offsets < n_cols
+
+    # TODO: Fixup int32 locations to int64
+    label_idx = tl.load(labels_ptr).to(tl.int32)
+    logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32)
+    max_logits = tl.max(logits, 0)
+    # Maximum stops overflow
+    lse = tl.log(tl.sum(tl.exp(logits - max_logits), 0)) + max_logits
+    tl.store(lse_ptr, lse)
+
+    if label_idx != -100:
+        logits_label = tl.load(logits_ptr + label_idx).to(tl.float32)
+        loss = lse - logits_label
+    else:
+        loss = 0.0
+    tl.store(loss_ptr, loss)
+pass
+
+
+@triton.jit
+def _cross_entropy_backward(logits_ptr, logits_row_stride,
+                            dloss_ptr, dloss_row_stride,
+                            lse_ptr,
+                            labels_ptr,
+                            n_cols,
+                            BLOCK_SIZE: tl.constexpr,):
+    """
+        CE_i = -y log(P) = y * (log[sum(exp(x))] - x)
+        dC/dx = d/dx (y * log[sum(exp(x))] - x * y)
+
+        From https://en.wikipedia.org/wiki/LogSumExp
+        d/dx logsumexp = exp(x) / sum(exp(x)) = softmax(x)
+
+        dC/dx = y * exp(x) / sum(exp(x)) - d/dx (x * y)
+        dC/dx = y * exp[ log[exp(x) / sum(exp(x))] ] using x = exp(log(x)) trick
+        dC/dx = y * exp[x - logsumexp] - d/dx (x * y)
+
+        If y == 0: dC/dx = 0
+        If y == 1 and x == label: dC/dlabel = exp[x - logsumexp] - 1
+        If y == 1 and x != label: dC/dx = exp[x - logsumexp]
+    """
+    row_idx = tl.program_id(0)
+    logits_ptr += row_idx * logits_row_stride
+    dloss_ptr += row_idx * dloss_row_stride
+    col_offsets = tl.arange(0, BLOCK_SIZE)
+    mask = col_offsets < n_cols
+    # TODO: Fixup int32 locations to int64
+    label_idx = tl.load(labels_ptr + row_idx).to(tl.int32)
+
+    if label_idx != -100:
+        dloss = tl.load(dloss_ptr)
+    else:
+        dloss = 0.0
+    logits = tl.load(logits_ptr + col_offsets, mask = mask, other = 0).to(tl.float32)
+    lse = tl.load(lse_ptr + row_idx)
+    probs = tl.exp(logits - lse)
+
+    probs = tl.where(col_offsets == label_idx, probs - 1.0, probs)
+    tl.store(logits_ptr + col_offsets, dloss * probs, mask = mask)
+
+
+class CrossEntropyLoss(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, logits, labels):
+        n_rows, n_cols = logits.shape
+        BLOCK_SIZE, num_warps = calculate_settings(n_cols)
+        losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda")
+        logsumexp = torch.empty(n_rows, dtype = torch.float32, device = "cuda")
+
+        _cross_entropy_forward[(n_rows,)](
+            logits, logits.stride(0),
+            losses,
+            logsumexp,
+            labels,
+            n_cols,
+            BLOCK_SIZE = BLOCK_SIZE,
+            num_warps = num_warps,
+        )
+
+        ctx.BLOCK_SIZE = BLOCK_SIZE
+        ctx.num_warps = num_warps
+        ctx.save_for_backward(logits, logsumexp, labels)
+        return losses
+    pass
+
+    @staticmethod
+    def backward(ctx, dlosses):
+        logits, logsumexp, labels = ctx.saved_tensors
+        n_rows, n_cols = logits.shape
+
+        _cross_entropy_backward[(n_rows,)](
+            logits, logits.stride(0),
+            dlosses, dlosses.stride(0),
+            logsumexp,
+            labels,
+            n_cols,
+            BLOCK_SIZE = ctx.BLOCK_SIZE,
+            num_warps = ctx.num_warps,
+        )
+        return logits, None, None,
+    pass
+pass
+
+
+def fast_cross_entropy_loss(logits, labels):
+    """
+    Arguments:
+        logits: (batch, seq_len, vocab_size)
+        labels: (batch, seq_len,)
+    Returns:
+        losses: float
+    """
+    batch, seq_len, d = logits.shape
+    assert(labels.shape == (batch, seq_len))
+
+    loss = CrossEntropyLoss.apply(
+        logits.view(batch*seq_len, d),
+        labels.view(-1),
+    )
+    n_items = torch.count_nonzero(labels != -100)
+    return loss.sum() / n_items
+pass
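One hedged way to sanity-check the fused kernel is to compare `fast_cross_entropy_loss` against PyTorch's reference cross entropy on random data. This sketch is not part of the patch; it assumes a CUDA device and the `torch`/`triton` versions pinned by this branch, and it masks a few positions with -100 to match the kernel's ignore-index convention.

```python
# Sketch: compare the fused Triton loss against torch's reference implementation.
import torch
import torch.nn.functional as F

from axolotl.monkeypatch.cross_entropy import fast_cross_entropy_loss  # new module

batch, seq_len, vocab = 2, 16, 32000
logits = torch.randn(batch, seq_len, vocab, device="cuda")
labels = torch.randint(0, vocab, (batch, seq_len), device="cuda")
labels[:, :4] = -100  # ignored positions, as in causal-LM label shifting

fused = fast_cross_entropy_loss(logits, labels)
reference = F.cross_entropy(
    logits.view(-1, vocab), labels.view(-1), ignore_index=-100
)
print(torch.allclose(fused, reference, atol=1e-4))
```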
@@ -13,16 +13,20 @@ from flash_attn.flash_attn_interface import (  # pylint: disable=ungrouped-impor
     flash_attn_varlen_kvpacked_func,
     flash_attn_varlen_qkvpacked_func,
 )
-from transformers.modeling_outputs import BaseModelOutputWithPast
+from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
 from transformers.models.mistral.modeling_mistral import (
     MistralAttention as OriginalMistralAttention,
 )
 from transformers.models.mistral.modeling_mistral import (
     MistralDecoderLayer as OriginalMistralDecoderLayer,
 )
+from transformers.models.mistral.modeling_mistral import (
+    MistralForCausalLM as OriginalMistralForCausalLM,
+)
 from transformers.models.mistral.modeling_mistral import apply_rotary_pos_emb, repeat_kv

 from axolotl.monkeypatch.utils import get_cu_seqlens_from_pos_ids
+from axolotl.monkeypatch.cross_entropy import fast_cross_entropy_loss

 LOG = logging.getLogger("axolotl.monkeypatch.mistral")

@@ -36,6 +40,9 @@ def replace_mistral_attn_with_flash_attn(
     transformers.models.mistral.modeling_mistral.MistralAttention.forward = (
         flashattn_forward
     )
+    transformers.models.mistral.modeling_mistral.MistralForCausalLM.forward = (
+        mistral_causallm_forward
+    )
     if packed:
         transformers.models.mistral.modeling_mistral.MistralDecoderLayer = (
             MistralDecoderLayer

@@ -641,3 +648,71 @@ class MistralDecoderLayer(OriginalMistralDecoderLayer):
             outputs += (present_key_value,)

         return outputs
+
+
+def mistral_causallm_forward(
+    self: OriginalMistralForCausalLM,
+    input_ids: torch.LongTensor = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_values: Optional[List[torch.FloatTensor]] = None,
+    inputs_embeds: Optional[torch.FloatTensor] = None,
+    labels: Optional[torch.LongTensor] = None,
+    use_cache: Optional[bool] = None,
+    output_attentions: Optional[bool] = None,
+    output_hidden_states: Optional[bool] = None,
+    return_dict: Optional[bool] = None,
+    *args, **kwargs
+) -> Union[Tuple, CausalLMOutputWithPast]:
+    r"""
+    Args:
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+    ```"""
+
+    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+    output_hidden_states = (
+        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+    )
+    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+    outputs = self.model(
+        input_ids=input_ids,
+        attention_mask=attention_mask,
+        position_ids=position_ids,
+        past_key_values=past_key_values,
+        inputs_embeds=inputs_embeds,
+        use_cache=use_cache,
+        output_attentions=output_attentions,
+        output_hidden_states=output_hidden_states,
+        return_dict=return_dict,
+    )
+
+    hidden_states = outputs[0]
+    logits = self.lm_head(hidden_states)
+
+    loss = None
+    if labels is not None:
+        shift_logits = logits
+        if not hasattr(self, "extra_ignored_labels"):
+            self.extra_ignored_labels = torch.full((self.model.config.max_position_embeddings, 1), -100, device=shift_logits.device)
+
+        shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]]))
+        shift_labels = shift_labels.to(shift_logits.device)
+
+        # FAST CROSS ENTROPY
+        loss = fast_cross_entropy_loss(shift_logits, shift_labels)
+
+    if not return_dict:
+        output = (logits,) + outputs[1:]
+        return (loss,) + output if loss is not None else output
+
+    return CausalLMOutputWithPast(
+        loss=loss,
+        logits=logits,
+        past_key_values=outputs.past_key_values,
+        hidden_states=outputs.hidden_states,
+        attentions=outputs.attentions,
+    )
@@ -124,6 +124,36 @@ class GPUStatsCallback(
         return control


+class LossWatchDogCallback(TrainerCallback):
+    """Callback to track loss and stop training if loss is too high"""
+
+    def __init__(self, cfg):
+        self.cfg = cfg
+        self.logged = False
+        self.violations = 0
+        self.threshold = cfg.loss_watchdog_threshold
+        self.patience = cfg.loss_watchdog_patience or 3
+
+    def on_step_end(
+        self,
+        _args: TrainingArguments,
+        state: TrainerState,
+        control: TrainerControl,
+        **_kwargs,
+    ):
+        if len(state.log_history) > 0 and "loss" in state.log_history[-1]:
+            if state.log_history[-1]["loss"] > self.threshold:
+                self.violations += 1
+                if self.violations >= self.patience:
+                    LOG.warning(
+                        "Loss is too high, stopping training (loss_watchdog_threshold)"
+                    )
+                    control.should_training_stop = True
+            else:
+                self.violations = 0
+        return control
+
+
 def bench_eval_callback_factory(trainer, tokenizer):
     accuracy = evaluate.load("accuracy")
     abcd_idx = [
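To illustrate the intended behaviour of the new watchdog (stop only after `patience` consecutive logged losses above `threshold`, resetting the count whenever the loss recovers), here is a small standalone simulation of the counting logic; it does not import the real Trainer classes.

```python
# Toy simulation of the LossWatchDogCallback counting logic (sketch only).
def should_stop(loss_history, threshold=5.0, patience=3):
    violations = 0
    for loss in loss_history:
        if loss > threshold:
            violations += 1
            if violations >= patience:
                return True
        else:
            violations = 0
    return False

print(should_stop([1.2, 6.0, 7.0, 1.1, 6.5, 6.6]))  # False (never 3 in a row)
print(should_stop([1.2, 6.0, 7.0, 8.0, 1.1]))       # True  (3 consecutive violations)
```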
@@ -27,7 +27,7 @@ def choose_device(cfg):

     cfg.device = get_device()
     if cfg.world_size == 1:
-        cfg.device_map = "auto"
+        cfg.device_map = cfg.device_map or "auto"
     else:
         if cfg.device.startswith("cuda"):
             cfg.device_map = {"": torch.cuda.current_device()}
@@ -122,6 +122,19 @@ def normalize_config(cfg):
         or (cfg.model_type and "mistral" in cfg.model_type.lower())
     )

+    cfg.is_qwen_derived_model = (
+        (
+            hasattr(model_config, "model_type")
+            and model_config.model_type
+            in [
+                "qwen",
+            ]
+        )
+        or cfg.is_qwen_derived_model
+        or "qwen" in cfg.base_model.lower()
+        or (cfg.model_type and "qwen" in cfg.model_type.lower())
+    )
+
     if isinstance(cfg.learning_rate, str):
         cfg.learning_rate = float(cfg.learning_rate)

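The detection mirrors the existing llama/falcon/mistral heuristics: the explicit flag, the model config's `model_type`, or a "qwen" substring in the base model or model type all enable it. As a rough standalone illustration (plain values instead of the real `cfg`/`model_config` objects):

```python
# Simplified standalone sketch of the Qwen auto-detection heuristic above.
def looks_like_qwen(base_model, model_type=None, explicit_flag=False):
    return bool(
        explicit_flag
        or model_type in ["qwen"]
        or "qwen" in base_model.lower()
        or (model_type and "qwen" in model_type.lower())
    )

print(looks_like_qwen("Qwen/Qwen-7B"))                 # True
print(looks_like_qwen("mistralai/Mistral-7B-v0.1"))    # False
print(looks_like_qwen("my-model", model_type="qwen"))  # True
```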
@@ -165,7 +178,11 @@ def validate_config(cfg):
             "batch_size is not recommended. Please use gradient_accumulation_steps instead.",
             "To calculate the equivalent gradient_accumulation_steps, divide batch_size / micro_batch_size / number of gpus.",
         )
-    if cfg.eval_batch_size != cfg.micro_batch_size:
+    if (
+        cfg.eval_batch_size
+        and cfg.micro_batch_size
+        and cfg.eval_batch_size != cfg.micro_batch_size
+    ):
         LOG.warning(
             "eval_batch_size != micro_batch_size. This can lead to VRAM instability."
         )

@@ -372,6 +389,21 @@ def validate_config(cfg):
     if cfg.rope_scaling:
         LOG.warning("`rope_scaling` should now be be a key under `model_config`")

+    if cfg.warmup_steps and cfg.warmup_ratio:
+        raise ValueError("warmup_steps and warmup_ratio are mutually exclusive")
+
+    if cfg.is_qwen_derived_model and cfg.gradient_checkpointing:
+        LOG.warning(
+            "Gradient checkpointing is broken for Qwen models for transformers>=4.35.0, except main branch."
+        )
+
+    if cfg.wandb_run_id and not cfg.wandb_name:
+        cfg.wandb_name = cfg.wandb_run_id
+
+        LOG.warning(
+            "wandb_run_id sets the ID of the run. If you would like to set the name, please use wandb_name instead."
+        )
+
     # TODO
     # MPT 7b
     # https://github.com/facebookresearch/bitsandbytes/issues/25
@@ -79,6 +79,14 @@ def prepare_dataset(cfg, tokenizer):
         train_dataset, eval_dataset = process_datasets_for_packing(
             cfg, train_dataset, eval_dataset, tokenizer
         )
+
+    if eval_dataset and cfg.sample_packing and cfg.eval_sample_packing is not False:
+        total_eval_steps = calculate_total_num_steps(cfg, eval_dataset, update=False)
+        if total_eval_steps == 0:
+            raise ValueError(
+                "eval dataset split is too small for sample_packing. You should set `eval_sample_packing: False`. "
+            )
+
     if cfg.max_steps:
         total_num_steps = min(
             calculate_total_num_steps(cfg, train_dataset), cfg.max_steps

@@ -234,7 +242,14 @@ def load_tokenized_prepared_datasets(
             local_path = Path(config_dataset.path)
             if local_path.exists():
                 if local_path.is_dir():
-                    ds = load_from_disk(config_dataset.path)
+                    # TODO dirs with arrow or parquet files could be loaded with `load_from_disk`
+                    ds = load_dataset(
+                        config_dataset.path,
+                        name=config_dataset.name,
+                        data_files=config_dataset.data_files,
+                        streaming=False,
+                        split=None,
+                    )
                 elif local_path.is_file():
                     ds_type = get_ds_type(config_dataset)
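The switch from `load_from_disk` to `load_dataset` means a local directory of raw data files now goes through the generic loader rather than the Arrow on-disk format. A hedged sketch of the equivalent standalone call (the directory name is hypothetical; it only assumes the `datasets` library is installed):

```python
# Sketch: load a local directory of data files the same way the patched code does.
from datasets import load_dataset

ds = load_dataset(
    "./my_dataset_dir",  # hypothetical local directory of json/csv/parquet files
    name=None,
    data_files=None,
    streaming=False,
    split=None,
)
print(ds)
```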
@@ -28,6 +28,27 @@ from axolotl.utils.dict import DictDefault
|
|||||||
LOG = logging.getLogger("axolotl")
|
LOG = logging.getLogger("axolotl")
|
||||||
|
|
||||||
|
|
||||||
|
def check_model_config(cfg: DictDefault, model_config: AutoConfig):
|
||||||
|
quant_config_exists = hasattr(model_config, "quantization_config")
|
||||||
|
quant_config_method_is_gptq = (
|
||||||
|
quant_config_exists
|
||||||
|
and "quant_method" in model_config.quantization_config
|
||||||
|
and model_config.quantization_config["quant_method"] == "gptq"
|
||||||
|
)
|
||||||
|
|
||||||
|
if cfg.gptq and not quant_config_method_is_gptq:
|
||||||
|
raise ValueError(
|
||||||
|
"model_config.quantization_config is not set or quant_method is not set to gptq. "
|
||||||
|
"Please make sure to point to a GPTQ model."
|
||||||
|
)
|
||||||
|
|
||||||
|
if not cfg.gptq and quant_config_exists:
|
||||||
|
raise ValueError(
|
||||||
|
"model_config.quantization_config is set but `gptq` flag is not. "
|
||||||
|
"Please use the `gptq` flag to train quantized model or point to a non-quantized model."
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def load_model_config(cfg):
|
def load_model_config(cfg):
|
||||||
model_config_name = cfg.base_model_config or cfg.base_model
|
model_config_name = cfg.base_model_config or cfg.base_model
|
||||||
trust_remote_code = cfg.trust_remote_code is True
|
trust_remote_code = cfg.trust_remote_code is True
|
||||||
@@ -38,6 +59,8 @@ def load_model_config(cfg):
|
|||||||
for key, val in cfg.model_config.items():
|
for key, val in cfg.model_config.items():
|
||||||
setattr(model_config, key, val)
|
setattr(model_config, key, val)
|
||||||
|
|
||||||
|
check_model_config(cfg, model_config)
|
||||||
|
|
||||||
return model_config
|
return model_config
|
||||||
|
|
||||||
|
|
||||||
@@ -84,6 +107,18 @@ def load_tokenizer(cfg):
     if cfg.is_mistral_derived_model and cfg.flash_attention and not cfg.sample_packing:
         tokenizer.padding_side = "left"
 
+    # Qwen base only has single token, so we need to set the special tokens
+    if cfg.is_qwen_derived_model:
+        token_ids = ["bos_token_id", "eos_token_id", "pad_token_id", "unk_token_id"]
+        for attr_name in token_ids:
+            if getattr(tokenizer, attr_name) is None:
+                setattr(tokenizer, attr_name, tokenizer.eod_id)
+
+        token_names = ["bos_token", "eos_token", "pad_token", "unk_token"]
+        for attr_name in token_names:
+            if getattr(tokenizer, attr_name) is None:
+                setattr(tokenizer, attr_name, "<|endoftext|>")
+
     if cfg.special_tokens:
         for k, val in cfg.special_tokens.items():
             tokenizer.add_special_tokens(
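The idea here is that Qwen's base tokenizer only ships its end-of-document token, so any missing special tokens fall back to it. A self-contained sketch of that fallback, using a dummy tokenizer object (the `eod_id` value is illustrative, not authoritative):

```python
# Sketch of the Qwen special-token fallback applied to a dummy tokenizer.
class DummyQwenTokenizer:
    eod_id = 151643          # illustrative id for "<|endoftext|>"
    bos_token_id = None
    eos_token_id = None
    pad_token_id = None
    unk_token_id = None
    bos_token = None
    eos_token = None
    pad_token = None
    unk_token = None

def fill_qwen_special_tokens(tokenizer):
    # Missing token ids point at the end-of-document id ...
    for attr_name in ["bos_token_id", "eos_token_id", "pad_token_id", "unk_token_id"]:
        if getattr(tokenizer, attr_name) is None:
            setattr(tokenizer, attr_name, tokenizer.eod_id)
    # ... and missing token strings point at "<|endoftext|>".
    for attr_name in ["bos_token", "eos_token", "pad_token", "unk_token"]:
        if getattr(tokenizer, attr_name) is None:
            setattr(tokenizer, attr_name, "<|endoftext|>")

tok = DummyQwenTokenizer()
fill_qwen_special_tokens(tok)
assert tok.pad_token == "<|endoftext|>" and tok.pad_token_id == tok.eod_id
```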
@@ -204,6 +239,7 @@ def load_model(
|
|||||||
model_kwargs = {}
|
model_kwargs = {}
|
||||||
|
|
||||||
model_kwargs["device_map"] = cfg.device_map
|
model_kwargs["device_map"] = cfg.device_map
|
||||||
|
model_kwargs["max_memory"] = cfg.max_memory
|
||||||
model_kwargs["torch_dtype"] = cfg.torch_dtype
|
model_kwargs["torch_dtype"] = cfg.torch_dtype
|
||||||
|
|
||||||
if cfg.model_revision:
|
if cfg.model_revision:
|
||||||
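A rough sketch (not from the diff) of what forwarding `max_memory` enables: a per-device memory budget can be expressed in the config and passed through as a model kwarg. The values below are made up.

```python
# Illustrative only: building model kwargs with a hypothetical memory budget.
import torch

cfg = {
    "device_map": "auto",
    "max_memory": {0: "20GiB", "cpu": "96GiB"},  # hypothetical per-device cap
    "torch_dtype": torch.bfloat16,
}

model_kwargs = {
    "device_map": cfg["device_map"],
    "max_memory": cfg["max_memory"],  # newly forwarded by the change above
    "torch_dtype": cfg["torch_dtype"],
}
# These kwargs would then flow into
# `AutoModelForCausalLM.from_pretrained(base_model, **model_kwargs)`,
# where `max_memory` caps how much each device may be assigned.
print(model_kwargs)
```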
@@ -288,10 +324,10 @@ def load_model(
|
|||||||
# device=cfg.device,
|
# device=cfg.device,
|
||||||
# )
|
# )
|
||||||
# model.train() # sets to train instead of eval mode
|
# model.train() # sets to train instead of eval mode
|
||||||
elif model_type == "MixFormerSequentialForCausalLM":
|
elif model_type == "PhiForCausalLM":
|
||||||
from axolotl.models.phi import MixFormerSequentialForCausalLM
|
from axolotl.models.phi import PhiForCausalLM
|
||||||
|
|
||||||
model = MixFormerSequentialForCausalLM.from_pretrained(
|
model = PhiForCausalLM.from_pretrained(
|
||||||
base_model,
|
base_model,
|
||||||
load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
|
load_in_8bit=cfg.load_in_8bit and cfg.adapter is not None,
|
||||||
load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
|
load_in_4bit=cfg.load_in_4bit and cfg.adapter is not None,
|
||||||
@@ -400,15 +436,22 @@ def load_model(
|
|||||||
module.to(torch.float32)
|
module.to(torch.float32)
|
||||||
|
|
||||||
needs_fa2_dtype = cfg.adapter or cfg.fsdp
|
needs_fa2_dtype = cfg.adapter or cfg.fsdp
|
||||||
|
skip_prepare_model_for_kbit_training = False
|
||||||
|
|
||||||
|
if cfg.model_config_type == "qwen" and cfg.adapter == "lora":
|
||||||
|
# Qwen doesn't play nicely with LoRA if this is enabled
|
||||||
|
skip_prepare_model_for_kbit_training = True
|
||||||
|
|
||||||
if (cfg.adapter == "lora" and load_in_8bit) or (
|
if (cfg.adapter == "lora" and load_in_8bit) or (
|
||||||
cfg.adapter == "qlora" and cfg.load_in_4bit
|
cfg.adapter == "qlora" and cfg.load_in_4bit
|
||||||
):
|
):
|
||||||
LOG.info("converting PEFT model w/ prepare_model_for_kbit_training")
|
LOG.info("converting PEFT model w/ prepare_model_for_kbit_training")
|
||||||
if cfg.gradient_checkpointing:
|
if cfg.gradient_checkpointing:
|
||||||
model.gradient_checkpointing_enable()
|
model.gradient_checkpointing_enable()
|
||||||
model = prepare_model_for_kbit_training(
|
if not skip_prepare_model_for_kbit_training:
|
||||||
model, use_gradient_checkpointing=cfg.gradient_checkpointing
|
model = prepare_model_for_kbit_training(
|
||||||
)
|
model, use_gradient_checkpointing=cfg.gradient_checkpointing
|
||||||
|
)
|
||||||
needs_fa2_dtype = True
|
needs_fa2_dtype = True
|
||||||
|
|
||||||
# LlamaRMSNorm layers are in fp32 after kbit_training or full finetune, so we need to
|
# LlamaRMSNorm layers are in fp32 after kbit_training or full finetune, so we need to
|
||||||
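The gating here is simple: for Qwen models trained with LoRA, the call to PEFT's `prepare_model_for_kbit_training` is skipped. A minimal sketch of that decision, with made-up config dicts rather than axolotl's real config object:

```python
# Toy illustration of the skip flag introduced above.
def should_prepare_for_kbit(cfg):
    skip_prepare_model_for_kbit_training = False
    if cfg.get("model_config_type") == "qwen" and cfg.get("adapter") == "lora":
        # Qwen doesn't play nicely with LoRA if this is enabled
        skip_prepare_model_for_kbit_training = True
    return not skip_prepare_model_for_kbit_training

assert should_prepare_for_kbit({"model_config_type": "llama", "adapter": "lora"})
assert not should_prepare_for_kbit({"model_config_type": "qwen", "adapter": "lora"})
```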
@@ -182,7 +182,7 @@ class MultipackBatchSampler(BatchSampler):
 
         # shave off 1% + 1 for dealing with variance in packing from random sampler to sampler
         return max(
-            1,
+            0,
             (
                 world_size
                 * math.floor(
@@ -141,7 +141,7 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset, tokenizer):
     return train_dataset, eval_dataset
 
 
-def calculate_total_num_steps(cfg, train_dataset):
+def calculate_total_num_steps(cfg, train_dataset, update=True):
     if not cfg.total_num_tokens:
         total_num_tokens = np.sum(
             train_dataset.data.column("input_ids")
@@ -150,7 +150,8 @@ def calculate_total_num_steps(cfg, train_dataset):
             .values
         )
         LOG.debug(f"total_num_tokens: {total_num_tokens}", main_process_only=True)
-        cfg.total_num_tokens = total_num_tokens
+        if update:
+            cfg.total_num_tokens = total_num_tokens
 
     if not cfg.total_supervised_tokens:
         total_supervised_tokens = (
@@ -163,7 +164,8 @@ def calculate_total_num_steps(cfg, train_dataset):
             f"`total_supervised_tokens: {total_supervised_tokens}`",
            main_process_only=True,
        )
-        cfg.total_supervised_tokens = total_supervised_tokens
+        if update:
+            cfg.total_supervised_tokens = total_supervised_tokens
 
    if cfg.sample_packing:
        # we have to drop anything longer then sequence len otherwise
@@ -232,7 +234,8 @@ def calculate_total_num_steps(cfg, train_dataset):
            sample_packing_eff_est = (
                math.ceil(sample_packing_actual_eff_all * 100.0) / 100.0
            )
-            cfg.sample_packing_eff_est = sample_packing_eff_est
+            if update:
+                cfg.sample_packing_eff_est = sample_packing_eff_est
            LOG.debug(
                f"sample_packing_eff_est: {cfg.sample_packing_eff_est}",
                main_process_only=True,
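The new `update` parameter lets callers get a step estimate without writing the derived token counts and packing-efficiency estimate back onto the config. A toy illustration of the pattern (not axolotl's real function):

```python
# Toy version of the update-flag pattern: derived values are computed either
# way, but only cached on the config when update is True.
def calculate_total_num_steps(cfg, num_tokens, update=True):
    total_num_tokens = cfg.get("total_num_tokens") or num_tokens
    if update:
        cfg["total_num_tokens"] = total_num_tokens
    # steps roughly ~ tokens / (sequence_len * micro_batch_size)
    return total_num_tokens // (cfg["sequence_len"] * cfg["micro_batch_size"])

cfg = {"sequence_len": 2048, "micro_batch_size": 2}
steps = calculate_total_num_steps(cfg, num_tokens=1_000_000, update=False)
assert "total_num_tokens" not in cfg          # dry run leaves cfg untouched
steps = calculate_total_num_steps(cfg, num_tokens=1_000_000)
assert cfg["total_num_tokens"] == 1_000_000   # default caches it on cfg
```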
@@ -264,12 +267,14 @@ def setup_fsdp_envs(cfg):
|
|||||||
] = cfg.fsdp_config.fsdp_transformer_layer_cls_to_wrap
|
] = cfg.fsdp_config.fsdp_transformer_layer_cls_to_wrap
|
||||||
|
|
||||||
|
|
||||||
def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer, total_num_steps):
|
def prepare_optim_env(cfg):
|
||||||
if cfg.fsdp:
|
if cfg.fsdp:
|
||||||
setup_fsdp_envs(cfg)
|
setup_fsdp_envs(cfg)
|
||||||
elif cfg.deepspeed:
|
elif cfg.deepspeed:
|
||||||
os.environ["ACCELERATE_USE_DEEPSPEED"] = "true"
|
os.environ["ACCELERATE_USE_DEEPSPEED"] = "true"
|
||||||
|
|
||||||
|
|
||||||
|
def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer, total_num_steps):
|
||||||
trainer_builder = HFCausalTrainerBuilder(cfg, model, tokenizer)
|
trainer_builder = HFCausalTrainerBuilder(cfg, model, tokenizer)
|
||||||
trainer_builder.train_dataset = train_dataset
|
trainer_builder.train_dataset = train_dataset
|
||||||
trainer_builder.eval_dataset = eval_dataset
|
trainer_builder.eval_dataset = eval_dataset
|
||||||
|
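The FSDP/DeepSpeed environment setup is split out into its own `prepare_optim_env(cfg)`, so the accelerate-related environment variables can be set before the trainer is built. A simplified sketch of the intended call order; the functions and config below are stand-ins, not axolotl's real implementations, and the actual call site is not shown in this diff.

```python
# Simplified stand-ins showing env preparation as a separate step from
# trainer construction.
import os

def prepare_optim_env(cfg):
    if cfg.get("fsdp"):
        os.environ["ACCELERATE_USE_FSDP"] = "true"
    elif cfg.get("deepspeed"):
        os.environ["ACCELERATE_USE_DEEPSPEED"] = "true"

def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer, total_num_steps):
    ...  # would build the trainer via HFCausalTrainerBuilder

cfg = {"deepspeed": "deepspeed/zero2.json"}   # hypothetical config
prepare_optim_env(cfg)                        # env vars first ...
# trainer = setup_trainer(cfg, train_ds, eval_ds, model, tokenizer, steps)  # ... then the trainer
assert os.environ["ACCELERATE_USE_DEEPSPEED"] == "true"
```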
@@ -2,20 +2,20 @@
 
 import os
 
+from axolotl.utils.dict import DictDefault
 
-def setup_wandb_env_vars(cfg):
-    if cfg.wandb_mode and cfg.wandb_mode == "offline":
-        os.environ["WANDB_MODE"] = cfg.wandb_mode
-    elif cfg.wandb_project and len(cfg.wandb_project) > 0:
-        os.environ["WANDB_PROJECT"] = cfg.wandb_project
+
+def setup_wandb_env_vars(cfg: DictDefault):
+    for key in cfg.keys():
+        if key.startswith("wandb_"):
+            value = cfg.get(key, "")
+
+            if value and isinstance(value, str) and len(value) > 0:
+                os.environ[key.upper()] = value
+
+    # Enable wandb if project name is present
+    if cfg.wandb_project and len(cfg.wandb_project) > 0:
         cfg.use_wandb = True
-        if cfg.wandb_entity and len(cfg.wandb_entity) > 0:
-            os.environ["WANDB_ENTITY"] = cfg.wandb_entity
-        if cfg.wandb_watch and len(cfg.wandb_watch) > 0:
-            os.environ["WANDB_WATCH"] = cfg.wandb_watch
-        if cfg.wandb_log_model and len(cfg.wandb_log_model) > 0:
-            os.environ["WANDB_LOG_MODEL"] = cfg.wandb_log_model
-        if cfg.wandb_run_id and len(cfg.wandb_run_id) > 0:
-            os.environ["WANDB_RUN_ID"] = cfg.wandb_run_id
+        os.environ.pop("WANDB_DISABLED", None)  # Remove if present
     else:
         os.environ["WANDB_DISABLED"] = "true"
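Instead of handling each wandb option individually, every `wandb_*` config key is now mapped to the matching `WANDB_*` environment variable. A self-contained sketch of that mapping with made-up config values:

```python
# Sketch of the generic wandb_* -> WANDB_* mapping introduced above.
import os

def setup_wandb_env_vars(cfg):
    for key, value in cfg.items():
        if key.startswith("wandb_") and isinstance(value, str) and value:
            os.environ[key.upper()] = value
    if cfg.get("wandb_project"):
        os.environ.pop("WANDB_DISABLED", None)  # project present: enable wandb
    else:
        os.environ["WANDB_DISABLED"] = "true"

setup_wandb_env_vars({"wandb_project": "my-finetune", "wandb_entity": "my-team"})
assert os.environ["WANDB_PROJECT"] == "my-finetune"
assert os.environ["WANDB_ENTITY"] == "my-team"
```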
@@ -31,7 +31,7 @@ class TestPhi(unittest.TestCase):
             {
                 "base_model": "microsoft/phi-1_5",
                 "trust_remote_code": True,
-                "model_type": "MixFormerSequentialForCausalLM",
+                "model_type": "PhiForCausalLM",
                 "tokenizer_type": "AutoTokenizer",
                 "sequence_len": 512,
                 "sample_packing": False,
@@ -76,7 +76,7 @@ class TestPhi(unittest.TestCase):
             {
                 "base_model": "microsoft/phi-1_5",
                 "trust_remote_code": True,
-                "model_type": "MixFormerSequentialForCausalLM",
+                "model_type": "PhiForCausalLM",
                 "tokenizer_type": "AutoTokenizer",
                 "sequence_len": 512,
                 "sample_packing": True,
@@ -1,6 +1,7 @@
 """Module for testing the validation module"""
 
 import logging
+import os
 import unittest
 from typing import Optional
 
@@ -8,6 +9,7 @@ import pytest
 
 from axolotl.utils.config import validate_config
 from axolotl.utils.dict import DictDefault
+from axolotl.utils.wandb_ import setup_wandb_env_vars
 
 
 class ValidationTest(unittest.TestCase):
@@ -649,3 +651,113 @@ class ValidationTest(unittest.TestCase):
         )
 
         validate_config(cfg)
+
+    def test_warmup_step_no_conflict(self):
+        cfg = DictDefault(
+            {
+                "warmup_steps": 10,
+                "warmup_ratio": 0.1,
+            }
+        )
+
+        with pytest.raises(
+            ValueError,
+            match=r".*warmup_steps and warmup_ratio are mutually exclusive*",
+        ):
+            validate_config(cfg)
+
+        cfg = DictDefault(
+            {
+                "warmup_steps": 10,
+            }
+        )
+
+        validate_config(cfg)
+
+        cfg = DictDefault(
+            {
+                "warmup_ratio": 0.1,
+            }
+        )
+
+        validate_config(cfg)
+
+
+class ValidationWandbTest(ValidationTest):
+    """
+    Validation test for wandb
+    """
+
+    def test_wandb_set_run_id_to_name(self):
+        cfg = DictDefault(
+            {
+                "wandb_run_id": "foo",
+            }
+        )
+
+        with self._caplog.at_level(logging.WARNING):
+            validate_config(cfg)
+            assert any(
+                "wandb_run_id sets the ID of the run. If you would like to set the name, please use wandb_name instead."
+                in record.message
+                for record in self._caplog.records
+            )
+
+        assert cfg.wandb_name == "foo" and cfg.wandb_run_id == "foo"
+
+        cfg = DictDefault(
+            {
+                "wandb_name": "foo",
+            }
+        )
+
+        validate_config(cfg)
+
+        assert cfg.wandb_name == "foo" and cfg.wandb_run_id is None
+
+    def test_wandb_sets_env(self):
+        cfg = DictDefault(
+            {
+                "wandb_project": "foo",
+                "wandb_name": "bar",
+                "wandb_run_id": "bat",
+                "wandb_entity": "baz",
+                "wandb_mode": "online",
+                "wandb_watch": "false",
+                "wandb_log_model": "checkpoint",
+            }
+        )
+
+        validate_config(cfg)
+
+        setup_wandb_env_vars(cfg)
+
+        assert os.environ.get("WANDB_PROJECT", "") == "foo"
+        assert os.environ.get("WANDB_NAME", "") == "bar"
+        assert os.environ.get("WANDB_RUN_ID", "") == "bat"
+        assert os.environ.get("WANDB_ENTITY", "") == "baz"
+        assert os.environ.get("WANDB_MODE", "") == "online"
+        assert os.environ.get("WANDB_WATCH", "") == "false"
+        assert os.environ.get("WANDB_LOG_MODEL", "") == "checkpoint"
+        assert os.environ.get("WANDB_DISABLED", "") != "true"
+
+    def test_wandb_set_disabled(self):
+        cfg = DictDefault({})
+
+        validate_config(cfg)
+
+        setup_wandb_env_vars(cfg)
+
+        assert os.environ.get("WANDB_DISABLED", "") == "true"
+
+        cfg = DictDefault(
+            {
+                "wandb_project": "foo",
+            }
+        )
+
+        validate_config(cfg)
+
+        setup_wandb_env_vars(cfg)
+
+        assert os.environ.get("WANDB_DISABLED", "") != "true"