* upgrade to torchao 0.17.0

* chore: lint

* refactor attention handling

* replace legacy attention boolean flags with capability properties

  Replace checks with capability-based properties derived from attn_implementation.
  This separates three concerns that were conflated under flash_attention:

  1. Backend selection -> attn_implementation enum
  2. Packing capability -> attn_supports_packing property
  3. Flash-attn library dependency -> attn_uses_flash_lib property

  (A minimal sketch of this split appears below, after the collator fix notes.)

* compute attn capability flags in normalizer instead of properties

* make attn_implementation the single source of truth

* move attention-dependent validators to mode=after

* migrate remaining consumers to canonical attn_implementation

* expand attention tests + rewrite docs

* migrate example configs to canonical attn_implementation

* update doc snippets + reject gemma4-hybrid with non-FA2 backend

* remove dead gemma4 branch in _set_attention_config

* fix duplicate attn_implementation in gpt-oss yamls and flaky caplog tests

* drop "Phase 2" naming from attn-implementation tests

* regroup attn_implementation tests by feature concern

* clean up verbose comments and remove MD

Signed-off-by: Wing Lian <wing@axolotl.ai>
Co-authored-by: Axolotl Swarm <no-reply@axolotl.ai>

* fix(collator): pass return_dict=True at apply_chat_template top level for transformers 5.x

  In transformers 5.x, ProcessorMixin.apply_chat_template gained its own `return_dict`
  parameter (defaulting to False). When return_dict=False and tokenize=True, the method
  returns out["input_ids"] directly (a 2-D tensor) rather than the full BatchFeature dict.

  The old code placed `return_dict=True` inside processor_kwargs. In transformers 5.x those
  kwargs are forwarded to the underlying processor call self(...), where _merge_kwargs
  silently ignores any key not present in MllamaProcessorKwargs (emitting a warning). The
  outer return_dict therefore stayed False, apply_chat_template returned the raw input_ids
  tensor, and the subsequent `batch["input_ids"]` attempted to index a 2-D tensor with the
  9-character string "input_ids", producing:

      IndexError: too many indices for tensor of dimension 2

  The fix is to pass return_dict=True as a top-level keyword argument to apply_chat_template
  (where it is actually consumed) and remove it from processor_kwargs (where it was silently
  dropped). No version guard is needed: transformers is pinned to ==5.5.4 in pyproject.toml.

  Adds a unit-level regression test (tests/test_mm_chat_collator.py) that mocks the processor
  to return a raw tensor when apply_chat_template is called without top-level return_dict=True,
  verifying the four invariants: process_rows returns a dict, input_ids is 2-D, labels is 2-D,
  and apply_chat_template receives return_dict=True as a top-level kwarg.

  Fixes: tests/e2e/test_llama_vision.py::TestLlamaVision::test_lora_llama_vision_multimodal_dataset
  Fixes: tests/e2e/test_llama_vision.py::TestLlamaVision::test_lora_llama_vision_text_only_dataset

Signed-off-by: Wing Lian <wing@axolotl.ai>
Co-authored-by: Axolotl Swarm <no-reply@axolotl.ai>
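For the capability-property split above, a minimal sketch of the idea, written as a normalizer-style helper since the follow-up commit computes the flags in the normalizer. The enum members and the choice of which backends support packing or require the flash-attn library are illustrative assumptions, not the actual axolotl code:

```python
from enum import Enum


class AttnImplementation(str, Enum):
    # Illustrative members only; the real enum may differ.
    EAGER = "eager"
    SDPA = "sdpa"
    FLASH_ATTENTION_2 = "flash_attention_2"
    XFORMERS = "xformers"


def normalize_attention_flags(attn_implementation: AttnImplementation) -> dict:
    """Derive capability flags from the single source of truth instead of
    storing separate booleans alongside it. Which backends support packing
    or need the flash-attn library is assumed here for illustration."""
    return {
        "attn_supports_packing": attn_implementation
        in {AttnImplementation.FLASH_ATTENTION_2, AttnImplementation.SDPA},
        "attn_uses_flash_lib": attn_implementation
        is AttnImplementation.FLASH_ATTENTION_2,
    }
```

And for the collator fix just described, a sketch of the corrected call shape. The function and variable names are illustrative; only the placement of return_dict=True reflects the actual change:

```python
def apply_template(processor, messages, processor_kwargs):
    # transformers 5.x: return_dict must be a top-level keyword here; keeping it
    # inside processor_kwargs forwards it to the processor call, where it is
    # silently dropped and the method returns a bare 2-D input_ids tensor.
    processor_kwargs = {k: v for k, v in processor_kwargs.items() if k != "return_dict"}
    batch = processor.apply_chat_template(
        messages,
        tokenize=True,
        return_dict=True,   # consumed by ProcessorMixin.apply_chat_template
        **processor_kwargs,
    )
    return batch  # BatchFeature with "input_ids", "attention_mask", ...
```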
* fix(collator): process_rows returns dict (BatchFeature) shape

  Two related changes for the multimodal chat collator under transformers 5.x:

  1. Wrap the apply_chat_template result in dict(...) so process_rows returns a plain dict
     rather than a BatchFeature instance. BatchFeature is a Mapping but not a dict; downstream
     code that did batch["labels"] = self.processing_strategy.process_labels(batch["input_ids"])
     would index on a tensor when the result wasn't dict-shaped, raising:

         IndexError: too many indices for tensor of dimension 2

  2. Soften the regression test's contract from `dict` to `Mapping` so it exercises the actual
     semantic guarantee (key/value access) rather than the implementation detail (dict vs
     BatchFeature).

  The test guards against the original transformers 5.x breakage where apply_chat_template's
  return_dict default went from True to False. Includes a regression test under
  tests/test_mm_chat_collator.py.

  Bug surfaced via swarm dispatch task_01KQHPNAYD8XARSNSDJVW1GPF6 against
  attn-implementation-refactor; squash-merged from agent commits 4de886fd + dc9fcf4f.

Signed-off-by: Wing Lian <wing@axolotl.ai>

---------

Signed-off-by: Wing Lian <wing@axolotl.ai>
Co-authored-by: Axolotl Swarm <no-reply@axolotl.ai>
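A minimal sketch of the resulting shape guarantee and the softened test contract. Function and strategy names are illustrative, not the exact code in the collator or in tests/test_mm_chat_collator.py:

```python
from collections.abc import Mapping


def process_rows(processor, processing_strategy, messages, **processor_kwargs):
    # Wrap the BatchFeature in dict(...) so the item assignment below never
    # lands on a raw tensor.
    batch = dict(
        processor.apply_chat_template(
            messages,
            tokenize=True,
            return_dict=True,
            **processor_kwargs,
        )
    )
    batch["labels"] = processing_strategy.process_labels(batch["input_ids"])
    return batch


def assert_process_rows_contract(batch):
    # Regression-test invariants, checked against the Mapping interface rather
    # than the concrete dict type.
    assert isinstance(batch, Mapping)
    assert batch["input_ids"].ndim == 2
    assert batch["labels"].ndim == 2
```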
# An example finetuning Salesforce's XGen-7b model with 8k context using QLoRA
# on Tim Dettmers' Guanaco dataset.
base_model: Salesforce/xgen-7b-8k-base
# optionally might have model_type or tokenizer_type
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name

trust_remote_code: true

load_in_8bit: false
# enable 4bit for QLoRA
load_in_4bit: true
gptq: false
push_dataset_to_hub:
datasets:
  - path: timdettmers/openassistant-guanaco
    data_files:
      - openassistant_best_replies_train.jsonl
    type: "completion"
dataset_prepared_path:
val_set_size: 0.05
# enable QLoRA
adapter: qlora
lora_model_dir:
sequence_len: 8192
max_packed_sequence_len:

# hyperparameters from QLoRA paper Appendix B.2
# "We find hyperparameters to be largely robust across datasets"
lora_r: 64
lora_alpha: 16
# 0.1 for models up to 13B
# 0.05 for 33B and 65B models
lora_dropout: 0.05
# add LoRA modules on all linear layers of the base model
lora_target_linear: true

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
output_dir: ./outputs/qlora-out

# QLoRA paper Table 9
# - 16 for 7b & 13b
# - 32 for 33b, 64 for 65b
# Max size tested on A6000
# - 7b: 40
# - 40b: 4
# decrease if OOM, increase for max VRAM utilization
micro_batch_size: 1
gradient_accumulation_steps: 1
num_epochs: 4
# Optimizer for QLoRA
optimizer: paged_adamw_32bit
torchdistx_path:
lr_scheduler: cosine
# QLoRA paper Table 9
# - 2e-4 for 7b & 13b
# - 1e-4 for 33b & 65b
learning_rate: 0.00002
bf16: auto
tf32: false
gradient_checkpointing: true
# stop training after this many evaluation losses have increased in a row
# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback
early_stopping_patience: 3
resume_from_checkpoint:
auto_resume_from_checkpoints: true
logging_steps: 1
attn_implementation: xformers
gptq_groupsize:
gptq_model_v1:
warmup_ratio: 0.1
evals_per_epoch: 4
saves_per_epoch: 1
weight_decay: 0.0
special_tokens:
  eos_token: "<|endoftext|>"
  bos_token: "<|endoftext|>"
  unk_token: "<|endoftext|>"
  pad_token: "<|endoftext|>"
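
# Usage sketch (the file path below is illustrative; save this config wherever you keep
# your axolotl configs), using the standard axolotl CLI entry point:
#   accelerate launch -m axolotl.cli.train xgen-7b-8k-qlora.yml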