Compare commits
3 Commits
fix/gemma3 ... accelerato

| Author | SHA1 | Date |
|---|---|---|
|  | d260eeb57d |  |
|  | 5a7f007d20 |  |
|  | 5eb265513c |  |
.gitignore (vendored): 3 changes

@@ -193,6 +193,3 @@ out/
 
 # scm auto-versioning
 src/axolotl/_version.py
-
-# macOS
-.DS_Store
@@ -1,7 +1,8 @@
 base_model: google/gemma-3-4b-it
 
-plugins:
-  - axolotl.integrations.gemma3.Gemma3TextFromMultimodalPlugin
+# Need to set else transformers tries to load vision too
+model_type: Gemma3ForCausalLM
+cls_model_config: Gemma3TextConfig
 
 load_in_4bit: true
 
@@ -29,6 +30,7 @@ lora_model_dir:
 sequence_len: 2048
 sample_packing: true
 
+
 lora_r: 32
 lora_alpha: 16
 lora_dropout: 0
@@ -1,11 +1,12 @@
 base_model: google/gemma-3-12b-it
+# Automatically upload checkpoint and final model to HF
 # hub_model_id: username/custom_model_name
 
 load_in_8bit: false
 load_in_4bit: false
 strict: false
 
 plugins:
-  - axolotl.integrations.gemma3.Gemma3TextFromMultimodalPlugin
   - axolotl.integrations.liger.LigerPlugin
 
 liger_rope: true
@@ -7,7 +7,6 @@ load_in_4bit: false
 strict: false
 
 plugins:
-  - axolotl.integrations.gemma3.Gemma3TextFromMultimodalPlugin
   - axolotl.integrations.liger.LigerPlugin
 
 liger_rope: true
@@ -1,11 +1,12 @@
 base_model: google/gemma-3-12b-it
+# Math finetuning configuration for Gemma3-12B
 # hub_model_id: username/custom_model_name
 
 load_in_8bit: false
 load_in_4bit: false
 strict: false
 
 plugins:
-  - axolotl.integrations.gemma3.Gemma3TextFromMultimodalPlugin
   - axolotl.integrations.liger.LigerPlugin
 
 liger_rope: true
@@ -7,7 +7,6 @@ load_in_4bit: false
 strict: false
 
 plugins:
-  - axolotl.integrations.gemma3.Gemma3TextFromMultimodalPlugin
   - axolotl.integrations.liger.LigerPlugin
 
 liger_rope: true
@@ -1,11 +1,12 @@
 base_model: google/gemma-3-27b-it
+# Math finetuning configuration for Gemma3-27B
 # hub_model_id: username/custom_model_name
 
 load_in_8bit: false
 load_in_4bit: false
 strict: false
 
 plugins:
-  - axolotl.integrations.gemma3.Gemma3TextFromMultimodalPlugin
   - axolotl.integrations.liger.LigerPlugin
 
 liger_rope: true
@@ -7,7 +7,6 @@ load_in_4bit: false
 strict: false
 
 plugins:
-  - axolotl.integrations.gemma3.Gemma3TextFromMultimodalPlugin
   - axolotl.integrations.liger.LigerPlugin
 
 liger_rope: true
@@ -1,225 +0,0 @@
"""Merge trained text-only Gemma3 weights back into a full multimodal checkpoint.

After training with the Gemma3TextFromMultimodalPlugin, the saved checkpoint
contains only the language model weights (with ``model.language_model.*``
prefix, reversed by transformers v5's key_mapping on save).

This script reconstructs a full ``Gemma3ForConditionalGeneration`` checkpoint by
combining the trained language model weights with the original vision tower and
projector weights from the base multimodal model.

Usage::

    python scripts/merge_gemma3_multimodal_weights.py \\
        --original-model google/gemma-3-4b-it \\
        --trained-model /path/to/trained/output \\
        --output-dir /path/to/merged
"""

import argparse
import json
import logging
from pathlib import Path

import torch
from huggingface_hub import split_torch_state_dict_into_shards
from safetensors.torch import load_file, save_file
from transformers import AutoConfig

LOG = logging.getLogger(__name__)


def collect_safetensors(model_dir: Path) -> dict[str, torch.Tensor]:
    """Load and merge all safetensors shard files in a directory."""
    shard_files = sorted(model_dir.glob("*.safetensors"))
    if not shard_files:
        raise FileNotFoundError(f"No safetensors files found in {model_dir}")

    state_dict: dict[str, torch.Tensor] = {}
    for shard in shard_files:
        LOG.info("Loading %s", shard.name)
        state_dict.update(load_file(str(shard)))
    return state_dict


def merge(
    original_model: str,
    trained_model: str,
    output_dir: str,
    *,
    trust_remote_code: bool = False,
) -> None:
    original_path = Path(original_model)
    trained_path = Path(trained_model)
    out_path = Path(output_dir)
    out_path.mkdir(parents=True, exist_ok=True)

    # 1. Load the original multimodal checkpoint
    LOG.info("Loading original multimodal weights from %s", original_model)
    if original_path.is_dir():
        original_sd = collect_safetensors(original_path)
    else:
        from huggingface_hub import snapshot_download

        cached = Path(
            snapshot_download(original_model, allow_patterns=["*.safetensors"])
        )
        original_sd = collect_safetensors(cached)

    # 2. Load trained text-only weights (already reversed to model.language_model.* by
    #    transformers v5 key_mapping on save)
    LOG.info("Loading trained text-only weights from %s", trained_model)
    trained_sd = collect_safetensors(trained_path)

    # 3. Classify original keys
    lang_keys = {k for k in original_sd if k.startswith("model.language_model.")}
    vision_keys = {k for k in original_sd if k.startswith("model.vision_tower.")}
    projector_keys = {
        k for k in original_sd if k.startswith("model.multi_modal_projector.")
    }
    other_keys = set(original_sd.keys()) - lang_keys - vision_keys - projector_keys

    LOG.info(
        "Original checkpoint: %d language, %d vision, %d projector, %d other keys",
        len(lang_keys),
        len(vision_keys),
        len(projector_keys),
        len(other_keys),
    )

    # 4. Classify trained keys (reverse mapping on save gives model.language_model.* prefix)
    trained_lang_keys = {k for k in trained_sd if k.startswith("model.language_model.")}
    trained_other = set(trained_sd.keys()) - trained_lang_keys

    LOG.info(
        "Trained checkpoint: %d language keys, %d other keys",
        len(trained_lang_keys),
        len(trained_other),
    )

    # 5. Build merged state dict
    merged: dict[str, torch.Tensor] = {}

    # Keep vision tower and projector from original
    for key in vision_keys | projector_keys:
        merged[key] = original_sd[key]

    # Use trained language model weights (overwrite original)
    for key in trained_lang_keys:
        merged[key] = trained_sd[key]

    # For other trained keys (like lm_head.weight), use trained version
    for key in trained_other:
        merged[key] = trained_sd[key]

    # For any original other keys not covered by trained (shouldn't usually happen),
    # keep original
    for key in other_keys:
        if key not in merged:
            merged[key] = original_sd[key]

    # Check for missing language keys that were in original but not in trained
    missing_lang = lang_keys - trained_lang_keys
    if missing_lang:
        LOG.warning(
            "%d language keys in original but not in trained; keeping original: %s",
            len(missing_lang),
            list(missing_lang)[:5],
        )
        for key in missing_lang:
            merged[key] = original_sd[key]

    LOG.info("Merged checkpoint: %d total keys", len(merged))

    # 6. Save merged weights (sharded at 50GB, matching transformers default)
    LOG.info("Saving merged weights to %s", out_path)
    state_dict_split = split_torch_state_dict_into_shards(merged, max_shard_size="50GB")

    for filename, tensors in state_dict_split.filename_to_tensors.items():
        shard = {name: merged[name] for name in tensors}
        save_file(shard, str(out_path / filename))

    if state_dict_split.is_sharded:
        index = {
            "metadata": {
                "total_size": sum(t.numel() * t.element_size() for t in merged.values())
            },
            "weight_map": state_dict_split.tensor_to_filename,
        }
        with open(out_path / "model.safetensors.index.json", "w") as f:
            json.dump(index, f, indent=2)
        LOG.info("Saved %d shards", len(state_dict_split.filename_to_tensors))

    # 7. Copy/update config
    LOG.info("Writing config.json")
    original_config = AutoConfig.from_pretrained(
        original_model, trust_remote_code=trust_remote_code
    )

    # Update text_config fields from trained model's config if available
    trained_config_path = trained_path / "config.json"
    if trained_config_path.exists():
        with open(trained_config_path) as f:
            trained_config_dict = json.load(f)

        # The trained config is the text sub-config; merge its fields into
        # the original composite config's text_config
        if hasattr(original_config, "text_config"):
            for key, val in trained_config_dict.items():
                if key not in ("model_type", "_name_or_path", "architectures"):
                    if hasattr(original_config.text_config, key):
                        setattr(original_config.text_config, key, val)

    original_config.save_pretrained(out_path)

    # 8. Copy tokenizer files from trained model if present
    tokenizer_files = list(trained_path.glob("tokenizer*")) + list(
        trained_path.glob("special_tokens_map*")
    )
    if tokenizer_files:
        import shutil

        for tok_file in tokenizer_files:
            shutil.copy2(tok_file, out_path / tok_file.name)
        LOG.info("Copied %d tokenizer files", len(tokenizer_files))

    LOG.info("Merge complete. Output saved to %s", out_path)


def main():
    parser = argparse.ArgumentParser(
        description="Merge trained text-only Gemma3 weights back into a multimodal checkpoint."
    )
    parser.add_argument(
        "--original-model",
        required=True,
        help="HuggingFace model ID or local path to the original multimodal model",
    )
    parser.add_argument(
        "--trained-model",
        required=True,
        help="Local path to the trained text-only model output directory",
    )
    parser.add_argument(
        "--output-dir",
        required=True,
        help="Directory to save the merged multimodal checkpoint",
    )
    parser.add_argument(
        "--trust-remote-code",
        action="store_true",
        default=False,
        help="Trust remote code when loading model config",
    )
    args = parser.parse_args()

    merge(
        original_model=args.original_model,
        trained_model=args.trained_model,
        output_dir=args.output_dir,
        trust_remote_code=args.trust_remote_code,
    )


if __name__ == "__main__":
    main()
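The plugin README (further down in this compare) notes that the merged directory loads as a regular multimodal model. A minimal sketch of that follow-up step, reusing the placeholder output path from the usage example above; this is an illustration, not part of the change:

```python
from transformers import Gemma3ForConditionalGeneration

# Illustration only: per the script docstring and the plugin README, the merged
# directory is a full multimodal checkpoint, so it should load directly.
# "/path/to/merged" is the placeholder path from the usage example above.
model = Gemma3ForConditionalGeneration.from_pretrained("/path/to/merged")
```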
@@ -258,6 +258,11 @@ class TrainerBuilderBase(abc.ABC):
         bf16 = bf16 if bf16 is not None else False
         training_args_kwargs["bf16"] = bf16
 
+        if self.cfg.fp8:
+            training_args_kwargs["fp8"] = True
+            if self.cfg.fp8_enable_fsdp_float8_all_gather:
+                training_args_kwargs["enable_fsdp_float8_all_gather"] = True
+
     def _configure_scheduler(self, training_args_kwargs: dict):
         if self.cfg.lr_scheduler in ["one_cycle", "rex"]:
             training_args_kwargs["lr_scheduler_type"] = "cosine"
@@ -584,11 +584,9 @@ class AxolotlTrainer(
 
         super().create_accelerator_and_postprocess()
 
-    def additional_accelerator_args(
-        self, fp8: bool = False, enable_fsdp_float8_all_gather: bool = False, **kwargs
-    ) -> dict[str, Any]:
-        ret_kwargs = {}
-        if fp8:
+    def build_fp8_accelerator_args(self) -> dict[str, Any]:
+        args = {}
+        if self.args.fp8:
             from accelerate.utils import AORecipeKwargs
             from torchao.float8 import Float8LinearConfig
 
@@ -596,15 +594,22 @@ class AxolotlTrainer(
             # scaling strategy. See more details here:
             # https://github.com/pytorch/ao/tree/main/torchao/float8.
             config = Float8LinearConfig(
-                enable_fsdp_float8_all_gather=enable_fsdp_float8_all_gather,
-                force_recompute_fp8_weight_in_bwd=enable_fsdp_float8_all_gather is True,
+                enable_fsdp_float8_all_gather=self.args.enable_fsdp_float8_all_gather,
+                force_recompute_fp8_weight_in_bwd=self.args.enable_fsdp_float8_all_gather
+                is True,
             )
 
-            ret_kwargs["mixed_precision"] = "fp8"
-            ret_kwargs["kwargs_handlers"] = [AORecipeKwargs(config=config)]  # type: ignore
+            args["mixed_precision"] = "fp8"
+            args["kwargs_handlers"] = [AORecipeKwargs(config=config)]  # type: ignore
             os.environ["ACCELERATE_MIXED_PRECISION"] = "fp8"
 
-        return ret_kwargs
+        return args
 
+    def _build_accelerator_args(self, **kwargs) -> dict[str, Any]:
+        args = super().build_accelerator_args(**kwargs)
+        fp8_args = self.build_fp8_accelerator_args()
+        args.update(fp8_args)
+        return args
+
     def log(self, logs: dict[str, float], start_time: float | None = None) -> None:
         """
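For orientation, the kwargs assembled by `build_fp8_accelerator_args` mirror a plain Accelerate FP8 setup. A standalone sketch using only the names that appear in this hunk (`Float8LinearConfig`, `AORecipeKwargs`, `mixed_precision="fp8"`); it is illustrative, not code from this branch:

```python
import os

from accelerate import Accelerator
from accelerate.utils import AORecipeKwargs
from torchao.float8 import Float8LinearConfig

# Illustration: the same FP8 recipe handed straight to an Accelerator,
# with float8 all-gather enabled as in the hunk above.
config = Float8LinearConfig(
    enable_fsdp_float8_all_gather=True,
    force_recompute_fp8_weight_in_bwd=True,
)
os.environ["ACCELERATE_MIXED_PRECISION"] = "fp8"
accelerator = Accelerator(
    mixed_precision="fp8",
    kwargs_handlers=[AORecipeKwargs(config=config)],
)
```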
@@ -263,3 +263,13 @@ class AxolotlTrainingMixins:
     dion_rank_multiple_of: int | None = field(
         default=None,
     )
+
+    fp8: bool | None = field(
+        default=None,
+        metadata={"help": "Whether to use FP8 precision for training"},
+    )
+
+    enable_fsdp_float8_all_gather: bool | None = field(
+        default=None,
+        metadata={"help": "Whether to use FSDP with FP8 precision for all_gather"},
+    )
@@ -104,7 +104,7 @@ class CutCrossEntropyPlugin(BasePlugin):
 
     def patch_llama_like(
         self,
-        model_type: str,
+        model_type_to_patch: str,
     ) -> None:
         """
         Generic patch for model architectures with causal lm similar to llama
@@ -112,7 +112,10 @@ class CutCrossEntropyPlugin(BasePlugin):
         from cut_cross_entropy.transformers.patch import PATCH_FNS
 
         def patch_generic(
-            maybe_model, patch_options, model_type: str, remote_model_id: str | None
+            maybe_model,
+            patch_options,
+            remote_model_id: str | None,
+            model_type: str,
         ):
             import cut_cross_entropy.transformers.llama
             from cut_cross_entropy.transformers.llama import cce_forward
@@ -136,11 +139,13 @@ class CutCrossEntropyPlugin(BasePlugin):
                 f"Error: {str(e)}"
             ) from e
 
-        if model_type not in PATCH_FNS:
+        if model_type_to_patch not in PATCH_FNS:
             LOG.warning_once(
-                "Setting up generic cce patch for model type: %s", model_type
+                "Setting up generic cce patch for model type: %s", model_type_to_patch
             )
             LOG.warning_once(
-                f"Generic Cut Cross Entropy + {model_type} support is experimental and may not work as expected."
+                f"Generic Cut Cross Entropy + {model_type_to_patch} support is experimental and may not work as expected."
             )
-            PATCH_FNS[model_type] = partial(patch_generic, model_type=model_type)
+            PATCH_FNS[model_type_to_patch] = partial(
+                patch_generic, model_type=model_type_to_patch
+            )
@@ -1,37 +0,0 @@
# Gemma3 Text-from-Multimodal Plugin

Load a Gemma3 multimodal checkpoint (e.g. `google/gemma-3-4b-it`) directly into `Gemma3ForCausalLM` for text-only training. This bypasses the multimodal trainer path and enables sample packing and other text-specific optimizations.

## How it works

The plugin uses transformers v5's `key_mapping` parameter on `from_pretrained` to remap `model.language_model.*` checkpoint keys to `model.*`, matching what `Gemma3ForCausalLM` expects. Vision tower and projector weights are automatically discarded. On save, transformers reverses the mapping so checkpoints retain the original `model.language_model.*` prefix.

## Usage

Add the plugin to your YAML config:

```yaml
base_model: google/gemma-3-4b-it

plugins:
  - axolotl.integrations.gemma3.Gemma3TextFromMultimodalPlugin
```

See `examples/gemma3/gemma-3-4b-qlora.yml` for a complete example.

## Merging weights back into a multimodal checkpoint

After training, the saved checkpoint contains only the language model weights. To reconstruct a full `Gemma3ForConditionalGeneration` checkpoint (with the original vision tower and projector), use the merge script:

```bash
python scripts/merge_gemma3_multimodal_weights.py \
  --original-model google/gemma-3-4b-it \
  --trained-model /path/to/trained/output \
  --output-dir /path/to/merged
```

This combines:
- **Trained language model weights** from your output checkpoint
- **Original vision tower + projector weights** from the base multimodal model

The merged checkpoint can be loaded as `Gemma3ForConditionalGeneration` for multimodal inference or further training.
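To make the "How it works" section above concrete, a minimal sketch of the same remapping done by hand; it assumes a transformers release that accepts the `key_mapping` argument the README refers to, and it is not part of the diff:

```python
from transformers import Gemma3ForCausalLM

# Illustration of GEMMA3_KEY_MAPPING (defined in the plugin module below):
# remap model.language_model.* onto model.*; vision tower and projector keys
# match nothing in Gemma3ForCausalLM and are simply dropped.
model = Gemma3ForCausalLM.from_pretrained(
    "google/gemma-3-4b-it",
    key_mapping={"^model.language_model": "model"},
)
```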
@@ -1,9 +0,0 @@
"""Gemma3 integration for loading multimodal checkpoints as text-only models."""

from .args import Gemma3TextFromMultimodalArgs
from .plugin import Gemma3TextFromMultimodalPlugin

__all__ = [
    "Gemma3TextFromMultimodalArgs",
    "Gemma3TextFromMultimodalPlugin",
]
@@ -1,31 +0,0 @@
"""Pydantic input args for the Gemma3 text-from-multimodal plugin."""

from pydantic import BaseModel, model_validator

from axolotl.utils.logging import get_logger

LOG = get_logger(__name__)


class Gemma3TextFromMultimodalArgs(BaseModel):
    """Configuration args for loading a Gemma3 multimodal checkpoint as text-only."""

    gemma3_text_from_multimodal: bool = True
    extract_text_config: bool = False

    @model_validator(mode="before")
    @classmethod
    def set_model_type(cls, data):
        if not isinstance(data, dict):
            return data

        if not data.get("gemma3_text_from_multimodal", True):
            return data

        if not data.get("model_type"):
            LOG.info(
                "Gemma3TextFromMultimodalPlugin: auto-setting model_type to Gemma3ForCausalLM"
            )
            data["model_type"] = "Gemma3ForCausalLM"

        return data
@@ -1,107 +0,0 @@
"""Plugin for loading Gemma3 multimodal checkpoints into Gemma3ForCausalLM (text-only).

Uses transformers v5's ``key_mapping`` parameter on ``from_pretrained`` to remap
``model.language_model.*`` keys to ``model.*``, discarding vision tower and projector
weights. On save, transformers automatically reverses the mapping so saved
checkpoints retain the original ``model.language_model.*`` prefix.
"""

from axolotl.integrations.base import BasePlugin
from axolotl.utils.logging import get_logger

LOG = get_logger(__name__)

# key_mapping for transformers from_pretrained:
# Remap checkpoint keys matching ^model.language_model -> model
# Vision tower / projector keys won't match any model parameter and are discarded.
GEMMA3_KEY_MAPPING = {"^model.language_model": "model"}


class Gemma3TextFromMultimodalPlugin(BasePlugin):
    """Load a Gemma3 multimodal checkpoint as a text-only Gemma3ForCausalLM.

    Hooks
    -----
    register(cfg)
        Runs before config validation. Sets the ``_extract_text_config`` flag,
        ensures ``model_type`` is ``Gemma3ForCausalLM``, and injects
        ``key_mapping`` into ``model_kwargs`` so that ``from_pretrained`` remaps
        ``model.language_model.*`` → ``model.*``.

    pre_model_load(cfg)
        Runs after config validation/normalization but before model instantiation.
        Validates that ``model_config_type`` is ``gemma3_text`` and
        ``is_multimodal`` is False (confirming that ``_extract_text_config``
        worked correctly).
    """

    def get_input_args(self) -> str:
        return "axolotl.integrations.gemma3.Gemma3TextFromMultimodalArgs"

    def register(self, cfg: dict):
        """Set up config for multimodal → text-only loading.

        This runs before Pydantic validation, so ``cfg`` is a raw dict.
        """
        if not cfg.get("gemma3_text_from_multimodal", True):
            raise ValueError(
                "Gemma3TextFromMultimodalPlugin: disabled via config, but plugin selected"
            )

        # Flag for load_model_config() to extract the text sub-config
        cfg["extract_text_config"] = True

        # Ensure model_type is set for the text-only model class
        if not cfg.get("model_type"):
            cfg["model_type"] = "Gemma3ForCausalLM"

        # Inject key_mapping into model_kwargs so from_pretrained remaps weights
        model_kwargs = cfg.setdefault("model_kwargs", {})
        model_kwargs["key_mapping"] = GEMMA3_KEY_MAPPING

    def pre_model_load(self, cfg):
        """Validate that config extraction worked before model instantiation."""
        if not getattr(cfg, "gemma3_text_from_multimodal", True):
            return

        if cfg.model_config_type != "gemma3_text":
            LOG.warning(
                "Gemma3TextFromMultimodalPlugin: expected model_config_type='gemma3_text' "
                "but got '%s'. The text config extraction may not have worked.",
                cfg.model_config_type,
            )

        if cfg.is_multimodal or cfg.processor_type:
            raise ValueError(
                "Multimodal mode is enabled (processor_type set), but "
                "Gemma3TextFromMultimodalPlugin enabled. "
                "Please disable one of the two."
            )

    def post_train(self, cfg, model):
        """Log merge command after training completes."""
        if cfg.adapter:
            LOG.info(
                "Adapter training detected. To reconstruct the multimodal checkpoint:\n"
                "  1. Merge adapter weights into the text-only base model:\n"
                "     axolotl merge_lora <your_config.yml>\n"
                "  2. Then merge the resulting full model back into the multimodal checkpoint:\n"
                "     python scripts/merge_gemma3_multimodal_weights.py \\\n"
                "       --original-model %s \\\n"
                "       --trained-model %s/merged \\\n"
                "       --output-dir %s/multi-modal/merged",
                cfg.base_model,
                cfg.output_dir,
                cfg.output_dir,
            )
        else:
            LOG.info(
                "To merge trained weights back into the multimodal checkpoint, run:\n"
                "  python scripts/merge_gemma3_multimodal_weights.py \\\n"
                "    --original-model %s \\\n"
                "    --trained-model %s \\\n"
                "    --output-dir %s/multi-modal/merged",
                cfg.base_model,
                cfg.output_dir,
                cfg.output_dir,
            )
@@ -100,7 +100,6 @@ class PatchManager:
         self._apply_fsdp_patches()
         self._apply_adapter_patches()
         self._apply_model_specific_patches()
-        self._apply_fp8_patches()
         self._apply_flash_attention_peft_patches()
         self._apply_gradient_checkpointing_patches()
         self._patch_attention()
@@ -235,17 +234,6 @@ class PatchManager:
 
             patch_kimi_model()
 
-    def _apply_fp8_patches(self):
-        """Apply patches for FP8 support."""
-        if self.cfg.fp8:
-            from axolotl.monkeypatch.trainer_accelerator_args import (
-                patch_create_accelerate_code_for_fp8,
-            )
-
-            patch_create_accelerate_code_for_fp8(
-                self.cfg.fp8_enable_fsdp_float8_all_gather
-            )
-
     def _apply_flash_attention_peft_patches(self):
         """Apply patches for Flash Attention with PEFT."""
         if self.cfg.adapter:
@@ -204,13 +204,6 @@ def load_model_config(cfg: DictDefault) -> PretrainedConfig | addict.Dict:
 
     check_model_config(cfg, model_config)
 
-    # Extract text config from composite config when explicitly requested
-    # (set by plugins like Gemma3TextFromMultimodalPlugin)
-    if getattr(cfg, "extract_text_config", False) and hasattr(
-        model_config, "get_text_config"
-    ):
-        model_config = model_config.get_text_config()
-
     return model_config
 
 
@@ -1,83 +0,0 @@
"""
allow adding additional kwargs to Accelerator init
"""

import inspect

from transformers import Trainer

from axolotl.monkeypatch.utils import detab_code
from axolotl.utils.logging import get_logger

LOG = get_logger(__name__)

ORIGINAL_TRAINER_CODE = """
        # create accelerator object
        self.accelerator = Accelerator(**args)
"""

PATCHED_TRAINER_CODE = """
        if hasattr(self, "additional_accelerator_args"):
            additional_args = self.additional_accelerator_args(fp8=True, enable_fsdp_float8_all_gather={enable_fsdp_float8_all_gather}, **args)
            if additional_args:
                args.update(additional_args)

        # create accelerator object
        self.accelerator = Accelerator(**args)
"""


def get_create_accelerate_code() -> str:
    training_loop = inspect.getsource(Trainer.create_accelerator_and_postprocess)
    return training_loop


def check_create_accelerate_code_is_patchable() -> bool:
    create_code = get_create_accelerate_code()
    create_code, _ = detab_code(create_code)
    return ORIGINAL_TRAINER_CODE in create_code


def patch_create_accelerate_code_for_fp8(enable_fsdp_float8_all_gather: bool):
    """
    Monkeypatch create_accelerator_and_postprocess so it checks for additional kwargs.
    """

    try:
        create_code = get_create_accelerate_code()
    except OSError:
        return
    Trainer._original_create_accelerator_and_postprocess = create_code
    create_code, _ = detab_code(create_code)
    if ORIGINAL_TRAINER_CODE not in create_code:
        return

    patched_trainer_code = PATCHED_TRAINER_CODE.format(
        enable_fsdp_float8_all_gather=enable_fsdp_float8_all_gather
    )
    create_code = create_code.replace(ORIGINAL_TRAINER_CODE, patched_trainer_code)
    create_code = create_code.replace(
        "def create_accelerator_and_postprocess(",
        "def fixed_create_accelerator_and_postprocess(",
        1,
    )

    # load imports necessary
    import transformers.trainer

    items_to_import = []
    for item in dir(transformers.trainer):
        if item in create_code:
            items_to_import.append(item)

    exec(
        "from transformers.trainer import ("
        + ", ".join(x for x in items_to_import)
        + ")",
        globals(),
    )
    exec(create_code, globals())
    LOG.info("patching create_accelerator_and_postprocess to allow for overrides")
    Trainer.create_accelerator_and_postprocess = (
        fixed_create_accelerator_and_postprocess
    )
@@ -247,7 +247,7 @@ def drop_long_seq(sample, sequence_len=2048, min_sequence_len=2, raise_on_drop=F
 
 
 def process_datasets_for_packing(cfg, train_dataset, eval_dataset):
-    drop_attn_mask = cfg.model_config_type in ["mamba", "gemma3", "gemma3_text"]
+    drop_attn_mask = cfg.model_config_type in ["mamba", "gemma3"]
     if drop_attn_mask:
         LOG.info("dropping attention_mask column")
         train_dataset = train_dataset.remove_columns("attention_mask")