feat: add gemma3_text attention handling for lora kernels (#3103)

Author: NanoCode012
Date: 2025-08-26 16:47:26 +07:00
Committed by: GitHub
Parent: 79ddaebe9a
Commit: 0de254a0d0


@@ -149,6 +149,11 @@ def get_attention_cls_from_config(cfg: DictDefault) -> Type[nn.Module]:
         return MistralAttention
+    if model_type == "gemma3_text":
+        # gemma3_text has no package of its own; its classes live under gemma3
+        from transformers.models.gemma3.modeling_gemma3 import Gemma3Attention
+
+        return Gemma3Attention
     try:
         # Dynamically import the module and attention class
         module_path = f"transformers.models.{model_type}.modeling_{model_type}"
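For context, below is a minimal, self-contained sketch of the lookup pattern this hunk special-cases. The standalone helper resolve_attention_cls is hypothetical; axolotl's actual function takes a DictDefault config and special-cases other model types as well, and the class-name derivation via capitalize() is an assumption for illustration.

# Hypothetical sketch of the attention-class lookup, assuming the common
# "<ModelType>Attention" naming convention in transformers modeling files.
import importlib
from typing import Type

from torch import nn


def resolve_attention_cls(model_type: str) -> Type[nn.Module]:
    # Special case: the "gemma3_text" model type has no gemma3_text package;
    # its attention class is defined in transformers.models.gemma3, so the
    # generic import below would raise ModuleNotFoundError.
    if model_type == "gemma3_text":
        from transformers.models.gemma3.modeling_gemma3 import Gemma3Attention

        return Gemma3Attention

    # Generic path: transformers.models.<type>.modeling_<type>.<Type>Attention
    module_path = f"transformers.models.{model_type}.modeling_{model_type}"
    module = importlib.import_module(module_path)
    class_name = f"{model_type.capitalize()}Attention"  # e.g. "LlamaAttention"
    return getattr(module, class_name)

Without the special case, importlib.import_module("transformers.models.gemma3_text.modeling_gemma3_text") fails, because the text-only Gemma 3 variant shares the gemma3 module rather than shipping one named after its model_type.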