Feat: add peft_ensure_weight_tying (#3278)

* feat: upgrade peft to 0.18.0

* feat: add peft_ensure_weight_tying

* fix: default

* chore: adjust kwarg per feedback
Author: NanoCode012
Date: 2025-11-28 18:54:48 +07:00
Committed by: GitHub
Parent: 8990ca3205
Commit: b234532d9f
3 changed files with 12 additions and 1 deletion


@@ -11,7 +11,7 @@ liger-kernel==0.6.3
 packaging==23.2
 huggingface_hub>=0.36.0
-peft>=0.17.1
+peft>=0.18.0
 tokenizers>=0.22.1
 transformers==4.57.1
 accelerate==1.11.0
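The version floor matters for the change below: the `ensure_weight_tying` kwarg only exists in newer PEFT releases, so an environment still on the old pin would fail at adapter construction. A minimal sketch of a fail-fast check, assuming the 0.18.0 floor from this requirements bump and using `packaging`, which is already pinned above:

```python
# Sketch: fail fast if the installed peft predates the ensure_weight_tying kwarg.
# The 0.18.0 floor is taken from this requirements bump.
from importlib.metadata import version

from packaging.version import Version

if Version(version("peft")) < Version("0.18.0"):
    raise RuntimeError("peft>=0.18.0 is required for ensure_weight_tying support")
```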


@@ -102,6 +102,8 @@ def load_lora(
         lora_config_kwargs["layer_replication"] = cfg.peft_layer_replication
     if cfg.peft_trainable_token_indices:
         lora_config_kwargs["trainable_token_indices"] = cfg.peft_trainable_token_indices
+    if cfg.peft_ensure_weight_tying is not None:
+        lora_config_kwargs["ensure_weight_tying"] = cfg.peft_ensure_weight_tying
     # Determine the correct PEFT task type
     model_cls = type(model).__name__
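For context, this is roughly where the forwarded kwarg ends up on the PEFT side once load_lora builds its LoraConfig. A minimal sketch, assuming peft>=0.18.0 (where LoraConfig accepts ensure_weight_tying, per the issue linked in the schema change below); the model and target modules are illustrative only:

```python
# Sketch: the kwarg collected above lands on peft.LoraConfig.
# In an axolotl config this corresponds to `peft_ensure_weight_tying: true`.
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("gpt2")  # gpt2 ties lm_head to the input embeddings

lora_config = LoraConfig(
    r=16,
    target_modules=["c_attn"],
    ensure_weight_tying=True,  # keep adapter weights consistent across tied model weights
)
peft_model = get_peft_model(model, lora_config)
```

Per the PEFT issue referenced in the field description below, the flag is aimed at models with tied input/output embeddings, where adapting or saving those modules could otherwise leave the copies untied.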


@@ -100,6 +100,15 @@ class LoraConfig(BaseModel):
             )
         },
     )
+    peft_ensure_weight_tying: bool | None = Field(
+        default=None,
+        json_schema_extra={
+            "description": (
+                "Whether to tie adapter weights for tied model weights. "
+                "See https://github.com/huggingface/peft/issues/2864"
+            )
+        },
+    )
     qlora_sharded_model_loading: bool | None = Field(
         default=False,
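The `None` default is deliberate: the load_lora hunk above only forwards the option when the user explicitly set it, so unset configs keep whatever default PEFT itself ships. A minimal sketch of that tri-state pattern, with `cfg` and `lora_config_kwargs` mirroring the names in the diff and the values made up:

```python
# Sketch: only forward an option when the user set it, so the downstream
# library's own default applies otherwise.
from types import SimpleNamespace

cfg = SimpleNamespace(peft_ensure_weight_tying=None)  # user did not set the option

lora_config_kwargs = {}
if cfg.peft_ensure_weight_tying is not None:
    lora_config_kwargs["ensure_weight_tying"] = cfg.peft_ensure_weight_tying

print(lora_config_kwargs)  # {} -> PEFT's own default wins
```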