From caa49a9d7d266a59e50d17c6bd882cf8982218d8 Mon Sep 17 00:00:00 2001
From: NanoCode012
Date: Thu, 6 Feb 2025 00:12:14 +0700
Subject: [PATCH] fix: use existing model config

---
 src/axolotl/cli/convert_linear_attention.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/src/axolotl/cli/convert_linear_attention.py b/src/axolotl/cli/convert_linear_attention.py
index dae7d7bb0..5045b3545 100644
--- a/src/axolotl/cli/convert_linear_attention.py
+++ b/src/axolotl/cli/convert_linear_attention.py
@@ -49,12 +49,9 @@ def do_linearize(cfg: DictDefault, cli_args: TrainerCliArgs) -> None:
     for p in model.parameters():
         p.requires_grad = False
 
-    # load config
-    base_config = load_model_config(cfg)
-
     # convert to linear llama
     linear_llama_config = LinearLlamaConfig.from_llama(
-        base_config, cfg.attention_config
+        model.config, cfg.attention_config
     )
     model = LinearLlamaForCausalLM.from_llama(
         model, config=linear_llama_config, train_attention=True