fix: allow merge lora on pre-quantized model (#2511)

* fix: allow merge lora on pre-quantized model

* fix: remove unused sections per comment
Author: NanoCode012
Date: 2025-04-10 01:01:42 +07:00
Committed by: GitHub
Parent: 9f986f5e71
Commit: e0420b3528
2 changed files with 7 additions and 7 deletions


@@ -32,7 +32,13 @@ def do_merge_lora(*, cfg: DictDefault) -> None:
     LOG.info("Running merge of LoRA with base model...")
     model = model.merge_and_unload(progressbar=True)
-    model.to(dtype=cfg.torch_dtype)
+    try:
+        model.to(dtype=cfg.torch_dtype)
+    except ValueError as e:
+        LOG.warning("Failed to convert model to dtype %s", cfg.torch_dtype)
+        LOG.warning("Ignore this if the base_model is pre-quantized.")
+        LOG.warning("Error raised: %s", e)
     model.generation_config.do_sample = True
     if cfg.local_rank == 0:
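Context for the change above: calling .to(dtype=...) on a model whose weights are already quantized (for example a bitsandbytes 4-bit checkpoint) raises a ValueError, so the dtype cast is now attempted inside a try/except and only logged when it fails. Below is a minimal standalone sketch of that behaviour, assuming torch and a loaded transformers model; the helper name is illustrative and not part of this commit:

    import logging

    import torch

    LOG = logging.getLogger(__name__)


    def cast_merged_model(model, torch_dtype=torch.bfloat16):
        # Mirrors the patched behaviour: try the dtype cast, but tolerate
        # pre-quantized weights that refuse to be moved to another dtype.
        try:
            model.to(dtype=torch_dtype)
        except ValueError as err:
            LOG.warning("Failed to convert model to dtype %s", torch_dtype)
            LOG.warning("Ignore this if the base_model is pre-quantized.")
            LOG.warning("Error raised: %s", err)
        return model

With a bitsandbytes- or GPTQ-quantized base model the except branch fires and the merge continues; with a regular fp16/bf16 model the cast succeeds exactly as before.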


@@ -151,12 +151,6 @@ def check_model_config(cfg: DictDefault, model_config: PretrainedConfig):
             "Please make sure to point to a GPTQ model."
         )
-    if not cfg.gptq and quant_config_exists and not cfg.load_in_4bit:
-        raise ValueError(
-            "model_config.quantization_config is set but `gptq` flag is not. "
-            "Please use the `gptq` flag to train quantized model or point to a non-quantized model."
-        )
     lora_modules_to_save = get_linear_embedding_layers(model_config.model_type)
     if (
         cfg.adapter
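Dropping the block above means a checkpoint whose config carries a quantization_config (i.e. a pre-quantized model) no longer has to set the gptq flag; the GPTQ-specific mismatch check earlier in check_model_config still applies. A minimal sketch of how such a config can be detected, assuming transformers is installed; the function name and example model id are illustrative only:

    from transformers import AutoConfig


    def has_quantization_config(model_name_or_path: str) -> bool:
        # Pre-quantized checkpoints (GPTQ, AWQ, bitsandbytes, ...) ship a
        # quantization_config entry in their config.json.
        config = AutoConfig.from_pretrained(model_name_or_path)
        return getattr(config, "quantization_config", None) is not None


    # Example usage (model id is illustrative):
    # print(has_quantization_config("TheBloke/Llama-2-7B-GPTQ"))  # True for a GPTQ checkpoint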