fix for quant config from model (#540)

This commit is contained in:
Wing Lian
2023-09-10 12:40:52 -04:00
committed by GitHub
parent c1921c9acb
commit a94f9cb99e

View File

@@ -160,7 +160,7 @@ def load_model(
model_kwargs["revision"] = cfg.model_revision
if cfg.gptq:
model_config = load_model_config(cfg)
-if hasattr(model_config, "quantization_config"):
+if not hasattr(model_config, "quantization_config"):
LOG.warning("model config does not contain quantization_config information")
else:
model_kwargs["quantization_config"] = GPTQConfig(