Support disabling exllama for GPTQ (#604)
* Support disabling exllama for GPTQ
* Update the property instead of the item
* Fix the config key
This commit is contained in:
@@ -196,6 +196,10 @@ def load_model(
|
||||
# Propagate GPTQ quantization settings from the loaded model config into the
# kwargs used to instantiate the model, optionally overriding `disable_exllama`
# from the user's cfg.
if hasattr(model_config, "quantization_config"):
    quant_cfg = model_config.quantization_config  # alias; mutated in place below
    # Only override the exllama toggle when the user set it explicitly.
    if cfg.gptq_disable_exllama is not None:
        quant_cfg["disable_exllama"] = cfg.gptq_disable_exllama
    # NOTE(review): assumes quantization_config is a mapping — **-unpacking
    # below requires it; confirm against the transformers config type.
    model_kwargs["quantization_config"] = GPTQConfig(**quant_cfg)
else:
    LOG.warning("model config does not contain quantization_config information")
|
||||
|
||||
Reference in New Issue
Block a user