drop valueerror as this was from when 4bit required gptq
@@ -96,12 +96,6 @@ def check_model_config(cfg: DictDefault, model_config: Union[AutoConfig, DictDef
             "Please make sure to point to a GPTQ model."
         )
 
-    if not cfg.gptq and quant_config_exists and not cfg.load_in_4bit:
-        raise ValueError(
-            "model_config.quantization_config is set but `gptq` flag is not. "
-            "Please use the `gptq` flag to train quantized model or point to a non-quantized model."
-        )
-
     lora_modules_to_save = get_linear_embedding_layers(model_config.model_type)
     if (
         cfg.adapter
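
For context, a minimal sketch of the branch this hunk deletes, with the condition and message taken verbatim from the diff. The wrapper function name and the assumption about what cfg and quant_config_exists hold are illustrative only and are not part of this commit:

# Sketch of the removed branch (hypothetical wrapper, not the project's module).
# cfg is assumed to expose the gptq and load_in_4bit flags; quant_config_exists
# is assumed to mean model_config.quantization_config is populated.
def removed_quant_check(cfg, quant_config_exists: bool) -> None:
    if not cfg.gptq and quant_config_exists and not cfg.load_in_4bit:
        raise ValueError(
            "model_config.quantization_config is set but `gptq` flag is not. "
            "Please use the `gptq` flag to train quantized model or point to a non-quantized model."
        )

# After this commit the branch is gone, so a model whose config already carries
# quantization_config can be loaded without the `gptq` flag. The earlier check
# visible in the context lines ("Please make sure to point to a GPTQ model.")
# is left unchanged.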