fix conditional check to prevent always using 4bit

Author: Wing Lian
Date:   2023-04-18 00:35:03 -04:00
Parent: 69164da079
Commit: 8f36f3cd5a


@@ -85,7 +85,7 @@ def load_model(base_model, base_model_config, model_type, tokenizer_type, cfg, a
         raise e
     try:
-        if cfg.load_4bit and "llama" in base_model or "llama" in cfg.model_type.lower():
+        if cfg.load_4bit and ("llama" in base_model or "llama" in cfg.model_type.lower()):
             from alpaca_lora_4bit.autograd_4bit import load_llama_model_4bit_low_ram
             from huggingface_hub import snapshot_download
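
Why the fix matters: Python's `and` binds more tightly than `or`, so the original condition parsed as `(cfg.load_4bit and "llama" in base_model) or ("llama" in cfg.model_type.lower())`, which selected the 4-bit path for any llama model type even when `load_4bit` was disabled. Below is a minimal standalone sketch of the difference; the variable names mirror the config fields in the diff but are plain locals chosen for illustration, not the project's actual cfg object or model names.

load_4bit = False                     # 4-bit loading explicitly disabled
base_model = "huggyllama/llama-7b"    # illustrative model name (assumption)
model_type = "LlamaForCausalLM"       # illustrative model type (assumption)

# Old check: `and` binds tighter than `or`, so this evaluates as
# (load_4bit and "llama" in base_model) or ("llama" in model_type.lower())
old_check = load_4bit and "llama" in base_model or "llama" in model_type.lower()

# New check: the parentheses group the two "llama" tests first,
# so load_4bit must be True for the whole expression to be True
new_check = load_4bit and ("llama" in base_model or "llama" in model_type.lower())

print(old_check)  # True  -> would wrongly fall into the 4-bit loader
print(new_check)  # False -> 4-bit loader skipped, as intended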