Don't use the Llama path if trust_remote_code is set, since that case needs to go through the AutoModel path

This commit is contained in:
Wing Lian
2023-07-07 21:31:02 -04:00
parent a692ad3f4c
commit 66afb76a15

View File

@@ -202,7 +202,7 @@ def load_model(
else True,
)
load_in_8bit = False
elif cfg.is_llama_derived_model:
elif cfg.is_llama_derived_model and not cfg.trust_remote_code:
from transformers import LlamaForCausalLM
config = LlamaConfig.from_pretrained(base_model_config)