Merge pull request #313 from OpenAccess-AI-Collective/tokenizer-llama2-embeddings

don't resize embeddings to multiples of 32 by default
This commit is contained in:
Wing Lian
2023-07-22 04:09:59 -04:00
committed by GitHub
2 changed files with 8 additions and 1 deletion

View File

@@ -301,7 +301,11 @@ def load_model(
**model_kwargs,
)
embeddings_len = math.ceil(len(tokenizer) / 32) * 32
embeddings_len = (
math.ceil(len(tokenizer) / 32) * 32
if cfg.resize_token_embeddings_to_32x
else len(tokenizer)
)
model.resize_token_embeddings(embeddings_len)
if (