don't resize embeddings to multiples of 32 by default

This commit is contained in:
Wing Lian
2023-07-22 01:52:38 -04:00
parent 1b63bf13bc
commit 1066751358
2 changed files with 8 additions and 1 deletions

View File

@@ -301,7 +301,11 @@ def load_model(
**model_kwargs,
)
embeddings_len = math.ceil(len(tokenizer) / 32) * 32
embeddings_len = (
math.ceil(len(tokenizer) / 32) * 32
if cfg.resize_token_embeddings_to_32x
else len(tokenizer)
)
model.resize_token_embeddings(embeddings_len)
if (