From bdbca8fa6ca01f9e668b1fa886853b247646b2e7 Mon Sep 17 00:00:00 2001
From: Wing Lian
Date: Mon, 15 May 2023 14:07:17 -0400
Subject: [PATCH] more fixes

---
 scripts/finetune.py         |  6 +++---
 src/axolotl/utils/models.py | 12 ++++++++----
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/scripts/finetune.py b/scripts/finetune.py
index d6a920f5d..ee055559f 100644
--- a/scripts/finetune.py
+++ b/scripts/finetune.py
@@ -1,7 +1,6 @@
 import importlib
 import logging
 import os
-import pathlib
 import random
 import signal
 import sys
@@ -10,7 +9,6 @@ from typing import Optional
 
 import fire
 import torch
-import transformers
 import yaml
 from attrdict import AttrDefault
 
@@ -236,7 +234,9 @@ def train(
     logging.info(f"Training Completed!!! Saving pre-trained model to {cfg.output_dir}")
 
     # TODO do we need this fix? https://huggingface.co/docs/accelerate/usage_guides/fsdp#saving-and-loading
-    model.save_pretrained(cfg.output_dir)
+    # only save on rank 0, otherwise it corrupts output on multi-GPU when multiple processes attempt to write the same file
+    if cfg.local_rank == 0:
+        model.save_pretrained(cfg.output_dir)
     # trainer.save_model(cfg.output_dir)  # TODO this may be needed for deepspeed to work? need to review another time
 
 
diff --git a/src/axolotl/utils/models.py b/src/axolotl/utils/models.py
index 4d7a45920..4e3b4efd6 100644
--- a/src/axolotl/utils/models.py
+++ b/src/axolotl/utils/models.py
@@ -1,4 +1,5 @@
 import logging
+import math
 import os
 from pathlib import Path
 from typing import Optional, Tuple, TYPE_CHECKING
@@ -180,12 +181,14 @@ def load_model(
         tokenizer.add_special_tokens({"pad_token": "[PAD]"})
         os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
-    if cfg.tokens:
-        for k, v in cfg.tokens.items():
+    if cfg.special_tokens:
+        for k, v in cfg.special_tokens.items():
             tokenizer.add_special_tokens({k: v})
+    if cfg.tokens:
+        tokenizer.add_tokens(cfg.tokens)
 
-    # this should only be needed if you are messing with new tokens in the vocab
-    # model.resize_token_embeddings(len(tokenizer))
+    embeddings_len = math.ceil(len(tokenizer) / 32) * 32
+    model.resize_token_embeddings(embeddings_len)
 
     if cfg.adapter and load_in_8bit and not cfg.load_4bit:
         logging.info("converting PEFT model w/ prepare_model_for_int8_training")
@@ -221,6 +224,7 @@ def load_model(
             requires_grad.append(f"{name}: {param.requires_grad}")
     if len(requires_grad) == 0:
         logging.warning("there are no parameters that require gradient updates")
+    model.config.use_cache = False
 
     # TODO resume_from_checkpoint handling
     return model, tokenizer, lora_config