casts the prepared data to int16 (doesn't help with training memory)

commit 2db9436410 (parent 120e7df7df)
Author: Wing Lian
Date:   2023-04-17 21:36:02 -04:00

2 changed files with 13 additions and 7 deletions
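For context: the change stores the prepared token ids in the narrowest signed integer dtype that can hold every id in the tokenizer's vocabulary, probing each dtype's range with torch.iinfo. A minimal standalone sketch of the selection logic this diff adds (the helper name smallest_token_dtype is illustrative, not part of the repo):

import torch

def smallest_token_dtype(vocab_size: int) -> torch.dtype:
    # Pick the narrowest signed dtype whose max covers every token id.
    if vocab_size <= torch.iinfo(torch.int16).max:  # 32767
        return torch.int16
    if vocab_size <= torch.iinfo(torch.int32).max:  # 2147483647
        return torch.int32
    return torch.int64

print(smallest_token_dtype(32000))  # torch.int16 (a LLaMA-sized vocab fits)
print(smallest_token_dtype(50000))  # torch.int32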


@@ -14,7 +14,6 @@ import transformers
 import yaml
 from attrdict import AttrDefault
 from datasets import load_dataset, IterableDataset, Dataset, load_from_disk
-from huggingface_hub.hf_api import DatasetInfo
 from torch import nn
 from transformers import (
     AutoModelForCausalLM,
@@ -169,7 +168,7 @@ def load_model(base_model, base_model_config, model_type, tokenizer_type, cfg, a
     if cfg.load_4bit:
         # Scales to half
-        print('Fitting 4bit scales and zeros to half')
+        logging.info('Fitting 4bit scales and zeros to half')
         for n, m in model.named_modules():
             if 'Autograd4bitQuantLinear' in str(type(m)) or 'Linear4bitLt' in str(type(m)):
                 if hasattr(m, "is_v1_model") and m.is_v1_model:


@@ -30,7 +30,6 @@ class TokenizedPromptDataset(IterableDataset):
         except InvalidDataException:
             pass
-
 # TODO this isn't the best since it can't interleave datasets
 class ConstantLengthDataset(IterableDataset):
     """
@@ -40,7 +39,6 @@ class ConstantLengthDataset(IterableDataset):
         dataset (dataset.Dataset): Dataset with text files.
         seq_length (int): Length of token sequences to return.
     """
-
     def __init__(
         self,
         tokenizer,
@@ -52,6 +50,15 @@ class ConstantLengthDataset(IterableDataset):
         self.datasets: List[IterableDataset] = datasets
         self.seq_length = seq_length
+        vocab_size = len(tokenizer.get_vocab())
+        if vocab_size <= torch.iinfo(torch.int16).max:
+            self.tokens_dtype = torch.int16
+        elif vocab_size <= torch.iinfo(torch.int32).max:
+            self.tokens_dtype = torch.int32
+        else:
+            self.tokens_dtype = torch.int64
+
     def __iter__(self):
         buffer = {"input_ids": [], "attention_mask": [], "labels": []}
         buffer_len = 0
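As the commit title concedes, the narrower dtype cannot shrink training memory itself: torch.nn.Embedding rejects int16 indices, and cross-entropy class targets must be int64, so the data has to be widened back to long before any forward pass. A quick demonstration of that constraint:

import torch

emb = torch.nn.Embedding(32000, 8)
ids = torch.tensor([1, 2, 3], dtype=torch.int16)

try:
    emb(ids)                  # int16 indices raise a dtype error
except RuntimeError as err:
    print("int16 lookup failed:", err)

print(emb(ids.long()).shape)  # torch.Size([3, 8]) after widening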
@@ -105,11 +112,11 @@ class ConstantLengthDataset(IterableDataset):
                 attention_mask.append(1)
                 labels.append(self.concat_token_id)
-            input_ids_with_concat = torch.tensor(input_ids, dtype=torch.long)
+            input_ids_with_concat = torch.tensor(input_ids, dtype=self.tokens_dtype)
             attention_mask_with_concat = torch.tensor(
-                attention_mask, dtype=torch.long
+                attention_mask, dtype=self.tokens_dtype
             )
-            labels_with_concat = torch.tensor(labels, dtype=torch.long)
+            labels_with_concat = torch.tensor(labels, dtype=self.tokens_dtype)
             buffer["input_ids"].append(input_ids_with_concat)
             buffer["attention_mask"].append(attention_mask_with_concat)