diff --git a/configs/galactica_1_3B.yml b/configs/galactica_1_3B.yml
new file mode 100644
index 000000000..ed722f34e
--- /dev/null
+++ b/configs/galactica_1_3B.yml
@@ -0,0 +1,41 @@
+base_model: facebook/galactica-1.3b
+model_type: AutoModelForCausalLM
+tokenizer_type: AutoTokenizer
+load_in_8bit: false
+datasets:
+  - path: tatsu-lab/alpaca
+    type: alpaca
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.1
+adapter:
+lora_model_dir:
+sequence_len: 1024
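+# pack multiple tokenized samples into sequences up to this length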
+max_packed_sequence_len: 1024
+lora_r: 8
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_modules:
+  - q_proj
+  - v_proj
+lora_fan_in_fan_out: false
+wandb_project:
+wandb_watch:
+wandb_run_id:
+wandb_log_model: checkpoint
+output_dir: ./lora-galactica-alpaca
+batch_size: 32
+micro_batch_size: 16
+num_epochs: 3
+learning_rate: 0.00003
+train_on_inputs: false
+group_by_length: false
+bf16: false
+tf32: false
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+special_tokens:
+  pad_token: "[PAD]"
+  bos_token: ""
+  eos_token: ""
+  unk_token: ""
diff --git a/src/axolotl/utils/data.py b/src/axolotl/utils/data.py
index 0804312e7..d315da98c 100644
--- a/src/axolotl/utils/data.py
+++ b/src/axolotl/utils/data.py
@@ -31,7 +31,7 @@ def load_prepare_datasets(tokenizer, cfg, default_dataset_prepared_path):
     ds_hash = str(
         md5(
             (
-                str(max_packed_sequence_len)
+                str(cfg.sequence_len)
                 + "@"
                 + "|".join(sorted([f"{d.path}:{d.type}" for d in cfg.datasets]))
             ).encode("utf-8")
@@ -114,21 +114,24 @@ def load_prepare_datasets(tokenizer, cfg, default_dataset_prepared_path):
             datasets.append(ds_wrapper)
         else:
             logging.error(f"unhandled prompt tokenization strategy: {d.type}")
-    logging.info("merging and shuffling master dataset")
+    logging.info("tokenizing, merging, and shuffling master dataset")
-    dataset = concatenate_datasets(datasets).shuffle(seed=42)
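+    # materialize each tokenized dataset into a flat list of samples before shuffling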
+    samples = []
+    for d in datasets:
+        samples = samples + [i for i in d]
+    dataset = Dataset.from_list(samples).shuffle(seed=42)
     if cfg.local_rank == 0:
         logging.info(f"Saving merged prepared dataset to disk... {prepared_ds_path}")
         dataset.save_to_disk(prepared_ds_path)
-    if cfg.max_packed_sequence_len is not None:
-        constant_len_dataset = ConstantLengthDataset(
-            tokenizer,
-            [dataset],
-            seq_length=max_packed_sequence_len,
-        )
-        logging.info("packing master dataset")
-        dataset = Dataset.from_list([_ for _ in constant_len_dataset])
+    if cfg.max_packed_sequence_len is not None:
+        constant_len_dataset = ConstantLengthDataset(
+            tokenizer,
+            [dataset],
+            seq_length=max_packed_sequence_len,
+        )
+        logging.info(f"packing master dataset to len: {cfg.max_packed_sequence_len}")
+        dataset = Dataset.from_list([_ for _ in constant_len_dataset])
     if cfg.dataset_shard_num and cfg.dataset_shard_idx is not None:
         logging.info(f"Using index #{cfg.dataset_shard_idx} of {cfg.dataset_shard_num} shards")
diff --git a/src/axolotl/utils/models.py b/src/axolotl/utils/models.py
index 6d7cbfb52..a14f89b19 100644
--- a/src/axolotl/utils/models.py
+++ b/src/axolotl/utils/models.py
@@ -161,6 +161,10 @@ def load_model(
         tokenizer.add_special_tokens({"pad_token": "[PAD]"})
         os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
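+    # override tokenizer special tokens with any values supplied in the config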
+    if cfg.special_tokens:
+        for k, v in cfg.special_tokens.items():
+            setattr(tokenizer, k, v)
+
     if load_in_8bit and not cfg.load_4bit:
         logging.info("converting model w/ prepare_model_for_int8_training")
         model = prepare_model_for_int8_training(model)