Compare commits
multipack-...sdpa-multi
1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | 1a538be9c2 |  |
.github/FUNDING.yml (vendored, 2 changes)
@@ -1,6 +1,6 @@
 # These are supported funding model platforms

-github: [winglian, OpenAccess-AI-Collective] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+github: OpenAccess-AI-Collective # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
 patreon: # Replace with a single Patreon username
 open_collective: # Replace with a single Open Collective username
 ko_fi: axolotl_ai # Replace with a single Ko-fi username
.github/workflows/tests.yml (vendored, 2 changes)
@@ -73,7 +73,7 @@ jobs:
           - cuda: 121
             cuda_version: 12.1.0
             python_version: "3.10"
-            pytorch: 2.1.2
+            pytorch: 2.1.1
     steps:
       - name: Checkout
         uses: actions/checkout@v4
README.md (17 changes)
@@ -607,17 +607,6 @@ datasets:
     # For `completion` datsets only, uses the provided field instead of `text` column
     field:

-# A list of one or more datasets to eval the model with.
-# You can use either test_datasets, or val_set_size, but not both.
-test_datasets:
-  - path: /workspace/data/eval.jsonl
-    ds_type: json
-    # You need to specify a split. For "json" datasets the default split is called "train".
-    split: train
-    type: completion
-    data_files:
-      - /workspace/data/eval.jsonl
-
 # use RL training: dpo, ipo, kto_pair
 rl:

@@ -707,12 +696,6 @@ lora_modules_to_save:

 lora_fan_in_fan_out: false

-peft:
-  # Configuration options for loftq initialization for LoRA
-  # https://huggingface.co/docs/peft/developer_guides/quantization#loftq-initialization
-  loftq_config:
-    loftq_bits: # typically 4 bits
-
 # ReLoRA configuration
 # Must use either 'lora' or 'qlora' adapter, and does not support fsdp or deepspeed
 relora_steps: # Number of steps per ReLoRA restart
@@ -11,6 +11,7 @@ val_set_size: 0.05
 adapter: qlora
 lora_model_dir:
 sequence_len: 2048
+max_packed_sequence_len: 2048
 lora_r: 16
 lora_alpha: 32
 lora_dropout: 0.05
@@ -67,3 +67,6 @@ weight_decay: 0.1
 fsdp:
 fsdp_config:
 special_tokens:
+  bos_token: "<s>"
+  eos_token: "</s>"
+  unk_token: "<unk>"
@@ -1,70 +0,0 @@
-base_model: NousResearch/Llama-2-7b-hf
-model_type: LlamaForCausalLM
-tokenizer_type: LlamaTokenizer
-is_llama_derived_model: true
-
-load_in_8bit: false
-load_in_4bit: false
-strict: false
-
-datasets:
-  - path: mhenrichsen/alpaca_2k_test
-    type: alpaca
-dataset_prepared_path:
-val_set_size: 0.05
-output_dir: ./lora-out
-
-sequence_len: 4096
-sample_packing: true
-pad_to_sequence_len: true
-
-adapter: lora
-lora_model_dir:
-lora_r: 32
-lora_alpha: 16
-lora_dropout: 0.05
-lora_target_linear: true
-lora_fan_in_fan_out:
-peft:
-  loftq_config:
-    loftq_bits: 4
-
-wandb_project:
-wandb_entity:
-wandb_watch:
-wandb_name:
-wandb_log_model:
-
-gradient_accumulation_steps: 4
-micro_batch_size: 2
-num_epochs: 4
-optimizer: adamw_bnb_8bit
-lr_scheduler: cosine
-learning_rate: 0.0002
-
-train_on_inputs: false
-group_by_length: false
-bf16: auto
-fp16:
-tf32: false
-
-gradient_checkpointing: true
-early_stopping_patience:
-resume_from_checkpoint:
-local_rank:
-logging_steps: 1
-xformers_attention:
-flash_attention: true
-s2_attention:
-
-warmup_steps: 10
-evals_per_epoch: 4
-eval_table_size:
-eval_table_max_new_tokens: 128
-saves_per_epoch: 1
-debug:
-deepspeed:
-weight_decay: 0.0
-fsdp:
-fsdp_config:
-special_tokens:
@@ -65,3 +65,6 @@ weight_decay: 0.0
 fsdp:
 fsdp_config:
 special_tokens:
+  bos_token: "<s>"
+  eos_token: "</s>"
+  unk_token: "<unk>"
@@ -65,3 +65,6 @@ weight_decay: 0.0
 fsdp:
 fsdp_config:
 special_tokens:
+  bos_token: "<s>"
+  eos_token: "</s>"
+  unk_token: "<unk>"
@@ -1,6 +1,6 @@
 --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/
 packaging==23.2
-peft @ git+https://github.com/huggingface/peft.git
+peft==0.7.1
 transformers==4.37.0
 tokenizers==0.15.0
 bitsandbytes>=0.41.1
setup.py (3 changes)
@@ -27,7 +27,6 @@ def parse_requirements():

     try:
         torch_version = version("torch")
-        _install_requires.append(f"torch=={torch_version}")
         if torch_version.startswith("2.1."):
            _install_requires.pop(_install_requires.index("xformers==0.0.22"))
            _install_requires.append("xformers>=0.0.23")
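As an aside on the hunk above: `version("torch")` comes from `importlib.metadata`, so the requirement list is derived from whatever torch build is already installed. A minimal sketch of that gating logic (version values are illustrative):

```python
# Sketch of the setup.py version gate: read the installed torch version and
# swap the xformers pin for torch 2.1.x. Version strings are illustrative.
from importlib.metadata import version

_install_requires = ["xformers==0.0.22"]

torch_version = version("torch")  # e.g. "2.1.1"
if torch_version.startswith("2.1."):
    _install_requires.pop(_install_requires.index("xformers==0.0.22"))
    _install_requires.append("xformers>=0.0.23")
print(_install_requires)  # ['xformers>=0.0.23'] on torch 2.1.x
```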
@@ -51,7 +50,7 @@ setup(
    dependency_links=dependency_links,
    extras_require={
        "flash-attn": [
-            "flash-attn==2.5.0",
+            "flash-attn==2.3.3",
        ],
        "fused-dense-lib": [
            "fused-dense-lib @ git+https://github.com/Dao-AILab/flash-attention@v2.3.3#subdirectory=csrc/fused_dense_lib",
@@ -8,17 +8,15 @@ import importlib
 import logging
 import math
 import sys
-import typing
 from abc import abstractmethod
 from dataclasses import dataclass, field
-from functools import wraps, partial
+from functools import wraps
 from pathlib import Path
-from typing import Dict, List, Optional, Tuple, Type, Union
+from typing import List, Optional, Type, Union

 import torch
 import transformers
 from datasets import Dataset
-from torch import nn
 from torch.optim.lr_scheduler import OneCycleLR
 from torch.utils.data import BatchSampler, DataLoader, RandomSampler, SequentialSampler
 from transformers import (
@@ -31,7 +29,6 @@ from transformers.trainer_utils import seed_worker
 from trl import DPOTrainer

 from axolotl.monkeypatch.relora import ReLoRACallback, ReLoRAScheduler
-from axolotl.monkeypatch.utils import get_cu_seqlens_from_pos_ids
 from axolotl.utils.callbacks import (
     EvalFirstStepCallback,
     GPUStatsCallback,
@@ -53,39 +50,15 @@ from axolotl.utils.schedulers import (
     get_cosine_schedule_with_min_lr,
     get_cosine_schedule_with_quadratic_warmup,
 )
-from axolotl.utils.tensors import keep_unpacked_data, split_and_pad_packed

 try:
     import torch._dynamo  # pylint: disable=ungrouped-imports
 except ImportError:
     pass

-if typing.TYPE_CHECKING:
-    # hacky, but recommended per https://github.com/python/mypy/issues/5837
-    _MixinTrainerBase = Trainer
-else:
-    _MixinTrainerBase = object
-
-
 LOG = logging.getLogger("axolotl.core.trainer_builder")


-def _sanitize_kwargs_for_tagging(tag_names, kwargs=None):
-    if isinstance(tag_names, str):
-        tag_names = [tag_names]
-
-    if kwargs is not None:
-        if "tags" not in kwargs:
-            kwargs["tags"] = tag_names
-        elif "tags" in kwargs and isinstance(kwargs["tags"], list):
-            kwargs["tags"].extend(tag_names)
-        elif "tags" in kwargs and isinstance(kwargs["tags"], str):
-            tag_names.append(kwargs["tags"])
-            kwargs["tags"] = tag_names
-
-    return kwargs
-
-
 @dataclass
 class AxolotlTrainingArguments(TrainingArguments):
     """
@@ -164,142 +137,7 @@ class AxolotlTrainingArguments(TrainingArguments):
     )


-class AxolotlMultiPackTrainerMixin(_MixinTrainerBase):  # type: ignore
-    """Trainer Mixin class for dataloaders and samplers"""
-
-    args = None  # type: AxolotlTrainingArguments
-
-    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
-        if self.args.sample_packing and not self.args.pretraining:
-            return MultipackBatchSampler(
-                RandomSampler(self.train_dataset),
-                self.args.train_batch_size,
-                drop_last=True,
-                batch_max_len=self._train_batch_size * self.args.max_seq_length,
-                lengths=get_dataset_lengths(self.train_dataset),
-                packing_efficiency_estimate=self.args.sample_packing_efficiency,
-            )
-        return super()._get_train_sampler()
-
-    def get_train_dataloader(self) -> DataLoader:
-        if self.args.sample_packing and not self.args.pretraining:
-            train_dataset = self.train_dataset
-            if "length" in train_dataset.features.keys():
-                train_dataset = train_dataset.remove_columns(["length"])
-            data_collator = self.data_collator
-            dataloader_params = {
-                "batch_size": self._train_batch_size,
-                "collate_fn": data_collator,
-                "num_workers": self.args.dataloader_num_workers,
-                "pin_memory": self.args.dataloader_pin_memory,
-            }
-            if self.args.dataloader_prefetch_factor:
-                dataloader_params[
-                    "prefetch_factor"
-                ] = self.args.dataloader_prefetch_factor
-
-            sampler = self._get_train_sampler()
-            if isinstance(sampler, BatchSampler):
-                dataloader_params["batch_sampler"] = sampler
-                del dataloader_params["batch_size"]
-            else:
-                dataloader_params["sampler"] = sampler
-                dataloader_params["drop_last"] = self.args.dataloader_drop_last
-            dataloader_params["worker_init_fn"] = seed_worker
-
-            self.accelerator.even_batches = False
-            return self.accelerator.prepare_data_loader(
-                DataLoader(train_dataset, **dataloader_params)
-            )
-        return super().get_train_dataloader()
-
-    def _get_eval_sampler(
-        self, eval_dataset: Dataset
-    ) -> Optional[torch.utils.data.Sampler]:
-        if self.args.sample_packing and self.args.eval_sample_packing is not False:
-            return MultipackBatchSampler(
-                SequentialSampler(eval_dataset),
-                self.args.per_device_eval_batch_size,
-                drop_last=True,
-                batch_max_len=self.args.eval_batch_size * self.args.max_seq_length,
-                lengths=get_dataset_lengths(eval_dataset),
-                packing_efficiency_estimate=self.args.sample_packing_efficiency,
-            )
-        return super()._get_eval_sampler(eval_dataset)
-
-    def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
-        if self.args.sample_packing and self.args.eval_sample_packing is False:
-            self.data_collator = (  # pylint: disable=attribute-defined-outside-init
-                self.eval_data_collator
-            )
-            dataloader = super().get_eval_dataloader(eval_dataset)
-            self.data_collator = (  # pylint: disable=attribute-defined-outside-init
-                self.train_data_collator
-            )
-            return dataloader
-
-        if self.args.sample_packing and self.args.eval_sample_packing is not False:
-            eval_dataset = (
-                eval_dataset if eval_dataset is not None else self.eval_dataset
-            )
-
-            eval_sampler = self._get_eval_sampler(eval_dataset)
-            eval_dataset = eval_dataset.remove_columns(["length"])
-            data_collator = self.data_collator
-            dataloader_params = {
-                "batch_size": self.args.eval_batch_size,
-                "collate_fn": data_collator,
-                "num_workers": self.args.dataloader_num_workers,
-                "pin_memory": self.args.dataloader_pin_memory,
-            }
-            if self.args.dataloader_prefetch_factor:
-                dataloader_params[
-                    "prefetch_factor"
-                ] = self.args.dataloader_prefetch_factor
-
-            if isinstance(eval_sampler, BatchSampler):
-                dataloader_params["batch_sampler"] = eval_sampler
-                del dataloader_params["batch_size"]
-            else:
-                dataloader_params["sampler"] = eval_sampler
-                dataloader_params["drop_last"] = self.args.dataloader_drop_last
-
-            self.accelerator.even_batches = False
-            return self.accelerator.prepare_data_loader(
-                DataLoader(eval_dataset, **dataloader_params)
-            )
-
-        return super().get_eval_dataloader(eval_dataset)
-
-    def _get_bench_sampler(
-        self, bench_dataset: Dataset
-    ) -> Optional[torch.utils.data.Sampler]:
-        if self.args.world_size <= 1:
-            return SequentialSampler(bench_dataset)
-        return None
-
-    def get_bench_dataloader(
-        self,
-        bench_dataset: Dataset,
-    ) -> DataLoader:
-        dataloader_params = {
-            "batch_size": self.args.eval_batch_size,
-            "collate_fn": self.bench_data_collator,
-            "num_workers": self.args.dataloader_num_workers,
-            "pin_memory": self.args.dataloader_pin_memory,
-        }
-        if self.args.dataloader_prefetch_factor:
-            dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor
-
-        if not isinstance(bench_dataset, torch.utils.data.IterableDataset):
-            dataloader_params["sampler"] = self._get_bench_sampler(bench_dataset)
-            dataloader_params["drop_last"] = self.args.dataloader_drop_last
-
-        return DataLoader(bench_dataset, **dataloader_params)
-        # return self.accelerator.prepare(DataLoader(bench_dataset, **dataloader_params))
-
-
-class AxolotlTrainer(AxolotlMultiPackTrainerMixin, Trainer):
+class AxolotlTrainer(Trainer):
     """
     Extend the base Trainer for axolotl helpers
     """
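For context on the sampler wiring being moved around here: `MultipackBatchSampler` yields whole batches of indices, so it has to be handed to `DataLoader` as `batch_sampler`, which is mutually exclusive with `batch_size` — hence the `del dataloader_params["batch_size"]` in both versions. A minimal standalone sketch, with a stock `BatchSampler` standing in for `MultipackBatchSampler`:

```python
# Minimal sketch of the batch_sampler wiring above: a sampler that yields
# whole batches must be passed as batch_sampler, and batch_size dropped.
import torch
from torch.utils.data import BatchSampler, DataLoader, RandomSampler, TensorDataset

dataset = TensorDataset(torch.arange(12).float())
# stand-in for MultipackBatchSampler, which also yields lists of indices
sampler = BatchSampler(RandomSampler(dataset), batch_size=4, drop_last=True)

dataloader_params = {"batch_size": 4, "num_workers": 0}
if isinstance(sampler, BatchSampler):
    dataloader_params["batch_sampler"] = sampler
    del dataloader_params["batch_size"]  # DataLoader rejects both together
else:
    dataloader_params["sampler"] = sampler

for (batch,) in DataLoader(dataset, **dataloader_params):
    print(batch.shape)  # torch.Size([4])
```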
@@ -373,6 +211,135 @@ class AxolotlTrainer(AxolotlMultiPackTrainerMixin, Trainer):

         return self.lr_scheduler

+    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
+        if self.args.sample_packing and not self.args.pretraining:
+            return MultipackBatchSampler(
+                RandomSampler(self.train_dataset),
+                self.args.train_batch_size,
+                drop_last=True,
+                batch_max_len=self._train_batch_size * self.args.max_seq_length,
+                lengths=get_dataset_lengths(self.train_dataset),
+                packing_efficiency_estimate=self.args.sample_packing_efficiency,
+            )
+        return super()._get_train_sampler()
+
+    def _get_eval_sampler(
+        self, eval_dataset: Dataset
+    ) -> Optional[torch.utils.data.Sampler]:
+        if self.args.sample_packing and self.args.eval_sample_packing is not False:
+            return MultipackBatchSampler(
+                SequentialSampler(eval_dataset),
+                self.args.per_device_eval_batch_size,
+                drop_last=True,
+                batch_max_len=self.args.eval_batch_size * self.args.max_seq_length,
+                lengths=get_dataset_lengths(eval_dataset),
+                packing_efficiency_estimate=self.args.sample_packing_efficiency,
+            )
+        return super()._get_eval_sampler(eval_dataset)
+
+    def get_train_dataloader(self) -> DataLoader:
+        if self.args.sample_packing and not self.args.pretraining:
+            train_dataset = self.train_dataset
+            if "length" in train_dataset.features.keys():
+                train_dataset = train_dataset.remove_columns(["length"])
+            data_collator = self.data_collator
+            dataloader_params = {
+                "batch_size": self._train_batch_size,
+                "collate_fn": data_collator,
+                "num_workers": self.args.dataloader_num_workers,
+                "pin_memory": self.args.dataloader_pin_memory,
+            }
+            if self.args.dataloader_prefetch_factor:
+                dataloader_params[
+                    "prefetch_factor"
+                ] = self.args.dataloader_prefetch_factor
+
+            sampler = self._get_train_sampler()
+            if isinstance(sampler, BatchSampler):
+                dataloader_params["batch_sampler"] = sampler
+                del dataloader_params["batch_size"]
+            else:
+                dataloader_params["sampler"] = sampler
+                dataloader_params["drop_last"] = self.args.dataloader_drop_last
+            dataloader_params["worker_init_fn"] = seed_worker
+
+            self.accelerator.even_batches = False
+            return self.accelerator.prepare_data_loader(
+                DataLoader(train_dataset, **dataloader_params)
+            )
+        return super().get_train_dataloader()
+
+    def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
+        if self.args.sample_packing and self.args.eval_sample_packing is False:
+            self.data_collator = (  # pylint: disable=attribute-defined-outside-init
+                self.eval_data_collator
+            )
+            dataloader = super().get_eval_dataloader(eval_dataset)
+            self.data_collator = (  # pylint: disable=attribute-defined-outside-init
+                self.train_data_collator
+            )
+            return dataloader
+
+        if self.args.sample_packing and self.args.eval_sample_packing is not False:
+            eval_dataset = (
+                eval_dataset if eval_dataset is not None else self.eval_dataset
+            )
+
+            eval_sampler = self._get_eval_sampler(eval_dataset)
+            eval_dataset = eval_dataset.remove_columns(["length"])
+            data_collator = self.data_collator
+            dataloader_params = {
+                "batch_size": self.args.eval_batch_size,
+                "collate_fn": data_collator,
+                "num_workers": self.args.dataloader_num_workers,
+                "pin_memory": self.args.dataloader_pin_memory,
+            }
+            if self.args.dataloader_prefetch_factor:
+                dataloader_params[
+                    "prefetch_factor"
+                ] = self.args.dataloader_prefetch_factor
+
+            if isinstance(eval_sampler, BatchSampler):
+                dataloader_params["batch_sampler"] = eval_sampler
+                del dataloader_params["batch_size"]
+            else:
+                dataloader_params["sampler"] = eval_sampler
+                dataloader_params["drop_last"] = self.args.dataloader_drop_last
+
+            self.accelerator.even_batches = False
+            return self.accelerator.prepare_data_loader(
+                DataLoader(eval_dataset, **dataloader_params)
+            )
+
+        return super().get_eval_dataloader(eval_dataset)
+
+    def _get_bench_sampler(
+        self, bench_dataset: Dataset
+    ) -> Optional[torch.utils.data.Sampler]:
+        if self.args.world_size <= 1:
+            return SequentialSampler(bench_dataset)
+        return None
+
+    def get_bench_dataloader(
+        self,
+        bench_dataset: Dataset,
+    ) -> DataLoader:
+        dataloader_params = {
+            "batch_size": self.args.eval_batch_size,
+            "collate_fn": self.bench_data_collator,
+            "num_workers": self.args.dataloader_num_workers,
+            "pin_memory": self.args.dataloader_pin_memory,
+        }
+        if self.args.dataloader_prefetch_factor:
+            dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor
+
+        if not isinstance(bench_dataset, torch.utils.data.IterableDataset):
+            dataloader_params["sampler"] = self._get_bench_sampler(bench_dataset)
+            dataloader_params["drop_last"] = self.args.dataloader_drop_last
+
+        return DataLoader(bench_dataset, **dataloader_params)
+        # return self.accelerator.prepare(DataLoader(bench_dataset, **dataloader_params))
+
     def compute_loss(self, model, inputs, return_outputs=False):
         # use one's weighted cross entropy loss calc
         # if self.args.sample_packing:
@@ -382,13 +349,30 @@ class AxolotlTrainer(AxolotlMultiPackTrainerMixin, Trainer):
         # return (loss, outputs) if return_outputs else loss
         return super().compute_loss(model, inputs, return_outputs=return_outputs)

+    def _sanitize_kwargs_for_tagging(self, tag_names, kwargs=None):
+        if isinstance(tag_names, str):
+            tag_names = [tag_names]
+
+        if kwargs is not None:
+            if "tags" not in kwargs:
+                kwargs["tags"] = tag_names
+            elif "tags" in kwargs and isinstance(kwargs["tags"], list):
+                kwargs["tags"].extend(tag_names)
+            elif "tags" in kwargs and isinstance(kwargs["tags"], str):
+                tag_names.append(kwargs["tags"])
+                kwargs["tags"] = tag_names
+
+        return kwargs
+
     @wraps(Trainer.push_to_hub)
     def push_to_hub(self, *args, **kwargs) -> str:
         """
         Overwrite the `push_to_hub` method in order to force-add the tags when pushing the
         model on the Hub. Please refer to `~transformers.Trainer.push_to_hub` for more details.
         """
-        kwargs = _sanitize_kwargs_for_tagging(tag_names=self.tag_names, kwargs=kwargs)
+        kwargs = self._sanitize_kwargs_for_tagging(
+            tag_names=self.tag_names, kwargs=kwargs
+        )

         return super().push_to_hub(*args, **kwargs)
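The change above only moves the tag-merging helper from module scope onto the trainer as a method; its behavior is unchanged. A standalone copy to show what it does with the common cases:

```python
# Standalone copy of the tag-merging helper shown in the hunk above.
def _sanitize_kwargs_for_tagging(tag_names, kwargs=None):
    if isinstance(tag_names, str):
        tag_names = [tag_names]

    if kwargs is not None:
        if "tags" not in kwargs:
            kwargs["tags"] = tag_names
        elif isinstance(kwargs["tags"], list):
            kwargs["tags"].extend(tag_names)
        elif isinstance(kwargs["tags"], str):
            tag_names.append(kwargs["tags"])
            kwargs["tags"] = tag_names

    return kwargs

print(_sanitize_kwargs_for_tagging(["axolotl"], {}))              # {'tags': ['axolotl']}
print(_sanitize_kwargs_for_tagging(["axolotl"], {"tags": "dpo"})) # {'tags': ['axolotl', 'dpo']}
```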
@@ -487,77 +471,6 @@ class ReLoRATrainer(AxolotlTrainer):
         return self.lr_scheduler


-class AxolotlDPOTrainer(AxolotlMultiPackTrainerMixin, DPOTrainer):
-    """
-    Extend the base DPOTrainer for axolotl helpers
-    """
-
-    tag_names = ["axolotl", "dpo"]
-
-    @wraps(DPOTrainer.push_to_hub)
-    def push_to_hub(self, *args, **kwargs) -> str:
-        """
-        Overwrite the `push_to_hub` method in order to force-add the tags when pushing the
-        model on the Hub. Please refer to `~transformers.Trainer.push_to_hub` for more details.
-        """
-        kwargs = _sanitize_kwargs_for_tagging(tag_names=self.tag_names, kwargs=kwargs)
-
-        return super().push_to_hub(*args, **kwargs)
-
-    def tokenize_row(self, feature, *args, **kwargs) -> Dict:
-        # check if dataset is already tokenized
-        if not self.is_encoder_decoder:
-            keys = [
-                "chosen_input_ids",
-                "chosen_attention_mask",
-                "chosen_labels",
-                "rejected_input_ids",
-                "rejected_attention_mask",
-                "rejected_labels",
-            ]
-            if all(k in feature.keys() for k in keys):
-                return feature
-        else:
-            keys = [
-                "chosen_labels",
-                "rejected_labels",
-                "prompt_input_ids",
-                "prompt_attention_mask",
-            ]
-            if all(k in feature.keys() for k in keys):
-                return feature
-        return super().tokenize_row(feature, *args, **kwargs)
-
-    def concatenated_forward(
-        self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]
-    ) -> Tuple[
-        torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
-    ]:
-        all_logits = model(
-            batch["input_ids"],
-            attention_mask=batch["attention_mask"],
-            position_ids=batch["position_ids"],
-        ).logits
-        cu_seqlens, max_seqlen = get_cu_seqlens_from_pos_ids(batch["position_ids"])
-        logits_keep_fn = partial(keep_unpacked_data, pad_val=None, pairs=True)
-        unpacked_logits = split_and_pad_packed(all_logits, cu_seqlens, max_seqlen, logits_keep_fn)
-        labels_keep_fn = partial(keep_unpacked_data, pad_val=-100, pairs=True)
-        unpacked_labels = split_and_pad_packed(batch["labels"], cu_seqlens, max_seqlen, labels_keep_fn)
-        unpacked_logps = self.get_batch_logps(
-            unpacked_logits,
-            unpacked_labels,
-            average_log_prob=self.loss_type == "ipo",
-            is_encoder_decoder=self.is_encoder_decoder,
-            label_pad_token_id=self.label_pad_token_id,
-        )
-        chosen_logps = unpacked_logps[::2]
-        rejected_logps = unpacked_logps[1::2]
-        chosen_logits = unpacked_logits[::2]
-        rejected_logits = unpacked_logits[1::2]
-
-        return (chosen_logps, rejected_logps, chosen_logits, rejected_logits)
-
-
 class TrainerBuilderBase(abc.ABC):
     """
     Base class for trainer builder
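One convention worth calling out in the removed `concatenated_forward`: chosen/rejected pairs are assumed to be packed interleaved, so after unpacking, even indices are the chosen sequences and odd indices the rejected ones. A tiny illustration of that `[::2]` / `[1::2]` split:

```python
# The [::2] / [1::2] split in the removed concatenated_forward assumes the
# packed order c0, r0, c1, r1, ... (chosen/rejected interleaved).
import torch

unpacked_logps = torch.tensor([-1.2, -3.4, -0.8, -2.9])  # c0, r0, c1, r1
chosen_logps = unpacked_logps[::2]     # tensor([-1.2000, -0.8000])
rejected_logps = unpacked_logps[1::2]  # tensor([-3.4000, -2.9000])
print(chosen_logps, rejected_logps)
```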
@@ -805,7 +718,7 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         elif self.cfg.sample_packing and self.cfg.eval_sample_packing is False:
             training_arguments_kwargs["dataloader_drop_last"] = True

-        if not self.cfg.test_datasets and self.cfg.val_set_size == 0:
+        if self.cfg.val_set_size == 0:
             # no eval set, so don't eval
             training_arguments_kwargs["evaluation_strategy"] = "no"
         elif self.cfg.eval_steps:
@@ -892,7 +805,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
                 self.cfg.load_best_model_at_end is not False
                 or self.cfg.early_stopping_patience
             )
-            and not self.cfg.test_datasets
             and self.cfg.val_set_size > 0
             and self.cfg.save_steps
             and self.cfg.eval_steps
@@ -1164,7 +1076,7 @@ class HFDPOTrainerBuilder(TrainerBuilderBase):
        dpo_trainer_kwargs[
            "precompute_ref_log_probs"
        ] = self.cfg.precompute_ref_log_probs
-        dpo_trainer = AxolotlDPOTrainer(
+        dpo_trainer = DPOTrainer(
            self.model,
            self.model_ref,
            args=training_args,
@@ -1178,7 +1090,6 @@ class HFDPOTrainerBuilder(TrainerBuilderBase):
            callbacks=self.get_callbacks(),
            **dpo_trainer_kwargs,
        )
-        setattr(dpo_trainer, "use_dpo_data_collator", True)
        dpo_trainer = self.hook_post_create_trainer(dpo_trainer)
        for callback in self.get_post_trainer_create_callbacks(dpo_trainer):
            dpo_trainer.add_callback(callback)
@@ -94,7 +94,7 @@ def _prepare_decoder_attention_mask(
     sliding_window,
 ):  # pylint: disable=unused-argument
     # [bsz, seq_len]
-    if attention_mask is None or sliding_window is None:
+    if attention_mask is None:
         return attention_mask

     # NOTE: attention mask and sliding masks are only broadcastable in certain scenarios.
@@ -151,7 +151,7 @@ def flashattn_forward(
     )

     use_sliding_windows = (
-        getattr(self.config, "sliding_window") is not None
+        hasattr(self.config, "sliding_window") is not None
         and kv_seq_len > self.config.sliding_window
     )

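A side note on the right-hand version of this condition: `hasattr` returns a bool, and a bool is never `None`, so `hasattr(...) is not None` is always `True`; only the `getattr` form actually inspects the attribute's value:

```python
# hasattr() returns a bool, so the first check is always True; getattr()
# retrieves the value and can therefore see a None sliding_window.
class Config:
    sliding_window = None

config = Config()
print(hasattr(config, "sliding_window") is not None)  # True, unconditionally
print(getattr(config, "sliding_window") is not None)  # False: value is None
```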
@@ -178,9 +178,6 @@ class V2BatchSamplerDataCollatorForSeq2Seq(DataCollatorForSeq2Seq):
         features = [chunked_data]
         return super().__call__(features, return_tensors=return_tensors)

-@dataclass
-class BatchSamplerDPODataCollatorWithPadding:
-

 @dataclass
 class MambaDataCollator:
@@ -232,6 +232,9 @@ def validate_config(cfg):
             "eval_batch_size != micro_batch_size. This can lead to VRAM instability."
         )

+    if cfg.load_4bit:
+        raise ValueError("cfg.load_4bit parameter has been deprecated")
+
     if cfg.adapter == "qlora":
         if cfg.merge_lora:
             # can't merge qlora if loaded in 8bit or 4bit
@@ -257,8 +260,7 @@ def validate_config(cfg):
         if cfg.flash_attn_fuse_qkv or cfg.flash_attn_fuse_mlp:
             raise ValueError("Fused modules are not supported with QLoRA")

-    loftq = cfg.peft and cfg.peft.loftq_config and cfg.peft.loftq_config.loftq_bits
-    if not cfg.load_in_8bit and cfg.adapter == "lora" and not loftq:
+    if not cfg.load_in_8bit and cfg.adapter == "lora":
         LOG.warning("We recommend setting `load_in_8bit: true` for LORA finetuning")

     if cfg.adapter == "lora" and (cfg.flash_attn_fuse_qkv or cfg.flash_attn_fuse_mlp):
@@ -338,11 +340,6 @@ def validate_config(cfg):
             "push_to_hub_model_id is deprecated. Please use hub_model_id instead."
         )

-    if cfg.hub_model_id and not (cfg.save_steps or cfg.saves_per_epoch):
-        LOG.warning(
-            "hub_model_id is set without any models being saved. To save a model, set either save_steps or saves_per_epoch."
-        )
-
     if cfg.gptq and cfg.model_revision:
         raise ValueError(
             "model_revision is not supported for GPTQ models. "
@@ -440,7 +440,7 @@ def load_prepare_datasets(
     split="train",
 ) -> Tuple[Dataset, Dataset, List[Prompter]]:
     dataset, prompters = load_tokenized_prepared_datasets(
-        tokenizer, cfg, default_dataset_prepared_path, split=split
+        tokenizer, cfg, default_dataset_prepared_path
     )

     if cfg.dataset_shard_num and cfg.dataset_shard_idx is not None:
@@ -9,7 +9,7 @@ import bitsandbytes as bnb
 import torch
 import transformers
 from optimum.bettertransformer import BetterTransformer
-from peft import LoftQConfig, PeftConfig, prepare_model_for_kbit_training
+from peft import PeftConfig, prepare_model_for_kbit_training
 from peft.tuners.lora import QuantLinear
 from transformers import (  # noqa: F401
     AddedToken,
@@ -667,17 +667,13 @@ def load_model(
         # Qwen doesn't play nicely with LoRA if this is enabled
         skip_prepare_model_for_kbit_training = True

-    loftq_bits = cfg.peft and cfg.peft.loftq_config and cfg.peft.loftq_config.loftq_bits
-    if cfg.adapter == "lora" and loftq_bits:
-        skip_prepare_model_for_kbit_training = True
-
-    if cfg.adapter in ["lora", "qlora"]:
+    if (cfg.adapter == "lora" and load_in_8bit) or (
+        cfg.adapter == "qlora" and cfg.load_in_4bit
+    ):
+        LOG.info("converting PEFT model w/ prepare_model_for_kbit_training")
         if cfg.gradient_checkpointing:
             model.gradient_checkpointing_enable()
-        if (
-            cfg.load_in_8bit or cfg.load_in_4bit
-        ) and not skip_prepare_model_for_kbit_training:
-            LOG.info("converting PEFT model w/ prepare_model_for_kbit_training")
+        if not skip_prepare_model_for_kbit_training:
             model = prepare_model_for_kbit_training(
                 model, use_gradient_checkpointing=cfg.gradient_checkpointing
             )
@@ -704,7 +700,6 @@ def load_model(
     model, lora_config = load_adapter(model, cfg, cfg.adapter)

     if cfg.ddp and not load_in_8bit and not (cfg.rl and cfg.load_in_4bit):
-        # TODO revaldate this conditional
         model.to(f"cuda:{cfg.local_rank}")

     if torch.cuda.device_count() > 1 and int(os.getenv("WORLD_SIZE", "1")) == 1:
@@ -756,7 +751,7 @@ def load_llama_adapter(model, cfg):
     )

     if cfg.lora_model_dir:
-        LOG.debug("Loading pretrained PEFT - llama_adapter")
+        LOG.debug("Loading pretained PEFT - llama_adapter")
         model = PeftModel.from_pretrained(
             model,
             cfg.lora_model_dir,
@@ -802,12 +797,6 @@ def load_lora(model, cfg, inference=False, config_only=False):
         LOG.info(f"found linear modules: {repr(linear_names)}")
         lora_target_modules = list(set(lora_target_modules + linear_names))

-    lora_config_kwargs = {}
-    loftq_bits = cfg.peft and cfg.peft.loftq_config and cfg.peft.loftq_config.loftq_bits
-    if loftq_bits:
-        lora_config_kwargs["loftq_config"] = LoftQConfig(loftq_bits=loftq_bits)
-        lora_config_kwargs["init_lora_weights"] = "loftq"
-
     lora_config = LoraConfig(
         r=cfg.lora_r,
         lora_alpha=cfg.lora_alpha,
@@ -818,14 +807,13 @@ def load_lora(model, cfg, inference=False, config_only=False):
         modules_to_save=cfg.lora_modules_to_save if cfg.lora_modules_to_save else None,
         bias="none",
         task_type="CAUSAL_LM",
-        **lora_config_kwargs,
     )

     if config_only:
         return None, lora_config

     if cfg.lora_model_dir:
-        LOG.debug("Loading pretrained PEFT - LoRA")
+        LOG.debug("Loading pretained PEFT - LoRA")
         model_kwargs: Any = {}
         if cfg.lora_on_cpu:
             model_kwargs["max_memory"] = {"cpu": "256GiB"}
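The two hunks above drop the LoftQ plumbing from `load_lora`. For reference, the wiring being removed follows peft's documented LoftQ initialization; a minimal sketch (hyperparameter values are illustrative, not taken from the diff):

```python
# Sketch of the removed LoftQ wiring: when loftq_bits is configured, pass a
# LoftQConfig and switch init_lora_weights to "loftq". Values illustrative.
from peft import LoftQConfig, LoraConfig

lora_config_kwargs = {}
loftq_bits = 4  # e.g. from cfg.peft.loftq_config.loftq_bits
if loftq_bits:
    lora_config_kwargs["loftq_config"] = LoftQConfig(loftq_bits=loftq_bits)
    lora_config_kwargs["init_lora_weights"] = "loftq"

lora_config = LoraConfig(
    r=32,
    lora_alpha=16,
    task_type="CAUSAL_LM",
    **lora_config_kwargs,
)
```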
@@ -1,61 +0,0 @@
-import torch
-import torch.nn.functional as F
-
-
-def keep_unpacked_data(data: torch.Tensor, index=None, nonzero_total=None, pad_val=None, pairs=False):
-    # pad val could be padding token (input_ids), -100 (labels), or 0 (attention_mask)
-    if index >= nonzero_total:
-        return False
-    if pairs and (index // 2) >= (nonzero_total // 2):
-        return False
-    if pad_val and (data == pad_val).all(dim=0).all():
-        return False
-    return True
-
-
-def split_and_pad_packed(tensor, cu_seqlens, max_seqlen, keep_fn=None):
-    split_tensors = []
-
-    counts = count_nonzero_sequences(cu_seqlens)
-    # Iterate over each batch
-    for i in range(tensor.size(0)):
-        seq_lens = cu_seqlens[i]
-        start_idx = 0
-
-        # Iterate over the cumulative sequence lengths
-        for j, end_idx in enumerate(seq_lens[1:]):
-            if end_idx == start_idx:
-                break
-            # Extract and pad the current sequence
-            current_seq = tensor[i, start_idx:end_idx]
-            keep = True
-            if keep_fn:
-                keep = keep_fn(current_seq, index=j, nonzero_total=counts[i])
-            if not keep:
-                continue
-            padding_size = max_seqlen - current_seq.size(0)
-            padded_seq = F.pad(current_seq, (0, 0) * (current_seq.dim() - 2) + (0, padding_size))
-
-            # Append the padded sequence to the list
-            split_tensors.append(padded_seq)
-
-            # Update start index for the next sequence
-            start_idx = end_idx
-
-    # Stack the padded tensors
-    return torch.stack(split_tensors, dim=0)
-
-
-def count_nonzero_sequences(cu_seqlens: torch.Tensor) -> torch.LongTensor:
-    diffs = torch.diff(cu_seqlens, dim=1, prepend=torch.zeros(cu_seqlens.shape[0], 1, dtype=cu_seqlens.dtype))
-    valid_lengths = diffs != 0
-    counts = valid_lengths.sum(dim=1).long()
-
-    return counts
-
-
-# Example usage
-# Example tensor with dimensions [batch_size, seq_len, other_dimensions...]
-# example_tensor = torch.randn(batch_size, seq_len, other_dimensions...)
-# cu_seqlens, max_seqlen = get_cu_seqlens_from_pos_ids(batch["position_ids"])
-# split_padded_tensor = split_and_pad_packed(example_tensor, cu_seqlens, max_seqlen)
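A worked example of the `cu_seqlens` convention the removed helpers rely on: each row holds cumulative sequence boundaries, and a repeated trailing value marks unused slots, which is exactly what `count_nonzero_sequences` filters out:

```python
# Worked example of count_nonzero_sequences from the removed file: rows of
# cumulative boundaries; zero-length diffs (repeated values) are not counted.
import torch

cu_seqlens = torch.tensor([[0, 3, 4, 4], [0, 2, 4, 4]])
diffs = torch.diff(
    cu_seqlens, dim=1, prepend=torch.zeros(cu_seqlens.shape[0], 1, dtype=cu_seqlens.dtype)
)
print(diffs)                    # tensor([[0, 3, 1, 0], [0, 2, 2, 0]])
print((diffs != 0).sum(dim=1))  # tensor([2, 2]): two packed sequences per row
```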
@@ -39,6 +39,32 @@ class TestExpandMask(unittest.TestCase):
         # Check that the output matches the expected output
         self.assertTrue(torch.allclose(_expand_mask(mask, dtype), expected_output))

+    def test_output_multipack(self):
+        mask = torch.tensor([[1, 1, 1, 0], [2, 2, 3, 3]])
+        dtype = torch.float32
+        expected_output = torch.tensor(
+            [
+                [
+                    [
+                        [0.0000e00, -3.4028e38, -3.4028e38, -3.4028e38],
+                        [0.0000e00, 0.0000e00, -3.4028e38, -3.4028e38],
+                        [0.0000e00, 0.0000e00, 0.0000e00, -3.4028e38],
+                        [-3.4028e38, -3.4028e38, -3.4028e38, -3.4028e38],
+                    ]
+                ],
+                [
+                    [
+                        [0.0000e00, -3.4028e38, -3.4028e38, -3.4028e38],
+                        [0.0000e00, 0.0000e00, -3.4028e38, -3.4028e38],
+                        [-3.4028e38, -3.4028e38, 0.0000e00, -3.4028e38],
+                        [-3.4028e38, -3.4028e38, 0.0000e00, 0.0000e00],
+                    ]
+                ],
+            ]
+        )
+        # Check that the output matches the expected output
+        self.assertTrue(torch.allclose(_expand_mask(mask, dtype), expected_output))
+

 if __name__ == "__main__":
     unittest.main()
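What the new test asserts, in miniature: when a mask row carries packed-sequence ids (e.g. `[2, 2, 3, 3]`; id `0` marks padding), the expanded mask must be causal within each packed sequence and fully blocked across sequences — a block-diagonal causal mask:

```python
# The expected_output above is equivalent to this boolean mask, with allowed
# positions mapped to 0.0 and blocked positions to a large negative number.
import torch

ids = torch.tensor([2, 2, 3, 3])                       # two packed sequences
causal = torch.tril(torch.ones(4, 4, dtype=torch.bool))
same_seq = ids.unsqueeze(0) == ids.unsqueeze(1)        # same packed sequence?
allowed = causal & same_seq
print(allowed.int())
# tensor([[1, 0, 0, 0],
#         [1, 1, 0, 0],
#         [0, 0, 1, 0],
#         [0, 0, 1, 1]], dtype=torch.int32)
```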
@@ -26,12 +26,21 @@ class BaseValidation(unittest.TestCase):
         self._caplog = caplog


-# pylint: disable=too-many-public-methods
 class ValidationTest(BaseValidation):
     """
     Test the validation module
     """

+    def test_load_4bit_deprecate(self):
+        cfg = DictDefault(
+            {
+                "load_4bit": True,
+            }
+        )
+
+        with pytest.raises(ValueError):
+            validate_config(cfg)
+
     def test_batch_size_unused_warning(self):
         cfg = DictDefault(
             {
@@ -689,22 +698,6 @@ class ValidationTest(BaseValidation):
         ):
             validate_config(cfg)

-    def test_hub_model_id_save_value_warns(self):
-        cfg = DictDefault({"hub_model_id": "test"})
-
-        with self._caplog.at_level(logging.WARNING):
-            validate_config(cfg)
-        assert (
-            "set without any models being saved" in self._caplog.records[0].message
-        )
-
-    def test_hub_model_id_save_value(self):
-        cfg = DictDefault({"hub_model_id": "test", "saves_per_epoch": 4})
-
-        with self._caplog.at_level(logging.WARNING):
-            validate_config(cfg)
-        assert len(self._caplog.records) == 0
-

 class ValidationCheckModelConfig(BaseValidation):
     """