Compare commits

10 Commits
tui ... llava

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Wing Lian | b52e61a574 | pretrain fixes for mm | 2023-10-30 11:03:55 -04:00 |
| Wing Lian | 53f93f67bb | fix to set training args so projector properly saves | 2023-10-29 06:08:38 -04:00 |
| Wing Lian | ef95ea2977 | additional args for parity, fix to properly save projector during pretrain | 2023-10-29 05:12:34 -04:00 |
| Wing Lian | 1321608dc4 | add docs and tweak yml | 2023-10-28 13:07:59 -04:00 |
| Wing Lian | 7ff30c4033 | wip | 2023-10-25 09:19:19 -04:00 |
| Wing Lian | faa46fbcf8 | fix code for llava parity, add llama yml | 2023-10-24 09:45:47 -04:00 |
| Wing Lian | fdc3e4d505 | more fixes to try to get mm working | 2023-10-23 23:15:33 -04:00 |
| Wing Lian | b885169229 | handle load_model splat | 2023-10-23 21:55:05 -04:00 |
| Wing Lian | ab9d12ce34 | handle dataset loading for multimodal | 2023-10-23 21:44:07 -04:00 |
| Wing Lian | 866774737b | WIP llaval support | 2023-10-23 20:29:49 -04:00 |
13 changed files with 681 additions and 30 deletions

docs/llava.md Normal file
View File

@@ -0,0 +1,36 @@
# LLaVA
### Installing dependencies
```shell
git clone https://github.com/haotian-liu/LLaVA.git
cd LLaVA
pip install --no-deps -e .
```
### Downloading assets
LLaVA doesn't support remote datasets, so both the JSON annotations and the image assets need to be downloaded locally:
```shell
mkdir llava
mkdir data
cd llava
curl -L -O https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain/resolve/main/images.zip
unzip images.zip
cd ../data
curl -L -O https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain/resolve/main/blip_laion_cc_sbu_558k.json
```
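The example configs assume this layout relative to the directory you launch training from; a quick sanity check:
```shell
cd ..
# llava/                             -> mm_image_folder in the example configs
# data/blip_laion_cc_sbu_558k.json   -> datasets[0].path in the example configs
ls llava data
```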
### Pretraining
Pretraining aligns the vision tower with the language model: the backbone stays frozen (`mm_freeze_backbone: true`) and only the multimodal projector is trained (`tune_mm_mlp_adapter: true`).
```shell
accelerate launch -m axolotl.cli.train_mm examples/multimodal/pretrain-llava-llama.yml
```
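The behaviour of this run is driven by the multimodal keys in that config; the relevant subset (taken from `examples/multimodal/pretrain-llava-llama.yml`) is:
```yaml
multimodal: true
mm_vision_tower: openai/clip-vit-large-patch14  # CLIP vision encoder used as the vision tower
tune_mm_mlp_adapter: true                       # train only the multimodal projector
mm_freeze_backbone: true                        # keep the language model frozen
mm_projector_type: mlp2x_gelu
mm_image_folder: ./llava/                       # image folder downloaded above
```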
### Finetuning
TBD

View File

@@ -0,0 +1,66 @@
base_model: NousResearch/Llama-2-7b-hf
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
is_llama_derived_model: true
# multimodal pretrain
multimodal: true
mm_vision_tower: openai/clip-vit-large-patch14
tune_mm_mlp_adapter: true
mm_freeze_backbone: true
mm_vision_select_layer: -2
mm_projector_type: mlp2x_gelu
mm_image_folder: ./llava/
mm_use_im_patch_token: false
load_in_8bit: false
load_in_4bit: false
strict: false
datasets:
- path: ./data/blip_laion_cc_sbu_558k.json
dataset_prepared_path:
val_set_size: 0.0
output_dir: ./out
sequence_len: 2048
sample_packing: false
pad_to_sequence_len: true
wandb_project:
wandb_entity:
wandb_watch:
wandb_run_id:
wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 1
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 0.002
train_on_inputs: false
group_by_length: false
bf16: true
fp16: false
tf32: false
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
warmup_steps: 10
eval_steps:
save_steps: 0.1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
pad_token: "<unk>"

View File

@@ -0,0 +1,66 @@
base_model: mistralai/Mistral-7B-v0.1
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer
is_mistral_derived_model: true
# multimodal pretrain
multimodal: true
mm_vision_tower: openai/clip-vit-large-patch14
tune_mm_mlp_adapter: true
mm_freeze_backbone: true
mm_vision_select_layer: -2
mm_projector_type: mlp2x_gelu
mm_image_folder: ./llava/
mm_use_im_patch_token: false
load_in_8bit: false
load_in_4bit: false
strict: false
datasets:
- path: ./data/blip_laion_cc_sbu_558k.json
dataset_prepared_path:
val_set_size: 0.0
output_dir: ./out
sequence_len: 2048
sample_packing: false
pad_to_sequence_len: true
wandb_project:
wandb_entity:
wandb_watch:
wandb_run_id:
wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 1
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 0.002
train_on_inputs: false
group_by_length: false
bf16: true
fp16: false
tf32: false
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
warmup_steps: 10
eval_steps:
save_steps:
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
pad_token: "<unk>"

View File

@@ -2,6 +2,7 @@
import importlib
import logging
import math
import os
import random
import sys
@@ -215,6 +216,46 @@ def load_cfg(config: Path = Path("examples/"), **kwargs):
return cfg
def load_mm_dataset(
*,
cfg: DictDefault,
cli_args: TrainerCliArgs, # pylint: disable=unused-argument
model,
):
# pylint: disable=duplicate-code
from llava.train.train import DataArguments, LazySupervisedDataset
vision_tower = model.get_vision_tower()
data_args = DataArguments(
data_path=cfg.datasets[0]["path"],
lazy_preprocess=cfg.mm_lazy_preprocess
if cfg.mm_lazy_preprocess is not None
else True,
is_multimodal=True,
image_folder=cfg.mm_image_folder or None,
image_aspect_ratio=cfg.mm_image_aspect_ratio or "square",
image_grid_pinpoints=cfg.mm_image_grid_pinpoints or None,
)
data_args.image_processor = vision_tower.image_processor
data_args.mm_use_im_start_end = cfg.mm_use_im_start_end or False
tokenizer = load_tokenizer(cfg)
train_dataset = LazySupervisedDataset(
tokenizer=tokenizer,
data_path=data_args.data_path,
data_args=data_args,
)
total_num_steps = int(
math.ceil(len(train_dataset) * cfg.num_epochs / cfg.batch_size)
)
return TrainDatasetMeta(
train_dataset=train_dataset,
eval_dataset=None,
total_num_steps=total_num_steps,
)
def load_datasets(
*,
cfg: DictDefault,

View File

@@ -0,0 +1,59 @@
"""
CLI to run training on a model
"""
import logging
from pathlib import Path
import fire
import torch
import transformers
from colorama import Fore
from axolotl.cli import (
check_accelerate_default_config,
check_user_token,
load_cfg,
load_mm_dataset,
print_axolotl_text_art,
)
from axolotl.common.cli import TrainerCliArgs
from axolotl.common.const import DEFAULT_DATASET_PREPARED_PATH
from axolotl.train import train
from axolotl.utils.models import load_model, load_tokenizer
LOG = logging.getLogger("axolotl.cli.train")
def do_cli(config: Path = Path("examples/"), **kwargs):
# pylint: disable=duplicate-code
print_axolotl_text_art()
parsed_cfg = load_cfg(config, **kwargs)
check_accelerate_default_config()
check_user_token()
parser = transformers.HfArgumentParser((TrainerCliArgs))
parsed_cli_args, _ = parser.parse_args_into_dataclasses(
return_remaining_strings=True
)
if parsed_cli_args.prepare_ds_only and not parsed_cfg.dataset_prepared_path:
msg = (
Fore.RED
+ "--prepare_ds_only called without dataset_prepared_path set."
+ Fore.RESET
)
LOG.warning(msg)
parsed_cfg.dataset_prepared_path = DEFAULT_DATASET_PREPARED_PATH
tokenizer = load_tokenizer(parsed_cfg)
model, _ = load_model(parsed_cfg, tokenizer)
dataset_meta = load_mm_dataset(
cfg=parsed_cfg, cli_args=parsed_cli_args, model=model
)
del model
torch.cuda.empty_cache()
if parsed_cli_args.prepare_ds_only:
return
train(cfg=parsed_cfg, cli_args=parsed_cli_args, dataset_meta=dataset_meta)
if __name__ == "__main__":
fire.Fire(do_cli)

View File

@@ -40,6 +40,14 @@ try:
except ImportError:
pass
try:
from llava.train.llava_trainer import get_mm_adapter_state_maybe_zero_3
except ImportError:
def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
raise ImportError("missing LLaVA package")
LOG = logging.getLogger("axolotl.core.trainer_builder")
@@ -102,6 +110,17 @@ class AxolotlTrainingArguments(TrainingArguments):
bench_source_max_len: int = field(
default=2048, metadata={"help": "Maximum source sequence length for bench."}
)
tune_mm_mlp_adapter: bool = field(
default=False,
metadata={"help": "Whether to train the multimodal projector adapter"},
)
freeze_mm_mlp_adapter: bool = field(
default=False,
metadata={"help": "Whether to freeze the multimodal projector adapter"},
)
mm_projector_lr: Optional[float] = field(
default=None, metadata={"help": "Learning rate for the multimodal projector"}
)
class AxolotlTrainer(Trainer):
@@ -243,6 +262,41 @@ class AxolotlTrainer(Trainer):
# return (loss, outputs) if return_outputs else loss
return super().compute_loss(model, inputs, return_outputs=return_outputs)
def _save_checkpoint(self, model, trial, metrics=None):
if getattr(self.args, "tune_mm_mlp_adapter", False):
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
run_dir = self._get_output_dir(trial=trial)
output_dir = os.path.join(run_dir, checkpoint_folder)
weights_to_save = self._get_mm_mlp_adapter_weights()
if self.args.local_rank in (0, -1):
self.model.config.save_pretrained(output_dir)
torch.save(
weights_to_save, os.path.join(output_dir, "mm_projector.bin")
)
else:
super()._save_checkpoint(model, trial, metrics)
def _get_mm_mlp_adapter_weights(self):
# Only save Adapter
keys_to_match = ["mm_projector", "vision_resampler"]
if getattr(self.args, "use_im_start_end", False):
keys_to_match.extend(["embed_tokens", "embed_in"])
return get_mm_adapter_state_maybe_zero_3(
self.model.named_parameters(), keys_to_match
)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
if getattr(self.args, "tune_mm_mlp_adapter", False):
pass
else:
super()._save(output_dir, state_dict)
class OneCycleLRSchedulerTrainer(AxolotlTrainer):
"""
@@ -610,8 +664,17 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
training_arguments_kwargs[
"sample_packing_seq_len_multiplier"
] = self.cfg.micro_batch_size
training_arguments_kwargs["relora_steps"] = self.cfg.relora_steps
training_arguments_kwargs["relora_warmup_steps"] = self.cfg.relora_warmup_steps
# multimodal: llava
training_arguments_kwargs["tune_mm_mlp_adapter"] = self.cfg.tune_mm_mlp_adapter
training_arguments_kwargs[
"freeze_mm_mlp_adapter"
] = self.cfg.freeze_mm_mlp_adapter
training_arguments_kwargs["mm_projector_lr"] = self.cfg.mm_projector_lr
training_arguments_kwargs = self.hook_pre_create_training_args(
training_arguments_kwargs
)
@@ -628,18 +691,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
sys.path.append(self.cfg.torchdistx_path)
importlib.import_module("torchdistx")
data_collator_kwargs = {
"padding": True, # True/"longest" is the default
}
if self.cfg.pad_to_sequence_len:
data_collator_kwargs["pad_to_multiple_of"] = 64 * math.ceil(
self.cfg.sequence_len / 64
)
else:
# A100 is best at 64, while others at 8. Let's use the larger so we don't have to check
# https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html
data_collator_kwargs["pad_to_multiple_of"] = 64
if self.cfg.is_llama_derived_model and self.cfg.landmark_attention:
from axolotl.monkeypatch.llama_landmark_attn import (
add_mem_tokens,
@@ -664,22 +715,15 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
trainer_kwargs, trainer_cls = self.hook_pre_create_trainer(
trainer_kwargs, trainer_cls
)
trainer_collator_kwargs = self.build_data_collator()
trainer = trainer_cls(
model=self.model,
train_dataset=self.train_dataset,
eval_dataset=self.eval_dataset,
args=training_args,
data_collator=DataCollatorForSeq2Seq(
self.tokenizer,
return_tensors="pt",
**data_collator_kwargs,
),
bench_data_collator=transformers.DataCollatorForSeq2Seq(
self.tokenizer,
return_tensors="pt",
**data_collator_kwargs,
),
callbacks=self.get_callbacks(),
**trainer_collator_kwargs,
**trainer_kwargs,
)
trainer = self.hook_post_create_trainer(trainer)
@@ -687,3 +731,41 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
trainer.add_callback(callback)
return trainer
def build_data_collator(self):
data_collator_kwargs = {
"padding": True, # True/"longest" is the default
}
if self.cfg.pad_to_sequence_len:
data_collator_kwargs["pad_to_multiple_of"] = 64 * math.ceil(
self.cfg.sequence_len / 64
)
else:
# A100 is best at 64, while others at 8. Let's use the larger so we don't have to check
# https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html
data_collator_kwargs["pad_to_multiple_of"] = 64
collator_kwargs = {}
if self.cfg.multimodal:
from llava.train.train import DataCollatorForSupervisedDataset
collator_kwargs["data_collator"] = DataCollatorForSupervisedDataset(
tokenizer=self.tokenizer,
)
else:
collator_kwargs["data_collator"] = DataCollatorForSeq2Seq(
self.tokenizer,
return_tensors="pt",
**data_collator_kwargs,
)
if self.cfg.do_bench_eval:
collator_kwargs[
"bench_data_collator"
] = transformers.DataCollatorForSeq2Seq(
self.tokenizer,
return_tensors="pt",
**data_collator_kwargs,
)
return collator_kwargs
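
A hedged sketch of what the `mm_projector.bin` checkpoint written by `_save_checkpoint` above contains: it is a plain state dict keyed by parameter name (filtered to `mm_projector` / `vision_resampler`), so it can be inspected directly. The checkpoint path here is hypothetical.

```python
import torch

# Hypothetical checkpoint directory produced by a pretraining run
weights = torch.load("./out/checkpoint-100/mm_projector.bin", map_location="cpu")
for name, tensor in weights.items():
    print(name, tuple(tensor.shape))  # parameter names containing e.g. "mm_projector"
```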

View File

View File

@@ -0,0 +1,167 @@
"""
LLaVA Mistral classes
"""
from typing import List, Optional, Tuple, Union
import torch
from llava.model.llava_arch import LlavaMetaForCausalLM, LlavaMetaModel
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import (
AutoConfig,
AutoModelForCausalLM,
MistralConfig,
MistralForCausalLM,
MistralModel,
)
from transformers.modeling_outputs import CausalLMOutputWithPast
class LlavaMistralConfig(MistralConfig):
"""
HF Transformers Config for Mistral w LLaVA
"""
model_type = "llava_mistral"
class LlavaMistralModel(LlavaMetaModel, MistralModel):
"""
HF Transformers Model for Mistral w LLaVA
"""
config_class = LlavaMistralConfig
def __init__(
self, config: LlavaMistralConfig
): # pylint: disable=useless-parent-delegation
super().__init__(config)
class LlavaMistralForCausalLM(MistralForCausalLM, LlavaMetaForCausalLM):
"""
HF Transformers Causal Model for Mistral w LLaVA
"""
config_class = LlavaMistralConfig
def __init__(self, config: LlavaMistralConfig):
super().__init__(config)
self.model = LlavaMistralModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_model(self):
return self.model
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
images: Optional[torch.FloatTensor] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
(
input_ids,
attention_mask,
past_key_values,
inputs_embeds,
labels,
) = self.prepare_inputs_labels_for_multimodal(
input_ids, attention_mask, past_key_values, labels, images
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
shift_logits = shift_logits.view(-1, self.config.vocab_size)
shift_labels = shift_labels.view(-1)
# Enable model/pipeline parallelism
shift_labels = shift_labels.to(shift_logits.device)
loss = loss_fct(shift_logits, shift_labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
**kwargs
):
if past_key_values:
input_ids = input_ids[:, -1:]
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update(
{
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"attention_mask": attention_mask,
"images": kwargs.get("images", None),
}
)
return model_inputs
AutoConfig.register("llava_mistral", LlavaMistralConfig)
AutoModelForCausalLM.register(LlavaMistralConfig, LlavaMistralForCausalLM)
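
Because the config and model classes are registered with the `Auto*` factories, a checkpoint whose `config.json` declares `model_type: llava_mistral` should load through the standard entry points in any session that has imported this module. A minimal sketch, assuming a hypothetical local checkpoint path:

```python
from transformers import AutoConfig, AutoModelForCausalLM

import axolotl.models.llava.llava_mistral  # noqa: F401  # runs the register() calls above

config = AutoConfig.from_pretrained("./out/llava-mistral")  # hypothetical checkpoint dir
model = AutoModelForCausalLM.from_pretrained("./out/llava-mistral", config=config)
```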

View File

@@ -20,6 +20,14 @@ from axolotl.utils.dict import DictDefault
from axolotl.utils.models import load_model, load_tokenizer
from axolotl.utils.trainer import setup_trainer
try:
from llava.train.train import safe_save_model_for_hf_trainer
except ImportError:
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
raise ImportError("missing LLaVA package")
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
src_dir = os.path.join(project_root, "src")
sys.path.insert(0, src_dir)
@@ -137,6 +145,8 @@ def train(
# only save on rank 0, otherwise it corrupts output on multi-GPU when multiple processes attempt to write the same file
if cfg.fsdp:
trainer.save_model(cfg.output_dir)
elif cfg.multimodal:
safe_save_model_for_hf_trainer(trainer=trainer, output_dir=cfg.output_dir)
elif cfg.deepspeed and is_deepspeed_zero3_enabled():
# Copied over from: https://github.com/huggingface/accelerate/blob/5ae611118057232f441055f7ef9ba0b0f2b8d533/docs/source/usage_guides/deepspeed.md#saving-and-loading
trainer.accelerator.wait_for_everyone()
@@ -149,14 +159,14 @@ def train(
# The model name saved is `pytorch_model.bin`
unwrapped_model.save_pretrained(
cfg.output_dir,
is_main_process=trainer.accelerator.is_main_process,
is_main_process=trainer.args.should_save,
save_function=trainer.accelerator.save,
state_dict=trainer.accelerator.get_state_dict(trainer.model_wrapped),
)
elif cfg.local_rank == 0:
elif trainer.args.should_save:
if cfg.flash_optimum:
model = BetterTransformer.reverse(model)
# TODO figure out if `trainer.save_model(cfg.output_dir)` is sufficient here
model.save_pretrained(cfg.output_dir, safe_serialization=safe_serialization)
if not cfg.hub_model_id:

View File

@@ -369,6 +369,15 @@ def validate_config(cfg):
"If you want to full finetune, please turn off load_in_8bit and load_in_4bit."
)
if cfg.multimodal:
try:
import llava # noqa: F401 # pylint:disable=unused-import
except ImportError as exc:
LOG.warning(
"LLaVA package required for multimodal training. See docs/llava.md for more information."
)
raise exc
# TODO
# MPT 7b
# https://github.com/facebookresearch/bitsandbytes/issues/25

View File

@@ -54,8 +54,19 @@ def md5(to_hash: str, encoding: str = "utf-8") -> str:
return hashlib.md5(to_hash.encode(encoding)).hexdigest() # nosec
def prepare_dataset(cfg, tokenizer):
if not cfg.pretraining_dataset:
def prepare_dataset(cfg, tokenizer, model=None):
if cfg.multimodal:
if not model:
raise ValueError("missing model argument")
from llava.train.train import LazySupervisedDataset
with zero_first(is_main_process()):
eval_dataset = None
train_dataset = LazySupervisedDataset(
tokenizer=tokenizer,
)
elif not cfg.pretraining_dataset:
with zero_first(is_main_process()):
train_dataset, eval_dataset = load_prepare_datasets(
tokenizer, cfg, DEFAULT_DATASET_PREPARED_PATH

View File

@@ -255,7 +255,102 @@ def load_model(
model_kwargs["use_flash_attention_2"] = True
try:
if cfg.is_llama_derived_model and not cfg.trust_remote_code and not cfg.gptq:
if cfg.multimodal:
from llava.train.train import DataArguments, ModelArguments
if cfg.is_llama_derived_model:
from llava.model.language_model.llava_llama import LlavaLlamaForCausalLM
model = LlavaLlamaForCausalLM.from_pretrained(
cfg.base_model,
)
elif cfg.is_mistral_derived_model:
from axolotl.models.llava.llava_mistral import LlavaMistralForCausalLM
model = LlavaMistralForCausalLM.from_pretrained(
cfg.base_model,
)
else:
raise NotImplementedError(
"unhandled model architecture for multimodal training"
)
if cfg.mm_freeze_backbone:
model.model.requires_grad_(False)
if cfg.gradient_checkpointing:
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(
module,
input,
output,
): # pylint: disable=redefined-builtin,unused-argument
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(
make_inputs_require_grad
)
model_args = ModelArguments(
model_name_or_path=cfg.base_model,
version="v0",
freeze_backbone=cfg.mm_freeze_backbone or False,
tune_mm_mlp_adapter=cfg.tune_mm_mlp_adapter or False,
vision_tower=cfg.mm_vision_tower,
mm_vision_select_layer=cfg.mm_vision_select_layer or -1,
pretrain_mm_mlp_adapter=cfg.pretrain_mm_mlp_adapter,
mm_projector_type=cfg.mm_projector_type or "linear",
mm_use_im_start_end=cfg.mm_use_im_start_end or False,
mm_use_im_patch_token=cfg.mm_use_im_patch_token,
mm_vision_select_feature=cfg.mm_vision_select_feature or "patch",
)
if cfg.mm_vision_tower is not None:
model.get_model().initialize_vision_modules(
model_args=model_args, fsdp=cfg.fsdp
)
vision_tower = model.get_vision_tower()
vision_tower.to(dtype=cfg.torch_dtype, device=cfg.device)
# pylint: disable=duplicate-code
data_args = DataArguments(
data_path=cfg.datasets[0]["path"],
lazy_preprocess=cfg.mm_lazy_preprocess
if cfg.mm_lazy_preprocess is not None
else True,
is_multimodal=True,
image_folder=cfg.mm_image_folder or None,
image_aspect_ratio=cfg.mm_image_aspect_ratio or "square",
image_grid_pinpoints=cfg.mm_image_grid_pinpoints or None,
)
data_args.image_processor = vision_tower.image_processor
model.config.image_aspect_ratio = data_args.image_aspect_ratio
model.config.image_grid_pinpoints = data_args.image_grid_pinpoints
model.config.tune_mm_mlp_adapter = cfg.tune_mm_mlp_adapter
if cfg.tune_mm_mlp_adapter:
model.requires_grad_(False)
for (
p # pylint: disable=invalid-name
) in model.get_model().mm_projector.parameters():
p.requires_grad = True
model.config.freeze_mm_mlp_adapter = cfg.freeze_mm_mlp_adapter
if cfg.freeze_mm_mlp_adapter:
for (
p # pylint: disable=invalid-name
) in model.get_model().mm_projector.parameters():
p.requires_grad = False
model.config.mm_use_im_start_end = (
data_args.mm_use_im_start_end
) = cfg.mm_use_im_start_end
model.config.mm_use_im_patch_token = cfg.mm_use_im_patch_token
model.initialize_vision_tokenizer(model_args, tokenizer=tokenizer)
elif cfg.is_llama_derived_model and not cfg.trust_remote_code and not cfg.gptq:
from transformers import LlamaForCausalLM
config_kwargs = {}
@@ -520,7 +615,14 @@ def load_llama_adapter(model, cfg):
def find_all_linear_names(model):
cls = (bnb.nn.Linear4bit, bnb.nn.Linear8bitLt, torch.nn.Linear, QuantLinear)
lora_module_names = set()
multimodal_keywords = [
"mm_projector",
"vision_tower",
"vision_resampler",
] # for LLaVA
for name, module in model.named_modules():
if any(mm_keyword in name for mm_keyword in multimodal_keywords):
continue
if (
isinstance(module, cls)
or "Linear" in module.__class__.__name__

View File

@@ -13,7 +13,7 @@ import torch.distributed as dist
from datasets import set_caching_enabled
from torch.utils.data import DistributedSampler, RandomSampler
from axolotl.core.trainer_builder import HFCausalTrainerBuilder
from axolotl.core.trainer_builder import AxolotlTrainer, HFCausalTrainerBuilder
from axolotl.utils.collators import DataCollatorForSeq2Seq
from axolotl.utils.dataloader import MultipackDistributedDataloader
from axolotl.utils.distributed import (
@@ -259,7 +259,9 @@ def setup_fsdp_envs(cfg):
] = cfg.fsdp_config.fsdp_transformer_layer_cls_to_wrap
def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer, total_num_steps):
def setup_trainer(
cfg, train_dataset, eval_dataset, model, tokenizer, total_num_steps
) -> AxolotlTrainer:
if cfg.fsdp:
setup_fsdp_envs(cfg)
elif cfg.deepspeed: