Compare commits
12 commits: fsdp-fix ... save_only_
| SHA1 |
|---|
| 3202f19f52 |
| 5ed29393e3 |
| da9b1a3196 |
| 057fa44191 |
| 8fa0785f74 |
| 4313b1a6a0 |
| 7f17eff81a |
| ff01c45127 |
| 2fa65b9599 |
| 9430b6e868 |
| 934fc851da |
| bda48f0150 |
```diff
@@ -16,7 +16,7 @@ sequence_len: 1024 # supports up to 32k
 sample_packing: false
 pad_to_sequence_len: false
 
-adapter: lora
+adapter: qlora
 lora_model_dir:
 lora_r: 32
 lora_alpha: 16
```
```diff
@@ -24,6 +24,7 @@ from huggingface_hub import HfApi
 from huggingface_hub.utils import LocalTokenNotFoundError
 from transformers import GenerationConfig, TextIteratorStreamer, TextStreamer
 from transformers.utils import is_torch_bf16_gpu_available
+from transformers.utils.import_utils import _is_package_available
 
 from axolotl.common.cli import TrainerCliArgs, load_model_and_tokenizer
 from axolotl.logging_config import configure_logging
@@ -62,6 +63,20 @@ def print_axolotl_text_art(suffix=None):
     if is_main_process():
         print(ascii_art)
 
+    print_dep_versions()
+
+
+def print_dep_versions():
+    packages = ["accelerate", "peft", "transformers", "trl", "torch", "bitsandbytes"]
+    max_len = max(len(pkg) for pkg in packages)
+    if is_main_process():
+        print("*" * 40)
+        print("**** Axolotl Dependency Versions *****")
+        for pkg in packages:
+            version = _is_package_available(pkg, return_version=True)
+            print(f"{pkg: >{max_len}}: {version[1]: <15}")
+        print("*" * 40)
+
 
 def check_remote_config(config: Union[str, Path]):
     # Check if the config is a valid HTTPS URL to a .yml or .yaml file
```
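For context on the hunk above: `_is_package_available(pkg, return_version=True)` in `transformers` returns an `(is_available, version_string)` tuple, and the `>{max_len}` format spec right-aligns each package name to the widest one. A minimal standalone sketch of the banner logic, with the package list shortened for illustration:

```python
# Minimal sketch of the dependency-version banner; shortened package list.
from transformers.utils.import_utils import _is_package_available

packages = ["accelerate", "peft", "transformers"]
max_len = max(len(pkg) for pkg in packages)
for pkg in packages:
    # Returns (is_available: bool, version: str); the version string is a
    # placeholder when the package is absent.
    _available, version = _is_package_available(pkg, return_version=True)
    print(f"{pkg: >{max_len}}: {version: <15}")
```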
```diff
@@ -23,6 +23,7 @@ from torch.optim.lr_scheduler import OneCycleLR
 from torch.utils.data import BatchSampler, DataLoader, RandomSampler, SequentialSampler
 from transformers import (
     EarlyStoppingCallback,
+    PreTrainedModel,
     Trainer,
     TrainerCallback,
     TrainingArguments,
@@ -35,6 +36,7 @@ from trl.trainer.utils import pad_to_length
 from axolotl.loraplus import create_loraplus_optimizer
 from axolotl.monkeypatch.multipack import SUPPORTED_MULTIPACK_MODEL_TYPES
 from axolotl.monkeypatch.relora import ReLoRACallback, ReLoRAScheduler
+from axolotl.utils import is_mlflow_available
 from axolotl.utils.callbacks import (
     EvalFirstStepCallback,
     GPUStatsCallback,
@@ -70,10 +72,6 @@ except ImportError:
 LOG = logging.getLogger("axolotl.core.trainer_builder")
 
 
-def is_mlflow_available():
-    return importlib.util.find_spec("mlflow") is not None
-
-
 def _sanitize_kwargs_for_tagging(tag_names, kwargs=None):
     if isinstance(tag_names, str):
         tag_names = [tag_names]
@@ -802,6 +800,15 @@ class AxolotlDPOTrainer(DPOTrainer):
 
         return super().push_to_hub(*args, **kwargs)
 
+    def tokenize_row(
+        self, feature, model: Optional[Union[PreTrainedModel, torch.nn.Module]] = None
+    ) -> Dict:
+        res = super().tokenize_row(feature, model=model)
+        if self.tokenizer.bos_token_id is None and res["prompt_input_ids"][0] is None:
+            for key in res.keys():
+                res[key] = res[key][1:]
+        return res
+
 
 class TrainerBuilderBase(abc.ABC):
     """
@@ -933,7 +940,16 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         callbacks = []
         if self.cfg.use_wandb and self.cfg.eval_table_size > 0:
             LogPredictionCallback = log_prediction_callback_factory(
-                trainer, self.tokenizer
+                trainer, self.tokenizer, "wandb"
+            )
+            callbacks.append(LogPredictionCallback(self.cfg))
+        if (
+            self.cfg.use_mlflow
+            and is_mlflow_available()
+            and self.cfg.eval_table_size > 0
+        ):
+            LogPredictionCallback = log_prediction_callback_factory(
+                trainer, self.tokenizer, "mlflow"
             )
             callbacks.append(LogPredictionCallback(self.cfg))
 
@@ -1042,6 +1058,9 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
         if self.cfg.save_safetensors is not None:
            training_arguments_kwargs["save_safetensors"] = self.cfg.save_safetensors
 
+        if self.cfg.save_only_model is not None:
+            training_arguments_kwargs["save_only_model"] = self.cfg.save_only_model
+
         if self.cfg.sample_packing_eff_est:
             training_arguments_kwargs[
                 "sample_packing_efficiency"
```
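The `save_only_model` passthrough above maps to the `TrainingArguments` flag of the same name, which drops the optimizer, scheduler, and RNG state from checkpoints, so they are much smaller but cannot be used to resume training. A hedged sketch of what the builder ends up constructing, assuming a `transformers` version that supports the flag (4.36+):

```python
# Sketch: what the builder constructs when cfg.save_only_model is set.
from transformers import TrainingArguments

training_arguments_kwargs = {"save_safetensors": True, "save_only_model": True}
args = TrainingArguments(output_dir="./out", **training_arguments_kwargs)
# Checkpoints now contain model weights only; optimizer state is not resumable.
```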
```diff
@@ -0,0 +1,8 @@
+"""
+Basic utils for Axolotl
+"""
+import importlib
+
+
+def is_mlflow_available():
+    return importlib.util.find_spec("mlflow") is not None
```
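The new utils module relies on `importlib.util.find_spec`, which returns `None` when a package is not installed, so availability can be probed without paying the import cost of `mlflow` itself:

```python
import importlib.util

# find_spec returns a ModuleSpec if the package is importable, else None,
# so this probe never actually imports mlflow.
if importlib.util.find_spec("mlflow") is not None:
    import mlflow  # noqa: F401  # safe to import at this point
```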
```diff
@@ -6,7 +6,7 @@ import logging
 import os
 from shutil import copyfile
 from tempfile import NamedTemporaryFile
-from typing import TYPE_CHECKING, Dict, List
+from typing import TYPE_CHECKING, Any, Dict, List
 
 import evaluate
 import numpy as np
@@ -27,7 +27,9 @@ from transformers import (
 )
 from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, IntervalStrategy
 
+from axolotl.utils import is_mlflow_available
 from axolotl.utils.bench import log_gpu_memory_usage
+from axolotl.utils.config.models.input.v0_4_1 import AxolotlInputConfig
 from axolotl.utils.distributed import (
     barrier,
     broadcast_dict,
@@ -540,7 +542,7 @@ def causal_lm_bench_eval_callback_factory(trainer: Trainer, tokenizer):
     return CausalLMBenchEvalCallback
 
 
-def log_prediction_callback_factory(trainer: Trainer, tokenizer):
+def log_prediction_callback_factory(trainer: Trainer, tokenizer, logger: str):
     class LogPredictionCallback(TrainerCallback):
         """Callback to log prediction values during each evaluation"""
 
@@ -597,15 +599,13 @@ def log_prediction_callback_factory(trainer: Trainer, tokenizer):
             return ranges
 
         def log_table_from_dataloader(name: str, table_dataloader):
-            table = wandb.Table(  # type: ignore[attr-defined]
-                columns=[
-                    "id",
-                    "Prompt",
-                    "Correct Completion",
-                    "Predicted Completion (model.generate)",
-                    "Predicted Completion (trainer.prediction_step)",
-                ]
-            )
+            table_data: Dict[str, List[Any]] = {
+                "id": [],
+                "Prompt": [],
+                "Correct Completion": [],
+                "Predicted Completion (model.generate)": [],
+                "Predicted Completion (trainer.prediction_step)": [],
+            }
             row_index = 0
 
             for batch in tqdm(table_dataloader):
@@ -709,16 +709,29 @@ def log_prediction_callback_factory(trainer: Trainer, tokenizer):
             ) in zip(
                 prompt_texts, completion_texts, predicted_texts, pred_step_texts
             ):
-                table.add_data(
-                    row_index,
-                    prompt_text,
-                    completion_text,
-                    prediction_text,
-                    pred_step_text,
-                )
+                table_data["id"].append(row_index)
+                table_data["Prompt"].append(prompt_text)
+                table_data["Correct Completion"].append(completion_text)
+                table_data["Predicted Completion (model.generate)"].append(
+                    prediction_text
+                )
+                table_data[
+                    "Predicted Completion (trainer.prediction_step)"
+                ].append(pred_step_text)
                 row_index += 1
-
-            wandb.run.log({f"{name} - Predictions vs Ground Truth": table})  # type: ignore[attr-defined]
+            if logger == "wandb":
+                wandb.run.log({f"{name} - Predictions vs Ground Truth": pd.DataFrame(table_data)})  # type: ignore[attr-defined]
+            elif logger == "mlflow" and is_mlflow_available():
+                import mlflow
+
+                tracking_uri = AxolotlInputConfig(
+                    **self.cfg.to_dict()
+                ).mlflow_tracking_uri
+                mlflow.log_table(
+                    data=table_data,
+                    artifact_file="PredictionsVsGroundTruth.json",
+                    tracking_uri=tracking_uri,
+                )
 
         if is_main_process():
             log_table_from_dataloader("Eval", eval_dataloader)
@@ -748,6 +761,11 @@ class SaveAxolotlConfigtoWandBCallback(TrainerCallback):
                     mode="w", delete=False, suffix=".yml", prefix="axolotl_config_"
                 ) as temp_file:
                     copyfile(self.axolotl_config_path, temp_file.name)
+                    artifact = wandb.Artifact(
+                        f"config-{wandb.run.id}", type="axolotl-config"
+                    )
+                    artifact.add_file(temp_file.name)
+                    wandb.log_artifact(artifact)
                     wandb.save(temp_file.name)
                     LOG.info(
                         "The Axolotl config has been saved to the WandB run under files."
```
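Replacing `wandb.Table` with a plain dict of column lists makes the table payload backend-neutral: `pandas.DataFrame` accepts a dict of equal-length lists directly, and `mlflow.log_table` accepts either a dict or a DataFrame. A small sketch of the shared shape, with illustrative values:

```python
# Sketch: one dict-of-lists payload serves both logging backends.
import pandas as pd

table_data = {
    "id": [0, 1],
    "Prompt": ["What is 2+2?", "Name a color."],
    "Correct Completion": ["4", "red"],
}
df = pd.DataFrame(table_data)  # columns from keys, one row per list index
print(df.shape)  # (2, 3)
```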
```diff
@@ -98,6 +98,7 @@ class SFTDataset(BaseModel):
     ds_type: Optional[str] = None
     train_on_split: Optional[str] = None
 
+    field: Optional[str] = None
     field_human: Optional[str] = None
     field_model: Optional[str] = None
 
@@ -242,17 +243,6 @@ class LoraConfig(BaseModel):
             raise ValueError("Require cfg.load_in_4bit to be True for qlora")
         return self
 
-    @model_validator(mode="before")
-    @classmethod
-    def validate_quantized_dora(cls, data):
-        if data.get("peft_use_dora") and (
-            data.get("load_in_8bit") or data.get("load_in_4bit")
-        ):
-            raise ValueError(
-                "`peft_use_dora` is not currently compatible with quantized weights."
-            )
-        return data
-
 
 class ReLoRAConfig(BaseModel):
     """ReLoRA configuration subset"""
@@ -365,6 +355,7 @@ class ModelOutputConfig(BaseModel):
     hub_model_id: Optional[str] = None
     hub_strategy: Optional[str] = None
     save_safetensors: Optional[bool] = None
+    save_only_model: Optional[bool] = None
 
 
 class MLFlowConfig(BaseModel):
@@ -664,8 +655,8 @@ class AxolotlInputConfig(
             and not data.get("flash_attention")
             and not data.get("sdp_attention")
         ):
-            raise ValueError(
-                "sample_packing requires flash_attention or sdp_attention to be set to true"
+            LOG.warning(
+                "sample_packing without flash_attention or sdp_attention does not handle cross-attention."
             )
 
         return data
```
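The validators above follow pydantic v2's `@model_validator(mode="before")` pattern, which receives the raw input dict before any field parsing, so cross-field checks can run against whatever the user actually supplied. A minimal sketch of the pattern; the class and field names here are illustrative, not the actual axolotl models:

```python
# Hypothetical minimal model showing the mode="before" validator pattern.
from typing import Optional

from pydantic import BaseModel, model_validator


class LoraSettingsSketch(BaseModel):
    adapter: Optional[str] = None
    load_in_4bit: bool = False

    @model_validator(mode="before")
    @classmethod
    def check_qlora(cls, data):
        # data is the raw dict, before field validation
        if data.get("adapter") == "qlora" and not data.get("load_in_4bit"):
            raise ValueError("Require cfg.load_in_4bit to be True for qlora")
        return data
```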
```diff
@@ -379,14 +379,15 @@ def load_tokenized_prepared_datasets(
         d_base_type = d_type_split[0]
         d_prompt_style = d_type_split[1] if len(d_type_split) > 1 else None
 
-        if config_dataset.split and config_dataset.split in ds:
-            ds = ds[config_dataset.split]
-        elif split in ds:
-            ds = ds[split]
-        elif isinstance(ds, DatasetDict):
-            raise ValueError(
-                f"no {split} split found for dataset {config_dataset.path}, you may specify a split with 'split: `"
-            )
+        if isinstance(ds, DatasetDict):
+            if config_dataset.split and config_dataset.split in ds:
+                ds = ds[config_dataset.split]
+            elif split in ds:
+                ds = ds[split]
+            else:
+                raise ValueError(
+                    f"no {split} split found for dataset {config_dataset.path}, you may specify a split with 'split: `"
+                )
 
         # support for using a subset of the data
         if config_dataset.shards:
```
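The restructured branch guards on `isinstance(ds, DatasetDict)` because `datasets.load_dataset` can return either a `DatasetDict` keyed by split name or a bare `Dataset` (for example, when a split was already selected at load time); indexing a bare `Dataset` with a split name would fail. A small sketch of the distinction:

```python
# Sketch: split selection only applies when the loaded object actually has splits.
from datasets import Dataset, DatasetDict

ds = DatasetDict({"train": Dataset.from_dict({"text": ["hello"]})})
if isinstance(ds, DatasetDict):
    ds = ds["train"]  # a bare Dataset would skip this and be used as-is
print(len(ds))  # 1
```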
```diff
@@ -459,7 +459,7 @@ def load_model(
         "bnb_4bit_quant_type": "nf4",
         "bnb_4bit_quant_storage": torch.bfloat16,
     }
-    if not cfg.deepspeed and cfg.model_config_type in ("jamba", "qwen2_moe"):
+    if not cfg.deepspeed:
         # for some reason, this causes the loss to be off by an order of magnitude
         # but deepspeed needs this still in bfloat16
         bnb_config["bnb_4bit_quant_storage"] = torch.float32
@@ -902,7 +902,12 @@ def load_lora(model, cfg, inference=False, config_only=False):
     model = get_peft_model(model, lora_config)
 
     if rank == 0:
-        model.print_trainable_parameters()
+        try:
+            model.print_trainable_parameters()
+        except AttributeError as exc:
+            LOG.warning(
+                "Exception caught during model.print_trainable_parameters(): %s", exc
+            )
     elif cfg.fsdp and cfg.adapter == "qlora":
         setup_quantized_peft_meta_for_training(model)
 
```
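For reference, the `bnb_4bit_quant_storage` entry above corresponds to the `BitsAndBytesConfig` parameter that controls the dtype used to store packed 4-bit weights, which FSDP wrapping generally wants to match the parameter dtype. A hedged sketch of the equivalent config object, assuming a `transformers` version that exposes this parameter:

```python
# Sketch of the equivalent BitsAndBytesConfig; values mirror the diff above.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    # The diff switches this to torch.float32 when deepspeed is not in use.
    bnb_4bit_quant_storage=torch.bfloat16,
)
```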
```diff
@@ -198,7 +198,7 @@ def calculate_total_num_steps(cfg, train_dataset, update=True):
         .apply(lambda x: len(x))  # pylint: disable=unnecessary-lambda
         .values
     )
-    LOG.debug(f"total_num_tokens: {total_num_tokens}", main_process_only=True)
+    LOG.debug(f"total_num_tokens: {total_num_tokens:_}", main_process_only=True)
     if update:
         cfg.total_num_tokens = total_num_tokens
 
@@ -212,7 +212,7 @@ def calculate_total_num_steps(cfg, train_dataset, update=True):
             .sum()
         )
         LOG.debug(
-            f"`total_supervised_tokens: {total_supervised_tokens}`",
+            f"`total_supervised_tokens: {total_supervised_tokens:_}`",
             main_process_only=True,
         )
         if update:
@@ -239,7 +239,7 @@ def calculate_total_num_steps(cfg, train_dataset, update=True):
             * cfg.num_epochs
         )
         LOG.debug(
-            f"total_num_tokens: {cfg.total_num_tokens}, total_num_steps: {total_num_steps}",
+            f"total_num_tokens: {cfg.total_num_tokens:_}, total_num_steps: {total_num_steps:_}",
             main_process_only=True,
         )
     else:
```
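The only change in these last three hunks is the `:_` format spec, which groups digits with underscore separators so large token counts stay readable in the debug logs:

```python
# The :_ spec inserts underscore thousands separators.
total_num_tokens = 123456789
print(f"total_num_tokens: {total_num_tokens:_}")  # total_num_tokens: 123_456_789
```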