Compare commits

kd-trainer...kd-trainer — 60 commits (SHA1 only; author and date columns were empty):

ab491804e0 f7334a1719 c45ab03487 0da0cd02e5 dd48ce7365 6fbc35762b
71cb5b98c9 890d85f267 7dc137ed5b a31ec4d9b3 7e7762f40b 1ffca753ca
01d31587fe 9b7d3894c0 1baffa54b1 2045ff2b7a 93903f4aa5 b5b3452b2b
6bbe3ac641 9ed455ef8c 66823c113c e976de4d8f 8eb82bba40 9fe36db215
9dcc879e04 1e577a29a8 4037fdb43a 385c60cd9b 06370b386a 3da6a652fa
84547c724d 51547c656a 7c4ae15942 cdb167e7f7 52f1d7aee2 319c3531e7
87eb6a3324 f03fa703b7 53ec07d44c 8d77dc385e 8b0104fa7c 546ad007ec
868a49cb96 4a12b1b22e 973ed841cd 9c0470130b 0da2b7c7cc 7c813a1d27
0a08bb4f78 8075a92a33 ba6eacd167 e2fae47114 7d281b71dc b080c53afc
1ea225129f e2aba41939 21caaaa2e9 08d9f582e4 39daeb2c79 02c9898a95
.github/workflows/main.yml (vendored) — 4 changes:

```diff
@@ -25,6 +25,7 @@ jobs:
           python_version: "3.11"
           pytorch: 2.3.1
           axolotl_extras: mamba-ssm
+          is_latest: true
         - cuda: 124
           cuda_version: 12.4.1
           python_version: "3.11"
@@ -35,7 +36,6 @@ jobs:
           python_version: "3.11"
           pytorch: 2.5.1
           axolotl_extras:
-          is_latest: true
     runs-on: axolotl-gpu-runner
     steps:
       - name: Checkout
@@ -92,6 +92,7 @@ jobs:
           python_version: "3.11"
           pytorch: 2.3.1
           axolotl_extras:
+          is_latest: true
         - cuda: 124
           cuda_version: 12.4.1
           python_version: "3.11"
@@ -102,7 +103,6 @@ jobs:
           python_version: "3.11"
           pytorch: 2.5.1
           axolotl_extras:
-          is_latest: true
     runs-on: axolotl-gpu-runner
     steps:
       - name: Checkout
```
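Net effect: `is_latest: true` moves from the PyTorch 2.5.1 matrix entries to the PyTorch 2.3.1 entries in both build jobs, so this branch's `latest` image tags track the 2.3.1 builds.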
A Modal launch script (file header not preserved; identified by its `modal.gpu` usage):

```diff
@@ -59,7 +59,7 @@ VOLUME_CONFIG = {
 }

 N_GPUS = int(os.environ.get("N_GPUS", 1))
-GPU_CONFIG = modal.gpu.L40S(count=N_GPUS)
+GPU_CONFIG = modal.gpu.A10G(count=N_GPUS)


 def run_cmd(cmd: str, run_folder: str):
```
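For context, a minimal sketch of where a `GPU_CONFIG` like this lands in a Modal app. The app and function below are illustrative, not from this repo; only the two config lines come from the diff, and `modal.gpu.A10G` is the legacy GPU-class API the script uses:

```python
import os

import modal

N_GPUS = int(os.environ.get("N_GPUS", 1))
GPU_CONFIG = modal.gpu.A10G(count=N_GPUS)  # this branch picks A10G over L40S

app = modal.App("axolotl-train")  # hypothetical app name


@app.function(gpu=GPU_CONFIG, timeout=4 * 60 * 60)
def train_remote(run_folder: str):
    # the real script shells out through run_cmd(cmd, run_folder); this stub
    # only shows where GPU_CONFIG attaches
    ...
```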
Pretraining docs:

````diff
@@ -19,14 +19,7 @@ For pretraining, there is no prompt template or roles. The only required field
 Axolotl usually loads the entire dataset into memory. This will be challenging for large datasets. Use the following config to enable streaming:

 ```{.yaml filename="config.yaml"}
-pretraining_dataset:
-  - name:
-    path:
-    split:
-    text_column: # column in dataset with the data, usually `text`
-    type: pretrain
-    trust_remote_code:
-    skip: # number of rows of data to skip over from the beginning
+pretraining_dataset: # hf path only
 ...
 ```
````
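Read with the incoming comment, the documented streaming setup collapses to a single Hugging Face dataset path; a minimal sketch (the dataset name is a placeholder, not taken from this diff):

```{.yaml filename="config.yaml"}
pretraining_dataset: org-name/large-pretrain-corpus
```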
scripts/finetune.py — new file, 52 lines (`@@ -0,0 +1,52 @@`):

```python
"""Prepare and train a model on a dataset. Can also infer from a model or merge lora"""

import logging
from pathlib import Path

import fire
import transformers

from axolotl.cli import (
    check_accelerate_default_config,
    check_user_token,
    do_inference,
    do_merge_lora,
    load_cfg,
    load_datasets,
    print_axolotl_text_art,
)
from axolotl.cli.shard import shard
from axolotl.common.cli import TrainerCliArgs
from axolotl.train import train

LOG = logging.getLogger("axolotl.scripts.finetune")


def do_cli(config: Path = Path("examples/"), **kwargs):
    print_axolotl_text_art()
    LOG.warning(
        str(
            PendingDeprecationWarning(
                "scripts/finetune.py will be replaced with calling axolotl.cli.train"
            )
        )
    )
    parsed_cfg = load_cfg(config, **kwargs)
    check_accelerate_default_config()
    check_user_token()
    parser = transformers.HfArgumentParser((TrainerCliArgs))
    parsed_cli_args, _ = parser.parse_args_into_dataclasses(
        return_remaining_strings=True
    )
    if parsed_cli_args.inference:
        do_inference(cfg=parsed_cfg, cli_args=parsed_cli_args)
    elif parsed_cli_args.merge_lora:
        do_merge_lora(cfg=parsed_cfg, cli_args=parsed_cli_args)
    elif parsed_cli_args.shard:
        shard(cfg=parsed_cfg, cli_args=parsed_cli_args)
    else:
        dataset_meta = load_datasets(cfg=parsed_cfg, cli_args=parsed_cli_args)
        train(cfg=parsed_cfg, cli_args=parsed_cli_args, dataset_meta=dataset_meta)


if __name__ == "__main__":
    fire.Fire(do_cli)
```
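Because `do_cli` is exposed through `fire.Fire` while `HfArgumentParser` picks the `TrainerCliArgs` flags out of the same argv, the script can be driven directly from a shell. Illustrative invocations (the config path is a placeholder):

```
python scripts/finetune.py examples/config.yml               # train
python scripts/finetune.py examples/config.yml --inference   # interactive generation
python scripts/finetune.py examples/config.yml --merge_lora  # merge LoRA into the base model
python scripts/finetune.py examples/config.yml --shard       # shard saved weights
```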
`axolotl/cli/__init__.py` (the `axolotl.cli` package init, per the `from axolotl.cli import ...` lines in `scripts/finetune.py` above) — `@@ -1,5 +1,568 @@`. The five-line init (its `"""Axolotl CLI module initialization."""` docstring, `import os`, and the `HF_HUB_ENABLE_HF_TRANSFER` setting) becomes the full monolithic CLI module:

```python
"""Prepare and train a model on a dataset. Can also infer from a model or merge lora"""

import importlib
import json
import logging
import math
import os
import random
import sys
import tempfile
from pathlib import Path
from threading import Thread
from typing import Any, Dict, List, Optional, Union
from urllib.parse import urlparse

import requests
import torch
import yaml

# add src to the pythonpath so we don't need to pip install this
from accelerate.commands.config import config_args
from art import text2art
from huggingface_hub import HfApi
from huggingface_hub.utils import LocalTokenNotFoundError
from transformers import GenerationConfig, TextIteratorStreamer, TextStreamer
from transformers.utils import is_torch_bf16_gpu_available
from transformers.utils.import_utils import _is_package_available

from axolotl.common.cli import TrainerCliArgs, load_model_and_tokenizer
from axolotl.logging_config import configure_logging
from axolotl.train import TrainDatasetMeta
from axolotl.utils.chat_templates import (
    get_chat_template,
    get_chat_template_from_config,
)
from axolotl.utils.comet_ import setup_comet_env_vars
from axolotl.utils.config import (
    normalize_cfg_datasets,
    normalize_config,
    prepare_plugins,
    validate_config,
)
from axolotl.utils.data import load_prepare_dpo_datasets, prepare_dataset
from axolotl.utils.dict import DictDefault
from axolotl.utils.distributed import is_main_process
from axolotl.utils.mlflow_ import setup_mlflow_env_vars
from axolotl.utils.models import load_processor, load_tokenizer
from axolotl.utils.tokenization import check_dataset_labels
from axolotl.utils.trainer import prepare_opinionated_env, prepare_optim_env
from axolotl.utils.wandb_ import setup_wandb_env_vars

project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
src_dir = os.path.join(project_root, "src")
sys.path.insert(0, src_dir)

configure_logging()
LOG = logging.getLogger("axolotl.scripts")

os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

AXOLOTL_LOGO = """
 #@@ #@@ @@# @@#
@@ @@ @@ @@ =@@# @@ #@ =@@#.
@@ #@@@@@@@@@ @@ #@#@= @@ #@ .=@@
#@@@@@@@@@@@@@@@@@ =@# @# ##= ## =####=+ @@ =#####+ =#@@###. @@
@@@@@@@@@@/ +@@/ +@@ #@ =@= #@= @@ =@#+ +#@# @@ =@#+ +#@# #@. @@
@@@@@@@@@@ ##@@ ##@@ =@# @# =@# @# @@ @@ @@ @@ #@ #@ @@
@@@@@@@@@@@@@@@@@@@@ #@=+++#@= =@@# @@ @@ @@ @@ #@ #@ @@
=@#=====@@ =@# @# @@ @@ @@ @@ #@ #@ @@
@@@@@@@@@@@@@@@@ @@@@ #@ #@= #@= +@@ #@# =@# @@. =@# =@# #@. @@
=@# @# #@= #@ =#@@@@#= +#@@= +#@@@@#= .##@@+ @@
@@@@ @@@@@@@@@@@@@@@@
"""


def print_legacy_axolotl_text_art(suffix=None):
    font = "nancyj"
    ascii_text = " axolotl"
    if suffix:
        ascii_text += f" x {suffix}"
    ascii_art = text2art(ascii_text, font=font)

    if is_main_process():
        print(ascii_art)

    print_dep_versions()


def print_axolotl_text_art(
    **kwargs,  # pylint: disable=unused-argument
):
    if is_main_process():
        print(AXOLOTL_LOGO)


def print_dep_versions():
    packages = ["accelerate", "peft", "transformers", "trl", "torch", "bitsandbytes"]
    max_len = max(len(pkg) for pkg in packages)
    if is_main_process():
        print("*" * 40)
        print("**** Axolotl Dependency Versions *****")
        for pkg in packages:
            pkg_version = _is_package_available(pkg, return_version=True)
            print(f"{pkg: >{max_len}}: {pkg_version[1]: <15}")
        print("*" * 40)


def check_remote_config(config: Union[str, Path]):
    # Check if the config is a valid HTTPS URL to a .yml or .yaml file
    if not (isinstance(config, str) and config.startswith("https://")):
        return config  # Return the original value if it's not a valid URL

    filename = os.path.basename(urlparse(config).path)
    temp_dir = tempfile.mkdtemp()

    try:
        response = requests.get(config, timeout=30)
        response.raise_for_status()  # Check for HTTP errors

        content = response.content
        try:
            # Try parsing as JSON first to catch cases where JSON content is mistakenly considered YAML
            json.loads(content)
            # Log a warning but do not raise an error; JSON is technically valid YAML - this can happen when you forget to point to a raw github link
            LOG.warning(
                f"Warning: The content of the file at {config} is JSON, which is technically valid YAML but might not be intended."
            )
        except json.JSONDecodeError:
            # If it's not valid JSON, verify it's valid YAML
            try:
                yaml.safe_load(content)
            except yaml.YAMLError as err:
                raise ValueError(
                    f"Failed to parse the content at {config} as YAML: {err}"
                ) from err

        # Write the content to a file if it's valid YAML (or JSON treated as YAML)
        output_path = Path(temp_dir) / filename
        with open(output_path, "wb") as file:
            file.write(content)
        LOG.info(
            f"Using the following config obtained from {config}: \n\n{content.decode('utf-8')}\n"
        )
        return output_path

    except requests.RequestException as err:
        # This catches all requests-related exceptions including HTTPError
        raise RuntimeError(f"Failed to download {config}: {err}") from err
    except Exception as err:
        # Catch-all for any other exceptions
        raise err


def get_multi_line_input() -> Optional[str]:
    print("Give me an instruction (Ctrl + D to submit): ")
    instruction = ""
    for line in sys.stdin:
        instruction += line  # pylint: disable=consider-using-join
    # instruction = pathlib.Path("/proc/self/fd/0").read_text()
    return instruction


def do_merge_lora(
    *,
    cfg: DictDefault,
    cli_args: TrainerCliArgs,
):
    model, tokenizer = load_model_and_tokenizer(cfg=cfg, cli_args=cli_args)
    safe_serialization = cfg.save_safetensors is True

    LOG.info("running merge of LoRA with base model")
    model = model.merge_and_unload(progressbar=True)
    try:
        model.to(dtype=cfg.torch_dtype)
    except RuntimeError:
        pass
    model.generation_config.do_sample = True

    if cfg.local_rank == 0:
        LOG.info(f"saving merged model to: {str(Path(cfg.output_dir) / 'merged')}")
        model.save_pretrained(
            str(Path(cfg.output_dir) / "merged"),
            safe_serialization=safe_serialization,
            progressbar=True,
        )
        tokenizer.save_pretrained(str(Path(cfg.output_dir) / "merged"))


def do_inference(
    *,
    cfg: DictDefault,
    cli_args: TrainerCliArgs,
):
    model, tokenizer = load_model_and_tokenizer(cfg=cfg, cli_args=cli_args)
    prompter = cli_args.prompter

    prompter_module = None
    chat_template_str = None
    if prompter:
        prompter_module = getattr(
            importlib.import_module("axolotl.prompters"), prompter
        )
    elif cfg.chat_template:
        chat_template_str = get_chat_template(cfg.chat_template)
    elif cfg.datasets[0].type == "chat_template":
        chat_template_str = get_chat_template_from_config(
            cfg=cfg, ds_cfg=cfg.datasets[0], tokenizer=tokenizer
        )

    model = model.to(cfg.device, dtype=cfg.torch_dtype)

    while True:
        print("=" * 80)
        # support for multiline inputs
        instruction = get_multi_line_input()
        if not instruction:
            return

        if prompter_module:
            prompt: str = next(
                prompter_module().build_prompt(instruction=instruction.strip("\n"))
            )
        else:
            prompt = instruction.strip()

        if chat_template_str:
            batch = tokenizer.apply_chat_template(
                [
                    {
                        "role": "user",
                        "content": prompt,
                    }
                ],
                return_tensors="pt",
                add_special_tokens=True,
                add_generation_prompt=True,
                chat_template=chat_template_str,
                tokenize=True,
                return_dict=True,
            )
        else:
            batch = tokenizer(prompt, return_tensors="pt", add_special_tokens=True)

        print("=" * 40)
        model.eval()
        with torch.no_grad():
            generation_config = GenerationConfig(
                repetition_penalty=1.1,
                max_new_tokens=1024,
                temperature=0.9,
                top_p=0.95,
                top_k=40,
                bos_token_id=tokenizer.bos_token_id,
                eos_token_id=tokenizer.eos_token_id,
                pad_token_id=tokenizer.pad_token_id,
                do_sample=True,
                use_cache=True,
                return_dict_in_generate=True,
                output_attentions=False,
                output_hidden_states=False,
                output_scores=False,
            )
            streamer = TextStreamer(tokenizer)
            generated = model.generate(
                inputs=batch["input_ids"].to(cfg.device),
                generation_config=generation_config,
                streamer=streamer,
            )
        print("=" * 40)
        print(tokenizer.decode(generated["sequences"].cpu().tolist()[0]))


def do_inference_gradio(
    *,
    cfg: DictDefault,
    cli_args: TrainerCliArgs,
):
    import gradio as gr

    model, tokenizer = load_model_and_tokenizer(cfg=cfg, cli_args=cli_args)
    prompter = cli_args.prompter

    prompter_module = None
    chat_template_str = None
    if prompter:
        prompter_module = getattr(
            importlib.import_module("axolotl.prompters"), prompter
        )
    elif cfg.chat_template:
        chat_template_str = get_chat_template(cfg.chat_template, tokenizer=tokenizer)

    model = model.to(cfg.device, dtype=cfg.torch_dtype)

    def generate(instruction):
        if not instruction:
            return
        if prompter_module:
            # pylint: disable=stop-iteration-return
            prompt: str = next(
                prompter_module().build_prompt(instruction=instruction.strip("\n"))
            )
        else:
            prompt = instruction.strip()

        if chat_template_str:
            batch = tokenizer.apply_chat_template(
                [
                    {
                        "role": "user",
                        "content": prompt,
                    }
                ],
                return_tensors="pt",
                add_special_tokens=True,
                add_generation_prompt=True,
                chat_template=chat_template_str,
                tokenize=True,
                return_dict=True,
            )
        else:
            batch = tokenizer(prompt, return_tensors="pt", add_special_tokens=True)

        model.eval()
        with torch.no_grad():
            generation_config = GenerationConfig(
                repetition_penalty=1.1,
                max_new_tokens=cfg.get("gradio_max_new_tokens", 1024),
                temperature=cfg.get("gradio_temperature", 0.9),
                top_p=0.95,
                top_k=40,
                bos_token_id=tokenizer.bos_token_id,
                eos_token_id=tokenizer.eos_token_id,
                pad_token_id=tokenizer.pad_token_id,
                do_sample=True,
                use_cache=True,
                return_dict_in_generate=True,
                output_attentions=False,
                output_hidden_states=False,
                output_scores=False,
            )
            streamer = TextIteratorStreamer(tokenizer)
            generation_kwargs = {
                "inputs": batch["input_ids"].to(cfg.device),
                "attention_mask": batch["attention_mask"].to(cfg.device),
                "generation_config": generation_config,
                "streamer": streamer,
            }

            thread = Thread(target=model.generate, kwargs=generation_kwargs)
            thread.start()

            all_text = ""

            for new_text in streamer:
                all_text += new_text
                yield all_text

    demo = gr.Interface(
        fn=generate,
        inputs="textbox",
        outputs="text",
        title=cfg.get("gradio_title", "Axolotl Gradio Interface"),
    )

    demo.queue().launch(
        show_api=False,
        share=cfg.get("gradio_share", True),
        server_name=cfg.get("gradio_server_name", "127.0.0.1"),
        server_port=cfg.get("gradio_server_port", None),
    )


def choose_config(path: Path):
    yaml_files = list(path.glob("*.yml"))

    if not yaml_files:
        raise ValueError(
            "No YAML config files found in the specified directory. Are you using a .yml extension?"
        )

    if len(yaml_files) == 1:
        print(f"Using default YAML file '{yaml_files[0]}'")
        return str(yaml_files[0])

    print("Choose a YAML file:")
    for idx, file in enumerate(yaml_files):
        print(f"{idx + 1}. {file}")

    chosen_file = None
    while chosen_file is None:
        try:
            choice = int(input("Enter the number of your choice: "))
            if 1 <= choice <= len(yaml_files):
                chosen_file = str(yaml_files[choice - 1])
            else:
                print("Invalid choice. Please choose a number from the list.")
        except ValueError:
            print("Invalid input. Please enter a number.")

    return chosen_file


def check_not_in(list1: List[str], list2: Union[Dict[str, Any], List[str]]) -> bool:
    return not any(el in list2 for el in list1)


def load_cfg(config: Union[str, Path] = Path("examples/"), **kwargs):
    config = check_remote_config(config)
    if Path(config).is_dir():
        config = choose_config(Path(config))

    # load the config from the yaml file
    with open(config, encoding="utf-8") as file:
        cfg: DictDefault = DictDefault(yaml.safe_load(file))
    # if there are any options passed in the cli, if it is something that seems valid from the yaml,
    # then overwrite the value
    cfg_keys = cfg.keys()
    for k, _ in kwargs.items():
        # if not strict, allow writing to cfg even if it's not in the yml already
        if k in cfg_keys or not cfg.strict:
            # handle booleans
            if isinstance(cfg[k], bool):
                cfg[k] = bool(kwargs[k])
            else:
                cfg[k] = kwargs[k]

    cfg.axolotl_config_path = config

    try:
        device_props = torch.cuda.get_device_properties("cuda")
        gpu_version = "sm_" + str(device_props.major) + str(device_props.minor)
    except:  # pylint: disable=bare-except # noqa: E722
        gpu_version = None

    prepare_plugins(cfg)

    cfg = validate_config(
        cfg,
        capabilities={
            "bf16": is_torch_bf16_gpu_available(),
            "n_gpu": int(os.environ.get("WORLD_SIZE", 1)),
            "compute_capability": gpu_version,
        },
        env_capabilities={
            "torch_version": str(torch.__version__).split("+", maxsplit=1)[0],
        },
    )

    prepare_optim_env(cfg)

    prepare_opinionated_env(cfg)

    normalize_config(cfg)

    normalize_cfg_datasets(cfg)

    setup_wandb_env_vars(cfg)

    setup_mlflow_env_vars(cfg)

    setup_comet_env_vars(cfg)

    return cfg


def load_datasets(
    *,
    cfg: DictDefault,
    cli_args: TrainerCliArgs,
) -> TrainDatasetMeta:
    tokenizer = load_tokenizer(cfg)
    processor = load_processor(cfg, tokenizer=tokenizer) if cfg.processor_type else None

    train_dataset, eval_dataset, total_num_steps, prompters = prepare_dataset(
        cfg,
        tokenizer,
        processor=processor,
    )

    if (
        cli_args.debug
        or cfg.debug
        or cli_args.debug_text_only
        or int(cli_args.debug_num_examples) > 0
    ):
        LOG.info("check_dataset_labels...")
        check_dataset_labels(
            train_dataset.select(
                [
                    random.randrange(0, len(train_dataset) - 1)  # nosec
                    for _ in range(cli_args.debug_num_examples)
                ]
            ),
            tokenizer,
            num_examples=cli_args.debug_num_examples,
            text_only=cli_args.debug_text_only,
        )

        LOG.info("printing prompters...")
        for prompter in prompters:
            LOG.info(prompter)

    return TrainDatasetMeta(
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        total_num_steps=total_num_steps,
    )


def load_rl_datasets(
    *,
    cfg: DictDefault,
    cli_args: TrainerCliArgs,  # pylint: disable=unused-argument
) -> TrainDatasetMeta:
    train_dataset, eval_dataset = load_prepare_dpo_datasets(cfg)
    total_num_steps = int(
        math.ceil(len(train_dataset) * cfg.num_epochs / cfg.batch_size)
    )

    if cli_args.debug or cfg.debug:
        LOG.info("check_dataset_labels...")

        tokenizer = load_tokenizer(cfg)
        check_dataset_labels(
            train_dataset.select(
                [
                    random.randrange(0, len(train_dataset) - 1)  # nosec
                    for _ in range(cli_args.debug_num_examples)
                ]
            ),
            tokenizer,
            num_examples=cli_args.debug_num_examples,
            text_only=cli_args.debug_text_only,
            rl_mode=True,
        )

    return TrainDatasetMeta(
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        total_num_steps=total_num_steps,
    )


def check_accelerate_default_config():
    if Path(config_args.default_yaml_config_file).exists():
        LOG.warning(
            f"accelerate config file found at {config_args.default_yaml_config_file}. This can lead to unexpected errors"
        )


def check_user_token():
    # Skip check if HF_HUB_OFFLINE is set to True
    if os.getenv("HF_HUB_OFFLINE") == "1":
        LOG.info(
            "Skipping HuggingFace token verification because HF_HUB_OFFLINE is set to True. Only local files will be used."
        )
        return True

    # Verify if token is valid
    api = HfApi()
    try:
        user_info = api.whoami()
        return bool(user_info)
    except LocalTokenNotFoundError:
        LOG.warning(
            "Error verifying HuggingFace token. Remember to log in using `huggingface-cli login` and get your access token from https://huggingface.co/settings/tokens if you want to use gated models or datasets."
        )
        return False
```
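A few of the helpers above lend themselves to short usage sketches. Everything below is illustrative only — the paths and the URL are placeholders, not files that exist:

```python
import os

from axolotl.cli import check_remote_config, check_user_token, load_cfg

# check_remote_config: non-URL inputs pass through unchanged; HTTPS URLs are
# downloaded, validated as YAML (with a warning if the body is actually JSON),
# written to a temp file, and the local path is returned.
assert check_remote_config("examples/config.yml") == "examples/config.yml"
local_path = check_remote_config("https://example.com/raw/config.yaml")

# load_cfg: kwargs override values from the YAML file; booleans are coerced
# through bool(), and unknown keys are accepted unless the config sets `strict`.
cfg = load_cfg("examples/config.yml", micro_batch_size=2, bf16=True)

# check_user_token: short-circuits to True when HF_HUB_OFFLINE=1; otherwise it
# returns True iff HfApi().whoami() succeeds, and logs a warning (returning
# False) when no local token is found.
os.environ["HF_HUB_OFFLINE"] = "1"
assert check_user_token() is True
```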
Removed file — `@@ -1,49 +0,0 @@` (the CLI argument dataclasses, `axolotl.cli.args` per the imports removed elsewhere in this diff):

```python
"""Module for axolotl CLI command arguments."""

from dataclasses import dataclass, field
from typing import Optional


@dataclass
class PreprocessCliArgs:
    """Dataclass with CLI arguments for `axolotl preprocess` command."""

    debug: bool = field(default=False)
    debug_text_only: bool = field(default=False)
    debug_num_examples: int = field(default=1)
    prompter: Optional[str] = field(default=None)
    download: Optional[bool] = field(default=True)
    iterable: Optional[bool] = field(
        default=None,
        metadata={
            "help": "Use IterableDataset for streaming processing of large datasets"
        },
    )


@dataclass
class TrainerCliArgs:
    """Dataclass with CLI arguments for `axolotl train` command."""

    debug: bool = field(default=False)
    debug_text_only: bool = field(default=False)
    debug_num_examples: int = field(default=0)
    merge_lora: bool = field(default=False)
    prompter: Optional[str] = field(default=None)
    shard: bool = field(default=False)


@dataclass
class EvaluateCliArgs:
    """Dataclass with CLI arguments for `axolotl evaluate` command."""

    debug: bool = field(default=False)
    debug_text_only: bool = field(default=False)
    debug_num_examples: int = field(default=0)


@dataclass
class InferenceCliArgs:
    """Dataclass with CLI arguments for `axolotl inference` command."""

    prompter: Optional[str] = field(default=None)
```
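These dataclasses are consumed through `transformers.HfArgumentParser`, as the entry points elsewhere in this diff show. A minimal sketch, using the import path as it exists before this diff removes the module:

```python
import transformers

from axolotl.cli.args import TrainerCliArgs

parser = transformers.HfArgumentParser(TrainerCliArgs)
# returns the parsed dataclass plus any argv strings it did not recognize
cli_args, remaining = parser.parse_args_into_dataclasses(return_remaining_strings=True)
```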
Removed file — `@@ -1,23 +0,0 @@` (ASCII logo utils):

```python
"""Axolotl ASCII logo utils."""

from axolotl.utils.distributed import is_main_process

AXOLOTL_LOGO = """
 #@@ #@@ @@# @@#
@@ @@ @@ @@ =@@# @@ #@ =@@#.
@@ #@@@@@@@@@ @@ #@#@= @@ #@ .=@@
#@@@@@@@@@@@@@@@@@ =@# @# ##= ## =####=+ @@ =#####+ =#@@###. @@
@@@@@@@@@@/ +@@/ +@@ #@ =@= #@= @@ =@#+ +#@# @@ =@#+ +#@# #@. @@
@@@@@@@@@@ ##@@ ##@@ =@# @# =@# @# @@ @@ @@ @@ #@ #@ @@
@@@@@@@@@@@@@@@@@@@@ #@=+++#@= =@@# @@ @@ @@ @@ #@ #@ @@
=@#=====@@ =@# @# @@ @@ @@ @@ #@ #@ @@
@@@@@@@@@@@@@@@@ @@@@ #@ #@= #@= +@@ #@# =@# @@. =@# =@# #@. @@
=@# @# #@= #@ =#@@@@#= +#@@= +#@@@@#= .##@@+ @@
@@@@ @@@@@@@@@@@@@@@@
"""


def print_axolotl_text_art():
    """Prints axolotl ASCII art."""
    if is_main_process():
        print(AXOLOTL_LOGO)
```
Removed file — `@@ -1,50 +0,0 @@` (CLI checks):

```python
"""Various checks for Axolotl CLI."""

import logging
import os
from pathlib import Path

from accelerate.commands.config import config_args
from huggingface_hub import HfApi
from huggingface_hub.utils import LocalTokenNotFoundError

from axolotl.logging_config import configure_logging

configure_logging()
LOG = logging.getLogger(__name__)


def check_accelerate_default_config() -> None:
    """Logs at warning level if no accelerate config file is found."""
    if Path(config_args.default_yaml_config_file).exists():
        LOG.warning(
            f"accelerate config file found at {config_args.default_yaml_config_file}. This can lead to unexpected errors"
        )


def check_user_token() -> bool:
    """Checks for HF user info. Check is skipped if HF_HUB_OFFLINE=1.

    Returns:
        Boolean indicating successful check (i.e., HF_HUB_OFFLINE=1 or HF user info is retrieved).

    Raises:
        LocalTokenNotFoundError: If HF user info can't be retrieved.
    """
    # Skip check if HF_HUB_OFFLINE is set to True
    if os.getenv("HF_HUB_OFFLINE") == "1":
        LOG.info(
            "Skipping HuggingFace token verification because HF_HUB_OFFLINE is set to True. Only local files will be used."
        )
        return True

    # Verify if token is valid
    api = HfApi()
    try:
        user_info = api.whoami()
        return bool(user_info)
    except LocalTokenNotFoundError:
        LOG.warning(
            "Error verifying HuggingFace token. Remember to log in using `huggingface-cli login` and get your access token from https://huggingface.co/settings/tokens if you want to use gated models or datasets."
        )
        return False
```
Removed file — `@@ -1,217 +0,0 @@` (configuration loading and processing):

```python
"""Configuration loading and processing."""

import json
import logging
import os
import tempfile
from pathlib import Path
from typing import Union
from urllib.parse import urlparse

import requests
import torch
import yaml
from transformers.utils import is_torch_bf16_gpu_available

from axolotl.integrations.base import PluginManager
from axolotl.utils.comet_ import setup_comet_env_vars
from axolotl.utils.config import (
    normalize_cfg_datasets,
    normalize_config,
    validate_config,
)
from axolotl.utils.dict import DictDefault
from axolotl.utils.mlflow_ import setup_mlflow_env_vars
from axolotl.utils.trainer import prepare_opinionated_env, prepare_optim_env
from axolotl.utils.wandb_ import setup_wandb_env_vars

LOG = logging.getLogger(__name__)


def check_remote_config(config: Union[str, Path]) -> Union[str, Path]:
    """
    First, determines if the passed config is a valid HTTPS URL. Then, attempts to query
    for it and parse its content, first as JSON, then as YAML (YAML is preferred).
    Finally, the parsed content is written to a local file and its path is returned.

    Args:
        config: HTTPS URL to a YAML or JSON file.

    Returns:
        Either the original `config` if it's not a valid HTTPS URL, or the path to the
        downloaded remote config.

    Raises:
        ValueError: If the remote configuration is neither valid JSON or YAML.
        RuntimeError: If some request-related exception occurs from the file download.
        Exception: Catch-all for any other exception.
    """
    # Check if the config is a valid HTTPS URL to a .yml or .yaml file
    if not (isinstance(config, str) and config.startswith("https://")):
        return config  # Return the original value if it's not a valid URL

    filename = os.path.basename(urlparse(config).path)
    temp_dir = tempfile.mkdtemp()

    try:
        response = requests.get(config, timeout=30)
        response.raise_for_status()  # Check for HTTP errors

        content = response.content
        try:
            # Try parsing as JSON first to catch cases where JSON content is mistakenly
            # considered YAML.
            json.loads(content)

            # Log a warning but do not raise an error; JSON is technically valid YAML.
            # This can happen when you forget to point to a raw GitHub link.
            LOG.warning(
                f"Warning: The content of the file at {config} is JSON, which is technically valid YAML but might not be intended."
            )
        except json.JSONDecodeError:
            # If it's not valid JSON, verify it's valid YAML
            try:
                yaml.safe_load(content)
            except yaml.YAMLError as err:
                raise ValueError(
                    f"Failed to parse the content at {config} as YAML: {err}"
                ) from err

        # Write the content to a file if it's valid YAML (or JSON treated as YAML)
        output_path = Path(temp_dir) / filename
        with open(output_path, "wb") as file:
            file.write(content)
        LOG.info(
            f"Using the following config obtained from {config}: \n\n{content.decode('utf-8')}\n"
        )
        return output_path

    except requests.RequestException as err:
        # This catches all requests-related exceptions including HTTPError
        raise RuntimeError(f"Failed to download {config}: {err}") from err
    except Exception as err:
        # Catch-all for any other exceptions
        raise err


def choose_config(path: Path) -> str:
    """
    Helper method for choosing a `axolotl` config YAML file (considering only files
    ending with `.yml` or `.yaml`). If more than one config file exists in the passed
    `path`, the user is prompted to choose one.

    Args:
        path: Directory in which config file(s) are stored.

    Returns:
        Path to either (1) the sole YAML file, or (2) if more than one YAML files exist,
        the user-selected YAML file.

    Raises:
        ValueError: If no YAML files are found in the given `path`.
    """
    yaml_files = list(path.glob("*.yml")) + list(path.glob("*.yaml"))

    if not yaml_files:
        raise ValueError(
            "No YAML config files found in the specified directory. Are you using a .yml extension?"
        )

    if len(yaml_files) == 1:
        print(f"Using default YAML file '{yaml_files[0]}'")
        return str(yaml_files[0])

    print("Choose a YAML file:")
    for idx, file in enumerate(yaml_files):
        print(f"{idx + 1}. {file}")

    chosen_file = None
    while chosen_file is None:
        try:
            choice = int(input("Enter the number of your choice: "))
            if 1 <= choice <= len(yaml_files):
                chosen_file = str(yaml_files[choice - 1])
            else:
                print("Invalid choice. Please choose a number from the list.")
        except ValueError:
            print("Invalid input. Please enter a number.")

    return chosen_file


def prepare_plugins(cfg: DictDefault):
    """
    Registers the plugins for the given configuration.

    Args:
        cfg: Dictionary mapping `axolotl` config keys to values.
    """
    if cfg.get("plugins"):
        plugin_manager = PluginManager.get_instance()
        for plugin_name in cfg["plugins"]:
            plugin_manager.register(plugin_name)


def load_cfg(config: Union[str, Path] = Path("examples/"), **kwargs) -> DictDefault:
    """
    Loads the `axolotl` configuration stored at `config`, validates it, and performs
    various setup.

    Args:
        config: Path (local or remote) to `axolotl` config YAML file.
        kwargs: Additional keyword arguments to override config file values.

    Returns:
        `DictDefault` mapping configuration keys to values.
    """
    config = check_remote_config(config)
    if Path(config).is_dir():
        config = choose_config(Path(config))

    # Load the config from the yaml file
    with open(config, encoding="utf-8") as file:
        cfg: DictDefault = DictDefault(yaml.safe_load(file))

    # If there are any options passed in the cli, if it is something that seems valid
    # from the yaml, then overwrite the value
    cfg_keys = cfg.keys()
    for k, _ in kwargs.items():
        # if not strict, allow writing to cfg even if it's not in the yml already
        if k in cfg_keys or not cfg.strict:
            # handle booleans
            if isinstance(cfg[k], bool):
                cfg[k] = bool(kwargs[k])
            else:
                cfg[k] = kwargs[k]

    cfg.axolotl_config_path = config

    try:
        device_props = torch.cuda.get_device_properties("cuda")
        gpu_version = "sm_" + str(device_props.major) + str(device_props.minor)
    except:  # pylint: disable=bare-except # noqa: E722
        gpu_version = None

    prepare_plugins(cfg)

    cfg = validate_config(
        cfg,
        capabilities={
            "bf16": is_torch_bf16_gpu_available(),
            "n_gpu": int(os.environ.get("WORLD_SIZE", 1)),
            "compute_capability": gpu_version,
        },
        env_capabilities={
            "torch_version": str(torch.__version__).split("+", maxsplit=1)[0]
        },
    )

    prepare_optim_env(cfg)
    prepare_opinionated_env(cfg)
    normalize_config(cfg)
    normalize_cfg_datasets(cfg)
    setup_wandb_env_vars(cfg)
    setup_mlflow_env_vars(cfg)
    setup_comet_env_vars(cfg)

    return cfg
```
The evaluate entry point (`axolotl.cli.evaluate`, per the incoming logger name) — two hunks:

```diff
@@ -1,5 +1,6 @@
-"""CLI to run evaluation on a model."""
+"""
+CLI to run training on a model
+"""
 import logging
 from pathlib import Path
 from typing import Union
@@ -8,48 +9,35 @@ import fire
 from dotenv import load_dotenv
 from transformers.hf_argparser import HfArgumentParser

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.cli.art import print_axolotl_text_art
-from axolotl.cli.checks import check_accelerate_default_config, check_user_token
-from axolotl.cli.config import load_cfg
-from axolotl.common.datasets import load_datasets, load_preference_datasets
+from axolotl.cli import (
+    check_accelerate_default_config,
+    check_user_token,
+    load_cfg,
+    load_datasets,
+    load_rl_datasets,
+    print_axolotl_text_art,
+)
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.evaluate import evaluate
-from axolotl.utils.dict import DictDefault

-LOG = logging.getLogger(__name__)
+LOG = logging.getLogger("axolotl.cli.evaluate")


-def do_evaluate(cfg: DictDefault, cli_args: TrainerCliArgs) -> None:
-    """
-    Evaluates a `transformers` model by first loading the dataset(s) specified in the
-    `axolotl` config, and then calling `axolotl.evaluate.evaluate`, which computes
-    evaluation metrics on the given dataset(s) and writes them to disk.
-
-    Args:
-        cfg: Dictionary mapping `axolotl` config keys to values.
-        cli_args: CLI arguments.
-    """
+def do_evaluate(cfg, cli_args) -> None:
     # pylint: disable=duplicate-code
     print_axolotl_text_art()
     check_accelerate_default_config()
     check_user_token()

-    if cfg.rl:
-        dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
+    if cfg.rl:  # and cfg.rl != "orpo":
+        dataset_meta = load_rl_datasets(cfg=cfg, cli_args=cli_args)
     else:
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-    evaluate(cfg=cfg, dataset_meta=dataset_meta)
+    evaluate(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)


 def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs) -> None:
-    """
-    Parses `axolotl` config, CLI args, and calls `do_evaluate`.
-
-    Args:
-        config: Path to `axolotl` config YAML file.
-        kwargs: Additional keyword arguments to override config file values.
-    """
     # pylint: disable=duplicate-code
     parsed_cfg = load_cfg(config, **kwargs)
     parser = HfArgumentParser(TrainerCliArgs)
```
The inference entry point — `@@ -1,267 +1,32 @@`. The standalone implementation is cut back to a thin wrapper around `axolotl.cli`:

```diff
@@ -1,267 +1,32 @@
-"""CLI to run inference on a trained model."""
-
-import importlib
-import logging
-import sys
+"""
+CLI to run inference on a trained model
+"""
 from pathlib import Path
-from threading import Thread
 from typing import Union

 import fire
-import torch
 import transformers
 from dotenv import load_dotenv
-from transformers import GenerationConfig, TextIteratorStreamer, TextStreamer

-from axolotl.cli.args import InferenceCliArgs
-from axolotl.cli.art import print_axolotl_text_art
-from axolotl.cli.config import load_cfg
-from axolotl.cli.utils import load_model_and_tokenizer
-from axolotl.utils.chat_templates import (
-    get_chat_template,
-    get_chat_template_from_config,
-)
-from axolotl.utils.dict import DictDefault
+from axolotl.cli import (
+    do_inference,
+    do_inference_gradio,
+    load_cfg,
+    print_axolotl_text_art,
+)
+from axolotl.common.cli import TrainerCliArgs

-LOG = logging.getLogger(__name__)
-
-
-def get_multi_line_input() -> str:
-    """
-    Gets multi-line input from terminal.
-
-    Returns:
-        Possibly multi-line, possibly empty stdin input as a string.
-    """
-    print("Give me an instruction (Ctrl + D to submit): ")
-
-    instruction = ""
-    for line in sys.stdin:
-        instruction += line  # pylint: disable=consider-using-join
-
-    return instruction
-
-
-def do_inference(
-    *,
-    cfg: DictDefault,
-    cli_args: InferenceCliArgs,
-):
-    """
-    Runs inference on the command line in a loop. User input is accepted, a chat template
-    is (optionally) applied, and the model specified in the `axolotl` config is used to
-    generate completions according to a default generation config.
-
-    Args:
-        cfg: Dictionary mapping `axolotl` config keys to values.
-        cli_args: Inference-specific CLI arguments.
-    """
-    model, tokenizer = load_model_and_tokenizer(cfg=cfg, inference=True)
-    prompter = cli_args.prompter
```

The rest of the removed `do_inference` body matches, line for line, the version shown in the `__init__.py` hunk above. The hunk continues:

```diff
-def do_inference_gradio(
-    *,
-    cfg: DictDefault,
-    cli_args: InferenceCliArgs,
-):
-    """
-    Runs inference in a Gradio interface. User input is accepted, a chat template is
-    (optionally) applied, and the model specified in the `axolotl` config is used to
-    generate completions according to a default generation config.
-
-    Args:
-        cfg: Dictionary mapping `axolotl` config keys to values.
-        cli_args: Inference-specific CLI arguments.
-    """
-    import gradio as gr
-
-    model, tokenizer = load_model_and_tokenizer(cfg=cfg, inference=True)
-    prompter = cli_args.prompter
```

Again, the remainder of the removed body matches the `__init__.py` version above, apart from the `load_model_and_tokenizer(cfg=cfg, inference=True)` call shown here. The hunk closes with the rewritten `do_cli`:

```diff
-def do_cli(
-    config: Union[Path, str] = Path("examples/"), gradio: bool = False, **kwargs
-) -> None:
-    """
-    Parses axolotl config, CLI args, and calls `do_inference` or `do_inference_gradio`.
-
-    Args:
-        config: Path to `axolotl` config YAML file.
-        kwargs: Additional keyword arguments to override config file values.
-    """
+
+def do_cli(config: Union[Path, str] = Path("examples/"), gradio=False, **kwargs):
     # pylint: disable=duplicate-code
     print_axolotl_text_art()
     parsed_cfg = load_cfg(config, inference=True, **kwargs)
     parsed_cfg.sample_packing = False
-    parser = transformers.HfArgumentParser(InferenceCliArgs)
+    parser = transformers.HfArgumentParser((TrainerCliArgs))
     parsed_cli_args, _ = parser.parse_args_into_dataclasses(
         return_remaining_strings=True
     )
+    parsed_cli_args.inference = True

     if gradio:
         do_inference_gradio(cfg=parsed_cfg, cli_args=parsed_cli_args)
```
@@ -1,20 +1,18 @@
|
|||||||
"""Click CLI definitions for various axolotl commands."""
|
"""CLI definition for various axolotl commands."""
|
||||||
# pylint: disable=redefined-outer-name
|
# pylint: disable=redefined-outer-name
|
||||||
|
|
||||||
import subprocess # nosec B404
|
import subprocess # nosec B404
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
|
|
||||||
import click
|
import click
|
||||||
|
|
||||||
import axolotl
|
import axolotl
|
||||||
from axolotl.cli.args import EvaluateCliArgs, PreprocessCliArgs, TrainerCliArgs
|
|
||||||
from axolotl.cli.utils import (
|
from axolotl.cli.utils import (
|
||||||
add_options_from_config,
|
add_options_from_config,
|
||||||
add_options_from_dataclass,
|
add_options_from_dataclass,
|
||||||
build_command,
|
build_command,
|
||||||
fetch_from_github,
|
fetch_from_github,
|
||||||
filter_none_kwargs,
|
|
||||||
)
|
)
|
||||||
|
from axolotl.common.cli import EvaluateCliArgs, PreprocessCliArgs, TrainerCliArgs
|
||||||
from axolotl.utils import set_pytorch_cuda_alloc_conf
|
from axolotl.utils import set_pytorch_cuda_alloc_conf
|
||||||
from axolotl.utils.config.models.input.v0_4_1 import AxolotlInputConfig
|
from axolotl.utils.config.models.input.v0_4_1 import AxolotlInputConfig
|
||||||
|
|
||||||
@@ -27,23 +25,20 @@ def cli():

 @cli.command()
 @click.argument("config", type=click.Path(exists=True, path_type=str))
+@click.option(
+    "--iterable/--no-iterable",
+    default=False,
+    help="Use IterableDataset for streaming processing of large datasets",
+)
 @add_options_from_dataclass(PreprocessCliArgs)
 @add_options_from_config(AxolotlInputConfig)
-@filter_none_kwargs
-def preprocess(config: str, **kwargs) -> None:
-    """
-    Preprocess datasets before training.
-
-    Args:
-        config: Path to `axolotl` config YAML file.
-        kwargs: Additional keyword arguments which correspond to CLI args or `axolotl`
-            config options.
-    """
+def preprocess(config: str, iterable: bool, **kwargs):
+    """Preprocess datasets before training."""
     kwargs = {k: v for k, v in kwargs.items() if v is not None}

     from axolotl.cli.preprocess import do_cli

-    do_cli(config=config, **kwargs)
+    do_cli(config=config, iterable=iterable, **kwargs)


 @cli.command()
@@ -55,17 +50,10 @@ def preprocess(config: str, **kwargs) -> None:
 )
 @add_options_from_dataclass(TrainerCliArgs)
 @add_options_from_config(AxolotlInputConfig)
-@filter_none_kwargs
-def train(config: str, accelerate: bool, **kwargs) -> None:
-    """
-    Train or fine-tune a model.
-
-    Args:
-        config: Path to `axolotl` config YAML file.
-        accelerate: Whether to use `accelerate` launcher.
-        kwargs: Additional keyword arguments which correspond to CLI args or `axolotl`
-            config options.
-    """
+def train(config: str, accelerate: bool, **kwargs):
+    """Train or fine-tune a model."""
+    kwargs = {k: v for k, v in kwargs.items() if v is not None}
+
     # Enable expandable segments for cuda allocation to improve VRAM usage
     set_pytorch_cuda_alloc_conf()

@@ -90,17 +78,10 @@ def train(config: str, accelerate: bool, **kwargs) -> None:
 )
 @add_options_from_dataclass(EvaluateCliArgs)
 @add_options_from_config(AxolotlInputConfig)
-@filter_none_kwargs
-def evaluate(config: str, accelerate: bool, **kwargs) -> None:
-    """
-    Evaluate a model.
-
-    Args:
-        config: Path to `axolotl` config YAML file.
-        accelerate: Whether to use `accelerate` launcher.
-        kwargs: Additional keyword arguments which correspond to CLI args or `axolotl`
-            config options.
-    """
+def evaluate(config: str, accelerate: bool, **kwargs):
+    """Evaluate a model."""
+    kwargs = {k: v for k, v in kwargs.items() if v is not None}
+
     if accelerate:
         base_cmd = ["accelerate", "launch", "-m", "axolotl.cli.evaluate"]
         if config:
@@ -120,33 +101,81 @@ def evaluate(config: str, accelerate: bool, **kwargs) -> None:
     default=False,
     help="Use accelerate launch for multi-GPU inference",
 )
+@click.option(
+    "--lora-model-dir",
+    type=click.Path(exists=True, path_type=str),
+    help="Directory containing LoRA model",
+)
+@click.option(
+    "--base-model",
+    type=click.Path(exists=True, path_type=str),
+    help="Path to base model for non-LoRA models",
+)
 @click.option("--gradio", is_flag=True, help="Launch Gradio interface")
+@click.option("--load-in-8bit", is_flag=True, help="Load model in 8-bit mode")
 @add_options_from_dataclass(TrainerCliArgs)
 @add_options_from_config(AxolotlInputConfig)
-@filter_none_kwargs
-def inference(config: str, accelerate: bool, gradio: bool, **kwargs) -> None:
-    """
-    Run inference with a trained model.
-
-    Args:
-        config: Path to `axolotl` config YAML file.
-        accelerate: Whether to use `accelerate` launcher.
-        gradio: Whether to use Gradio browser interface or command line for inference.
-        kwargs: Additional keyword arguments which correspond to CLI args or `axolotl`
-            config options.
-    """
+def inference(
+    config: str,
+    accelerate: bool,
+    lora_model_dir: Optional[str] = None,
+    base_model: Optional[str] = None,
+    **kwargs,
+):
+    """Run inference with a trained model."""
+    kwargs = {k: v for k, v in kwargs.items() if v is not None}
+    del kwargs["inference"]  # interferes with inference.do_cli
+
+    if lora_model_dir:
+        kwargs["lora_model_dir"] = lora_model_dir
+    if base_model:
+        kwargs["base_model"] = base_model
+
     if accelerate:
         base_cmd = ["accelerate", "launch", "-m", "axolotl.cli.inference"]
         if config:
             base_cmd.append(config)
-        if gradio:
-            base_cmd.append("--gradio")
         cmd = build_command(base_cmd, kwargs)
         subprocess.run(cmd, check=True)  # nosec B603
     else:
         from axolotl.cli.inference import do_cli

-        do_cli(config=config, gradio=gradio, **kwargs)
+        do_cli(config=config, **kwargs)
+
+
+@cli.command()
+@click.argument("config", type=click.Path(exists=True, path_type=str))
+@click.option(
+    "--accelerate/--no-accelerate",
+    default=False,
+    help="Use accelerate launch for multi-GPU operations",
+)
+@click.option(
+    "--model-dir",
+    type=click.Path(exists=True, path_type=str),
+    help="Directory containing model weights to shard",
+)
+@click.option(
+    "--save-dir",
+    type=click.Path(path_type=str),
+    help="Directory to save sharded weights",
+)
+@add_options_from_dataclass(TrainerCliArgs)
+@add_options_from_config(AxolotlInputConfig)
+def shard(config: str, accelerate: bool, **kwargs):
+    """Shard model weights."""
+    kwargs = {k: v for k, v in kwargs.items() if v is not None}
+
+    if accelerate:
+        base_cmd = ["accelerate", "launch", "-m", "axolotl.cli.shard"]
+        if config:
+            base_cmd.append(config)
+        cmd = build_command(base_cmd, kwargs)
+        subprocess.run(cmd, check=True)  # nosec B603
+    else:
+        from axolotl.cli.shard import do_cli
+
+        do_cli(config=config, **kwargs)


 @cli.command()
@@ -156,19 +185,20 @@ def inference(config: str, accelerate: bool, gradio: bool, **kwargs) -> None:
     default=True,
     help="Use accelerate launch for weight merging",
 )
+@click.option(
+    "--model-dir",
+    type=click.Path(exists=True, path_type=str),
+    help="Directory containing sharded weights",
+)
+@click.option(
+    "--save-path", type=click.Path(path_type=str), help="Path to save merged weights"
+)
 @add_options_from_dataclass(TrainerCliArgs)
 @add_options_from_config(AxolotlInputConfig)
-@filter_none_kwargs
-def merge_sharded_fsdp_weights(config: str, accelerate: bool, **kwargs) -> None:
-    """
-    Merge sharded FSDP model weights.
-
-    Args:
-        config: Path to `axolotl` config YAML file.
-        accelerate: Whether to use `accelerate` launcher.
-        kwargs: Additional keyword arguments which correspond to CLI args or `axolotl`
-            config options.
-    """
+def merge_sharded_fsdp_weights(config: str, accelerate: bool, **kwargs):
+    """Merge sharded FSDP model weights."""
+    kwargs = {k: v for k, v in kwargs.items() if v is not None}
+
     if accelerate:
         base_cmd = [
             "accelerate",
@@ -188,19 +218,28 @@ def merge_sharded_fsdp_weights(config: str, accelerate: bool, **kwargs) -> None:

 @cli.command()
 @click.argument("config", type=click.Path(exists=True, path_type=str))
-@add_options_from_dataclass(TrainerCliArgs)
-@add_options_from_config(AxolotlInputConfig)
-@filter_none_kwargs
-def merge_lora(config: str, **kwargs) -> None:
-    """
-    Merge trained LoRA adapters into a base model.
-
-    Args:
-        config: Path to `axolotl` config YAML file.
-        accelerate: Whether to use `accelerate` launcher.
-        kwargs: Additional keyword arguments which correspond to CLI args or `axolotl`
-            config options.
-    """
+@click.option(
+    "--lora-model-dir",
+    type=click.Path(exists=True, path_type=str),
+    help="Directory containing the LoRA model to merge",
+)
+@click.option(
+    "--output-dir",
+    type=click.Path(path_type=str),
+    help="Directory to save the merged model",
+)
+def merge_lora(
+    config: str,
+    lora_model_dir: Optional[str] = None,
+    output_dir: Optional[str] = None,
+):
+    """Merge a trained LoRA into a base model"""
+    kwargs = {}
+    if lora_model_dir:
+        kwargs["lora_model_dir"] = lora_model_dir
+    if output_dir:
+        kwargs["output_dir"] = output_dir
+
     from axolotl.cli.merge_lora import do_cli

     do_cli(config=config, **kwargs)
@@ -209,17 +248,13 @@ def merge_lora(config: str, **kwargs) -> None:
 @cli.command()
 @click.argument("directory", type=click.Choice(["examples", "deepspeed_configs"]))
 @click.option("--dest", help="Destination directory")
-def fetch(directory: str, dest: Optional[str]) -> None:
+def fetch(directory: str, dest: Optional[str]):
     """
     Fetch example configs or other resources.

     Available directories:
     - examples: Example configuration files
     - deepspeed_configs: DeepSpeed configuration files
-
-    Args:
-        directory: One of `examples`, `deepspeed_configs`.
-        dest: Optional destination directory.
     """
     fetch_from_github(f"{directory}/", dest)

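As a quick sanity check of the command group assembled in this file, click's test runner can invoke it in-process. Note the module path below is an assumption, since the diff header does not name the file:

from click.testing import CliRunner

from axolotl.cli.main import cli  # module path assumed, not shown in the diff

# Print the top-level help, which lists the subcommands defined above
result = CliRunner().invoke(cli, ["--help"])
print(result.output)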
@@ -1,6 +1,6 @@
-"""CLI to merge a trained LoRA into a base model."""
-
-import logging
+"""
+CLI to run merge a trained LoRA into a base model
+"""
 from pathlib import Path
 from typing import Union

@@ -8,58 +8,14 @@ import fire
 import transformers
 from dotenv import load_dotenv

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.cli.art import print_axolotl_text_art
-from axolotl.cli.config import load_cfg
-from axolotl.cli.utils import load_model_and_tokenizer
-from axolotl.utils.dict import DictDefault
-
-LOG = logging.getLogger(__name__)
-
-
-def do_merge_lora(*, cfg: DictDefault) -> None:
-    """
-    Calls `transformers`' `merge_and_unload` on the model given in the `axolotl` config
-    along with the LoRA adapters to combine them into a single base model.
-
-    Args:
-        cfg: Dictionary mapping `axolotl` config keys to values.
-    """
-    print_axolotl_text_art()
-
-    model, tokenizer = load_model_and_tokenizer(cfg=cfg)
-    safe_serialization = cfg.save_safetensors is True
-
-    LOG.info("Running merge of LoRA with base model...")
-    model = model.merge_and_unload(progressbar=True)
-    model.to(dtype=cfg.torch_dtype)
-    model.generation_config.do_sample = True
-
-    if cfg.local_rank == 0:
-        LOG.info(f"Saving merged model to: {str(Path(cfg.output_dir) / 'merged')}...")
-        model.save_pretrained(
-            str(Path(cfg.output_dir) / "merged"),
-            safe_serialization=safe_serialization,
-            progressbar=True,
-        )
-        tokenizer.save_pretrained(str(Path(cfg.output_dir) / "merged"))
-
-
-def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs) -> None:
-    """
-    Parses `axolotl` config, CLI args, and calls `do_merge_lora`. Note that various
-    config values will be overwritten to allow the LoRA merge logic to work as expected
-    (`load_in_8bit=False`, `load_in4bit=False`, `flash_attention=False`, etc.).
-
-    Args:
-        config: Path to `axolotl` config YAML file.
-        kwargs: Additional keyword arguments to override config file values.
-
-    Raises:
-        ValueError: If target directory for LoRA merged model does not exist.
-    """
+from axolotl.cli import do_merge_lora, load_cfg, print_axolotl_text_art
+from axolotl.common.cli import TrainerCliArgs
+
+
+def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs):
     # pylint: disable=duplicate-code
-    parser = transformers.HfArgumentParser(TrainerCliArgs)
+    print_axolotl_text_art()
+    parser = transformers.HfArgumentParser((TrainerCliArgs))
     parsed_cli_args, _ = parser.parse_args_into_dataclasses(
         return_remaining_strings=True
     )
@@ -90,7 +46,7 @@ def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs) -> None:
     parsed_cfg.fsdp = None
     parsed_cfg.fsdp_config = None

-    do_merge_lora(cfg=parsed_cfg)
+    do_merge_lora(cfg=parsed_cfg, cli_args=parsed_cli_args)


 if __name__ == "__main__":
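For reference, the merge that `do_merge_lora` wraps boils down to `peft`'s `merge_and_unload`; a minimal standalone sketch of the same operation, with model and adapter paths as placeholders:

from pathlib import Path

from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("some-base-model")   # placeholder name
model = PeftModel.from_pretrained(base, "path/to/lora-adapter")  # placeholder path
model = model.merge_and_unload(progressbar=True)  # fold LoRA deltas into the base weights

out_dir = Path("path/to/output") / "merged"
model.save_pretrained(out_dir, safe_serialization=True)
AutoTokenizer.from_pretrained("some-base-model").save_pretrained(out_dir)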
@@ -1,5 +1,6 @@
-"""CLI to merge sharded FSDP model checkpoints into a single combined checkpoint."""
+"""
+This module provides a CLI to merge sharded FSDP model checkpoints into a single combined checkpoint
+"""
 import json
 import logging
 import os
@@ -24,15 +25,16 @@ from huggingface_hub import split_torch_state_dict_into_shards
 from safetensors.torch import save_file as safe_save_file
 from torch.distributed.checkpoint.format_utils import _EmptyStateDictLoadPlanner

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.cli.art import print_axolotl_text_art
-from axolotl.cli.config import load_cfg
+from axolotl.cli import load_cfg, print_axolotl_text_art
+from axolotl.common.cli import TrainerCliArgs

-LOG = logging.getLogger(__name__)
+LOG = logging.getLogger("axolotl.cli.merge_sharded_fsdp_weights")


 class BFloat16CastPlanner(_EmptyStateDictLoadPlanner):
-    """A custom planner to cast tensors to bfloat16 on the fly during loading."""
+    """
+    A custom planner to cast tensors to bfloat16 on the fly during loading.
+    """

     def commit_tensor(self, read_item, tensor):  # pylint: disable=unused-argument
         tensor.copy_(tensor.to(torch.bfloat16))
@@ -43,19 +45,11 @@ def _distributed_checkpoint_to_merged_weights(
     save_path: str,
     safe_serialization: bool = False,
     max_shard_size: str = "5GB",
-) -> Path:
+):
     """
-    Passthrough to `torch.distributed.checkpoint.format_utils.dcp_to_torch_save`. Will
-    save under `save_path` as either `model.safetensors` or `pytorch_model.bin`.
-
-    Args:
-        checkpoint_dir: Directory where distributed checkpoint is saved.
-        save_path: Path to save model to.
-        safe_serialization: Whether to save in safetensors format.
-        max_shard_size: Max size of model shards to save.
-
-    Returns:
-        Path where model is saved.
+    Passthrough to `torch.distributed.checkpoint.format_utils.dcp_to_torch_save`
+
+    Will save under `save_path` as either `model.safetensors` or `pytorch_model.bin`.
     """

     state_dict: Dict = {}
@@ -85,7 +79,6 @@ def _distributed_checkpoint_to_merged_weights(
     state_dict_split = split_torch_state_dict_into_shards(
         state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size
     )
-
     # Save index if sharded
     index = None
     if state_dict_split.is_sharded:
@@ -142,9 +135,6 @@ def merge_fsdp_weights(
             Whether to save the merged weights with safetensors (recommended).
         remove_checkpoint_dir (`bool`, *optional*, defaults to `False`):
             Whether to remove the checkpoint directory after merging.
-
-    Raises:
-        ValueError: If torch version < 2.3.0, or if `checkpoint_dir` does not exist.
     """
     checkpoint_dir_ = Path(checkpoint_dir)
     from accelerate.state import PartialState
@@ -188,21 +178,18 @@ def merge_fsdp_weights(


 def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs):
-    """
-    Parses `axolotl` config, CLI args, and calls `merge_fsdp_weights`.
-
-    Args:
-        config: Path to `axolotl` config YAML file.
-        kwargs: Additional keyword arguments to override config file values.
-    """
     # pylint: disable=duplicate-code
     print_axolotl_text_art()
-    parser = transformers.HfArgumentParser(TrainerCliArgs)
+    parser = transformers.HfArgumentParser((TrainerCliArgs))
     parsed_cli_args, _ = parser.parse_args_into_dataclasses(
         return_remaining_strings=True
     )
     parsed_cli_args.merge_lora = True
-    parsed_cfg = load_cfg(config, **kwargs)
+
+    parsed_cfg = load_cfg(
+        config,
+        **kwargs,
+    )

     fsdp_dir = Path(parsed_cfg.output_dir) / "pytorch_model_fsdp_0"
     merge_fsdp_weights(
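The sharded save in `_distributed_checkpoint_to_merged_weights` leans on `huggingface_hub`'s shard splitter; a minimal sketch of that step in isolation (tensor names illustrative):

import torch
from huggingface_hub import split_torch_state_dict_into_shards
from safetensors.torch import save_file as safe_save_file

state_dict = {"layer1.weight": torch.zeros(8, 8), "layer2.weight": torch.zeros(8, 8)}
split = split_torch_state_dict_into_shards(
    state_dict, filename_pattern="model{suffix}.safetensors", max_shard_size="5GB"
)
for filename, tensor_names in split.filename_to_tensors.items():
    shard = {name: state_dict[name] for name in tensor_names}
    safe_save_file(shard, filename, metadata={"format": "pt"})
# split.is_sharded tells you whether a weight-index JSON also needs to be written.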
@@ -1,9 +1,10 @@
-"""CLI to run preprocessing of a dataset."""
+"""
+CLI to run training on a model
+"""
 import logging
 import warnings
 from pathlib import Path
-from typing import Union
+from typing import Optional, Union

 import fire
 import transformers
@@ -12,31 +13,40 @@ from colorama import Fore
 from dotenv import load_dotenv
 from transformers import AutoModelForCausalLM

-from axolotl.cli.args import PreprocessCliArgs
-from axolotl.cli.art import print_axolotl_text_art
-from axolotl.cli.checks import check_accelerate_default_config, check_user_token
-from axolotl.cli.config import load_cfg
+from axolotl.cli import (
+    check_accelerate_default_config,
+    check_user_token,
+    load_cfg,
+    load_datasets,
+    load_rl_datasets,
+    print_axolotl_text_art,
+)
+from axolotl.common.cli import PreprocessCliArgs
 from axolotl.common.const import DEFAULT_DATASET_PREPARED_PATH
-from axolotl.common.datasets import load_datasets, load_preference_datasets
-from axolotl.utils.dict import DictDefault
 from axolotl.utils.trainer import disable_datasets_caching

-LOG = logging.getLogger(__name__)
+LOG = logging.getLogger("axolotl.cli.preprocess")


-def do_preprocess(cfg: DictDefault, cli_args: PreprocessCliArgs) -> None:
-    """
-    Preprocesses dataset specified in axolotl config.
-
-    Args:
-        cfg: Dictionary mapping `axolotl` config keys to values.
-        cli_args: Preprocessing-specific CLI arguments.
-    """
+def do_cli(
+    config: Union[Path, str] = Path("examples/"),
+    iterable: Optional[bool] = False,
+    **kwargs,
+):
+    # pylint: disable=duplicate-code
     print_axolotl_text_art()
+    parsed_cfg = load_cfg(config, **kwargs)
+    parsed_cfg.is_preprocess = True
+    if iterable:
+        parsed_cfg.preprocess_iterable = iterable
     check_accelerate_default_config()
     check_user_token()
+    parser = transformers.HfArgumentParser((PreprocessCliArgs))
+    parsed_cli_args, _ = parser.parse_args_into_dataclasses(
+        return_remaining_strings=True
+    )

-    if not cfg.dataset_prepared_path:
+    if not parsed_cfg.dataset_prepared_path:
         msg = (
             Fore.RED
             + "preprocess CLI called without dataset_prepared_path set, "
@@ -44,16 +54,16 @@ def do_preprocess(cfg: DictDefault, cli_args: PreprocessCliArgs) -> None:
             + Fore.RESET
         )
         LOG.warning(msg)
-        cfg.dataset_prepared_path = DEFAULT_DATASET_PREPARED_PATH
+        parsed_cfg.dataset_prepared_path = DEFAULT_DATASET_PREPARED_PATH

     with disable_datasets_caching():
-        if cfg.rl:
-            load_preference_datasets(cfg=cfg, cli_args=cli_args)
+        if parsed_cfg.rl:  # and parsed_cfg.rl != "orpo":
+            load_rl_datasets(cfg=parsed_cfg, cli_args=parsed_cli_args)
         else:
-            load_datasets(cfg=cfg, cli_args=cli_args)
+            load_datasets(cfg=parsed_cfg, cli_args=parsed_cli_args)

-    if cli_args.download:
-        model_name = cfg.base_model
+    if parsed_cli_args.download:
+        model_name = parsed_cfg.base_model
         with warnings.catch_warnings():
             # there are a bunch of useless UserWarnings about
             # "copying from a non-meta parameter in the checkpoint to a meta parameter in the current model"
@@ -70,33 +80,11 @@ def do_preprocess(cfg: DictDefault, cli_args: PreprocessCliArgs) -> None:

     LOG.info(
         Fore.GREEN
-        + f"Success! Preprocessed data path: `dataset_prepared_path: {cfg.dataset_prepared_path}`"
+        + f"Success! Preprocessed data path: `dataset_prepared_path: {parsed_cfg.dataset_prepared_path}`"
         + Fore.RESET
     )


-def do_cli(
-    config: Union[Path, str] = Path("examples/"),
-    **kwargs,
-) -> None:
-    """
-    Parses `axolotl` config, CLI args, and calls `do_preprocess`.
-
-    Args:
-        config: Path to `axolotl` config YAML file.
-        kwargs: Additional keyword arguments to override config file values.
-    """
-    # pylint: disable=duplicate-code
-    parsed_cfg = load_cfg(config, **kwargs)
-    parsed_cfg.is_preprocess = True
-    parser = transformers.HfArgumentParser(PreprocessCliArgs)
-    parsed_cli_args, _ = parser.parse_args_into_dataclasses(
-        return_remaining_strings=True
-    )
-
-    do_preprocess(parsed_cfg, parsed_cli_args)
-
-
 if __name__ == "__main__":
     load_dotenv()
     fire.Fire(do_cli)
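Usage of the `iterable` flag wired through `do_cli` above, as a sketch (the config path is a placeholder):

from axolotl.cli.preprocess import do_cli

# iterable=True routes dataset processing through an IterableDataset for streaming
do_cli(config="path/to/config.yml", iterable=True)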
src/axolotl/cli/shard.py (new file, 45 lines)
@@ -0,0 +1,45 @@
+"""
+CLI to shard a trained model into 10GiB chunks
+"""
+import logging
+from pathlib import Path
+from typing import Union
+
+import fire
+import transformers
+from dotenv import load_dotenv
+
+from axolotl.cli import load_cfg, print_axolotl_text_art
+from axolotl.common.cli import TrainerCliArgs, load_model_and_tokenizer
+from axolotl.utils.dict import DictDefault
+
+LOG = logging.getLogger("axolotl.scripts")
+
+
+def shard(
+    *,
+    cfg: DictDefault,
+    cli_args: TrainerCliArgs,
+):
+    model, _ = load_model_and_tokenizer(cfg=cfg, cli_args=cli_args)
+    safe_serialization = cfg.save_safetensors is True
+    LOG.debug("Re-saving model w/ sharding")
+    model.save_pretrained(cfg.output_dir, safe_serialization=safe_serialization)
+
+
+def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs):
+    # pylint: disable=duplicate-code
+    print_axolotl_text_art()
+    parsed_cfg = load_cfg(config, **kwargs)
+    parser = transformers.HfArgumentParser((TrainerCliArgs))
+    parsed_cli_args, _ = parser.parse_args_into_dataclasses(
+        return_remaining_strings=True
+    )
+    parsed_cli_args.shard = True
+
+    shard(cfg=parsed_cfg, cli_args=parsed_cli_args)
+
+
+if __name__ == "__main__":
+    load_dotenv()
+    fire.Fire(do_cli)
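Note that `shard()` above calls `save_pretrained` with defaults; in `transformers`, shard size is controlled by the `max_shard_size` argument, so the 10GiB chunking the docstring mentions would look like this sketch (paths are placeholders):

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("path/to/trained-model")
model.save_pretrained(
    "path/to/sharded-out",
    safe_serialization=True,
    max_shard_size="10GB",  # the 10GiB chunking the docstring refers to
)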
@@ -1,5 +1,6 @@
-"""CLI to run training on a model."""
+"""
+CLI to run training on a model
+"""
 import logging
 from pathlib import Path
 from typing import Union
@@ -8,38 +9,42 @@ import fire
 from dotenv import load_dotenv
 from transformers.hf_argparser import HfArgumentParser

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.cli.art import print_axolotl_text_art
-from axolotl.cli.checks import check_accelerate_default_config, check_user_token
-from axolotl.cli.config import load_cfg
-from axolotl.common.datasets import load_datasets, load_preference_datasets
+from axolotl.cli import (
+    check_accelerate_default_config,
+    check_user_token,
+    load_cfg,
+    load_datasets,
+    load_rl_datasets,
+    print_axolotl_text_art,
+)
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.integrations.base import PluginManager
 from axolotl.train import train
-from axolotl.utils.dict import DictDefault

-LOG = logging.getLogger(__name__)
+LOG = logging.getLogger("axolotl.cli.train")


-def do_train(cfg: DictDefault, cli_args: TrainerCliArgs) -> None:
-    """
-    Trains a `transformers` model by first loading the dataset(s) specified in the
-    `axolotl` config, and then calling `axolotl.train.train`. Also runs the plugin
-    manager's `post_train_unload` once training completes.
-
-    Args:
-        cfg: Dictionary mapping `axolotl` config keys to values.
-        cli_args: Training-specific CLI arguments.
-    """
+def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs):
+    # pylint: disable=duplicate-code
+    parsed_cfg = load_cfg(config, **kwargs)
+    parser = HfArgumentParser((TrainerCliArgs))
+    parsed_cli_args, _ = parser.parse_args_into_dataclasses(
+        return_remaining_strings=True
+    )
+    return do_train(parsed_cfg, parsed_cli_args)
+
+
+def do_train(cfg, cli_args) -> None:
     print_axolotl_text_art()
     check_accelerate_default_config()
     check_user_token()

-    if cfg.rl:
-        dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
+    if cfg.rl:  # and cfg.rl != "orpo":
+        dataset_meta = load_rl_datasets(cfg=cfg, cli_args=cli_args)
     else:
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-    model, tokenizer = train(cfg=cfg, dataset_meta=dataset_meta)
+    model, tokenizer = train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
     plugin_manager = PluginManager.get_instance()

     del model
@@ -48,24 +53,6 @@ def do_train(cfg: DictDefault, cli_args: TrainerCliArgs) -> None:
     plugin_manager.post_train_unload(cfg)


-def do_cli(config: Union[Path, str] = Path("examples/"), **kwargs) -> None:
-    """
-    Parses `axolotl` config, CLI args, and calls `do_train`.
-
-    Args:
-        config: Path to `axolotl` config YAML file.
-        kwargs: Additional keyword arguments to override config file values.
-    """
-    # pylint: disable=duplicate-code
-    parsed_cfg = load_cfg(config, **kwargs)
-    parser = HfArgumentParser(TrainerCliArgs)
-    parsed_cli_args, _ = parser.parse_args_into_dataclasses(
-        return_remaining_strings=True
-    )
-
-    do_train(parsed_cfg, parsed_cli_args)
-
-
 if __name__ == "__main__":
     load_dotenv()
     fire.Fire(do_cli)
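Each of these `do_cli` entrypoints is exposed through `fire.Fire`, which maps command-line flags onto keyword arguments; a self-contained sketch of that mechanism:

import fire

def do_cli(config="examples/", **kwargs):
    # fire turns `python demo.py my.yml --debug=True` into do_cli("my.yml", debug=True)
    print(config, kwargs)

if __name__ == "__main__":
    fire.Fire(do_cli)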
@@ -1,84 +1,32 @@
-"""Utility methods for axolotl CLI."""
+"""Utility methods for axoltl CLI."""

 import concurrent.futures
 import dataclasses
 import hashlib
 import json
 import logging
-import typing
-from functools import wraps
 from pathlib import Path
 from types import NoneType
-from typing import Any, Callable, Type, Union, get_args, get_origin
+from typing import Any, Dict, List, Optional, Tuple, Type, Union, get_args, get_origin

 import click
 import requests
 from pydantic import BaseModel
-from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast

-from axolotl.logging_config import configure_logging
-from axolotl.utils.dict import DictDefault
-from axolotl.utils.models import load_model, load_tokenizer
-
-configure_logging()
-LOG = logging.getLogger(__name__)
+LOG = logging.getLogger("axolotl.cli.utils")


-def strip_optional_type(field_type: type | typing._SpecialForm | None):
-    """
-    Extracts the non-`None` type from an `Optional` / `Union` type.
-
-    Args:
-        field_type: Type of field for Axolotl CLI command.
-
-    Returns:
-        If the input type is `Union[T, None]` or `Optional[T]`, returns `T`. Otherwise
-        returns the input type unchanged.
-    """
-    if get_origin(field_type) is Union and type(None) in get_args(field_type):
-        field_type = next(
-            t for t in get_args(field_type) if not isinstance(t, NoneType)
-        )
-
-    return field_type
-
-
-def filter_none_kwargs(func: Callable) -> Callable:
-    """
-    Wraps function to remove `None`-valued `kwargs`.
-
-    Args:
-        func: Function to wrap.
-
-    Returns:
-        Wrapped function.
-    """
-
-    @wraps(func)
-    def wrapper(*args, **kwargs) -> Callable:
-        """Filters out `None`-valued `kwargs`."""
-        filtered_kwargs = {k: v for k, v in kwargs.items() if v is not None}
-
-        return func(*args, **filtered_kwargs)
-
-    return wrapper
-
-
-def add_options_from_dataclass(config_class: Type[Any]) -> Callable:
-    """
-    Create Click options from the fields of a dataclass.
-
-    Args:
-        config_class: Dataclass with fields to parse from the CLI.
-
-    Returns:
-        Function decorator for Axolotl CLI command.
-    """
+def add_options_from_dataclass(config_class: Type[Any]):
+    """Create Click options from the fields of a dataclass."""

-    def decorator(function: Callable) -> Callable:
+    def decorator(function):
         # Process dataclass fields in reverse order for correct option ordering
         for field in reversed(dataclasses.fields(config_class)):
-            field_type = strip_optional_type(field.type)
+            field_type = field.type
+
+            if get_origin(field_type) is Union and type(None) in get_args(field_type):
+                field_type = next(
+                    t for t in get_args(field_type) if not isinstance(t, NoneType)
+                )

             if field_type == bool:
                 field_name = field.name.replace("_", "-")
|
|||||||
default=field.default,
|
default=field.default,
|
||||||
help=field.metadata.get("description"),
|
help=field.metadata.get("description"),
|
||||||
)(function)
|
)(function)
|
||||||
|
|
||||||
return function
|
return function
|
||||||
|
|
||||||
return decorator
|
return decorator
|
||||||
|
|
||||||
|
|
||||||
def add_options_from_config(config_class: Type[BaseModel]) -> Callable:
|
def add_options_from_config(config_class: Type[BaseModel]):
|
||||||
"""
|
"""Create Click options from the fields of a Pydantic model."""
|
||||||
Create Click options from the fields of a Pydantic model.
|
|
||||||
|
|
||||||
Args:
|
def decorator(function):
|
||||||
config_class: PyDantic model with fields to parse from the CLI
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Function decorator for Axolotl CLI command.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def decorator(function: Callable) -> Callable:
|
|
||||||
# Process model fields in reverse order for correct option ordering
|
# Process model fields in reverse order for correct option ordering
|
||||||
for name, field in reversed(config_class.model_fields.items()):
|
for name, field in reversed(config_class.model_fields.items()):
|
||||||
field_type = strip_optional_type(field.annotation)
|
if field.annotation in (bool, Optional[bool]):
|
||||||
|
|
||||||
if field_type == bool:
|
|
||||||
field_name = name.replace("_", "-")
|
field_name = name.replace("_", "-")
|
||||||
option_name = f"--{field_name}/--no-{field_name}"
|
option_name = f"--{field_name}/--no-{field_name}"
|
||||||
function = click.option(
|
function = click.option(
|
||||||
@@ -129,23 +66,13 @@ def add_options_from_config(config_class: Type[BaseModel]) -> Callable:
                 function = click.option(
                     option_name, default=None, help=field.description
                 )(function)

         return function

     return decorator


-def build_command(base_cmd: list[str], options: dict[str, Any]) -> list[str]:
-    """
-    Build command list from base command and options.
-
-    Args:
-        base_cmd: Command without options.
-        options: Options to parse and append to base command.
-
-    Returns:
-        List of strings giving shell command.
-    """
+def build_command(base_cmd: List[str], options: Dict[str, Any]) -> List[str]:
+    """Build command list from base command and options."""
     cmd = base_cmd.copy()

     for key, value in options.items():
@@ -165,18 +92,18 @@ def build_command(base_cmd: list[str], options: dict[str, Any]) -> list[str]:

 def download_file(
     file_info: tuple, raw_base_url: str, dest_path: Path, dir_prefix: str
-) -> tuple[str, str]:
+) -> Tuple[str, str]:
     """
     Download a single file and return its processing status.

     Args:
-        file_info: Tuple of (file_path, remote_sha).
-        raw_base_url: Base URL for raw GitHub content.
-        dest_path: Local destination directory.
-        dir_prefix: Directory prefix to filter files.
+        file_info: Tuple of (file_path, remote_sha)
+        raw_base_url: Base URL for raw GitHub content
+        dest_path: Local destination directory
+        dir_prefix: Directory prefix to filter files

     Returns:
-        Tuple of (file_path, status) where status is 'new', 'updated', or 'unchanged'.
+        Tuple of (file_path, status) where status is 'new', 'updated', or 'unchanged'
     """
     file_path, remote_sha = file_info
     raw_url = f"{raw_base_url}/{file_path}"
@@ -218,17 +145,16 @@ def download_file(


 def fetch_from_github(
-    dir_prefix: str, dest_dir: str | None = None, max_workers: int = 5
+    dir_prefix: str, dest_dir: Optional[str] = None, max_workers: int = 5
 ) -> None:
     """
     Sync files from a specific directory in the GitHub repository.
     Only downloads files that don't exist locally or have changed.

     Args:
-        dir_prefix: Directory prefix to filter files (e.g., 'examples/',
-            'deepspeed_configs/').
-        dest_dir: Local destination directory.
-        max_workers: Maximum number of concurrent downloads.
+        dir_prefix: Directory prefix to filter files (e.g., 'examples/', 'deepspeed_configs/')
+        dest_dir: Local destination directory
+        max_workers: Maximum number of concurrent downloads
     """
     api_url = "https://api.github.com/repos/axolotl-ai-cloud/axolotl/git/trees/main?recursive=1"
     raw_base_url = "https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main"
@@ -253,7 +179,7 @@ def fetch_from_github(
     dest_path = Path(dest_dir) if dest_dir else default_dest

     # Keep track of processed files for summary
-    files_processed: dict[str, list[str]] = {
+    files_processed: Dict[str, List[str]] = {
         "new": [],
         "updated": [],
         "unchanged": [],
@@ -290,28 +216,3 @@ def fetch_from_github(
     LOG.info(f"Unchanged files: {len(files_processed['unchanged'])}")
     if files_processed["error"]:
         LOG.info(f"Failed files: {len(files_processed['error'])}")
-
-
-def load_model_and_tokenizer(
-    *,
-    cfg: DictDefault,
-    inference: bool = False,
-) -> tuple[PreTrainedModel, PreTrainedTokenizer | PreTrainedTokenizerFast | Any]:
-    """
-    Helper function for loading a model and tokenizer specified in the given `axolotl`
-    config.
-
-    Args:
-        cfg: Dictionary mapping `axolotl` config keys to values.
-        inference: Boolean denoting inference mode.
-
-    Returns:
-        `transformers` model and tokenizer.
-    """
-    LOG.info(f"loading tokenizer... {cfg.tokenizer_config or cfg.base_model_config}")
-    tokenizer = load_tokenizer(cfg)
-
-    LOG.info("loading model...")
-    model, _ = load_model(cfg, tokenizer, inference=inference)
-
-    return model, tokenizer
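`build_command`'s loop body is cut off by the hunk above, so the exact flag-rendering rules aren't visible here; the following sketch shows the behavior one would conventionally assume (an assumption, not the file's actual code):

from typing import Any, Dict, List

def build_command_sketch(base_cmd: List[str], options: Dict[str, Any]) -> List[str]:
    # Assumed convention: True booleans become bare flags, other values `--key value`
    cmd = base_cmd.copy()
    for key, value in options.items():
        if value is None:
            continue
        key = key.replace("_", "-")
        if isinstance(value, bool):
            if value:
                cmd.append(f"--{key}")
        else:
            cmd.extend([f"--{key}", str(value)])
    return cmd

print(build_command_sketch(["accelerate", "launch"], {"debug": True, "micro_batch_size": 2}))
# ['accelerate', 'launch', '--debug', '--micro-batch-size', '2']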
src/axolotl/common/cli.py (new file, 69 lines)
@@ -0,0 +1,69 @@
+"""
+shared module for cli specific things
+"""
+
+import logging
+from dataclasses import dataclass, field
+from typing import Optional
+
+import axolotl.monkeypatch.data.batch_dataset_fetcher  # pylint: disable=unused-import # noqa: F401
+from axolotl.logging_config import configure_logging
+from axolotl.utils.dict import DictDefault
+from axolotl.utils.models import load_model, load_tokenizer
+
+configure_logging()
+LOG = logging.getLogger("axolotl.common.cli")
+
+
+@dataclass
+class PreprocessCliArgs:
+    """
+    dataclass representing arguments for preprocessing only
+    """
+
+    debug: bool = field(default=False)
+    debug_text_only: bool = field(default=False)
+    debug_num_examples: int = field(default=1)
+    prompter: Optional[str] = field(default=None)
+    download: Optional[bool] = field(default=True)
+
+
+@dataclass
+class TrainerCliArgs:
+    """
+    dataclass representing the various non-training arguments
+    """
+
+    debug: bool = field(default=False)
+    debug_text_only: bool = field(default=False)
+    debug_num_examples: int = field(default=0)
+    inference: bool = field(default=False)
+    merge_lora: bool = field(default=False)
+    prompter: Optional[str] = field(default=None)
+    shard: bool = field(default=False)
+
+
+@dataclass
+class EvaluateCliArgs:
+    """
+    dataclass representing the various evaluation arguments
+    """
+
+    debug: bool = field(default=False)
+    debug_text_only: bool = field(default=False)
+    debug_num_examples: int = field(default=0)
+
+
+def load_model_and_tokenizer(
+    *,
+    cfg: DictDefault,
+    cli_args: TrainerCliArgs,
+):
+    LOG.info(f"loading tokenizer... {cfg.tokenizer_config or cfg.base_model_config}")
+    tokenizer = load_tokenizer(cfg)
+
+    LOG.info("loading model and (optionally) peft_config...")
+    inference = getattr(cli_args, "inference", False)
+    model, _ = load_model(cfg, tokenizer, inference=inference)
+
+    return model, tokenizer
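These dataclasses are consumed by `transformers.HfArgumentParser` in the `do_cli` functions; a runnable sketch of that parsing path:

import transformers

from axolotl.common.cli import TrainerCliArgs

parser = transformers.HfArgumentParser(TrainerCliArgs)
cli_args, remaining = parser.parse_args_into_dataclasses(
    args=["--debug", "True", "--not-a-real-flag"], return_remaining_strings=True
)
print(cli_args.debug)  # True
print(remaining)       # ['--not-a-real-flag']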
@@ -1,146 +0,0 @@
-"""Dataset loading utilities."""
-
-import logging
-import math
-import random
-from dataclasses import dataclass
-from typing import Optional, Union
-
-from datasets import Dataset
-
-import axolotl.monkeypatch.data.batch_dataset_fetcher  # pylint: disable=unused-import # noqa: F401
-from axolotl.cli.args import PreprocessCliArgs, TrainerCliArgs
-from axolotl.utils.data import prepare_dataset
-from axolotl.utils.data.rl import load_prepare_preference_datasets
-from axolotl.utils.dict import DictDefault
-from axolotl.utils.models import load_processor, load_tokenizer
-from axolotl.utils.tokenization import check_dataset_labels
-
-LOG = logging.getLogger(__name__)
-
-
-@dataclass
-class TrainDatasetMeta:
-    """Dataclass with fields for training and validation datasets and metadata."""
-
-    train_dataset: Dataset
-    eval_dataset: Optional[Dataset] = None
-    total_num_steps: Optional[int] = None
-
-
-def sample_dataset(dataset: Dataset, num_samples: int) -> Dataset:
-    """
-    Randomly sample `num_samples` samples from `dataset`.
-
-    Args:
-        dataset: Dataset.
-        num_samples: Number of samples to return.
-
-    Returns:
-        Random sample (with replacement) of examples in `dataset`.
-    """
-    return dataset.select(
-        [random.randrange(0, len(dataset) - 1) for _ in range(num_samples)]  # nosec
-    )
-
-
-def load_datasets(
-    *,
-    cfg: DictDefault,
-    cli_args: Union[PreprocessCliArgs, TrainerCliArgs],
-) -> TrainDatasetMeta:
-    """
-    Loads one or more training or evaluation datasets, calling
-    `axolotl.utils.data.prepare_dataset`. Optionally, logs out debug information.
-
-    Args:
-        cfg: Dictionary mapping `axolotl` config keys to values.
-        cli_args: Command-specific CLI arguments.
-
-    Returns:
-        Dataclass with fields for training and evaluation datasets and the computed
-        `total_num_steps`.
-    """
-    tokenizer = load_tokenizer(cfg)
-    processor = load_processor(cfg, tokenizer=tokenizer) if cfg.processor_type else None
-    preprocess_iterable = (
-        hasattr(cli_args, "iterable")
-        and cli_args.iterable is not None
-        and cli_args.iterable
-    )
-
-    train_dataset, eval_dataset, total_num_steps, prompters = prepare_dataset(
-        cfg,
-        tokenizer,
-        processor=processor,
-        preprocess_iterable=preprocess_iterable,
-    )
-
-    if (
-        cli_args.debug
-        or cfg.debug
-        or cli_args.debug_text_only
-        or int(cli_args.debug_num_examples) > 0
-    ):
-        LOG.info("check_dataset_labels...")
-
-        train_samples = sample_dataset(train_dataset, cli_args.debug_num_examples)
-        check_dataset_labels(
-            train_samples,
-            tokenizer,
-            num_examples=cli_args.debug_num_examples,
-            text_only=cli_args.debug_text_only,
-        )
-
-        LOG.info("printing prompters...")
-        for prompter in prompters:
-            LOG.info(prompter)
-
-    return TrainDatasetMeta(
-        train_dataset=train_dataset,
-        eval_dataset=eval_dataset,
-        total_num_steps=total_num_steps,
-    )
-
-
-def load_preference_datasets(
-    *,
-    cfg: DictDefault,
-    cli_args: Union[PreprocessCliArgs, TrainerCliArgs],
-) -> TrainDatasetMeta:
-    """
-    Loads one or more training or evaluation datasets for RL training using paired
-    preference data, calling `axolotl.utils.data.rl.load_prepare_preference_datasets`.
-    Optionally, logs out debug information.
-
-    Args:
-        cfg: Dictionary mapping `axolotl` config keys to values.
-        cli_args: Command-specific CLI arguments.
-
-    Returns:
-        Dataclass with fields for training and evaluation datasets and the computed
-        `total_num_steps`.
-    """
-    train_dataset, eval_dataset = load_prepare_preference_datasets(cfg)
-    total_num_steps = int(
-        math.ceil(len(train_dataset) * cfg.num_epochs / cfg.batch_size)
-    )
-
-    if cli_args.debug or cfg.debug:
-        LOG.info("check_dataset_labels...")
-
-        tokenizer = load_tokenizer(cfg)
-        train_samples = sample_dataset(train_dataset, cli_args.debug_num_examples)
-        check_dataset_labels(
-            train_samples,
-            tokenizer,
-            num_examples=cli_args.debug_num_examples,
-            text_only=cli_args.debug_text_only,
-            rl_mode=True,
-        )
-
-    return TrainDatasetMeta(
-        train_dataset=train_dataset,
-        eval_dataset=eval_dataset,
-        total_num_steps=total_num_steps,
-    )
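The removed `sample_dataset` helper in action; note that `random.randrange(0, len(dataset) - 1)` can never pick the final row, a quirk carried over from the original:

import random

from datasets import Dataset

dataset = Dataset.from_dict({"text": [f"example {i}" for i in range(10)]})
indices = [random.randrange(0, len(dataset) - 1) for _ in range(3)]  # nosec
print(dataset.select(indices)["text"])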
@@ -697,12 +697,6 @@ class HFCausalTrainerBuilder(TrainerBuilderBase):
             training_arguments_kwargs["kd_ce_alpha"] = self.cfg.kd_ce_alpha
         if self.cfg.kd_alpha is not None:
             training_arguments_kwargs["kd_alpha"] = self.cfg.kd_alpha
-        if self.cfg.kd_temperature is not None:
-            training_arguments_kwargs["kd_temperature"] = self.cfg.kd_temperature
-        if self.cfg.kd_zscore_base_temp is not None:
-            training_arguments_kwargs[
-                "kd_zscore_base_temp"
-            ] = self.cfg.kd_zscore_base_temp

         training_args_cls = (
             AxolotlTrainingArguments
@@ -188,13 +188,6 @@ class AxolotlTrainingMixins:
         },
     )

-    kd_zscore_base_temp: Optional[float] = field(
-        default=None,
-        metadata={
-            "help": "the base temperature parameter for KL divergence with z-score when using KD"
-        },
-    )
-

 @dataclass
 class AxolotlTrainingArguments(AxolotlTrainingMixins, TrainingArguments):
@@ -9,6 +9,7 @@ from typing import Dict, Optional
 import torch
 from accelerate.logging import get_logger

+from axolotl.common.cli import TrainerCliArgs
 from axolotl.logging_config import configure_logging
 from axolotl.train import TrainDatasetMeta
 from axolotl.utils import set_pytorch_cuda_alloc_conf
@@ -61,13 +62,16 @@ def evaluate_dataset(
     return metrics


-def evaluate(*, cfg: DictDefault, dataset_meta: TrainDatasetMeta) -> Dict[str, float]:
+def evaluate(
+    *, cfg: DictDefault, cli_args: TrainerCliArgs, dataset_meta: TrainDatasetMeta
+) -> Dict[str, float]:
     """
     Evaluate a model on training and validation datasets

     Args:
-        cfg: Dictionary mapping `axolotl` config keys to values.
-        dataset_meta: Dataset metadata containing training and evaluation datasets.
+        cfg: Configuration dictionary
+        cli_args: Command line arguments
+        dataset_meta: Dataset metadata containing training and evaluation datasets

     Returns:
         Tuple containing:
@@ -98,7 +102,9 @@ def evaluate(*, cfg: DictDefault, dataset_meta: TrainDatasetMeta) -> Dict[str, float]:

     # Load model
     LOG.debug("loading model for evaluation...")
-    model, _ = load_model(cfg, tokenizer, processor=processor)
+    model, _ = load_model(
+        cfg, tokenizer, processor=processor, inference=cli_args.inference
+    )

     # Set up trainer
     trainer = setup_trainer(
@@ -31,4 +31,3 @@ class KDArgs(BaseModel):
     ] = None  # loss coefficient for cross-entropy loss during KD
     kd_alpha: Optional[float] = None  # loss coefficient for KD loss
     kd_temperature: Optional[float] = None  # temperature for sampling during KD
-    kd_zscore_base_temp: Optional[float] = None  # base temperature for zscore scaling
@@ -52,62 +52,26 @@ class ChatTemplateStrategyWithKD(ChatTemplateStrategy):
             train_on_eos=train_on_eos,
         )

-    @property
-    def supports_batched(self) -> bool:
-        # batching doesn't work well for logprob data
-        return False
-
     def transform_logprobs(self, sample):
-        """
-        Transform logprobs to target format for KD training
-        """
         logprobs = sample.pop(self.logprobs_field)
         target_seq_len = len(logprobs)
         input_seq_len = len(sample["input_ids"])
         input_padding_len = input_seq_len - target_seq_len
-        # get non-zero top-k (prune None logprobs from vllm data step)
-        top_k_vals = [
-            len(logprobs[i])
-            for i in range(len(logprobs))
-            if logprobs[i] is not None and len(logprobs[i])
-        ]
-        max_top_k = max(set(top_k_vals), key=top_k_vals.count)
-        min_top_k = min(set(top_k_vals), key=top_k_vals.count)
-        top_k = min(max_top_k, min_top_k)
-        if top_k == 0:
-            raise ValueError("No non-zero top-k logprobs found.")
-
+        top_k = len(logprobs[0])
         target_logprobs = []
         target_token_ids = []
         target_mask = []

-        if input_padding_len < 0:
-            # logprobs is longer than target_seq_len,
-            # so we need to slice from the left/beginning of logprobs
-            logprobs = logprobs[:-input_seq_len]
-            input_padding_len = 0
-            # target_seq_len = input_seq_len
-
-        # truncate the second dimension of the logprobs to top_k
-        logprobs = [row[:top_k] for row in logprobs]
-
         # fill with -inf for padding_len tokens for top_k tokens
         # extend target_logprobs with a padding_len x top_k 2D list filled with -inf
-        # for causal models, if we start the range at 1, then we don't need to shift in the trainer
-        # otherwise, we need to shift in the trainer
-        shift = 0
-        for _ in range(shift, input_padding_len):
+        for _ in range(1, input_padding_len):  # start at 1 since this is causal
            target_logprobs.append([-float("inf")] * top_k)
            target_token_ids.append(list(range(top_k)))
            target_mask.append([0] * top_k)

-        for position in range(input_padding_len, input_seq_len):
-            if sample["labels"][position] == -100:
-                target_mask.append([0] * top_k)
-            else:
-                target_mask.append([1] * top_k)
+        for _ in range(target_seq_len):
+            # TODO also check against sample["labels"]
+            target_mask.append([1] * top_k)

         for _, token_pos_logprobs in enumerate(logprobs):
             # Initialize collections for logprobs and token_ids
@@ -127,28 +91,28 @@ class ChatTemplateStrategyWithKD(ChatTemplateStrategy):
|
|||||||
position_token_ids.append(token_id)
|
position_token_ids.append(token_id)
|
||||||
|
|
||||||
# Convert to a tensor for easier manipulation
|
# Convert to a tensor for easier manipulation
|
||||||
|
# Convert to tensor
|
||||||
position_logprobs_tensor = torch.tensor(
|
position_logprobs_tensor = torch.tensor(
|
||||||
position_logprobs, dtype=torch.float
|
position_logprobs, dtype=torch.float
|
||||||
)
|
)
|
||||||
|
|
||||||
# Now we have distribution at T1 in log form, i.e. log p_{T1}(k).
|
|
||||||
# Next, re-scale to T2 = self.kd_temperature via exponent-based trick
|
|
||||||
# p_{T2}(k) = [p_{T1}(k)]^(T1 / T2) / Z
|
|
||||||
#
|
|
||||||
# Convert from log to probability
|
|
||||||
teacher_probs_t1 = position_logprobs_tensor.exp()
|
|
||||||
if self.kd_temperature != self.gen_temperature:
|
if self.kd_temperature != self.gen_temperature:
|
||||||
|
#
|
||||||
|
# Now we have distribution at T1 in log form, i.e. log p_{T1}(k).
|
||||||
|
# Next, re-scale to T2 = self.kd_temperature via exponent-based trick
|
||||||
|
# p_{T2}(k) = [p_{T1}(k)]^(T1 / T2) / Z
|
||||||
|
#
|
||||||
|
# Convert from log to probability
|
||||||
|
teacher_probs_t1 = position_logprobs_tensor.exp()
|
||||||
# Exponentiate by factor (T1 / T2)
|
# Exponentiate by factor (T1 / T2)
|
||||||
exponent = self.gen_temperature / self.kd_temperature
|
exponent = self.gen_temperature / self.kd_temperature
|
||||||
teacher_probs_t2 = teacher_probs_t1**exponent
|
teacher_probs_t2 = teacher_probs_t1**exponent
|
||||||
else:
|
# Re-normalize
|
||||||
teacher_probs_t2 = teacher_probs_t1
|
teacher_probs_t2 = teacher_probs_t2 / teacher_probs_t2.sum(
|
||||||
# Re-normalize
|
dim=0, keepdim=True
|
||||||
teacher_probs_t2 = teacher_probs_t2 / teacher_probs_t2.sum(
|
)
|
||||||
dim=0, keepdim=True
|
# Convert back to log
|
||||||
)
|
position_logprobs_tensor = torch.log(teacher_probs_t2)
|
||||||
# Convert back to log
|
|
||||||
position_logprobs_tensor = torch.log(teacher_probs_t2)
|
|
||||||
|
|
||||||
# Now we have log p_{teacher, T2}(k) stored in position_logprobs_tensor
|
# Now we have log p_{teacher, T2}(k) stored in position_logprobs_tensor
|
||||||
position_logprobs_scaled = position_logprobs_tensor.tolist()
|
position_logprobs_scaled = position_logprobs_tensor.tolist()
|
||||||
@@ -156,11 +120,10 @@ class ChatTemplateStrategyWithKD(ChatTemplateStrategy):
|
|||||||
target_logprobs.append(position_logprobs_scaled)
|
target_logprobs.append(position_logprobs_scaled)
|
||||||
target_token_ids.append(position_token_ids)
|
target_token_ids.append(position_token_ids)
|
||||||
|
|
||||||
if shift == 1:
|
# since we started at index 1 for causal, we need one more padding token
|
||||||
# since we started at index 1 for causal, we need one more padding token
|
target_logprobs.append([-float("inf")] * top_k)
|
||||||
target_logprobs.append([-float("inf")] * top_k)
|
target_token_ids.append(list(range(top_k)))
|
||||||
target_token_ids.append(list(range(top_k)))
|
target_mask.append([0] * top_k)
|
||||||
target_mask.append([0] * top_k)
|
|
||||||
|
|
||||||
# Update sample with transformed logprobs
|
# Update sample with transformed logprobs
|
||||||
sample["target_logprobs"] = target_logprobs
|
sample["target_logprobs"] = target_logprobs
|
||||||
|
|||||||
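The exponent-based rescaling in the hunk above follows from p_T(k) being proportional to exp(logit_k / T): a distribution sampled at generation temperature T1 converts to KD temperature T2 by raising probabilities to the power T1/T2 and re-normalizing. A minimal, self-contained sketch of that identity (illustrative only, not code from this branch):

import torch

def rescale_temperature(logprobs: torch.Tensor, t_gen: float, t_kd: float) -> torch.Tensor:
    # p_{T2}(k) = p_{T1}(k) ** (T1 / T2) / Z, computed in probability space
    probs = logprobs.exp() ** (t_gen / t_kd)
    probs = probs / probs.sum(dim=-1, keepdim=True)  # the Z above
    return torch.log(probs)

# teacher top-k logprobs collected at T1 = 1.0, distilled at T2 = 2.0
teacher_lp = torch.log_softmax(torch.randn(5), dim=-1)
print(rescale_temperature(teacher_lp, 1.0, 2.0))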
@@ -16,40 +16,6 @@ loss for top_k KL divergence
 import torch


-def zscore_standardize(
-    logits: torch.Tensor,
-    mask: torch.Tensor = None,
-    base_temperature: float = 1.0,
-    eps: float = 1e-9,
-):
-    """
-    Z-score standardize along the last dimension of `logits`.
-    i.e., for each [B, seq_len] row, across K entries:
-        z = (logits - mean) / std,
-    then scale by 1 / base_temperature if desired.
-
-    mask can be broadcastable or None. If None, we standardize all elements.
-    """
-    if mask is None:
-        # shape: [B, seq_len, K]
-        # Mean and std over dim=-1
-        mean = logits.mean(dim=-1, keepdim=True)
-        var = logits.var(dim=-1, unbiased=False, keepdim=True)
-    else:
-        # If you have to exclude some tokens, multiply by mask, etc.
-        float_mask = mask.to(logits.dtype)
-        count = float_mask.sum(dim=-1, keepdim=True).clamp_min(1.0)
-        mean = (logits * float_mask).sum(dim=-1, keepdim=True) / count
-        var = (float_mask * (logits - mean) ** 2).sum(dim=-1, keepdim=True) / count
-
-    std = torch.sqrt(var.clamp_min(eps))
-    z = (logits - mean) / std
-
-    # Scale by 1 / base_temperature
-    z = z / base_temperature
-    return z
-
-
 @torch.jit.script
 def loss(
     student_logits: torch.Tensor,
@@ -61,23 +27,8 @@ def loss(
 ) -> torch.Tensor:
     """
     A KD loss function that is TorchScript-friendly.
-
-    Arguments:
-        student_logits (torch.Tensor): The logits of the student model.
-            Shape: [B, student_seq_len, vocab_size]
-        target_token_ids (torch.Tensor): The top-k teacher/target token IDs
-            Shape: [B, teacher_seq_len, top_k]
-        target_logprobs (torch.Tensor): The top-k teacher/target logprobs, these should already be re-normalized.
-            Shape: [B, teacher_seq_len, top_k]
-        target_mask (torch.Tensor): The mask for valid tokens.
-            Shape: [B, teacher_seq_len, top_k]
-        num_items_in_batch (int, optional): The number of items in the batch.
-        kd_temperature (float, optional): The temperature for KD.
-            Default: 1.0
     """

-    target_logprobs = target_logprobs.float()
-
     # Determine the teacher sequence length
     # target_token_ids shape: [B, teacher_seq_len, K]
     # student_logits shape: [B, student_seq_len, vocab_size]
@@ -93,8 +44,6 @@ def loss(
         student_logits_for_kd, dim=-1, index=target_token_ids
     )  # [B, teacher_seq_len, K]

-    student_logits_topk = student_logits_topk.float()
-
     # Apply KD temperature to student’s logits
     if kd_temperature != 1.0:
         student_logits_topk = student_logits_topk / kd_temperature
@@ -131,82 +80,3 @@ def loss(
     kd_loss = kd_loss / float(kd_loss_per_token.size(0))

     return kd_loss
-
-
-def topk_kd_loss_with_zscore(
-    student_logits: torch.Tensor,  # [B, seq_len, vocab_size]
-    target_token_ids: torch.Tensor,  # [B, seq_len, K]
-    target_logprobs: torch.Tensor,  # [B, seq_len, K], sums to 1.0 in prob space
-    target_mask: torch.Tensor,  # [B, seq_len, K] or [B, seq_len]
-    kd_temperature: float = 1.0,  # classic KD temperature
-    zscore_base_temp: float = 1.0,  # from the paper
-    num_items_in_batch: int = -1,
-):
-    """
-    A variant of top_k KL divergence with Z-score scaling
-    from "Logit Standardization in Knowledge Distillation".
-    """
-
-    target_logprobs = target_logprobs.float()
-
-    B, teacher_seq_len, K = target_logprobs.shape  # pylint: disable=invalid-name
-    # 1) Gather the student's top-k logits to match teacher
-    student_logits_for_kd = student_logits[
-        :, :teacher_seq_len, :
-    ]  # [B, seq_len, vocab]
-    student_topk_logits = torch.gather(
-        student_logits_for_kd, dim=-1, index=target_token_ids
-    )  # [B, seq_len, K]
-
-    student_topk_logits = student_topk_logits.float()
-
-    # 2) If you want to keep the "classical" T scaling, apply it first
-    if kd_temperature != 1.0:
-        student_topk_logits = student_topk_logits / kd_temperature
-
-    # 3) Convert teacher logprobs -> treat them as “logits” for z-score
-    #    (They differ by +some_constant from real logits, but in z-score
-    #    that constant is subtracted out anyway.)
-    teacher_logits_for_zscore = target_logprobs  # rename variable for clarity
-
-    # 4) Z-score teacher and student
-    # If target_mask is 2D, expand to 3D for the K dimension
-    if target_mask.dim() == 2 and target_mask.shape[:2] == (B, teacher_seq_len):
-        target_mask = target_mask.unsqueeze(-1).expand(-1, -1, K)
-
-    teacher_z = zscore_standardize(
-        teacher_logits_for_zscore, mask=target_mask, base_temperature=zscore_base_temp
-    )
-    student_z = zscore_standardize(
-        student_topk_logits, mask=target_mask, base_temperature=zscore_base_temp
-    )
-
-    # 5) Convert to log-probs for KL
-    teacher_logprobs_z = teacher_z - torch.logsumexp(teacher_z, dim=-1, keepdim=True)
-    student_logprobs_z = student_z - torch.logsumexp(student_z, dim=-1, keepdim=True)
-
-    # 6) Restrict to valid tokens if needed
-    valid_mask = target_mask.bool()  # shape [B, seq_len, K]
-    teacher_probs_z = teacher_logprobs_z.exp()
-    teacher_probs_z = teacher_probs_z[valid_mask]
-    teacher_logprobs_z = teacher_logprobs_z[valid_mask]
-    student_logprobs_z = student_logprobs_z[valid_mask]
-
-    # 7) forward KL: sum( p_teacher * [log(p_teacher) - log(p_student)] )
-    kd_loss_per_token = teacher_probs_z * (teacher_logprobs_z - student_logprobs_z)
-    kd_loss = kd_loss_per_token.sum()
-
-    # 8) If using classical KD scaling by T^2
-    if kd_temperature != 1.0:
-        kd_loss = kd_loss * (kd_temperature**2)
-
-    # Optionally scale by zscore_base_temp**2 if you want (paper might differ).
-    # kd_loss = kd_loss * (zscore_base_temp**2)
-
-    # 9) Normalize
-    if num_items_in_batch is not None and num_items_in_batch > 0:
-        kd_loss = kd_loss / float(num_items_in_batch)
-    else:
-        kd_loss = kd_loss / float(kd_loss_per_token.size(0))
-
-    return kd_loss
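For orientation: `loss` above is a forward KL restricted to the teacher's top-k support, sum over k of p_teacher(k) * (log p_teacher(k) - log p_student(k)), with the classical T^2 correction when temperature scaling is in effect. A toy, self-contained sketch of that computation (hypothetical shapes; not the trainer's exact code):

import torch

def topk_forward_kl(
    student_logits: torch.Tensor,    # [B, S, V]
    target_token_ids: torch.Tensor,  # [B, S, K]
    target_logprobs: torch.Tensor,   # [B, S, K], normalized over K
    kd_temperature: float = 1.0,
) -> torch.Tensor:
    # Gather student logits at the teacher's top-k token ids: [B, S, K]
    student_topk = torch.gather(student_logits, dim=-1, index=target_token_ids)
    student_topk = student_topk / kd_temperature
    # Log-softmax over the top-k support only
    student_logprobs = student_topk - torch.logsumexp(student_topk, dim=-1, keepdim=True)
    teacher_probs = target_logprobs.exp()
    kl = (teacher_probs * (target_logprobs - student_logprobs)).sum()
    return kl * kd_temperature**2  # classical KD gradient correction

B, S, V, K = 2, 4, 32, 3
logits = torch.randn(B, S, V)
ids = torch.randint(0, V, (B, S, K))
lp = torch.log_softmax(torch.randn(B, S, K), dim=-1)
print(topk_forward_kl(logits, ids, lp, kd_temperature=2.0))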
@@ -19,7 +19,6 @@ KD trainer
 from axolotl.core.trainers.base import AxolotlTrainer

 from .topk_logprob.forward_kl import loss as topk_kd_loss
-from .topk_logprob.forward_kl import topk_kd_loss_with_zscore


 class AxolotlKDTrainer(AxolotlTrainer):
@@ -46,6 +45,7 @@ class AxolotlKDTrainer(AxolotlTrainer):
         inputs,
         return_outputs=False,
         num_items_in_batch=None,
+        shift_targets=False,
     ):
         """
         How the loss is computed by Trainer. By default, all models return the loss in the first element.
@@ -69,30 +69,25 @@ class AxolotlKDTrainer(AxolotlTrainer):
         # FIXME: account for tokenizer.padding_side
         student_logits = outputs["logits"][:, :seq_len, :].contiguous()

-        shift_logits = student_logits.contiguous()
-        target_logprobs_for_loss = target_logprobs[..., 1:, :].contiguous()
-        target_token_ids_for_loss = target_token_ids[..., 1:, :].contiguous()
-        target_mask_for_loss = target_mask[..., 1:, :].contiguous()
-
-        if self.args.kd_zscore_base_temp:
-            loss_kd = topk_kd_loss_with_zscore(
-                shift_logits,
-                target_token_ids_for_loss,
-                target_logprobs_for_loss,
-                target_mask_for_loss,
-                kd_temperature=self.args.kd_temperature,
-                zscore_base_temp=self.args.kd_zscore_base_temp,
-                num_items_in_batch=num_items_in_batch,
-            )
+        if shift_targets:
+            shift_logits = student_logits[..., :-1, :].contiguous()
+            target_logprobs_for_loss = target_logprobs[..., 1:, :].contiguous()
+            target_token_ids_for_loss = target_token_ids[..., 1:, :].contiguous()
+            target_mask_for_loss = target_mask[..., 1:, :].contiguous()
         else:
-            loss_kd = topk_kd_loss(
-                shift_logits,
-                target_token_ids_for_loss,
-                target_logprobs_for_loss,
-                target_mask_for_loss,
-                num_items_in_batch=num_items_in_batch,
-                kd_temperature=self.args.kd_temperature,
-            )
+            shift_logits = student_logits.contiguous()
+            target_logprobs_for_loss = target_logprobs.contiguous()
+            target_token_ids_for_loss = target_token_ids.contiguous()
+            target_mask_for_loss = target_mask.contiguous()
+
+        loss_kd = topk_kd_loss(
+            shift_logits,
+            target_token_ids_for_loss,
+            target_logprobs_for_loss,
+            target_mask_for_loss,
+            num_items_in_batch=num_items_in_batch,
+            kd_temperature=self.args.kd_temperature,
+        )

         if self.args.kd_ce_alpha > 0:
             kd_alpha = self.args.kd_alpha
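The tail of `compute_loss` (only its first lines are shown above) combines the model's own cross-entropy loss with `loss_kd` through the `kd_ce_alpha` / `kd_alpha` coefficients from `KDArgs`. A hedged, standalone sketch of one such weighting (the branch's exact formula may differ):

def blend_losses(loss_ce: float, loss_kd: float, kd_ce_alpha: float, kd_alpha: float) -> float:
    # Weighted sum of hard-label CE and distillation losses (illustrative).
    if kd_ce_alpha > 0:
        return kd_ce_alpha * loss_ce + kd_alpha * loss_kd
    return loss_kd  # pure distillation when the CE coefficient is zero

print(blend_losses(loss_ce=2.1, loss_kd=0.8, kd_ce_alpha=0.2, kd_alpha=0.8))  # ~1.06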
@@ -5,19 +5,21 @@ import os
 import signal
 import sys
 import weakref
+from dataclasses import dataclass
 from pathlib import Path
-from typing import Tuple, Union
+from typing import Optional, Tuple, Union

 import torch
 import transformers.modelcard
 from accelerate.logging import get_logger
 from accelerate.utils import save_fsdp_model
+from datasets import Dataset
 from peft import PeftModel
 from pkg_resources import get_distribution  # type: ignore
 from transformers import PreTrainedModel, PreTrainedTokenizer
 from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled

-from axolotl.common.datasets import TrainDatasetMeta
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.contribs.lgpl.unsloth import (  # pylint: disable = no-name-in-module
     fix_untrained_tokens,
 )
@@ -37,11 +39,22 @@ src_dir = os.path.join(project_root, "src")
 sys.path.insert(0, src_dir)

 configure_logging()
-LOG = get_logger(__name__)
+LOG = get_logger("axolotl.train")


+@dataclass
+class TrainDatasetMeta:
+    """
+    dataclass to capture the dataset specific options for training
+    """
+
+    train_dataset: Dataset
+    eval_dataset: Optional[Dataset] = None
+    total_num_steps: Optional[int] = None
+
+
 def train(
-    *, cfg: DictDefault, dataset_meta: TrainDatasetMeta
+    *, cfg: DictDefault, cli_args: TrainerCliArgs, dataset_meta: TrainDatasetMeta
 ) -> Tuple[Union[PeftModel, PreTrainedModel], PreTrainedTokenizer]:
     # Load tokenizer
     LOG.debug(
@@ -80,7 +93,9 @@ def train(
     if cfg.adapter:
         msg += " and peft_config..."
     LOG.debug(msg)
-    model, peft_config = load_model(cfg, tokenizer, processor=processor)
+    model, peft_config = load_model(
+        cfg, tokenizer, processor=processor, inference=cli_args.inference
+    )
     if model.generation_config is not None:
         model.generation_config.do_sample = True

@@ -92,7 +107,9 @@ def train(
         model_ref = None  # explicit setting to None
     else:
         # load the model again for model_ref/baseline
-        model_ref, _ = load_model(cfg, tokenizer, reference_model=True)
+        model_ref, _ = load_model(
+            cfg, tokenizer, inference=cli_args.inference, reference_model=True
+        )

     safe_serialization = cfg.save_safetensors is True

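Quick usage sketch for the `TrainDatasetMeta` dataclass added above (the tiny `Dataset.from_dict` payload is invented for illustration):

from datasets import Dataset
from axolotl.train import TrainDatasetMeta  # as defined in the hunk above

train_ds = Dataset.from_dict({"input_ids": [[1, 2, 3]], "labels": [[1, 2, 3]]})
meta = TrainDatasetMeta(train_dataset=train_ds, total_num_steps=100)
# eval_dataset defaults to None; train() receives this as dataset_meta=meta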
@@ -129,7 +129,6 @@ class PretrainingDataset(BaseModel):
     type: Optional[str] = "pretrain"
     trust_remote_code: Optional[bool] = False
     data_files: Optional[str] = None
-    skip: Optional[int] = None


 class UserDefinedPrompterType(BaseModel):
@@ -371,13 +370,6 @@ class LoraConfig(BaseModel):
             loraplus_lr_embedding = float(loraplus_lr_embedding)
         return loraplus_lr_embedding

-    @model_validator(mode="before")
-    @classmethod
-    def validate_lora_dropout(cls, data):
-        if data.get("adapter") is not None and data.get("lora_dropout") is None:
-            data["lora_dropout"] = 0.0
-        return data
-

 class ReLoRAConfig(BaseModel):
     """ReLoRA configuration subset"""
@@ -5,7 +5,7 @@ from axolotl.utils.data.pretraining import (  # noqa: F401
     encode_pretraining,
     wrap_pretraining_dataset,
 )
-from axolotl.utils.data.rl import load_prepare_preference_datasets  # noqa: F401
+from axolotl.utils.data.rl import load_prepare_dpo_datasets  # noqa: F401
 from axolotl.utils.data.sft import (  # noqa: F401
     get_dataset_wrapper,
     load_prepare_datasets,
@@ -18,13 +18,10 @@ LOG = logging.getLogger("axolotl")


 def encode_pretraining(
-    tokenizer: PreTrainedTokenizerBase,
-    max_tokens: int,
-    examples: Dict[str, List],
-    text_column: str = "text",
+    tokenizer: PreTrainedTokenizerBase, max_tokens: int, examples: Dict[str, List]
 ) -> Dict[str, List]:
     res = tokenizer(
-        examples[text_column],
+        examples["text"],
         truncation=True,
         max_length=max_tokens - 2,
         add_special_tokens=True,
@@ -199,12 +196,7 @@ def wrap_pretraining_dataset(
         # set this to 1 so downstream data_loader doesn't try to increase the batch again
         cfg.micro_batch_size = 1
     else:
-        encode = functools.partial(
-            encode_pretraining,
-            tokenizer,
-            max_tokens,
-            text_column=cfg.pretraining_dataset[0].text_column or "text",
-        )
+        encode = functools.partial(encode_pretraining, tokenizer, max_tokens)

     if cfg.shuffle_merged_datasets:
         dataset = dataset.shuffle(seed=seed, buffer_size=buffer_size)
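The `functools.partial` simplification above drops the `text_column` binding, so `encode_pretraining` now always reads `examples["text"]`. A toy sketch of the configurable pattern being removed (stub function, not the real tokenizer path):

import functools

def encode_stub(tokenizer, max_tokens, examples, text_column="text"):
    # stand-in for encode_pretraining: just measures each row
    return {"length": [len(t) for t in examples[text_column]]}

# the configurable-column form on the left side of the diff:
encode = functools.partial(encode_stub, None, 2048, text_column="content")
print(encode({"content": ["some raw pretraining text"]}))  # {'length': [25]}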
@@ -115,7 +115,7 @@ def drop_long_rl_seq(
         raise ValueError("Unknown RL type")


-def load_prepare_preference_datasets(cfg):
+def load_prepare_dpo_datasets(cfg):
     def load_split(dataset_cfgs, _cfg):
         split_datasets: List[Any] = []
         for i, ds_cfg in enumerate(dataset_cfgs):
@@ -3,7 +3,7 @@
 import functools
 import logging
 from pathlib import Path
-from typing import List, Optional, Tuple, Union
+from typing import List, Tuple, Union

 from datasets import (
     Dataset,
@@ -58,7 +58,7 @@ LOG = logging.getLogger("axolotl")


 @retry_on_request_exceptions(max_retries=3, delay=5)
-def prepare_dataset(cfg, tokenizer, processor=None, preprocess_iterable=None):
+def prepare_dataset(cfg, tokenizer, processor=None):
     prompters = []
     if not cfg.pretraining_dataset:
         with zero_first(is_local_main_process()):
@@ -69,7 +69,6 @@ def prepare_dataset(cfg, tokenizer, processor=None, preprocess_iterable=None):
                 DEFAULT_DATASET_PREPARED_PATH,
                 split="train",
                 processor=processor,
-                preprocess_iterable=preprocess_iterable,
             )
             _, eval_dataset, _ = load_prepare_datasets(
                 tokenizer,
@@ -77,7 +76,6 @@ def prepare_dataset(cfg, tokenizer, processor=None, preprocess_iterable=None):
                 DEFAULT_DATASET_PREPARED_PATH,
                 split="test",
                 processor=processor,
-                preprocess_iterable=preprocess_iterable,
             )
         else:
             train_dataset, eval_dataset, prompters = load_prepare_datasets(
@@ -85,7 +83,6 @@ def prepare_dataset(cfg, tokenizer, processor=None, preprocess_iterable=None):
                 cfg,
                 DEFAULT_DATASET_PREPARED_PATH,
                 processor=processor,
-                preprocess_iterable=preprocess_iterable,
             )
     else:
         # Load streaming dataset if pretraining_dataset is given
@@ -93,13 +90,11 @@ def prepare_dataset(cfg, tokenizer, processor=None, preprocess_iterable=None):
         split = "train"
         name = None
         data_files = None
-        skip = 0
         if isinstance(cfg.pretraining_dataset, list) and isinstance(
             cfg.pretraining_dataset[0], dict
         ):
             path = cfg.pretraining_dataset[0]["path"]
             name = cfg.pretraining_dataset[0]["name"]
-            skip = cfg.pretraining_dataset[0]["skip"]
             if "split" in cfg.pretraining_dataset[0]:
                 split = cfg.pretraining_dataset[0]["split"]

@@ -113,14 +108,10 @@ def prepare_dataset(cfg, tokenizer, processor=None, preprocess_iterable=None):
             cfg.pretraining_dataset[0]["type"] or "pretrain",
         )

-        iter_ds = load_dataset(
-            path, streaming=True, split=split, name=name, data_files=data_files
-        )
-        if skip:
-            LOG.info(f"Skipping {skip} samples from the dataset")
-            iter_ds = iter_ds.skip(skip)
         train_dataset = wrap_pretraining_dataset(
-            iter_ds,
+            load_dataset(
+                path, streaming=True, split=split, name=name, data_files=data_files
+            ),
             tokenizer,
             cfg,
             ds_wrapper_partial,
@@ -141,7 +132,6 @@ def prepare_dataset(cfg, tokenizer, processor=None, preprocess_iterable=None):
             DEFAULT_DATASET_PREPARED_PATH,
             split="test",
             processor=processor,
-            preprocess_iterable=preprocess_iterable,
         )

     if cfg.dataset_exact_deduplication:
@@ -173,7 +163,6 @@ def load_tokenized_prepared_datasets(
     default_dataset_prepared_path,
     split="train",
     processor=None,
-    preprocess_iterable: Optional[bool] = None,
 ) -> Tuple[DatasetDict, List[Prompter]]:
     cfg_datasets = cfg.test_datasets if split == "test" else cfg.datasets
     tokenizer_name = cfg.tokenizer_config
@@ -280,7 +269,7 @@ def load_tokenized_prepared_datasets(
             yield dataset

     streaming_ds = False
-    if preprocess_iterable:
+    if cfg.preprocess_iterable:
         streaming_ds = True
     # pylint: disable=invalid-name
     for config_dataset in for_d_in_datasets(cfg_datasets):
@@ -376,7 +365,6 @@ def load_prepare_datasets(
     default_dataset_prepared_path,
     split="train",
     processor=None,
-    preprocess_iterable: Optional[bool] = False,
 ) -> Tuple[Dataset, Dataset, List[Prompter]]:
     dataset, prompters = load_tokenized_prepared_datasets(
         tokenizer,
@@ -384,7 +372,6 @@ def load_prepare_datasets(
         default_dataset_prepared_path,
         split=split,
         processor=processor,
-        preprocess_iterable=preprocess_iterable,
     )

     if cfg.dataset_shard_num and cfg.dataset_shard_idx is not None:
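The deleted `skip` plumbing above used `datasets.IterableDataset.skip`, which fast-forwards a streaming dataset without materializing the skipped rows. A minimal sketch (the dataset name is illustrative):

from datasets import load_dataset

iter_ds = load_dataset("allenai/c4", "en", streaming=True, split="train")
iter_ds = iter_ds.skip(1000)  # what the removed `skip` handling did
print(next(iter(iter_ds)))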
@@ -1057,7 +1057,7 @@ class ModelLoader:
         )
         if (
             hasattr(self.model, "get_input_embeddings")
-            and self.model.get_input_embeddings().num_embeddings != embeddings_len
+            and self.model.get_input_embeddings().num_embeddings < embeddings_len
         ):
             resize_kwargs = {}
             if self.cfg.mean_resizing_embeddings is not None:
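The operator change above (`!=` to `<`) makes the embedding resize grow-only: embedding matrices deliberately padded past the tokenizer vocabulary are no longer shrunk back. A runnable sketch of the check with a tiny randomly initialized model (config values are arbitrary):

from transformers import LlamaConfig, LlamaForCausalLM

model = LlamaForCausalLM(
    LlamaConfig(vocab_size=16, hidden_size=8, num_hidden_layers=1,
                num_attention_heads=2, intermediate_size=16)
)
embeddings_len = 20  # e.g. len(tokenizer) after adding special tokens
if (
    hasattr(model, "get_input_embeddings")
    and model.get_input_embeddings().num_embeddings < embeddings_len
):
    model.resize_token_embeddings(embeddings_len)  # grows, never shrinks here
print(model.get_input_embeddings().num_embeddings)  # 20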
@@ -279,7 +279,6 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset):
         drop_long_kwargs["desc"] = "Dropping Long Sequences"
     train_dataset = train_dataset.filter(
         drop_long,
-        batched=True,
         **filter_map_kwargs,
         **drop_long_kwargs,
     )
@@ -311,7 +310,8 @@ def process_datasets_for_packing(cfg, train_dataset, eval_dataset):
     """
     labels = sample["labels"]
     if not labels:
-        return True
+        # Edge case: if labels is empty, decide if you want to keep or drop
+        return True  # or False

     # Check if single example or batch
     # If first element is an int, we assume a single example
@@ -1,5 +1,4 @@
 """Shared pytest fixtures for cli module."""
-
 import pytest
 from click.testing import CliRunner

@@ -1,5 +1,4 @@
 """pytest tests for axolotl CLI fetch command."""
-
 from unittest.mock import patch

 from axolotl.cli.main import fetch
@@ -1,5 +1,4 @@
 """pytest tests for axolotl CLI inference command."""
-
 from unittest.mock import patch

 from axolotl.cli.main import cli
@@ -1,5 +1,4 @@
 """General pytest tests for axolotl.cli.main interface."""
-
 from axolotl.cli.main import build_command, cli


@@ -1,5 +1,4 @@
 """pytest tests for axolotl CLI merge_lora command."""
-
 from unittest.mock import patch

 from axolotl.cli.main import cli
@@ -1,6 +1,5 @@
 """pytest tests for axolotl CLI merge_sharded_fsdp_weights command."""
 # pylint: disable=duplicate-code
-
 from unittest.mock import patch

 from axolotl.cli.main import cli
@@ -16,3 +15,46 @@ def test_merge_sharded_fsdp_weights_no_accelerate(cli_runner, config_path):
     assert mock.called
     assert mock.call_args.kwargs["config"] == str(config_path)
     assert result.exit_code == 0
+
+
+def test_merge_sharded_fsdp_weights_with_model_dir(cli_runner, config_path, tmp_path):
+    """Test merge_sharded_fsdp_weights command with model_dir option"""
+    model_dir = tmp_path / "model"
+    model_dir.mkdir()
+
+    with patch("axolotl.cli.merge_sharded_fsdp_weights.do_cli") as mock:
+        result = cli_runner.invoke(
+            cli,
+            [
+                "merge-sharded-fsdp-weights",
+                str(config_path),
+                "--no-accelerate",
+                "--model-dir",
+                str(model_dir),
+            ],
+        )
+
+    assert mock.called
+    assert mock.call_args.kwargs["config"] == str(config_path)
+    assert mock.call_args.kwargs["model_dir"] == str(model_dir)
+    assert result.exit_code == 0
+
+
+def test_merge_sharded_fsdp_weights_with_save_path(cli_runner, config_path):
+    """Test merge_sharded_fsdp_weights command with save_path option"""
+    with patch("axolotl.cli.merge_sharded_fsdp_weights.do_cli") as mock:
+        result = cli_runner.invoke(
+            cli,
+            [
+                "merge-sharded-fsdp-weights",
+                str(config_path),
+                "--no-accelerate",
+                "--save-path",
+                "/path/to/save",
+            ],
+        )
+
+    assert mock.called
+    assert mock.call_args.kwargs["config"] == str(config_path)
+    assert mock.call_args.kwargs["save_path"] == "/path/to/save"
+    assert result.exit_code == 0
@@ -1,5 +1,4 @@
 """pytest tests for axolotl CLI preprocess command."""
-
 import shutil
 from pathlib import Path
 from unittest.mock import patch
tests/cli/test_cli_shard.py (new file, 76 lines)
@@ -0,0 +1,76 @@
+"""pytest tests for axolotl CLI shard command."""
+# pylint: disable=duplicate-code
+from unittest.mock import patch
+
+from axolotl.cli.main import cli
+
+
+def test_shard_with_accelerate(cli_runner, config_path):
+    """Test shard command with accelerate"""
+    with patch("subprocess.run") as mock:
+        result = cli_runner.invoke(cli, ["shard", str(config_path), "--accelerate"])
+
+    assert mock.called
+    assert mock.call_args.args[0] == [
+        "accelerate",
+        "launch",
+        "-m",
+        "axolotl.cli.shard",
+        str(config_path),
+        "--debug-num-examples",
+        "0",
+    ]
+    assert mock.call_args.kwargs == {"check": True}
+    assert result.exit_code == 0
+
+
+def test_shard_no_accelerate(cli_runner, config_path):
+    """Test shard command without accelerate"""
+    with patch("axolotl.cli.shard.do_cli") as mock:
+        result = cli_runner.invoke(cli, ["shard", str(config_path), "--no-accelerate"])
+
+    assert mock.called
+    assert result.exit_code == 0
+
+
+def test_shard_with_model_dir(cli_runner, config_path, tmp_path):
+    """Test shard command with model_dir option"""
+    model_dir = tmp_path / "model"
+    model_dir.mkdir()
+
+    with patch("axolotl.cli.shard.do_cli") as mock:
+        result = cli_runner.invoke(
+            cli,
+            [
+                "shard",
+                str(config_path),
+                "--no-accelerate",
+                "--model-dir",
+                str(model_dir),
+            ],
+            catch_exceptions=False,
+        )
+
+    assert mock.called
+    assert mock.call_args.kwargs["config"] == str(config_path)
+    assert mock.call_args.kwargs["model_dir"] == str(model_dir)
+    assert result.exit_code == 0
+
+
+def test_shard_with_save_dir(cli_runner, config_path):
+    with patch("axolotl.cli.shard.do_cli") as mock:
+        result = cli_runner.invoke(
+            cli,
+            [
+                "shard",
+                str(config_path),
+                "--no-accelerate",
+                "--save-dir",
+                "/path/to/save",
+            ],
+        )
+
+    assert mock.called
+    assert mock.call_args.kwargs["config"] == str(config_path)
+    assert mock.call_args.kwargs["save_dir"] == "/path/to/save"
+    assert result.exit_code == 0
@@ -1,5 +1,4 @@
 """pytest tests for axolotl CLI --version"""
-
 from axolotl.cli.main import cli


@@ -1,6 +1,5 @@
 """pytest tests for axolotl CLI utils."""
 # pylint: disable=redefined-outer-name
-
 import json
 from unittest.mock import Mock, patch

@@ -2,17 +2,17 @@
 Simple end-to-end test for Cut Cross Entropy integration
 """

+from pathlib import Path
+
 import pytest

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils import get_pytorch_version
 from axolotl.utils.config import normalize_config, prepare_plugins
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists
-
 # pylint: disable=duplicate-code

@@ -64,10 +64,10 @@ class TestCutCrossEntropyIntegration:
         major, minor, _ = get_pytorch_version()
         if (major, minor) < (2, 4):
             with pytest.raises(ImportError):
-                train(cfg=cfg, dataset_meta=dataset_meta)
+                train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
         else:
-            train(cfg=cfg, dataset_meta=dataset_meta)
-            check_model_output_exists(temp_dir, cfg)
+            train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+            assert (Path(temp_dir) / "model.safetensors").exists()

     @pytest.mark.parametrize(
         "attention_type",
@@ -92,7 +92,7 @@ class TestCutCrossEntropyIntegration:
         major, minor, _ = get_pytorch_version()
         if (major, minor) < (2, 4):
             with pytest.raises(ImportError):
-                train(cfg=cfg, dataset_meta=dataset_meta)
+                train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
         else:
-            train(cfg=cfg, dataset_meta=dataset_meta)
-            check_model_output_exists(temp_dir, cfg)
+            train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+            assert (Path(temp_dir) / "model.safetensors").exists()
@@ -6,8 +6,8 @@ from pathlib import Path
 import pytest
 from e2e.utils import check_tensorboard, require_torch_2_5_1

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config, prepare_plugins
 from axolotl.utils.dict import DictDefault
@@ -33,7 +33,6 @@ def min_cfg(temp_dir):
         "dataloader_prefetch_factor": 8,
         "dataloader_num_workers": 4,
         "dataloader_pin_memory": True,
-        # "dataset_prepared_path": str(Path(temp_dir) / "last_run_prepared"),
         "datasets": [
             {
                 "path": "axolotl-ai-co/evolkit-logprobs-pipeline-75k-v2-sample",
@@ -85,7 +84,7 @@ class TestKnowledgeDistillation:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
         assert (Path(temp_dir) / "model.safetensors").exists()
         check_tensorboard(
             temp_dir + "/runs", "train/loss", 1.0, "Train Loss is too high"
@@ -115,7 +114,7 @@ class TestKnowledgeDistillation:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
         assert (Path(temp_dir) / "adapter_model.safetensors").exists()
         check_tensorboard(
             temp_dir + "/runs", "train/loss", 1.0, "Train Loss is too high"
@@ -1,17 +1,16 @@
 """
 Simple end-to-end test for Liger integration
 """
+from pathlib import Path

 from e2e.utils import require_torch_2_4_1

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config, prepare_plugins
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists
-

 class LigerIntegrationTestCase:
     """
@@ -61,8 +60,8 @@ class LigerIntegrationTestCase:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "model.safetensors").exists()

     @require_torch_2_4_1
     def test_llama_w_flce(self, temp_dir):
@@ -107,5 +106,5 @@ class LigerIntegrationTestCase:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "model.safetensors").exists()
@@ -5,14 +5,15 @@ E2E tests for multipack fft llama using 4d attention masks
 import logging
 import os
 import unittest
+from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists, require_torch_2_3_1, with_temp_dir
+from ..utils import require_torch_2_3_1, with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -65,8 +66,8 @@ class Test4dMultipackLlama(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_torch_lora_packing(self, temp_dir):
@@ -109,5 +110,5 @@ class Test4dMultipackLlama(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()
@@ -5,7 +5,7 @@ from pathlib import Path

 import yaml

-from axolotl.cli.config import load_cfg
+from axolotl.cli import load_cfg
 from axolotl.utils.dict import DictDefault


@@ -4,17 +4,18 @@ E2E tests for lora llama

 import logging
 import os
+from pathlib import Path

 import pytest
 from transformers.utils import is_torch_bf16_gpu_available

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists, check_tensorboard
+from ..utils import check_tensorboard

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -80,8 +81,8 @@ class TestFAXentropyLlama:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

         check_tensorboard(
             temp_dir + "/runs", "train/train_loss", 1.5, "Train Loss is too high"
@@ -5,14 +5,15 @@ E2E tests for falcon
 import logging
 import os
 import unittest
+from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists, with_temp_dir
+from ..utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -67,8 +68,8 @@ class TestFalconPatched(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_ft(self, temp_dir):
@@ -107,5 +108,5 @@ class TestFalconPatched(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "pytorch_model.bin").exists()
@@ -5,17 +5,18 @@ E2E tests for lora llama
 import logging
 import os
 import unittest
+from pathlib import Path

 import pytest
 from transformers.utils import is_torch_bf16_gpu_available

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists, with_temp_dir
+from ..utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -71,5 +72,5 @@ class TestFusedLlama(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "pytorch_model.bin").exists()
@@ -5,16 +5,17 @@ E2E tests for llama w/ S2 attn
 import logging
 import os
 import unittest
+from pathlib import Path

 import pytest

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists, with_temp_dir
+from ..utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -69,8 +70,8 @@ class TestLlamaShiftedSparseAttention(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_fft_s2_attn(self, temp_dir):
@@ -109,5 +110,5 @@ class TestLlamaShiftedSparseAttention(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "pytorch_model.bin").exists()
@@ -5,17 +5,18 @@ E2E tests for lora llama
 import logging
 import os
 import unittest
+from pathlib import Path

 import pytest
 from transformers.utils import is_auto_gptq_available, is_torch_bf16_gpu_available

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists, with_temp_dir
+from ..utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -74,8 +75,8 @@ class TestLoraLlama(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @pytest.mark.skipif(not is_auto_gptq_available(), reason="auto-gptq not available")
     @with_temp_dir
@@ -124,5 +125,5 @@ class TestLoraLlama(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()
@@ -5,14 +5,15 @@ E2E tests for lora llama
 import logging
 import os
 import unittest
+from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists, with_temp_dir
+from ..utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -67,8 +68,8 @@ class TestMistral(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_ft_packing(self, temp_dir):
@@ -108,5 +109,5 @@ class TestMistral(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "pytorch_model.bin").exists()
@@ -5,14 +5,15 @@ E2E tests for mixtral
 import logging
 import os
 import unittest
+from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists, with_temp_dir
+from ..utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -64,8 +65,8 @@ class TestMixtral(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_ft(self, temp_dir):
@@ -102,9 +103,9 @@ class TestMixtral(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        model, _ = train(cfg=cfg, dataset_meta=dataset_meta)
+        model, _ = train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
         assert (
             "MixtralFlashAttention2"
             in model.model.layers[0].self_attn.__class__.__name__
         )
-        check_model_output_exists(temp_dir, cfg)
+        assert (Path(temp_dir) / "pytorch_model.bin").exists()
@@ -6,6 +6,7 @@ import unittest

 import transformers

+from axolotl.common.cli import TrainerCliArgs
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.models import load_model, load_tokenizer
@@ -48,8 +49,9 @@ class TestModelPatches(unittest.TestCase):
             }
         )
         normalize_config(cfg)
+        cli_args = TrainerCliArgs()
         tokenizer = load_tokenizer(cfg)
-        model, _ = load_model(cfg, tokenizer, inference=False)
+        model, _ = load_model(cfg, tokenizer, inference=cli_args.inference)

         assert (
             "MixtralFlashAttention2"
@@ -85,8 +87,9 @@ class TestModelPatches(unittest.TestCase):
             }
         )
         normalize_config(cfg)
+        cli_args = TrainerCliArgs()
         tokenizer = load_tokenizer(cfg)
-        load_model(cfg, tokenizer, inference=False)
+        load_model(cfg, tokenizer, inference=cli_args.inference)

         assert (
             "torch.jit"
@@ -5,14 +5,15 @@ E2E tests for lora llama
 import logging
 import os
 import unittest
+from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists, with_temp_dir
+from ..utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -67,8 +68,8 @@ class TestPhiMultipack(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "pytorch_model.bin").exists()

     @with_temp_dir
     def test_qlora_packed(self, temp_dir):
@@ -118,5 +119,5 @@ class TestPhiMultipack(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()
@@ -6,16 +6,17 @@ import logging
 import os
 import re
 import subprocess
+from pathlib import Path

 from transformers.utils import is_torch_bf16_gpu_available

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists, most_recent_subdir
+from ..utils import most_recent_subdir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -71,7 +72,7 @@ class TestResumeLlama:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)

         resume_cfg = cfg | DictDefault(
             {
@@ -81,8 +82,8 @@ class TestResumeLlama:
         normalize_config(resume_cfg)
         cli_args = TrainerCliArgs()

-        train(cfg=resume_cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=resume_cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

         tb_log_path_1 = most_recent_subdir(temp_dir + "/runs")
         cmd = f"tensorboard --inspect --logdir {tb_log_path_1}"
@@ -3,16 +3,17 @@ e2e tests for unsloth qlora
 """
 import logging
 import os
+from pathlib import Path

 import pytest

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from ..utils import check_model_output_exists, check_tensorboard
+from ..utils import check_tensorboard

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -75,8 +76,8 @@ class TestUnslothQLoRA:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

         check_tensorboard(
             temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
@@ -125,8 +126,8 @@ class TestUnslothQLoRA:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

         check_tensorboard(
             temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
@@ -180,8 +181,8 @@ class TestUnslothQLoRA:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

         check_tensorboard(
             temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
@@ -9,13 +9,13 @@ from pathlib import Path

 import pytest

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_preference_datasets
+from axolotl.cli import load_rl_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists, with_temp_dir
+from .utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -65,10 +65,10 @@ class TestDPOLlamaLora(unittest.TestCase):
         )
         normalize_config(cfg)
         cli_args = TrainerCliArgs()
-        dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
+        dataset_meta = load_rl_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(Path(temp_dir) / "checkpoint-20", cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "checkpoint-20/adapter_model.safetensors").exists()

     @with_temp_dir
     def test_dpo_nll_lora(self, temp_dir):
@@ -110,10 +110,10 @@ class TestDPOLlamaLora(unittest.TestCase):
         )
         normalize_config(cfg)
         cli_args = TrainerCliArgs()
-        dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
+        dataset_meta = load_rl_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(Path(temp_dir) / "checkpoint-20", cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "checkpoint-20/adapter_model.safetensors").exists()

     @with_temp_dir
     def test_dpo_use_weighting(self, temp_dir):
@@ -155,10 +155,10 @@ class TestDPOLlamaLora(unittest.TestCase):
         )
         normalize_config(cfg)
         cli_args = TrainerCliArgs()
-        dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
+        dataset_meta = load_rl_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(Path(temp_dir) / "checkpoint-20", cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "checkpoint-20/adapter_model.safetensors").exists()

     @pytest.mark.skip("kto_pair no longer supported in trl")
     @with_temp_dir
@@ -200,10 +200,10 @@ class TestDPOLlamaLora(unittest.TestCase):
         )
         normalize_config(cfg)
         cli_args = TrainerCliArgs()
-        dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
+        dataset_meta = load_rl_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(Path(temp_dir) / "checkpoint-20", cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "checkpoint-20/adapter_model.safetensors").exists()

     @with_temp_dir
     def test_ipo_lora(self, temp_dir):
@@ -244,10 +244,10 @@ class TestDPOLlamaLora(unittest.TestCase):
         )
         normalize_config(cfg)
         cli_args = TrainerCliArgs()
-        dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
+        dataset_meta = load_rl_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(Path(temp_dir) / "checkpoint-20", cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "checkpoint-20/adapter_model.safetensors").exists()

     @with_temp_dir
     def test_orpo_lora(self, temp_dir):
@@ -291,10 +291,10 @@ class TestDPOLlamaLora(unittest.TestCase):
         )
         normalize_config(cfg)
         cli_args = TrainerCliArgs()
-        dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
+        dataset_meta = load_rl_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(Path(temp_dir) / "checkpoint-20", cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "checkpoint-20/adapter_model.safetensors").exists()

     @pytest.mark.skip(reason="Fix the implementation")
     @with_temp_dir
@@ -355,7 +355,7 @@ class TestDPOLlamaLora(unittest.TestCase):
         )
         normalize_config(cfg)
         cli_args = TrainerCliArgs()
-        dataset_meta = load_preference_datasets(cfg=cfg, cli_args=cli_args)
+        dataset_meta = load_rl_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(Path(temp_dir) / "checkpoint-20", cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "checkpoint-20/adapter_model.safetensors").exists()
@@ -5,14 +5,15 @@ E2E tests for llama pretrain
 import logging
 import os
 import unittest
+from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists, check_tensorboard, with_temp_dir
+from .utils import check_tensorboard, with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -60,8 +61,8 @@ class TestEmbeddingsLrScale(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "model.safetensors").exists()

         check_tensorboard(
             temp_dir + "/runs", "train/train_loss", 2.0, "Loss is too high"
@@ -104,8 +105,8 @@ class TestEmbeddingsLrScale(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "model.safetensors").exists()

         check_tensorboard(
             temp_dir + "/runs", "train/train_loss", 2.0, "Loss is too high"
@@ -5,14 +5,15 @@ E2E tests for falcon
 import logging
 import os
 import unittest
+from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists, with_temp_dir
+from .utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -69,8 +70,8 @@ class TestFalcon(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_lora_added_vocab(self, temp_dir):
@@ -122,8 +123,8 @@ class TestFalcon(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_ft(self, temp_dir):
@@ -161,5 +162,5 @@ class TestFalcon(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "pytorch_model.bin").exists()
@@ -4,11 +4,10 @@ E2E tests for llama

 import logging
 import os
+from pathlib import Path

-from e2e.utils import check_model_output_exists
-
-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault
@@ -60,8 +59,8 @@ class TestLlama:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "model.safetensors").exists()

     def test_fix_untrained_tokens(self, temp_dir):
         # pylint: disable=duplicate-code
@@ -103,8 +102,8 @@ class TestLlama:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "model.safetensors").exists()

     def test_batch_flattening(self, temp_dir):
         # pylint: disable=duplicate-code
@@ -142,5 +141,5 @@ class TestLlama:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "model.safetensors").exists()
@@ -4,31 +4,28 @@ E2E tests for llama pretrain

 import logging
 import os
+import unittest
+from pathlib import Path

-import pytest
-
-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists
+from .utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"


-class TestPretrainLlama:
+class TestPretrainLlama(unittest.TestCase):
     """
     Test case for Llama models w pretraining
     """

-    @pytest.mark.parametrize(
-        "sample_packing",
-        [True, False],
-    )
-    def test_pretrain(self, temp_dir, sample_packing):
+    @with_temp_dir
+    def test_pretrain_w_sample_packing(self, temp_dir):
         # pylint: disable=duplicate-code
         cfg = DictDefault(
             {
@@ -36,7 +33,7 @@ class TestPretrainLlama:
                 "tokenizer_type": "LlamaTokenizer",
                 "flash_attention": True,
                 "sequence_len": 1024,
-                "sample_packing": sample_packing,
+                "sample_packing": True,
                 "special_tokens": {
                     "unk_token": "<unk>",
                     "bos_token": "<s>",
@@ -66,5 +63,5 @@ class TestPretrainLlama:
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "model.safetensors").exists()
@@ -5,14 +5,15 @@ E2E tests for lora llama
 import logging
 import os
 import unittest
+from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists, with_temp_dir
+from .utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -66,8 +67,8 @@ class TestLlamaVision(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.safetensors").exists()

     @with_temp_dir
     def test_lora_llama_vision_multimodal_dataset(self, temp_dir):
@@ -111,5 +112,5 @@ class TestLlamaVision(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.safetensors").exists()
@@ -5,14 +5,15 @@ E2E tests for lora llama
 import logging
 import os
 import unittest
+from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists, with_temp_dir
+from .utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -63,5 +64,5 @@ class TestLoraLlama(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()
@@ -5,16 +5,17 @@ E2E tests for lora llama
 import logging
 import os
 import unittest
+from pathlib import Path

 import pytest

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists, with_temp_dir
+from .utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -63,5 +64,5 @@ class TestMamba(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "pytorch_model.bin").exists()
@@ -5,16 +5,17 @@ E2E tests for lora llama
 import logging
 import os
 import unittest
+from pathlib import Path

 from transformers.utils import is_torch_bf16_gpu_available

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists, with_temp_dir
+from .utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -67,8 +68,8 @@ class TestMistral(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_ft(self, temp_dir):
@@ -110,5 +111,5 @@ class TestMistral(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "pytorch_model.bin").exists()
@@ -5,17 +5,18 @@ E2E tests for mixtral
 import logging
 import os
 import unittest
+from pathlib import Path

 import torch
 from transformers.utils import is_torch_bf16_gpu_available

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists, with_temp_dir
+from .utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -73,12 +74,12 @@ class TestMixtral(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        model, _ = train(cfg=cfg, dataset_meta=dataset_meta)
+        model, _ = train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
         assert (
             model.base_model.model.model.layers[0].block_sparse_moe.gate.weight.dtype
             == torch.float32
         )
-        check_model_output_exists(temp_dir, cfg)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_qlora_wo_fa2(self, temp_dir):
@@ -127,12 +128,12 @@ class TestMixtral(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        model, _ = train(cfg=cfg, dataset_meta=dataset_meta)
+        model, _ = train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
         assert (
             model.base_model.model.model.layers[0].block_sparse_moe.gate.weight.dtype
             == torch.float32
         )
-        check_model_output_exists(temp_dir, cfg)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_16bit_lora_w_fa2(self, temp_dir):
@@ -184,12 +185,12 @@ class TestMixtral(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        model, _ = train(cfg=cfg, dataset_meta=dataset_meta)
+        model, _ = train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
         assert (
             model.base_model.model.model.layers[0].block_sparse_moe.gate.weight.dtype
             == torch.float32
         )
-        check_model_output_exists(temp_dir, cfg)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_16bit_lora_wo_fa2(self, temp_dir):
@@ -241,12 +242,12 @@ class TestMixtral(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        model, _ = train(cfg=cfg, dataset_meta=dataset_meta)
+        model, _ = train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
         assert (
             model.base_model.model.model.layers[0].block_sparse_moe.gate.weight.dtype
             == torch.float32
         )
-        check_model_output_exists(temp_dir, cfg)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_ft(self, temp_dir):
@@ -285,5 +286,5 @@ class TestMixtral(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "pytorch_model.bin").exists()
@@ -5,14 +5,15 @@ E2E tests for custom optimizers using Llama
 import logging
 import os
 import unittest
+from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists, require_torch_2_5_1, with_temp_dir
+from .utils import require_torch_2_5_1, with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -63,8 +64,8 @@ class TestCustomOptimizers(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     @require_torch_2_5_1
@@ -107,8 +108,8 @@ class TestCustomOptimizers(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()

     @with_temp_dir
     def test_fft_schedule_free_adamw(self, temp_dir):
@@ -143,5 +144,5 @@ class TestCustomOptimizers(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "model.safetensors").exists()
@@ -8,8 +8,8 @@ import unittest

 from transformers.utils import is_torch_bf16_gpu_available

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault
@@ -63,7 +63,7 @@ class TestPackedLlama(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)

         check_tensorboard(
             temp_dir + "/runs", "train/train_loss", 2.0, "Train Loss is too high"
@@ -5,14 +5,15 @@ E2E tests for lora llama
 import logging
 import os
 import unittest
+from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists, with_temp_dir
+from .utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -65,8 +66,8 @@ class TestPhi(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "pytorch_model.bin").exists()

     @with_temp_dir
     def test_phi_qlora(self, temp_dir):
@@ -114,5 +115,5 @@ class TestPhi(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()
@@ -7,13 +7,13 @@ import os
 import unittest
 from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists, check_tensorboard, with_temp_dir
+from .utils import check_tensorboard, with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -77,11 +77,11 @@ class TestReLoraLlama(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(Path(temp_dir) / "checkpoint-100/adapter", cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
         assert (
-            Path(temp_dir) / "checkpoint-100/relora/model.safetensors"
-        ).exists(), "Relora model checkpoint not found"
+            Path(temp_dir) / "checkpoint-100/adapter/adapter_model.safetensors"
+        ).exists()
+        assert (Path(temp_dir) / "checkpoint-100/relora/model.safetensors").exists()

         check_tensorboard(
             temp_dir + "/runs", "train/grad_norm", 0.2, "grad_norm is too high"
@@ -5,14 +5,15 @@ E2E tests for reward model lora llama
 import logging
 import os
 import unittest
+from pathlib import Path

-from axolotl.cli.args import TrainerCliArgs
-from axolotl.common.datasets import load_datasets
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
 from axolotl.train import train
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists, with_temp_dir
+from .utils import with_temp_dir

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
@@ -69,5 +70,5 @@ class TestRewardModelLoraLlama(unittest.TestCase):
         cli_args = TrainerCliArgs()
         dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)

-        train(cfg=cfg, dataset_meta=dataset_meta)
-        check_model_output_exists(temp_dir, cfg)
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(temp_dir) / "adapter_model.bin").exists()
@@ -14,8 +14,6 @@ import torch
|
|||||||
from packaging import version
|
from packaging import version
|
||||||
from tbparse import SummaryReader
|
from tbparse import SummaryReader
|
||||||
|
|
||||||
from axolotl.utils.dict import DictDefault
|
|
||||||
|
|
||||||
|
|
||||||
def with_temp_dir(test_func):
|
def with_temp_dir(test_func):
|
||||||
@wraps(test_func)
|
@wraps(test_func)
|
||||||
@@ -95,27 +93,3 @@ def check_tensorboard(
     df = reader.scalars  # pylint: disable=invalid-name
     df = df[(df.tag == tag)]  # pylint: disable=invalid-name
     assert df.value.values[-1] < lt_val, assertion_err
-
-
-def check_model_output_exists(temp_dir: str, cfg: DictDefault) -> None:
-    """
-    helper function to check if a model output file exists after training
-
-    checks based on adapter or not and if safetensors saves are enabled or not
-    """
-
-    if cfg.save_safetensors:
-        if not cfg.adapter:
-            assert (Path(temp_dir) / "model.safetensors").exists()
-        else:
-            assert (Path(temp_dir) / "adapter_model.safetensors").exists()
-    else:
-        # check for both, b/c in trl, it often defaults to saving safetensors
-        if not cfg.adapter:
-            assert (Path(temp_dir) / "pytorch_model.bin").exists() or (
-                Path(temp_dir) / "model.safetensors"
-            ).exists()
-        else:
-            assert (Path(temp_dir) / "adapter_model.bin").exists() or (
-                Path(temp_dir) / "adapter_model.safetensors"
-            ).exists()
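Note: the `check_model_output_exists` helper removed above encodes the save-format matrix the e2e tests rely on: full-model runs write model.safetensors (or pytorch_model.bin), adapter runs write adapter_model.safetensors (or adapter_model.bin). A minimal usage sketch, assuming a hypothetical `temp_dir` that already holds a finished LoRA run:

    from axolotl.utils.dict import DictDefault
    from .utils import check_model_output_exists  # mirrors the e2e test imports above

    temp_dir = "outputs/lora-run"  # hypothetical output directory left by train(...)
    cfg = DictDefault({"adapter": "lora", "save_safetensors": True})
    check_model_output_exists(temp_dir, cfg)  # asserts adapter_model.safetensors exists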
@@ -17,7 +17,7 @@ from huggingface_hub import snapshot_download
 from transformers import AutoTokenizer
 
 from axolotl.utils.data import load_tokenized_prepared_datasets
-from axolotl.utils.data.rl import load_prepare_preference_datasets
+from axolotl.utils.data.rl import load_prepare_dpo_datasets
 from axolotl.utils.dict import DictDefault
 
 
@@ -280,7 +280,7 @@ class TestDatasetPreparation(unittest.TestCase):
             }
         )
 
-        train_dataset, _ = load_prepare_preference_datasets(cfg)
+        train_dataset, _ = load_prepare_dpo_datasets(cfg)
 
         assert len(train_dataset) == 1800
         assert "conversation" in train_dataset.features
@@ -329,7 +329,7 @@ class TestDatasetPreparation(unittest.TestCase):
             }
         )
 
-        train_dataset, _ = load_prepare_preference_datasets(cfg)
+        train_dataset, _ = load_prepare_dpo_datasets(cfg)
 
         assert len(train_dataset) == 1800
         assert "conversation" in train_dataset.features
@@ -12,7 +12,7 @@ from datasets import Dataset
 from transformers import AutoTokenizer
 
 from axolotl.utils.data import prepare_dataset
-from axolotl.utils.data.rl import load_prepare_preference_datasets
+from axolotl.utils.data.rl import load_prepare_dpo_datasets
 from axolotl.utils.data.utils import deduplicate_and_log_datasets
 from axolotl.utils.dict import DictDefault
 from axolotl.utils.models import load_processor, load_tokenizer
@@ -236,7 +236,7 @@ class TestDeduplicateRLDataset(unittest.TestCase):
         """Verify that loading with deduplication removes duplicates."""
 
         # Load the dataset using the deduplication setting
-        train_dataset, _ = load_prepare_preference_datasets(self.cfg)
+        train_dataset, _ = load_prepare_dpo_datasets(self.cfg)
 
         # Verify that the dataset has been deduplicated
         assert len(train_dataset) == 1800, "Dataset was not properly deduplicated"
@@ -245,7 +245,7 @@ class TestDeduplicateRLDataset(unittest.TestCase):
         """Verify that loading without deduplication retains duplicates."""
         self.cfg.dataset_exact_deduplication = False
         # Load the dataset without deduplication
-        train_dataset, _ = load_prepare_preference_datasets(self.cfg)
+        train_dataset, _ = load_prepare_dpo_datasets(self.cfg)
 
         # Verify that the dataset retains duplicates
         assert (
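Both tests above pivot on `dataset_exact_deduplication`. A minimal sketch of the exact-deduplication idea they exercise, keeping the first occurrence of each identical row (illustrative only; axolotl's `deduplicate_and_log_datasets` may differ in detail):

    from datasets import Dataset

    def deduplicate_exact(dataset: Dataset) -> Dataset:
        # hypothetical helper: drop rows whose full content repeats an earlier row
        seen, keep = set(), []
        for idx, row in enumerate(dataset):
            key = str(sorted(row.items()))  # exact-match key over all columns
            if key not in seen:
                seen.add(key)
                keep.append(idx)
        return dataset.select(keep)

    ds = Dataset.from_dict(
        {"prompt": ["a", "a", "b"], "chosen": ["x", "x", "y"], "rejected": ["z", "z", "w"]}
    )
    assert len(deduplicate_exact(ds)) == 2  # the repeated first row is dropped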
@@ -1,69 +0,0 @@
-"""
-tests for loading loras
-"""
-from axolotl.utils.config import normalize_config, validate_config
-from axolotl.utils.dict import DictDefault
-from axolotl.utils.models import load_model, load_tokenizer
-
-# pylint: disable=duplicate-code
-minimal_config = DictDefault(
-    {
-        "base_model": "HuggingFaceTB/SmolLM2-135M",
-        "learning_rate": 0.000001,
-        "datasets": [
-            {
-                "path": "mhenrichsen/alpaca_2k_test",
-                "type": "alpaca",
-            }
-        ],
-        "micro_batch_size": 1,
-        "gradient_accumulation_steps": 1,
-    }
-)
-
-
-class TestLoRALoad:
-    """
-    Test class for loading LoRA weights
-    """
-
-    def test_load_lora_weights(self):
-        cfg = DictDefault(
-            {
-                "base_model": "HuggingFaceTB/SmolLM2-135M",
-                "adapter": "lora",
-                "lora_r": 8,
-                "lora_alpha": 16,
-                "lora_dropout": 0.0,
-                "lora_target_linear": True,
-                "micro_batch_size": 1,
-                "gradient_accumulation_steps": 1,
-                "sequence_len": 1024,
-            }
-            | minimal_config
-        )
-        cfg = validate_config(cfg)
-        normalize_config(cfg)
-        tokenizer = load_tokenizer(cfg)
-        load_model(cfg, tokenizer)
-
-    def test_load_lora_weights_empty_dropout(self):
-        cfg = DictDefault(
-            {
-                "base_model": "HuggingFaceTB/SmolLM2-135M",
-                "adapter": "lora",
-                "lora_r": 8,
-                "lora_alpha": 16,
-                "lora_dropout": None,
-                "lora_target_linear": True,
-                "micro_batch_size": 1,
-                "gradient_accumulation_steps": 1,
-                "sequence_len": 1024,
-            }
-            | minimal_config
-        )
-        cfg = validate_config(cfg)
-        normalize_config(cfg)
-        assert cfg.lora_dropout == 0.0
-        tokenizer = load_tokenizer(cfg)
-        load_model(cfg, tokenizer)
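The deleted file above leans on merging a plain dict literal with `minimal_config` via the `|` union operator before wrapping the result in `DictDefault`, so each test overlays adapter-specific keys onto a shared base config. A small sketch of that pattern (values taken from the deleted tests):

    from axolotl.utils.dict import DictDefault

    minimal = DictDefault({"micro_batch_size": 1, "gradient_accumulation_steps": 1})
    # dict | dict union (Python 3.9+), then wrapped for attribute-style access
    cfg = DictDefault({"adapter": "lora", "lora_r": 8} | minimal)
    assert cfg.adapter == "lora" and cfg.micro_batch_size == 1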