Fix mypy typing
@@ -3,7 +3,7 @@
 import copy
 import logging
 from collections import defaultdict
-from typing import Generator
+from typing import Generator, List, Tuple

 from axolotl.prompt_tokenizers import (
     PromptTokenizingStrategy,
@@ -19,7 +19,7 @@ class PygmalionPromptTokenizingStrategy(PromptTokenizingStrategy):
     Tokenizing strategy for Pygmalion.
     """

-    bot_prefix_token_ids = []
+    bot_prefix_token_ids: List[int] = []

     def __init__(self, prompter, tokenizer, *args, **kwargs):
         super().__init__(prompter, tokenizer, *args, **kwargs)
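For context (a standalone sketch, not part of the commit): mypy cannot infer an element type for an empty literal, so a bare `= []` at class scope triggers a "Need type annotation" error; the explicit `List[int]` annotation resolves it.

from typing import List

class Example:
    # ids = []  # error: Need type annotation for "ids"
    ids: List[int] = []  # OK: element type is stated explicitly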
@@ -88,7 +88,7 @@ class PygmalionPrompter:

     def build_prompt(
         self, source, *args, **kwargs  # pylint: disable=unused-argument
-    ) -> Generator[str, None, None]:
+    ) -> Generator[Tuple[str, str], None, None]:
         for msg in source:
             yield msg["role"], msg["value"]

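For context: in `Generator[YieldType, SendType, ReturnType]` the first parameter describes what the function yields. `build_prompt` yields `(role, value)` pairs, so the corrected yield type is a two-string tuple rather than `str`. A minimal sketch:

from typing import Generator, Tuple

def messages() -> Generator[Tuple[str, str], None, None]:
    # each yield produces a 2-tuple, so the yield type is Tuple[str, str]
    yield "human", "hello"
    yield "bot", "hi there"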
@@ -226,20 +226,16 @@ class CompletionPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
     Tokenizing strategy for Completion prompts.
     """

-    def parse_instruction_fields(self, prompt) -> str:
-        return prompt["text"]
-
     def tokenize_prompt(self, prompt):
-        instruction = self.parse_instruction_fields(prompt)
-        full_prompt = self._build_full_prompt(instruction, None, None)
+        full_prompt = self._build_full_prompt(prompt["text"], None, None)
         tokenized_full_prompt = self._tokenize(full_prompt)

         return tokenized_full_prompt

     def _build_full_prompt(
         self, instruction, input, response
-    ):  # pylint: disable=unused-argument, redefined-builtin
-        return next(iter(self.prompter.build_prompt(instruction)))
+    ):  # pylint: disable=redefined-builtin
+        return next(iter(self.prompter.build_prompt(instruction, input, response)))


 class ReflectionPromptTokenizingStrategy(PromptTokenizingStrategy):
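A plausible reading of this hunk (assuming the inherited `parse_instruction_fields` returns an instruction/input/response tuple, which is not shown here): overriding it to return a bare `str` is an incompatible override under mypy, so the commit deletes the override and inlines `prompt["text"]`. A hypothetical reproduction:

from typing import Tuple

class Base:
    def parse_instruction_fields(self, prompt) -> Tuple[str, str, str]:
        return prompt["instruction"], prompt["input"], prompt["output"]

class Child(Base):
    # error: Return type "str" of "parse_instruction_fields" incompatible
    # with return type "Tuple[str, str, str]" in supertype "Base"
    def parse_instruction_fields(self, prompt) -> str:
        return prompt["text"]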
@@ -419,7 +415,7 @@ def tokenize_prompt_default() -> Tuple[Dict[str, List[int]], int]:
     Returns the default values for the tokenize prompt function
     """

-    result = {
+    result: Dict[str, List[int]] = {
         "input_ids": [],
         "attention_mask": [],
         "labels": [],
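Same pattern as the `bot_prefix_token_ids` fix above: the empty list values give mypy nothing to infer, so the dict is annotated at its assignment. A self-contained sketch:

from typing import Dict, List

# without the annotation, mypy reports: Need type annotation for "result"
result: Dict[str, List[int]] = {
    "input_ids": [],
    "attention_mask": [],
    "labels": [],
}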
@@ -3,7 +3,7 @@
 import dataclasses
 import logging
 from enum import auto, Enum
-from typing import List, Union, Generator
+from typing import List, Optional, Union, Generator

 IGNORE_TOKEN_ID = -100
@@ -24,7 +24,7 @@ class AlpacaPrompter:

     system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
     system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
-    prompt_style = None
+    prompt_style: Optional[PromptStyle] = None

     def __init__(self, prompt_style=PromptStyle.INSTRUCT.value):
         self.prompt_style = prompt_style if prompt_style else PromptStyle.INSTRUCT.value
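For context: under PEP 484, `None` is only a valid value for a type that explicitly includes it, so an attribute initialized to `None` needs `Optional[...]`. A minimal sketch (names are illustrative, not from the commit):

from enum import Enum
from typing import Optional

class Style(Enum):
    INSTRUCT = "instruct"

class Prompter:
    # may stay unset until __init__ assigns a value, hence Optional
    style: Optional[Style] = None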
@@ -231,18 +231,18 @@ class Conversation:
     offset: int
     sep_style: SeparatorStyle = SeparatorStyle.SINGLE
     sep: str = "###"
-    sep2: str = None
+    sep2: Optional[str] = None

     def get_prompt(self) -> Generator[str, None, None]:
-        seps = [self.sep, self.sep2]
-        preamble = self.system + seps[0]
+        # seps = [self.sep, self.sep2]
+        preamble = self.system + self.sep
         yield preamble
         for _, (role, message) in enumerate(self.messages):
             if message:
-                yield (role + ":", " " + message)
+                yield role + ":" + " " + message
             else:
                 logging.warning(f"role with empty message: {role}")
-                yield (role + ":",)
+                yield role + ":"

     def copy(self):
         return Conversation(
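For context: `get_prompt` is declared `Generator[str, None, None]`, but the old body yielded 1- and 2-tuples of strings, and `seps[0]` mixed `str` with the now-`Optional[str]` `sep2`. Concatenating into a single string makes every yield match the declared yield type. Sketch of the mismatch:

from typing import Generator

def bad() -> Generator[str, None, None]:
    yield ("role:", " hi")  # error: yield value has type Tuple[str, str], expected str

def good() -> Generator[str, None, None]:
    yield "role:" + " hi"  # OK: plain str matches the annotation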
@@ -3,7 +3,7 @@
 import logging
 from hashlib import md5
 from pathlib import Path
-from typing import Tuple, Union
+from typing import List, Tuple, Union

 from datasets import (
     load_from_disk,
@@ -95,40 +95,36 @@ def load_tokenized_prepared_datasets(

         # prefer local dataset, even if hub exists
         if Path(d.path).exists():
-            ds: Dataset = load_dataset(
+            ds = load_dataset(
                 "json", data_files=d.path, streaming=False, split=None
             )
         elif ds_from_hub:
             if d.data_files:
-                ds: Dataset = load_dataset(
+                ds = load_dataset(
                     d.path,
                     streaming=False,
                     data_files=d.data_files,
                     use_auth_token=use_auth_token,
                 )
             else:
-                ds: Dataset = load_dataset(
+                ds = load_dataset(
                     d.path, streaming=False, use_auth_token=use_auth_token
                 )
         else:
             fp = hf_hub_download(
                 repo_id=d.path, repo_type="dataset", filename=d.data_files
             )
-            ds: Dataset = load_dataset(
-                "json", data_files=fp, streaming=False, split=None
-            )
+            ds = load_dataset("json", data_files=fp, streaming=False, split=None)
         if not ds:
             raise ValueError("unhandled dataset load")
         # support for using a subset of the data
         if d.shards:
             if "train" in ds:
-                ds: DatasetDict = ds.shuffle(seed=42)["train"].shard(
+                ds = ds.shuffle(seed=42)["train"].shard(
                     num_shards=d.shards, index=0
                 )
             else:
-                ds: Dataset = ds.shuffle(seed=42).shard(
-                    num_shards=d.shards, index=0
-                )
+                ds = ds.shuffle(seed=42).shard(num_shards=d.shards, index=0)
         d_type = d.type
         d_type_split = d_type.split(":")
         d_base_type = d_type_split[0]
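For context: mypy permits only one annotation per variable name, so annotating `ds` in several branches (`ds: Dataset = ...` in one, `ds: DatasetDict = ...` in another) produces errors like `Name "ds" already defined`. Dropping the per-branch annotations and letting mypy infer the type of `ds` from `load_dataset`'s return value avoids the conflict. A minimal sketch:

from typing import Union

def load(flag: bool) -> Union[int, str]:
    return 1 if flag else "x"

def pick(flag: bool) -> Union[int, str]:
    # Annotating x differently in each branch (x: int = ... / x: str = ...)
    # triggers: error: Name "x" already defined. Bare assignments are fine,
    # since both branches assign the same inferred Union[int, str].
    if flag:
        x = load(True)
    else:
        x = load(False)
    return x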
@@ -232,7 +228,7 @@ def load_tokenized_prepared_datasets(
             logging.error(f"unhandled prompt tokenization strategy: {d.type}")
     logging.info("tokenizing, merging, and shuffling master dataset")

-    samples = []
+    samples: List[int] = []
     for d in datasets:
         samples = samples + list(d)
     dataset = Dataset.from_list(samples).shuffle(seed=42)
@@ -81,7 +81,7 @@ def load_model(
     adapter="lora",
     inference=False,
 ):
-    # type: (str, str, str, str, DictDefault, Optional[str], bool) -> Tuple[PreTrainedModel, PreTrainedTokenizer, Optional[PeftConfig]]
+    # type: (str, str, str, str, DictDefault, Optional[str], bool) -> Tuple[PreTrainedModel, Optional[PeftConfig]]
     """
     Load a model from a base model and a model type.
     """
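For context: the `# type: (...) -> ...` line is a PEP 484 type comment, an alternative spelling of a signature that mypy checks like inline annotations. The old comment declared a three-element return tuple; if the function actually returns only the model and the optional PEFT config, mypy flags every return statement until the comment matches. A hedged sketch of the convention (toy types, not the real signature):

from typing import Optional, Tuple

def load_model(base, cfg):
    # type: (str, dict) -> Tuple[str, Optional[str]]
    # mypy validates returns against the type comment above
    return base, None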
@@ -5,6 +5,7 @@ import math
 import os
 import sys
 from pathlib import Path
+from typing import Optional

 import bitsandbytes as bnb
 import torch.cuda
@@ -28,7 +29,7 @@ class OneCycleLRSchedulerTrainer(Trainer):
         self.lr_scheduler = None

     def create_scheduler(
-        self, num_training_steps: int, optimizer: torch.optim.Optimizer = None
+        self, num_training_steps: int, optimizer: Optional[torch.optim.Optimizer] = None
     ):
         optimizer = self.optimizer if optimizer is None else optimizer
         num_warmup_steps = self.args.get_warmup_steps(num_training_steps)
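For context: mypy's no-implicit-optional default rejects a `None` default on a parameter whose annotation does not allow it, so `optimizer: torch.optim.Optimizer = None` must become `Optional[torch.optim.Optimizer]`. A minimal sketch:

from typing import Optional

def create_scheduler(steps: int, factor: Optional[float] = None) -> float:
    # Optional[...] makes the None default explicit for mypy
    return steps * (factor if factor is not None else 1.0)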