add alpaca multiple choice instruct dataset support
This commit is contained in:
@@ -67,7 +67,7 @@ def do_inference(cfg, model, tokenizer, prompter="AlpacaPrompter"):
|
||||
instruction = get_multi_line_input()
|
||||
if not instruction:
|
||||
return
|
||||
prompt = prompter_module().build_prompt(instruction=instruction)
|
||||
prompt: str = next(prompter_module().build_prompt(instruction=instruction))
|
||||
batch = tokenizer(prompt, return_tensors="pt", add_special_tokens=True)
|
||||
|
||||
model.eval()
|
||||
|
||||
@@ -92,6 +92,15 @@ class AlpacaPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
|
||||
)
|
||||
|
||||
|
||||
class AlpacaMultipleChoicePromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
    """Tokenizing strategy for multiple-choice instruct rows.

    Expects each row to carry ``question``, ``choices`` (an iterable of
    answer options) and ``explanation`` keys.
    """

    def parse_instruction_fields(self, prompt) -> (str, str, str):
        # Render every option as a quoted bullet line; the joined text
        # becomes the prompt's "input" section.
        rendered_choices = [f'- "{option}"' for option in prompt["choices"]]
        question = prompt["question"]
        explanation = prompt["explanation"]
        return question, "\n".join(rendered_choices), explanation
|
||||
|
||||
|
||||
class JeopardyPromptTokenizingStrategy(InstructionPromptTokenizingStrategy):
|
||||
def parse_instruction_fields(self, prompt) -> (str, str, str):
|
||||
return (
|
||||
|
||||
@@ -35,6 +35,10 @@ class JeopardyPrompter(AlpacaPrompter):
|
||||
# Template for Jeopardy-style rows: {instruction} is the clue, {input} is the
# category. Fixes the typo "tbe" -> "the" in the rendered instruction text.
# NOTE(review): this changes the literal prompt string, so prompts tokenized
# with the old template will differ byte-for-byte from new ones.
prompt_input = "Below is a Jeopardy clue paired with input providing the category of the clue. Write a concise response that best answers the clue given the category.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
|
||||
|
||||
|
||||
class MultipleChoiceExplainPrompter(AlpacaPrompter):
    """Prompter for multiple-choice questions that asks the model to pick an
    answer and explain its reasoning.

    ``{instruction}`` is filled with the question text and ``{input}`` with
    the pre-rendered list of choices.
    """

    # Template used when the row provides an "input" (the choice list).
    prompt_input = "Choose the answer that best answers the question. Explain your reasoning.\n\n### Question:\n{instruction}\n\n### Choices:\n{input}\n\n### Response:\n"
|
||||
|
||||
|
||||
class CompletionPrompter(AlpacaPrompter):
    """Pass-through prompter for plain-text completion datasets: the raw
    text is yielded unchanged, with no template applied."""

    def build_prompt(self, instruction: str, input=None, output=None) -> Generator[str, None, None]:
        # input/output are accepted only to match the AlpacaPrompter
        # build_prompt signature; both are ignored here.
        yield instruction
|
||||
|
||||
@@ -19,7 +19,7 @@ from axolotl.prompt_tokenizers import (
|
||||
AlpacaReflectionPTStrategy,
|
||||
ShareGPTPromptTokenizingStrategy,
|
||||
JeopardyPromptTokenizingStrategy,
|
||||
CompletionPromptTokenizingStrategy,
|
||||
CompletionPromptTokenizingStrategy, AlpacaMultipleChoicePromptTokenizingStrategy,
|
||||
)
|
||||
from axolotl.prompters import (
|
||||
AlpacaPrompter,
|
||||
@@ -27,7 +27,7 @@ from axolotl.prompters import (
|
||||
ReflectAlpacaPrompter,
|
||||
ShareGPTPrompter,
|
||||
JeopardyPrompter,
|
||||
CompletionPrompter,
|
||||
CompletionPrompter, MultipleChoiceExplainPrompter,
|
||||
)
|
||||
|
||||
|
||||
@@ -88,6 +88,12 @@ def load_tokenized_prepared_datasets(tokenizer, cfg, default_dataset_prepared_pa
|
||||
)
|
||||
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds["train"])
|
||||
datasets.append(ds_wrapper)
|
||||
elif d.type == "explainchoice":
|
||||
ds_strategy = AlpacaMultipleChoicePromptTokenizingStrategy(
|
||||
MultipleChoiceExplainPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len
|
||||
)
|
||||
ds_wrapper = TokenizedPromptDataset(ds_strategy, ds["train"])
|
||||
datasets.append(ds_wrapper)
|
||||
elif d.type == "jeopardy":
|
||||
ds_strategy = JeopardyPromptTokenizingStrategy(
|
||||
JeopardyPrompter(), tokenizer, cfg.train_on_inputs, cfg.sequence_len
|
||||
|
||||
Reference in New Issue
Block a user