From c1a7b3dd69c17224f40b397590c576fde0fd699c Mon Sep 17 00:00:00 2001
From: Wing Lian
Date: Tue, 27 Feb 2024 17:20:01 -0500
Subject: [PATCH] add gemma instruct chat template (#1341)

* add gemma instruct chat template

* support for chat template strategy too
---
 .../prompt_strategies/chat_template.py        | 78 +++++++++++++++++++
 src/axolotl/utils/chat_templates.py           |  1 +
 .../config/models/input/v0_4_1/__init__.py    |  1 +
 3 files changed, 80 insertions(+)
 create mode 100644 src/axolotl/prompt_strategies/chat_template.py

diff --git a/src/axolotl/prompt_strategies/chat_template.py b/src/axolotl/prompt_strategies/chat_template.py
new file mode 100644
index 000000000..8dff3845b
--- /dev/null
+++ b/src/axolotl/prompt_strategies/chat_template.py
@@ -0,0 +1,78 @@
+"""
+HF Chat Templates prompt strategy
+"""
+from typing import Any, Dict, Optional
+
+from axolotl.prompt_tokenizers import PromptTokenizingStrategy
+from axolotl.prompters import Prompter
+from axolotl.utils.chat_templates import chat_templates
+
+
+class ChatTemplatePrompter(Prompter):
+    """prompter for HF chat templates"""
+
+    def __init__(self, tokenizer, chat_template=None, max_length=2048):
+        self.tokenizer = tokenizer
+        self.chat_template = chat_template
+        self.max_length = max_length
+
+    def build_prompt(self, conversation, add_generation_prompt=False):
+        return self.tokenizer.apply_chat_template(
+            conversation,
+            truncation=True,
+            max_length=self.max_length,
+            add_generation_prompt=add_generation_prompt,
+            chat_template=self.chat_template,
+        )
+
+
+class ChatTemplateStrategy(PromptTokenizingStrategy):
+    """
+    Tokenizing strategy for instruction-based prompts.
+    """
+
+    def tokenize_prompt(self, prompt):
+        turns = self.get_conversation_thread(prompt)
+        prompt_ids = self.prompter.build_prompt([turns[0]], add_generation_prompt=True)
+        input_ids = self.prompter.build_prompt(turns)
+
+        if not self.train_on_inputs:
+            user_prompt_len = len(prompt_ids)
+            labels = [-100] * user_prompt_len + input_ids[user_prompt_len:]
+        else:
+            labels = input_ids
+
+        tokenized_prompt = {
+            "input_ids": input_ids,
+            "labels": labels,
+            "attention_mask": [1] * len(input_ids),
+        }
+
+        return tokenized_prompt
+
+    def get_conversation_thread(self, prompt):
+        conversations = prompt["conversations"]
+        # remap roles - allow for assistant turn
+        role_map = {
+            "human": "user",
+            "user": "user",
+            "assistant": "assistant",
+            "gpt": "assistant",
+        }
+        turns = [
+            {"role": role_map[t["from"]], "content": t["value"]} for t in conversations
+        ]
+        return turns
+
+
+def load(tokenizer, cfg, ds_cfg: Optional[Dict[str, Any]] = None):
+    chat_template = (
+        ds_cfg["chat_template"] if ds_cfg and "chat_template" in ds_cfg else "chatml"
+    )
+    strategy = ChatTemplateStrategy(
+        ChatTemplatePrompter(tokenizer, chat_templates(chat_template)),
+        tokenizer,
+        cfg.train_on_inputs,
+        cfg.sequence_len,
+    )
+    return strategy
diff --git a/src/axolotl/utils/chat_templates.py b/src/axolotl/utils/chat_templates.py
index bcd20fb3a..1ec83536d 100644
--- a/src/axolotl/utils/chat_templates.py
+++ b/src/axolotl/utils/chat_templates.py
@@ -22,6 +22,7 @@ def chat_templates(user_choice: str):
         "alpaca": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '### Instruction: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ '### Response: ' + message['content'] + eos_token}}{% endif %}{% endfor %}",
         "inst": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",  # I don't know what this one is called. Used by Mistral/Mixtral.
         "chatml": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful assistant.' %}{% endif %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{{'<|im_start|>system\n' + system_message + '<|im_end|>\n'}}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+        "gemma": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
     }
 
     if user_choice in templates:
diff --git a/src/axolotl/utils/config/models/input/v0_4_1/__init__.py b/src/axolotl/utils/config/models/input/v0_4_1/__init__.py
index 062f60a6e..4de51544e 100644
--- a/src/axolotl/utils/config/models/input/v0_4_1/__init__.py
+++ b/src/axolotl/utils/config/models/input/v0_4_1/__init__.py
@@ -76,6 +76,7 @@ class SFTDataset(BaseModel):
     type: Optional[Union[str, UserDefinedPrompterType]] = None
     shards: Optional[int] = None
     conversation: Optional[str] = None
+    chat_template: Optional[str] = None
     data_files: Optional[Union[str, List[str]]] = None
     name: Optional[str] = None
     ds_type: Optional[str] = None
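
With the new `chat_template` field on `SFTDataset` and the `load()` entry point in `prompt_strategies/chat_template.py`, a dataset entry can opt into this strategy from the config. A minimal sketch of such an entry; the path and file are illustrative, not part of this PR:

```yaml
datasets:
  - path: ./data/conversations.jsonl  # hypothetical ShareGPT-style file with a "conversations" field
    type: chat_template               # resolves to prompt_strategies/chat_template.py
    chat_template: gemma              # falls back to "chatml" when omitted
```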
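For reference, this is roughly what the new `gemma` template renders, using the same `chat_template=` kwarg that `ChatTemplatePrompter.build_prompt` passes to `apply_chat_template`; the checkpoint name is an assumption, any tokenizer with a `bos_token` would do:

```python
from transformers import AutoTokenizer

from axolotl.utils.chat_templates import chat_templates

# Assumed checkpoint; only used here to supply bos_token and the template engine.
tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it")
turns = [
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi! How can I help?"},
]
text = tokenizer.apply_chat_template(
    turns,
    tokenize=False,  # return the rendered string instead of token ids
    chat_template=chat_templates("gemma"),
)
print(text)
# <bos><start_of_turn>user
# Hello<end_of_turn>
# <start_of_turn>model
# Hi! How can I help?<end_of_turn>
```

Note that `tokenize_prompt` renders the first user turn alone with `add_generation_prompt=True` and masks that many leading tokens with `-100`, so when `train_on_inputs` is off, only tokens after the first user turn contribute to the loss.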