support for alpaca-like instruction datasets without inputs
@@ -102,8 +102,8 @@ def load_model(base_model, base_model_config, model_type, tokenizer_type, cfg, a
             base_model_config if base_model_config else base_model,
             model_path,
             device_map=cfg.device_map,
-            groupsize=-1,
-            is_v1_model=True,
+            groupsize=cfg.gptq_groupsize if cfg.gptq_groupsize else -1,
+            is_v1_model=cfg.gptq_model_v1 if cfg.gptq_model_v1 is not None else True,
         )
         load_in_8bit = False
     elif "llama" in base_model:
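
The first hunk replaces hardcoded GPTQ arguments with config-driven values that fall back to the old defaults. A minimal sketch of how those fallback expressions behave, using a stand-in SimpleNamespace rather than the repository's actual cfg object:

from types import SimpleNamespace

def resolve_gptq_args(cfg):
    # Mirrors the fallback expressions from the diff: unset config values
    # revert to the previously hardcoded defaults.
    groupsize = cfg.gptq_groupsize if cfg.gptq_groupsize else -1
    is_v1_model = cfg.gptq_model_v1 if cfg.gptq_model_v1 is not None else True
    return groupsize, is_v1_model

cfg = SimpleNamespace(gptq_groupsize=None, gptq_model_v1=None)
assert resolve_gptq_args(cfg) == (-1, True)    # unset -> old defaults

cfg = SimpleNamespace(gptq_groupsize=128, gptq_model_v1=False)
assert resolve_gptq_args(cfg) == (128, False)  # explicit values win

Note the asymmetry: gptq_groupsize uses truthiness (so 0 also falls back to -1), while gptq_model_v1 is compared against None, so an explicit False is respected.
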
@@ -37,7 +37,8 @@ class AlpacaPromptTokenizingStrategy(PromptTokenizingStrategy):
         tokenized_full_prompt = self._tokenize(full_prompt)
         if not self.train_on_inputs:
             user_prompt = self.prompter.build_prompt(
-                prompt["instruction"], prompt["input"]
+                prompt["instruction"],
+                prompt["input"] if "input" in prompt else "",
             )
             tokenized_user_prompt = self._tokenize(user_prompt, add_eos_token=False)
             user_prompt_len = len(tokenized_user_prompt["input_ids"])
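
This hunk lets build_prompt tolerate records with no "input" field when computing the user-prompt length. That length is what allows the prompt tokens to be excluded from the loss when train_on_inputs is off; a simplified sketch, assuming the common Hugging Face convention that a label of -100 is ignored by the loss:

def mask_prompt_labels(full_input_ids, user_prompt_len):
    # Copy the token ids, then blank out the user-prompt portion so the
    # loss is computed only on the response tokens.
    labels = list(full_input_ids)
    labels[:user_prompt_len] = [-100] * user_prompt_len
    return labels

full_ids = [1, 512, 513, 514, 900, 901, 2]   # 4 prompt tokens, then response
print(mask_prompt_labels(full_ids, user_prompt_len=4))
# -> [-100, -100, -100, -100, 900, 901, 2]
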
@@ -51,7 +52,7 @@ class AlpacaPromptTokenizingStrategy(PromptTokenizingStrategy):
     def _tokenize_full_prompt(self, prompt):
         return self.prompter.build_prompt(
             prompt["instruction"],
-            prompt["input"],
+            prompt["input"] if "input" in prompt else "",
             prompt["output"],
         )
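
Both hunks pass an empty string to the prompter when the dataset record has no "input" key. A hypothetical alpaca-style prompter would then switch to the no-input template; the template text below follows the original Stanford Alpaca format, but the function name and structure are assumptions, not the repository's exact implementation:

PROMPT_WITH_INPUT = (
    "Below is an instruction that describes a task, paired with an input "
    "that provides further context. Write a response that appropriately "
    "completes the request.\n\n"
    "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
)
PROMPT_NO_INPUT = (
    "Below is an instruction that describes a task. Write a response that "
    "appropriately completes the request.\n\n"
    "### Instruction:\n{instruction}\n\n### Response:\n"
)

def build_prompt(instruction, input="", output=None):
    # An empty input selects the shorter template; output, when given,
    # is appended so the same helper can build the full training prompt.
    template = PROMPT_WITH_INPUT if input else PROMPT_NO_INPUT
    prompt = template.format(instruction=instruction, input=input)
    return (prompt + output) if output else prompt

print(build_prompt("Name three primary colors."))   # takes the no-input path
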