fixes w/ example for super basic lora starter

Wing Lian
2023-05-25 11:59:08 -04:00
parent 951facbb1f
commit a5d739b66b
3 changed files with 74 additions and 5 deletions

@@ -0,0 +1,67 @@
base_model: huggyllama/llama-7b
base_model_config: huggyllama/llama-7b
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
load_in_8bit: true
load_in_4bit: false
strict: false
push_dataset_to_hub:
datasets:
  - path: teknium/GPT4-LLM-Cleaned
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.02
adapter: lora
lora_model_dir:
sequence_len: 512
max_packed_sequence_len:
lora_r: 8
lora_alpha: 16
lora_dropout: 0.0
lora_target_modules:
  - gate_proj
  - down_proj
  - up_proj
  - q_proj
  - v_proj
  - k_proj
  - o_proj
lora_fan_in_fan_out:
wandb_project:
wandb_watch:
wandb_run_id:
wandb_log_model:
output_dir: ./lora-out
batch_size: 4
micro_batch_size: 1
num_epochs: 4
optimizer: adamw_bnb_8bit
torchdistx_path:
lr_scheduler: cosine
learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: false
fp16: true
tf32: true
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention: true
flash_attention:
gptq_groupsize:
gptq_model_v1:
warmup_steps: 10
eval_steps: 50
save_steps:
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
  bos_token: "<s>"
  eos_token: "</s>"
  unk_token: "<unk>"
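
For orientation, the lora_r / lora_alpha / lora_dropout / lora_target_modules fields above line up with the fields of a peft LoraConfig. Below is a minimal sketch of the equivalent adapter setup, assuming the Hugging Face peft and transformers libraries rather than this repo's actual model-loading code:

# Sketch only: shows how the lora_* fields in the config above map onto
# a peft LoraConfig; this is not the code path the repo itself uses.
from peft import LoraConfig, get_peft_model
from transformers import LlamaForCausalLM

model = LlamaForCausalLM.from_pretrained(
    "huggyllama/llama-7b",  # base_model
    load_in_8bit=True,      # load_in_8bit
    device_map="auto",
)
lora_config = LoraConfig(
    r=8,                    # lora_r
    lora_alpha=16,          # lora_alpha
    lora_dropout=0.0,       # lora_dropout
    target_modules=[        # lora_target_modules
        "gate_proj", "down_proj", "up_proj",
        "q_proj", "v_proj", "k_proj", "o_proj",
    ],
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the adapter weights are trainable

Note also that batch_size here acts as the effective batch size: with micro_batch_size: 1, training accumulates gradients over 4 micro-batches per optimizer step rather than using a per-device batch of 4.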

@@ -18,7 +18,7 @@ class AlpacaPrompter:
     prompt_style = None
 
     def __init__(self, prompt_style="instruct"):
-        self.prompt_style = prompt_style
+        self.prompt_style = prompt_style if prompt_style else PromptStyle.instruct.value
         self.match_prompt_style()
 
     def match_prompt_style(self):
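
The one-line change above guards against a caller passing prompt_style=None explicitly, which previously overwrote the default and left the prompter without a style. A standalone sketch of the fallback, using a stand-in PromptStyle enum rather than the repo's real one:

# Standalone sketch of the fallback fixed above; this PromptStyle is a
# stand-in for illustration, not the repo's actual enum definition.
from enum import Enum

class PromptStyle(Enum):
    instruct = "instruct"

class AlpacaPrompter:
    def __init__(self, prompt_style="instruct"):
        # An explicit None no longer clobbers the default style.
        self.prompt_style = prompt_style if prompt_style else PromptStyle.instruct.value

print(AlpacaPrompter(prompt_style=None).prompt_style)  # "instruct", not None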

@@ -60,10 +60,12 @@ def load_tokenized_prepared_datasets(
         else Path(default_dataset_prepared_path) / ds_hash
     )
     dataset = None
+    use_auth_token = False
     try:
+        if cfg.push_dataset_to_hub:
+            use_auth_token = True
         dataset = load_dataset(
-            f"{cfg.push_dataset_to_hub}/{ds_hash}", use_auth_token=True
+            f"{cfg.push_dataset_to_hub}/{ds_hash}", use_auth_token=use_auth_token
         )
         dataset = dataset["train"]
     except:
@@ -83,7 +85,7 @@ def load_tokenized_prepared_datasets(
             ds = None
             ds_from_hub = False
             try:
-                load_dataset(d.path, streaming=True, use_auth_token=True)
+                load_dataset(d.path, streaming=True, use_auth_token=use_auth_token)
                 ds_from_hub = True
             except FileNotFoundError:
                 pass
@@ -99,10 +101,10 @@ def load_tokenized_prepared_datasets(
                         d.path,
                         streaming=False,
                         data_files=d.data_files,
-                        use_auth_token=True,
+                        use_auth_token=use_auth_token,
                     )
                 else:
-                    ds = load_dataset(d.path, streaming=False, use_auth_token=True)
+                    ds = load_dataset(d.path, streaming=False, use_auth_token=use_auth_token)
             else:
                 fp = hf_hub_download(
                     repo_id=d.path, repo_type="dataset", filename=d.data_files
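
Taken together, the three hunks above replace a hard-coded use_auth_token=True with a flag that is only switched on when push_dataset_to_hub is set, so loading public datasets no longer requires a Hugging Face token. A reduced sketch of the pattern, assuming the datasets library; load_prepared is a hypothetical stand-in for the much larger load_tokenized_prepared_datasets:

# Reduced sketch of the auth gating introduced above; the real function
# takes more arguments and handles many more dataset sources.
from datasets import load_dataset

def load_prepared(cfg, ds_hash):
    use_auth_token = False
    try:
        if cfg.push_dataset_to_hub:
            # Authenticate only when the user has opted into pushing/pulling
            # prepared datasets under their own hub namespace.
            use_auth_token = True
        dataset = load_dataset(
            f"{cfg.push_dataset_to_hub}/{ds_hash}", use_auth_token=use_auth_token
        )
        return dataset["train"]
    except Exception:
        return None  # fall back to preparing the dataset locally (elided)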