fix relative path for fixtures
@@ -129,6 +129,7 @@ def load_model(
             llm_int8_threshold=6.0,
             llm_int8_has_fp16_weight=False,
             bnb_4bit_compute_dtype=torch_dtype,
             bnb_4bit_use_double_quant=True,
             bnb_4bit_quant_type="nf4",
         )
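For context, the keyword arguments in this hunk are standard transformers BitsAndBytesConfig options for 4-bit (NF4) quantization. Below is a minimal sketch of how such a config is typically built and handed to from_pretrained; the model id and the float16 compute dtype are placeholders and are not taken from this commit.

# Sketch: building a 4-bit quantization config like the one above.
# Requires transformers, bitsandbytes, and accelerate to be installed.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

torch_dtype = torch.float16  # assumed compute dtype

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_compute_dtype=torch_dtype,
    bnb_4bit_use_double_quant=True,   # nested quantization of the quantization constants
    bnb_4bit_quant_type="nf4",        # NormalFloat4 quantization
)

model = AutoModelForCausalLM.from_pretrained(
    "huggyllama/llama-7b",            # placeholder model id
    quantization_config=bnb_config,
    device_map="auto",
)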
@@ -280,8 +281,8 @@ def load_model(
         # llama is PROBABLY model parallelizable, but the default isn't that it is
         # so let's only set it for the 4bit, see
         # https://github.com/johnsmith0031/alpaca_lora_4bit/blob/08b3fca4a4a9e0d3945be1bab4529f100a428636/finetune.py#L130-L133
-        setattr(model, 'is_parallelizable', True)
-        setattr(model, 'model_parallel', True)
+        setattr(model, "is_parallelizable", True)
+        setattr(model, "model_parallel", True)

     requires_grad = []
     for name, param in model.named_parameters(recurse=True):
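The change here is only a quoting style fix; the flags themselves signal to the Hugging Face Trainer that the model is already spread across devices, so it should not wrap it in DataParallel. A rough, simplified sketch of that check, for illustration only (the helper name is hypothetical):

# Sketch: roughly what the HF Trainer looks at when deciding whether the
# model is running under model parallelism. Simplified, not the real code.
def looks_model_parallel(model) -> bool:
    return bool(
        getattr(model, "is_parallelizable", False)
        and getattr(model, "model_parallel", False)
    )

# Usage after loading a model, mirroring the setattr calls in the hunk above:
#   setattr(model, "is_parallelizable", True)
#   setattr(model, "model_parallel", True)
#   assert looks_model_parallel(model)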
@@ -125,7 +125,8 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
         output_dir=cfg.output_dir,
         save_total_limit=3,
         load_best_model_at_end=(
-            cfg.val_set_size > 0
+            cfg.load_best_model_at_end is not False
+            and cfg.val_set_size > 0
             and save_steps
             and save_steps % eval_steps == 0
             and cfg.load_in_8bit is not True
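The new first condition lets a user explicitly opt out via cfg.load_best_model_at_end; otherwise the flag is only enabled when there is an eval split, periodic checkpointing is on, save_steps lines up with eval_steps (which transformers requires for load_best_model_at_end), and the model is not loaded in 8-bit. A sketch of the same guard pulled out as a standalone helper; the helper name is hypothetical and it assumes, as the original does, that eval_steps is non-zero whenever val_set_size > 0:

# Sketch: the load_best_model_at_end guard from the hunk above as a helper.
def should_load_best_model_at_end(cfg, save_steps, eval_steps) -> bool:
    return bool(
        cfg.load_best_model_at_end is not False  # explicit opt-out wins
        and cfg.val_set_size > 0                 # need an eval split to rank checkpoints
        and save_steps                           # periodic checkpointing must be enabled
        and save_steps % eval_steps == 0         # save steps must align with eval steps
        and cfg.load_in_8bit is not True         # mirrors the 8-bit exclusion in the diff
    )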