refactor(param): rename load_4bit config param to gptq

commit dd0065773a
parent c3d256271e
Author: Thytu
Date:   2023-05-27 12:36:03 +00:00
Signed-off-by: Thytu <vdmatos@gladia.io>

6 changed files with 16 additions and 12 deletions
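For existing configs, the migration is a one-line rename. A minimal sketch in Python terms (the migrate_config helper below is hypothetical and not part of this commit; only the key names load_4bit and gptq come from the diff):

    # Hypothetical helper illustrating the rename; not part of this commit.
    def migrate_config(cfg: dict) -> dict:
        if "load_4bit" in cfg:
            # the old flag carries the same meaning under its new name
            cfg["gptq"] = cfg.pop("load_4bit")
        return cfg

    assert migrate_config({"load_4bit": True}) == {"gptq": True}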

@@ -176,7 +176,7 @@ tokenizer_type: AutoTokenizer
 trust_remote_code:
 # whether you are training a 4-bit GPTQ quantized model
-load_4bit: true
+gptq: true
 gptq_groupsize: 128 # group size
 gptq_model_v1: false # v1 or v2
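Read together, the documented keys form one GPTQ block. A sketch of the resulting settings, written as a Python dict to stay in one language (keys are taken from this hunk; values are the documented examples):

    # Sketch: GPTQ-related settings after the rename (keys from this hunk only).
    gptq_settings = {
        "gptq": True,            # was load_4bit before this commit
        "gptq_groupsize": 128,   # quantization group size
        "gptq_model_v1": False,  # v1 or v2 checkpoint format
    }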

@@ -40,6 +40,6 @@ early_stopping_patience: 3
 resume_from_checkpoint:
 auto_resume_from_checkpoints: true
 local_rank:
-load_4bit: true
+gptq: true
 xformers_attention: true
 flash_attention:

@@ -4,7 +4,7 @@ model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
 trust_remote_code:
 load_in_8bit: true
-load_4bit: true
+gptq: true
 datasets:
   - path: vicgalle/alpaca-gpt4
     type: alpaca

@@ -73,7 +73,7 @@ def load_model(
     else:
         torch_dtype = torch.float32
     try:
-        if cfg.load_4bit:
+        if cfg.gptq:
             from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
                 replace_peft_model_with_int4_lora_model,
             )
@@ -95,7 +95,7 @@ def load_model(
             bnb_4bit_quant_type="nf4",
         )
     try:
-        if cfg.load_4bit and is_llama_derived_model:
+        if cfg.gptq and is_llama_derived_model:
             from alpaca_lora_4bit.autograd_4bit import load_llama_model_4bit_low_ram
             from huggingface_hub import snapshot_download
@@ -248,7 +248,7 @@ def load_model(
     if (
         ((cfg.adapter == "lora" and load_in_8bit) or cfg.adapter == "qlora")
-        and not cfg.load_4bit
+        and not cfg.gptq
         and (load_in_8bit or cfg.load_in_4bit)
     ):
         logging.info("converting PEFT model w/ prepare_model_for_int8_training")
@@ -259,7 +259,7 @@ def load_model(
     if cfg.ddp and not load_in_8bit:
         model.to(f"cuda:{cfg.local_rank}")
-    if cfg.load_4bit:
+    if cfg.gptq:
         # Scales to half
         logging.info("Fitting 4bit scales and zeros to half")
         for n, m in model.named_modules():
@@ -274,7 +274,7 @@ def load_model(
     if (
         torch.cuda.device_count() > 1
         and int(os.getenv("WORLD_SIZE", "1")) > 1
-        and cfg.load_4bit
+        and cfg.gptq
     ):
         # llama is PROBABLY model parallelizable, but the default isn't that it is
         # so let's only set it for the 4bit, see
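All four hunks above swap the same gate. A reduced sketch of that gate (the Cfg stand-in below is an assumption for illustration; axolotl passes its own config object, whose unset keys read as falsy):

    # Sketch: the renamed gate in load_model, reduced to a predicate.
    class Cfg:
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

        def __getattr__(self, name):
            return None  # unset keys read as falsy, matching the diff's usage

    def takes_gptq_path(cfg, is_llama_derived_model=True):
        # mirrors `if cfg.gptq and is_llama_derived_model:` above
        return bool(cfg.gptq and is_llama_derived_model)

    assert takes_gptq_path(Cfg(gptq=True)) is True
    assert takes_gptq_path(Cfg(load_4bit=True)) is False  # old flag is now ignored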

@@ -63,7 +63,7 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     training_arguments_kwargs["warmup_steps"] = warmup_steps
     training_arguments_kwargs["logging_steps"] = logging_steps
     if cfg.gradient_checkpointing is not None:
-        if cfg.load_4bit:
+        if cfg.gptq:
             from alpaca_lora_4bit.gradient_checkpointing import (
                 apply_gradient_checkpointing,
             )
@@ -138,7 +138,7 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     importlib.import_module("torchdistx")
     if (
         cfg.optimizer == "adamw_bnb_8bit"
-        and not cfg.load_4bit
+        and not cfg.gptq
         and not "deepspeed" in training_arguments_kwargs
         and not cfg.fsdp
     ):
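The second hunk above keeps adamw_bnb_8bit off the GPTQ path. A sketch of that condition as a standalone predicate (the function name and the SimpleNamespace stand-in are illustrative; the condition itself is taken from the hunk):

    from types import SimpleNamespace

    # Sketch: the optimizer gate from setup_trainer as a standalone predicate.
    def can_use_adamw_bnb_8bit(cfg, training_arguments_kwargs):
        return (
            cfg.optimizer == "adamw_bnb_8bit"
            and not cfg.gptq  # the GPTQ path is excluded
            and "deepspeed" not in training_arguments_kwargs
            and not cfg.fsdp
        )

    cfg = SimpleNamespace(optimizer="adamw_bnb_8bit", gptq=False, fsdp=None)
    assert can_use_adamw_bnb_8bit(cfg, {}) is True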

@@ -2,16 +2,20 @@ import logging
 def validate_config(cfg):
+    if cfg.load_4bit:
+        raise ValueError(
+            "cfg.load_4bit parameter has been deprecated and replaced by cfg.gptq"
+        )
     if cfg.adapter == "qlora":
         if cfg.merge_lora:
             # can't merge qlora if loaded in 8bit or 4bit
             assert cfg.load_in_8bit is False
-            assert cfg.load_4bit is False
+            assert cfg.gptq is False
             assert cfg.load_in_4bit is False
         else:
             assert cfg.load_in_8bit is False
-            assert cfg.load_4bit is False
+            assert cfg.gptq is False
             assert cfg.load_in_4bit is True
     if not cfg.load_in_8bit and cfg.adapter == "lora":
         logging.warning("We recommend setting `load_in_8bit: true` for LORA finetuning")
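The net effect of this hunk: configs still carrying the old flag now fail fast instead of being silently ignored. A sketch of that behavior (validate_config below is a stand-in reimplementation of just the added guard, and SimpleNamespace stands in for the project's config type):

    from types import SimpleNamespace

    # Stand-in reimplementation of the guard added above.
    def validate_config(cfg):
        if getattr(cfg, "load_4bit", None):
            raise ValueError(
                "cfg.load_4bit parameter has been deprecated and replaced by cfg.gptq"
            )

    try:
        validate_config(SimpleNamespace(load_4bit=True))
    except ValueError as err:
        print(err)  # cfg.load_4bit parameter has been deprecated and replaced by cfg.gptq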