rename var and reformat

Aman Karmani
2023-08-27 19:55:11 +00:00
parent 7fd662dd89
commit f319b0bc67

@@ -355,7 +355,7 @@ def load_model(
             if hasattr(module, "weight"):
                 module.to(torch.float32)
 
-    fix_dtype = not cfg.adapter
+    needs_fa2_dtype = not cfg.adapter
     if not cfg.gptq and (
         (cfg.adapter == "lora" and load_in_8bit)
         or (cfg.adapter == "qlora" and cfg.load_in_4bit)
@@ -364,13 +364,11 @@ def load_model(
         model = prepare_model_for_kbit_training(
             model, use_gradient_checkpointing=cfg.gradient_checkpointing
         )
-        fix_dtype = True
+        needs_fa2_dtype = True
 
     # LlamaRMSNorm layers are in fp32 after kbit_training or full finetune, so we need to
     # convert them back to fp16/bf16 for flash-attn compatibility.
-    if fix_dtype and (
-        cfg.flash_attention and cfg.is_llama_derived_model
-    ):
+    if needs_fa2_dtype and (cfg.flash_attention and cfg.is_llama_derived_model):
         for name, module in model.named_modules():
             if "norm" in name:
                 module.to(cfg.torch_dtype)
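
For context, a minimal standalone sketch of the pattern this diff renames (fix_dtype -> needs_fa2_dtype): prepare_model_for_kbit_training upcasts the norm layers (and lm_head/embeddings) to fp32, and flash-attn needs fp16/bf16, so the norm modules are cast back to the training dtype afterwards. This is an illustrative sketch, not the axolotl load_model code; the checkpoint id and dtype are placeholders.

import torch
from peft import prepare_model_for_kbit_training
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

model_id = "meta-llama/Llama-2-7b-hf"  # placeholder checkpoint
torch_dtype = torch.bfloat16           # dtype the training run actually uses

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    torch_dtype=torch_dtype,
)

# k-bit preparation upcasts LayerNorm/RMSNorm (plus lm_head/embeddings) to fp32,
# so note that a dtype fix-up pass is needed before flash-attn can run.
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)
needs_fa2_dtype = True

if needs_fa2_dtype:
    # Cast the norm layers back to fp16/bf16 so flash-attn sees a supported dtype.
    for name, module in model.named_modules():
        if "norm" in name:
            module.to(torch_dtype)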