commit 2eda9e02a9
parent 78b9efb7f4
Author: Aman Karmani
Date:   2023-08-03 21:04:12 +00:00

@@ -333,7 +333,7 @@ def load_model(
         model, use_gradient_checkpointing=cfg.gradient_checkpointing
     )
-    # LlamaRMSNorm layers are in fp32 after kit call, so we need to
+    # LlamaRMSNorm layers are in fp32 after kbit_training, so we need to
     # convert them back to fp16/bf16 for flash-attn compatibility.
     if cfg.flash_attention and cfg.is_llama_derived_model:
         for name, module in model.named_modules():
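
For context, the comment being corrected describes the dtype fix-up applied after peft's prepare_model_for_kbit_training: that call upcasts the LlamaRMSNorm layers to fp32 for training stability, while the flash-attn kernels expect a uniform fp16/bf16 model, so the norm layers are cast back afterwards. A minimal standalone sketch of that pattern follows; the model id, target dtype, and gradient-checkpointing flag are placeholder assumptions rather than values read from the axolotl cfg.

    import torch
    from transformers import AutoModelForCausalLM
    from peft import prepare_model_for_kbit_training

    # Placeholder model id and dtype; axolotl derives these from its config (cfg).
    model = AutoModelForCausalLM.from_pretrained(
        "NousResearch/Llama-2-7b-hf", torch_dtype=torch.bfloat16
    )
    model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)

    # prepare_model_for_kbit_training leaves the RMSNorm weights in fp32;
    # cast them back so flash-attn sees fp16/bf16 activations throughout.
    for name, module in model.named_modules():
        if "norm" in name:
            module.to(torch.bfloat16)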