run PR e2e docker CI tests in Modal (#1217) [skip ci]
* wip modal for ci
* handle falcon layernorms better
* update
* rebuild the template each time with the pseudo-ARGS
* fix ref
* update tests to use modal
* cleanup ci script
* make sure to install jinja2 also
* kickoff the gh action on gh hosted runners and specify num gpus
@@ -645,7 +645,10 @@ def load_model(
     if not cfg.fsdp:
         # FSDP doesn't like mixed Float and BFloat16
         for name, module in model.named_modules():
-            if any(m in name for m in ["norm", "gate"]):
+            if (
+                any(m in name for m in ["norm", "gate"])
+                or "LayerNorm" in module.__class__.__name__
+            ):
                 module.to(torch.float32)
             if model_config.model_type == "btlm":
                 # don't upcast lm_head for btlm
@@ -684,7 +687,7 @@ def load_model(
     if needs_fa2_dtype or cfg.flash_attention:
         LOG.info("converting modules to %s for flash attention", cfg.torch_dtype)
         for name, module in model.named_modules():
-            if "norm" in name:
+            if "norm" in name or "LayerNorm" in module.__class__.__name__:
                 module.to(cfg.torch_dtype)
             if any(m in name for m in embedding_modules):
                 if hasattr(module, "weight"):
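The substantive change in both hunks ("handle falcon layernorms better") is that modules are now matched by class name as well as by module name. Some Falcon checkpoints register their layer norms under names such as ln_attn / ln_mlp, which the old substring check on "norm"/"gate" never matched. Below is a minimal sketch of that selection logic, not the repository's code: TinyFalconBlock and its attribute names are illustrative stand-ins for Falcon-style naming, while the loop body mirrors the first hunk.

    import torch
    import torch.nn as nn

    class TinyFalconBlock(nn.Module):
        """Hypothetical block using Falcon-style layer norm names."""

        def __init__(self, hidden: int = 8):
            super().__init__()
            self.ln_attn = nn.LayerNorm(hidden)  # name contains no "norm"
            self.ln_mlp = nn.LayerNorm(hidden)   # name contains no "norm"
            self.dense = nn.Linear(hidden, hidden)

    model = TinyFalconBlock().to(torch.bfloat16)

    # Upcast norm-like modules to float32, as in the first hunk: match either on
    # the module name or on the module's class name.
    for name, module in model.named_modules():
        if (
            any(m in name for m in ["norm", "gate"])
            or "LayerNorm" in module.__class__.__name__
        ):
            module.to(torch.float32)

    print(model.ln_attn.weight.dtype)  # torch.float32 (caught via class name)
    print(model.dense.weight.dtype)    # torch.bfloat16 (left untouched)

The second hunk applies the same class-name test in the other direction, casting LayerNorm modules back to cfg.torch_dtype so flash attention does not see mixed dtypes.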