Compare commits


3 Commits
tui ... sac

Author     SHA1        Message                                       Date
---------  ----------  --------------------------------------------  --------------------------
Wing Lian  1f5c0d3613  fix graph break for compile                   2025-05-23 11:50:37 -04:00
Wing Lian  3ae0f7c08e  make sure torch_compile is enabled with SAC   2025-05-23 11:15:44 -04:00
Wing Lian  5930c91a12  add support for SAC                           2025-05-23 10:33:02 -04:00
3 changed files with 36 additions and 8 deletions


@@ -16,15 +16,24 @@ from transformers.utils import is_torch_bf16_gpu_available
 @torch.jit.script
 def get_max_seqlen_in_batch(attention_mask: torch.Tensor) -> torch.Tensor:
-    max_num = int(torch.max(attention_mask).item())
-    batch_size, _ = attention_mask.shape
-    counts = torch.zeros((batch_size, max_num), dtype=torch.int32)
-    for i in range(1, max_num + 1):
-        mask = attention_mask == i
-        counts[:, i - 1] = torch.sum(mask, dim=-1).to(dtype=torch.int32)
+    # Keep max_num as a tensor instead of extracting to Python int
+    max_num = torch.max(attention_mask)
+    # Create a range tensor for comparison
+    range_tensor = torch.arange(
+        1, max_num + 1, device=attention_mask.device, dtype=attention_mask.dtype
+    )
+    # Vectorized approach - compare attention_mask with each value in range
+    mask = attention_mask.unsqueeze(-1) == range_tensor.unsqueeze(0).unsqueeze(0)
+    # Sum along sequence dimension to get counts
+    counts = mask.sum(dim=1).to(dtype=torch.int32)
+    # Flatten and filter non-zero values
     result = counts.flatten()
-    nonzero_indices = torch.nonzero(result).squeeze(-1)
-    return result[nonzero_indices]
+    nonzero_mask = result != 0
+    return result[nonzero_mask]


 @torch.jit.script
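To make the new behavior concrete, here is a minimal sketch (not from the PR) that runs the same vectorized logic on a made-up sample-packing attention mask, where each token carries the 1-based index of the packed sequence it belongs to and 0 marks padding. The function returns the length of every packed sequence across the batch.

import torch


def get_max_seqlen_in_batch(attention_mask: torch.Tensor) -> torch.Tensor:
    # Same vectorized body as the patched function above
    max_num = torch.max(attention_mask)
    range_tensor = torch.arange(
        1, max_num + 1, device=attention_mask.device, dtype=attention_mask.dtype
    )
    # (B, S, 1) == (1, 1, N) broadcasts to (B, S, N)
    mask = attention_mask.unsqueeze(-1) == range_tensor.unsqueeze(0).unsqueeze(0)
    # Sum over the sequence dim: (B, N) counts per sequence index
    counts = mask.sum(dim=1).to(dtype=torch.int32)
    result = counts.flatten()
    return result[result != 0]


# Made-up packed batch: row 0 packs two sequences (lengths 3 and 2),
# row 1 holds one sequence of length 4 plus one padding token (0).
attention_mask = torch.tensor(
    [
        [1, 1, 1, 2, 2],
        [1, 1, 1, 1, 0],
    ]
)
print(get_max_seqlen_in_batch(attention_mask))
# tensor([3, 2, 4], dtype=torch.int32)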


@@ -521,6 +521,11 @@ def train(
     """
     print_axolotl_text_art()

+    if cfg.activation_memory_budget is not None:
+        torch._functorch.config.activation_memory_budget = (  # pylint: disable=protected-access
+            cfg.activation_memory_budget
+        )
+
     # Setup model, tokenizer, (causal or RLHF) trainer, etc.
     (
         trainer,
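For context, torch._functorch.config.activation_memory_budget is the knob PyTorch's partitioner reads when torch.compile decides which activations to save versus recompute (budget-based selective activation checkpointing, the "SAC" of the commit messages): 1.0 roughly means the default compile behavior, 0.0 roughly means recompute everything, and values in between trade activation memory for extra compute. A standalone sketch of what the new block in train() amounts to; the toy model and the 0.5 budget are illustrative, not from the PR:

import torch
import torch.nn as nn

# Mirror what train() now does before building the trainer.
# 0.5 is an arbitrary example budget in [0, 1].
torch._functorch.config.activation_memory_budget = 0.5  # pylint: disable=protected-access

model = nn.Sequential(nn.Linear(64, 64), nn.GELU(), nn.Linear(64, 64))
compiled = torch.compile(model)
# The budget only takes effect inside compiled regions, during the
# forward/backward partitioning step.
compiled(torch.randn(8, 64)).sum().backward()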


@@ -182,6 +182,7 @@ class AxolotlInputConfig(
         default=False
     )
     gradient_checkpointing_kwargs: dict[str, Any] | None = None
+    activation_memory_budget: float | None = None
     unfrozen_parameters: list[str] | None = None
@@ -1079,6 +1080,19 @@ class AxolotlInputConfig(
         )
         return data

+    @model_validator(mode="before")
+    @classmethod
+    def check_activation_memory_budget_w_compile(cls, data):
+        if data.get("activation_memory_budget") is not None and not data.get(
+            "torch_compile"
+        ):
+            LOG.warning(
+                "activation_memory_budget is enabled, but torch_compile is not set. "
+                "Automatically setting torch_compile to true."
+            )
+            data["torch_compile"] = True
+        return data
+
     @model_validator(mode="before")
     @classmethod
     def check_npu_config(cls, data):
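The validator's effect can be shown with a cut-down standalone pydantic model; MiniConfig is invented for illustration and the warning log is dropped for brevity. Because the memory budget only applies under torch.compile, a "before"-mode validator coerces torch_compile on whenever a budget is set:

from pydantic import BaseModel, model_validator


class MiniConfig(BaseModel):
    # Invented stand-in for AxolotlInputConfig, illustration only
    torch_compile: bool = False
    activation_memory_budget: float | None = None

    @model_validator(mode="before")
    @classmethod
    def check_activation_memory_budget_w_compile(cls, data):
        # Budget-based SAC needs torch.compile, so flip it on
        if data.get("activation_memory_budget") is not None and not data.get(
            "torch_compile"
        ):
            data["torch_compile"] = True
        return data


print(MiniConfig(activation_memory_budget=0.5).torch_compile)  # True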