Compare commits

moekernels...sac (3 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 1f5c0d3613 |  |
|  | 3ae0f7c08e |  |
|  | 5930c91a12 |  |
```diff
@@ -16,15 +16,24 @@ from transformers.utils import is_torch_bf16_gpu_available
 
 @torch.jit.script
 def get_max_seqlen_in_batch(attention_mask: torch.Tensor) -> torch.Tensor:
-    max_num = int(torch.max(attention_mask).item())
-    batch_size, _ = attention_mask.shape
-    counts = torch.zeros((batch_size, max_num), dtype=torch.int32)
-    for i in range(1, max_num + 1):
-        mask = attention_mask == i
-        counts[:, i - 1] = torch.sum(mask, dim=-1).to(dtype=torch.int32)
+    # Keep max_num as a tensor instead of extracting to Python int
+    max_num = torch.max(attention_mask)
+
+    # Create a range tensor for comparison
+    range_tensor = torch.arange(
+        1, max_num + 1, device=attention_mask.device, dtype=attention_mask.dtype
+    )
+
+    # Vectorized approach - compare attention_mask with each value in range
+    mask = attention_mask.unsqueeze(-1) == range_tensor.unsqueeze(0).unsqueeze(0)
+
+    # Sum along sequence dimension to get counts
+    counts = mask.sum(dim=1).to(dtype=torch.int32)
+
+    # Flatten and filter non-zero values
     result = counts.flatten()
-    nonzero_indices = torch.nonzero(result).squeeze(-1)
-    return result[nonzero_indices]
+    nonzero_mask = result != 0
+    return result[nonzero_mask]
 
 
 @torch.jit.script
```
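This hunk vectorizes `get_max_seqlen_in_batch`: instead of syncing `max_num` to a Python int and looping over sequence indices, it builds the per-sequence token counts with one broadcast comparison that stays on-device. A minimal sketch (not part of the diff; the `_loop`/`_vec` names and the sample mask are made up for illustration) checking that both sides of the diff agree on a small packed attention mask:

```python
import torch


def get_max_seqlen_in_batch_loop(attention_mask: torch.Tensor) -> torch.Tensor:
    # Left side of the diff: loop over each 1-based sequence index
    max_num = int(torch.max(attention_mask).item())
    batch_size, _ = attention_mask.shape
    counts = torch.zeros((batch_size, max_num), dtype=torch.int32)
    for i in range(1, max_num + 1):
        counts[:, i - 1] = torch.sum(attention_mask == i, dim=-1).to(dtype=torch.int32)
    result = counts.flatten()
    return result[torch.nonzero(result).squeeze(-1)]


def get_max_seqlen_in_batch_vec(attention_mask: torch.Tensor) -> torch.Tensor:
    # Right side of the diff: one broadcast comparison,
    # (batch, seq, 1) == (1, 1, max_num)
    max_num = torch.max(attention_mask)
    range_tensor = torch.arange(
        1, max_num + 1, device=attention_mask.device, dtype=attention_mask.dtype
    )
    mask = attention_mask.unsqueeze(-1) == range_tensor.unsqueeze(0).unsqueeze(0)
    counts = mask.sum(dim=1).to(dtype=torch.int32)
    result = counts.flatten()
    return result[result != 0]


# Row 0 packs sequences 1 (3 tokens) and 2 (2 tokens);
# row 1 packs sequence 1 (4 tokens) plus one padding slot (0).
attention_mask = torch.tensor([[1, 1, 1, 2, 2], [1, 1, 1, 1, 0]])
assert torch.equal(
    get_max_seqlen_in_batch_loop(attention_mask),
    get_max_seqlen_in_batch_vec(attention_mask),
)
print(get_max_seqlen_in_batch_vec(attention_mask))  # tensor([3, 2, 4], dtype=torch.int32)
```

With sample packing, the mask labels every token with the 1-based index of the packed sequence it belongs to, so the function returns the length of each real sequence in the batch and drops the zero counts left by padding.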
```diff
@@ -521,6 +521,11 @@ def train(
     """
     print_axolotl_text_art()
 
+    if cfg.activation_memory_budget is not None:
+        torch._functorch.config.activation_memory_budget = (  # pylint: disable=protected-access
+            cfg.activation_memory_budget
+        )
+
     # Setup model, tokenizer, (causal or RLHF) trainer, etc.
     (
         trainer,
```
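This hunk wires the new option into `train()`: when `cfg.activation_memory_budget` is set, it is copied into `torch._functorch.config.activation_memory_budget`, the knob torch.compile's partitioner uses to trade activation memory for recomputation (1.0 keeps the default save-everything behavior, 0.0 recomputes everything in backward). A minimal sketch of the same knob outside axolotl, assuming a toy model (sizes and budget value are made up):

```python
import torch
import torch.nn as nn

# Let saved activations use roughly half of their usual memory;
# the partitioner recomputes the rest during backward.
torch._functorch.config.activation_memory_budget = 0.5  # pylint: disable=protected-access

model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64))
compiled = torch.compile(model)

x = torch.randn(8, 64)
compiled(x).sum().backward()  # budget is applied when the joint graph is partitioned
```

Because the budget only takes effect inside the compiled autograd graph, it does nothing without `torch.compile`, which is exactly what the validator added further down guards against.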
```diff
@@ -182,6 +182,7 @@ class AxolotlInputConfig(
         default=False
     )
     gradient_checkpointing_kwargs: dict[str, Any] | None = None
+    activation_memory_budget: float | None = None
 
     unfrozen_parameters: list[str] | None = None
 
```
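The field lands next to the other checkpointing options on `AxolotlInputConfig`, so it can be set straight from the YAML config. A hypothetical excerpt (model name and value are placeholders, not from this PR):

```yaml
base_model: NousResearch/Llama-2-7b-hf   # placeholder model
activation_memory_budget: 0.5            # fraction of activations to keep, 0.0-1.0
torch_compile: true                      # set automatically by the validator below if omitted
```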
```diff
@@ -1079,6 +1080,19 @@ class AxolotlInputConfig(
         )
         return data
 
+    @model_validator(mode="before")
+    @classmethod
+    def check_activation_memory_budget_w_compile(cls, data):
+        if data.get("activation_memory_budget") is not None and not data.get(
+            "torch_compile"
+        ):
+            LOG.warning(
+                "activation_memory_budget is enabled, but torch_compile is not set. "
+                "Automatically setting torch_compile to true."
+            )
+            data["torch_compile"] = True
+        return data
+
     @model_validator(mode="before")
     @classmethod
     def check_npu_config(cls, data):
```
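A standalone sketch of the validator's behavior, with a plain function and stdlib logging standing in for pydantic's `model_validator` and axolotl's `LOG`:

```python
import logging

logging.basicConfig(level=logging.WARNING)
LOG = logging.getLogger(__name__)


def check_activation_memory_budget_w_compile(data: dict) -> dict:
    # Mirrors the validator body from the hunk above
    if data.get("activation_memory_budget") is not None and not data.get(
        "torch_compile"
    ):
        LOG.warning(
            "activation_memory_budget is enabled, but torch_compile is not set. "
            "Automatically setting torch_compile to true."
        )
        data["torch_compile"] = True
    return data


data = check_activation_memory_budget_w_compile({"activation_memory_budget": 0.5})
assert data["torch_compile"] is True  # a budget without compile flips the flag
```

Running as a `mode="before"` validator means the coercion happens on the raw input dict, so the rest of the config validation already sees `torch_compile: true`.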