llama sdpa patching WIP - static class function import

Sunny Liu
2025-01-22 20:33:13 -05:00
parent f3bec17917
commit d7b133dc1f


@@ -28,13 +28,11 @@ def hijack_llama_prepare_4d_mask():
     # )
     def llama_patched_prepare_4d_causal_attention_mask_with_cache_position(
-        attention_mask: Optional[torch.Tensor],
-        *args,
+        attention_mask: Optional[torch.Tensor], *args, **kwargs
     ):
         dtype = torch.bfloat16 if is_torch_bf16_gpu_available() else torch.float32
         return LlamaModel._prepare_4d_causal_attention_mask_with_cache_position(
-            mask_2d_to_4d(attention_mask, dtype=dtype),
-            *args,
+            mask_2d_to_4d(attention_mask, dtype=dtype), *args, **kwargs
         )
     LlamaModel._prepare_4d_causal_attention_mask_with_cache_position = (  # pylint: disable=protected-access
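
The hunk collapses the wrapper's parameter list so extra positional and keyword arguments pass straight through, and feeds a bf16/fp32 4D mask into LlamaModel's static helper. Below is a minimal sketch of the same monkey-patching idea, not the exact WIP code: it assumes mask_2d_to_4d is the helper referenced in the hunk (import path assumed), that recent transformers releases expose _prepare_4d_causal_attention_mask_with_cache_position as a staticmethod on LlamaModel, and it captures the original staticmethod before rebinding so the wrapper does not call itself.

from typing import Optional

import torch
from transformers.models.llama.modeling_llama import LlamaModel
from transformers.utils import is_torch_bf16_gpu_available

# Assumed import path for the helper referenced in the hunk.
from axolotl.monkeypatch.utils import mask_2d_to_4d


def hijack_llama_prepare_4d_mask():
    # Capture the original staticmethod once, so the patched wrapper delegates
    # to it instead of re-entering the rebound class attribute.
    original_prepare = (
        LlamaModel._prepare_4d_causal_attention_mask_with_cache_position  # pylint: disable=protected-access
    )

    def llama_patched_prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: Optional[torch.Tensor], *args, **kwargs
    ):
        # Expand the 2D padding mask to the 4D form the model expects,
        # preferring bf16 when the GPU supports it.
        dtype = torch.bfloat16 if is_torch_bf16_gpu_available() else torch.float32
        return original_prepare(
            mask_2d_to_4d(attention_mask, dtype=dtype), *args, **kwargs
        )

    # Rebind as a staticmethod so instance-level calls keep working unchanged.
    LlamaModel._prepare_4d_causal_attention_mask_with_cache_position = staticmethod(  # pylint: disable=protected-access
        llama_patched_prepare_4d_causal_attention_mask_with_cache_position
    )

Wrapping the replacement in staticmethod() keeps call sites that go through an instance from binding the instance as the first argument, which appears to be the "static class function" concern in the commit title. Calling hijack_llama_prepare_4d_mask() once, before the model is constructed or loaded, would apply the patch process-wide.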