llama sdpa patching WIP - static class function import

Sunny Liu
2025-01-22 21:10:05 -05:00
parent 152e988d3c
commit 0dd18a3681


@@ -29,11 +29,11 @@ def hijack_llama_prepare_4d_mask():
     @staticmethod
     def llama_patched_prepare_4d_causal_attention_mask_with_cache_position(
-        attention_mask: Optional[torch.Tensor], *args
+        attention_mask: Optional[torch.Tensor], **kwargs
     ):
         dtype = torch.bfloat16 if is_torch_bf16_gpu_available() else torch.float32
         return LlamaModel._prepare_4d_causal_attention_mask_with_cache_position(
-            mask_2d_to_4d(attention_mask, dtype=dtype), *args
+            mask_2d_to_4d(attention_mask, dtype=dtype), **kwargs
         )
     LlamaModel._prepare_4d_causal_attention_mask_with_cache_position = (  # pylint: disable=protected-access
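
Why the change, presumably: transformers calls _prepare_4d_causal_attention_mask_with_cache_position with keyword arguments (e.g. sequence_length, target_length, cache_position), so a wrapper that forwards only positional *args would drop them; forwarding **kwargs keeps the delegation intact. One caveat with the hunk as shown: the wrapper resolves LlamaModel._prepare_4d_causal_attention_mask_with_cache_position at call time, and once the patch rebinds that attribute, the lookup would find the wrapper itself. Below is a minimal recursion-safe sketch of the same monkeypatch that captures the original static method before rebinding; the mask_2d_to_4d import path is an assumption (the helper lives elsewhere in this repo).

import torch
from typing import Optional

from transformers.models.llama.modeling_llama import LlamaModel
from transformers.utils import is_torch_bf16_gpu_available

# mask_2d_to_4d is this repo's helper for expanding a packed 2D attention
# mask into a 4D mask; the import path here is an assumption.
from .utils import mask_2d_to_4d


def hijack_llama_prepare_4d_mask():
    # Capture the original static method before rebinding the attribute,
    # so the wrapper delegates to the real implementation, not to itself.
    original_prepare = (
        LlamaModel._prepare_4d_causal_attention_mask_with_cache_position  # pylint: disable=protected-access
    )

    @staticmethod
    def llama_patched_prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: Optional[torch.Tensor], **kwargs
    ):
        # Expand the packed 2D mask to 4D, then forward all keyword
        # arguments (sequence_length, target_length, etc.) unchanged.
        dtype = torch.bfloat16 if is_torch_bf16_gpu_available() else torch.float32
        return original_prepare(
            mask_2d_to_4d(attention_mask, dtype=dtype), **kwargs
        )

    LlamaModel._prepare_4d_causal_attention_mask_with_cache_position = (  # pylint: disable=protected-access
        llama_patched_prepare_4d_causal_attention_mask_with_cache_position
    )

Since transformers resolves the helper through the class at call time, rebinding the class attribute is enough to route every LlamaModel forward pass through the wrapper; no instances need to be re-created.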