
Commit 6e28a46

Apple is most likely never going to fix the fp16 attention bug. (comfyanonymous#8485)
1 parent c7b2578

File tree

1 file changed: +2 -2 lines changed


comfy/model_management.py

Lines changed: 2 additions & 2 deletions
@@ -1052,7 +1052,7 @@ def pytorch_attention_flash_attention():
     global ENABLE_PYTORCH_ATTENTION
     if ENABLE_PYTORCH_ATTENTION:
         #TODO: more reliable way of checking for flash attention?
-        if is_nvidia(): #pytorch flash attention only works on Nvidia
+        if is_nvidia():
             return True
         if is_intel_xpu():
             return True
@@ -1068,7 +1068,7 @@ def force_upcast_attention_dtype():
     upcast = args.force_upcast_attention
 
     macos_version = mac_version()
-    if macos_version is not None and ((14, 5) <= macos_version < (16,)): # black image bug on recent versions of macOS
+    if macos_version is not None and ((14, 5) <= macos_version): # black image bug on recent versions of macOS, I don't think it's ever getting fixed
         upcast = True
 
     if upcast:
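
For context, here is a minimal sketch of the version gate this second hunk loosens, assuming macOS versions are compared as integer tuples the way mac_version() returns them. The helpers parse_macos_version and should_force_upcast below are hypothetical and written only for illustration, not ComfyUI's API; in the repository the real check lives in force_upcast_attention_dtype() in comfy/model_management.py.

def parse_macos_version(version_string):
    # "15.3.1" -> (15, 3, 1); Python compares tuples element-wise,
    # so (14, 5) <= (15, 3, 1) is True.
    return tuple(int(part) for part in version_string.split("."))

def should_force_upcast(macos_version):
    # Before this commit the workaround was bounded above:
    #   (14, 5) <= macos_version < (16,)
    # so the fp32 upcast would have been dropped automatically on macOS 16+.
    # After this commit, any version from 14.5 onward keeps the upcast,
    # since the fp16 attention (black image) bug is not expected to be fixed.
    return macos_version is not None and (14, 5) <= macos_version

if __name__ == "__main__":
    for v in ("14.4", "14.5", "15.2", "16.0"):
        print(v, should_force_upcast(parse_macos_version(v)))

The practical effect of dropping the "< (16,)" upper bound is that the fp32 upcast workaround stays enabled on future macOS releases instead of silently re-enabling the buggy fp16 attention path.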
