
Commit 7f800d0

Enable AMD fp8 and pytorch attention on some GPUs. (comfyanonymous#8474)
Information is from the pytorch source code.
Parent: 97755ee

1 file changed: +2 −2 lines


comfy/model_management.py

Lines changed: 2 additions & 2 deletions
@@ -307,10 +307,10 @@ def is_amd():
         logging.info("ROCm version: {}".format(rocm_version))
         if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
             if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much
-                if any((a in arch) for a in ["gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches
+                if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx1201 and gfx950
                     ENABLE_PYTORCH_ATTENTION = True
         if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4):
-            if any((a in arch) for a in ["gfx1201"]): # TODO: more arches
+            if any((a in arch) for a in ["gfx1201", "gfx942", "gfx950"]): # TODO: more arches
                 SUPPORT_FP8_OPS = True

 except:
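
For readers skimming the change, here is a minimal, self-contained sketch of the gating logic as it stands after this commit. It is an illustration only: the helper name amd_feature_flags is invented, and the args.use_split_cross_attention / args.use_quad_cross_attention checks that wrap the attention branch in model_management.py are omitted. As in the hunk above, arch is the GPU's gcnArchName string, and torch_version_numeric and rocm_version are (major, minor) tuples.

# Illustrative sketch only; not part of comfy/model_management.py.
def amd_feature_flags(arch, torch_version_numeric, rocm_version):
    enable_pytorch_attention = False
    support_fp8_ops = False

    # PyTorch attention: this commit adds gfx90a and gfx942 (MI200/MI300 series)
    # to the arches that were already enabled (gfx1100, gfx1101, gfx1151).
    if torch_version_numeric >= (2, 7):
        if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]):
            enable_pytorch_attention = True

    # fp8 ops: gfx942 and gfx950 join gfx1201, still gated on ROCm >= 6.4.
    if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4):
        if any((a in arch) for a in ["gfx1201", "gfx942", "gfx950"]):
            support_fp8_ops = True

    return enable_pytorch_attention, support_fp8_ops


# Example: a gfx942 (MI300-series) device on PyTorch 2.7 with ROCm 6.4
# now gets both pytorch attention and fp8 ops.
print(amd_feature_flags("gfx942", (2, 7), (6, 4)))  # -> (True, True)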
