Commit 97755ee

Enable fp8 ops by default on gfx1201 (comfyanonymous#8464)
1 parent daf9d25 commit 97755ee

1 file changed: 6 additions, 1 deletion
comfy/model_management.py

Lines changed: 6 additions & 1 deletion
@@ -295,6 +295,7 @@ def is_amd():
     pass
 
 
+SUPPORT_FP8_OPS = args.supports_fp8_compute
 try:
     if is_amd():
         try:
@@ -308,6 +309,10 @@ def is_amd():
             if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much
                 if any((a in arch) for a in ["gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches
                     ENABLE_PYTORCH_ATTENTION = True
+        if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4):
+            if any((a in arch) for a in ["gfx1201"]): # TODO: more arches
+                SUPPORT_FP8_OPS = True
+
 except:
     pass
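
For reference, a minimal standalone sketch of the gating pattern these two hunks introduce: a module-level default seeded from the CLI flag, which the AMD probe can upgrade on known-good stacks. The helper name fp8_ops_default and its tuple-typed version parameters are illustrative assumptions, not ComfyUI's API:

# Illustrative sketch of the commit's gating logic (assumed helper, not ComfyUI code).
def fp8_ops_default(cli_flag, amd, torch_version, rocm_version, arch):
    support_fp8_ops = cli_flag  # mirrors SUPPORT_FP8_OPS = args.supports_fp8_compute
    if amd and torch_version >= (2, 7) and rocm_version >= (6, 4):
        if any((a in arch) for a in ["gfx1201"]):  # TODO: more arches
            support_fp8_ops = True
    return support_fp8_ops

# A gfx1201 card on PyTorch 2.7 / ROCm 6.4 now defaults to fp8 ops:
assert fp8_ops_default(False, True, (2, 7), (6, 4), "gfx1201") is True
# An older ROCm keeps the flag-driven default:
assert fp8_ops_default(False, True, (2, 7), (6, 3), "gfx1201") is False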

@@ -1262,7 +1267,7 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma
     return False
 
 def supports_fp8_compute(device=None):
-    if args.supports_fp8_compute:
+    if SUPPORT_FP8_OPS:
         return True
 
     if not is_nvidia():
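
The last hunk makes supports_fp8_compute() consult the new module-level flag instead of reading args directly, so the AMD auto-detection and the manual override share one code path. The override itself still comes from the command line; below is a minimal argparse sketch of how a flag like this is typically wired up (an assumption for illustration, not ComfyUI's actual cli_args.py):

# Hypothetical stand-in for the flag behind args.supports_fp8_compute.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--supports-fp8-compute", action="store_true",
                    help="Force-enable fp8 compute regardless of detected hardware.")

args = parser.parse_args(["--supports-fp8-compute"])
assert args.supports_fp8_compute is True  # forces SUPPORT_FP8_OPS on at startup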
