Skip to content

Commit daf9d25

Browse files
Cleaner torch version comparisons. (comfyanonymous#8453)
1 parent 3b4b171 commit daf9d25

File tree

1 file changed

+4
-4
lines changed

1 file changed

+4
-4
lines changed

comfy/model_management.py

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -305,7 +305,7 @@ def is_amd():
305305
logging.info("AMD arch: {}".format(arch))
306306
logging.info("ROCm version: {}".format(rocm_version))
307307
if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
308-
if torch_version_numeric[0] >= 2 and torch_version_numeric[1] >= 7: # works on 2.6 but doesn't actually seem to improve much
308+
if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much
309309
if any((a in arch) for a in ["gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches
310310
ENABLE_PYTORCH_ATTENTION = True
311311
except:
@@ -328,7 +328,7 @@ def is_amd():
328328
pass
329329

330330
try:
331-
if torch_version_numeric[0] == 2 and torch_version_numeric[1] >= 5:
331+
if torch_version_numeric >= (2, 5):
332332
torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True)
333333
except:
334334
logging.warning("Warning, could not set allow_fp16_bf16_reduction_math_sdp")
@@ -1276,11 +1276,11 @@ def supports_fp8_compute(device=None):
12761276
if props.minor < 9:
12771277
return False
12781278

1279-
if torch_version_numeric[0] < 2 or (torch_version_numeric[0] == 2 and torch_version_numeric[1] < 3):
1279+
if torch_version_numeric < (2, 3):
12801280
return False
12811281

12821282
if WINDOWS:
1283-
if (torch_version_numeric[0] == 2 and torch_version_numeric[1] < 4):
1283+
if torch_version_numeric < (2, 4):
12841284
return False
12851285

12861286
return True

0 commit comments

Comments (0)