1 parent 49dd2ce · commit 248a403
test/prototype/mx_formats/test_mxfp8_allgather.py
@@ -1,3 +1,4 @@
+import pytest
 import torch
 import torch.distributed as dist
 from torch.testing._internal.common_distributed import (
@@ -9,6 +10,11 @@
 
 from torchao.prototype.mx_formats.mx_tensor import MXTensor
 
+if not torch.cuda.is_available() or torch.cuda.get_device_capability() < (9, 0):
+    pytest.skip(
+        "Test Requires CUDA and compute capability >= 9.0", allow_module_level=True
+    )
+
 
 @instantiate_parametrized_tests
 class MXFP8OnDeviceAllGatherTest(MultiProcessTestCase):
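Note: the added lines use pytest's module-level skip, which aborts collection of the whole file when the hardware check fails at import time, rather than erroring inside each test. Below is a minimal standalone sketch of that pattern under the same capability check; the test function name is illustrative and not part of this commit.

import pytest
import torch

# Module-level guard: if this machine has no CUDA device with compute
# capability >= 9.0 (Hopper-class), every test in this file is reported
# as skipped instead of failing during collection.
if not torch.cuda.is_available() or torch.cuda.get_device_capability() < (9, 0):
    pytest.skip(
        "Requires CUDA and compute capability >= 9.0", allow_module_level=True
    )


def test_device_meets_requirement():
    # Only reached on machines that pass the guard above.
    assert torch.cuda.get_device_capability() >= (9, 0)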