From c9be1aae9de880edcb36e54f3bb7bb9fb08f2f42 Mon Sep 17 00:00:00 2001
From: zjgarvey
Date: Thu, 16 Oct 2025 14:14:24 -0700
Subject: [PATCH 01/15] Move e2e testing out of pt1

Signed-off-by: zjgarvey
---
 projects/CMakeLists.txt                       |    5 +
 projects/{pt1 => e2e}/e2e_testing/main.py     |    0
 .../{pt1 => e2e}/e2e_testing/xfail_sets.py    |   16 +-
 projects/{pt1 => e2e}/tools/e2e_test.sh       |    0
 .../torch_mlir_e2e_test/CMakeLists.txt        |    0
 .../torch_mlir_e2e_test/__init__.py           |    0
 .../torch_mlir_e2e_test/annotations.py        |    0
 .../torch_mlir_e2e_test/configs/__init__.py   |    0
 .../configs/fx_importer_backend.py            |    0
 .../configs/jit_importer_backend.py           |    0
 .../configs/lazy_tensor_core.py               |    0
 .../configs/native_torch.py                   |    0
 .../configs/onnx_backend.py                   |    1 -
 .../configs/torchdynamo.py                    |    0
 .../configs/torchscript.py                    |    0
 .../torch_mlir_e2e_test/configs/utils.py      |    0
 .../torch_mlir_e2e_test/debug/lockstep.py     |    0
 .../torch_mlir_e2e_test/framework.py          |    0
 .../linalg_on_tensors_backends/__init__.py    |    0
 .../linalg_on_tensors_backends/abc.py         |    0
 .../linalg_on_tensors_backends/refbackend.py  |    0
 .../torch_mlir_e2e_test/registry.py           |    0
 .../torch_mlir_e2e_test/reporting.py          |    0
 .../stablehlo_backends/__init__.py            |    0
 .../stablehlo_backends/abc.py                 |    0
 .../stablehlo_backends/linalg_on_tensors.py   |    0
 .../test_suite/__init__.py                    |    0
 .../torch_mlir_e2e_test/test_suite/arange.py  |    0
 .../test_suite/backprop.py                    |    0
 .../torch_mlir_e2e_test/test_suite/basic.py   |    0
 .../torch_mlir_e2e_test/test_suite/cast.py    |    0
 .../test_suite/constant_alloc.py              |    0
 .../test_suite/control_flow.py                |    0
 .../torch_mlir_e2e_test/test_suite/conv.py    | 1885 ++++++++++
 .../test_suite/custom_op_example.py           |    0
 .../test_suite/diagonal.py                    |    0
 .../test_suite/elementwise.py                 |    0
 .../test_suite/elementwise_comparison.py      |    0
 .../test_suite/gridsampler.py                 |    0
 .../histogram_binning_calibration.py          |    0
 .../test_suite/index_select.py                |    0
 .../test_suite/kl_div_loss.py                 |    0
 .../test_suite/linalg_algorithms.py           |    0
 .../torch_mlir_e2e_test/test_suite/matmul.py  |    0
 .../test_suite/meshgrid.py                    |    0
 .../torch_mlir_e2e_test/test_suite/mlp.py     |    0
 .../test_suite/nll_loss.py                    |    0
 .../test_suite/norm_like.py                   |    0
 .../torch_mlir_e2e_test/test_suite/padding.py |    0
 .../torch_mlir_e2e_test/test_suite/pooling.py | 3038 +++++++++++++++++
 .../test_suite/quantized_models.py            |    0
 .../test_suite/reduction.py                   |    0
 .../test_suite/reshape_like.py                |    0
 .../test_suite/return_types.py                |    0
 .../torch_mlir_e2e_test/test_suite/rng.py     |    0
 .../torch_mlir_e2e_test/test_suite/scalar.py  |    0
 .../test_suite/scalar_comparison.py           |    0
 .../torch_mlir_e2e_test/test_suite/scatter.py |    0
 .../test_suite/slice_like.py                  |    0
 .../test_suite/spectral.py                    |    0
 .../torch_mlir_e2e_test/test_suite/squeeze.py |    0
 .../torch_mlir_e2e_test/test_suite/stats.py   |    0
 .../test_suite/threshold.py                   |    0
 .../torch_mlir_e2e_test/test_suite/timeout.py |    0
 .../test_suite/type_conversion.py             |    0
 .../test_suite/type_promotion.py              |    0
 .../test_suite/vision_models.py               |    0
 .../tosa_backends/__init__.py                 |    0
 .../torch_mlir_e2e_test/tosa_backends/abc.py  |    0
 .../tosa_backends/linalg_on_tensors.py        |    0
 .../torch_mlir_e2e_test/utils.py              |    0
 projects/pt1/python/CMakeLists.txt            |    1 -
 projects/pt1/python/torch_mlir/torchscript.py |    9 +-
 73 files changed, 4935 insertions(+), 20 deletions(-)
 rename projects/{pt1 => e2e}/e2e_testing/main.py (100%)
 rename projects/{pt1 => e2e}/e2e_testing/xfail_sets.py (99%)
 rename projects/{pt1 => e2e}/tools/e2e_test.sh (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/CMakeLists.txt (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/__init__.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/annotations.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/configs/__init__.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/configs/fx_importer_backend.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/configs/jit_importer_backend.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/configs/lazy_tensor_core.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/configs/native_torch.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/configs/onnx_backend.py (99%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/configs/torchdynamo.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/configs/torchscript.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/configs/utils.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/debug/lockstep.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/framework.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/linalg_on_tensors_backends/__init__.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/linalg_on_tensors_backends/abc.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/registry.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/reporting.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/stablehlo_backends/__init__.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/stablehlo_backends/abc.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/stablehlo_backends/linalg_on_tensors.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/__init__.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/arange.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/backprop.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/basic.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/cast.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/constant_alloc.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/control_flow.py (100%)
 create mode 100644 projects/e2e/torch_mlir_e2e_test/test_suite/conv.py
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/custom_op_example.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/diagonal.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/elementwise.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/elementwise_comparison.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/gridsampler.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/histogram_binning_calibration.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/index_select.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/kl_div_loss.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/linalg_algorithms.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/matmul.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/meshgrid.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/mlp.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/nll_loss.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/norm_like.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/padding.py (100%)
 create mode 100644 projects/e2e/torch_mlir_e2e_test/test_suite/pooling.py
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/quantized_models.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/reduction.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/reshape_like.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/return_types.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/rng.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/scalar.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/scalar_comparison.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/scatter.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/slice_like.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/spectral.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/squeeze.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/stats.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/threshold.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/timeout.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/type_conversion.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/type_promotion.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/test_suite/vision_models.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/tosa_backends/__init__.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/tosa_backends/abc.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/tosa_backends/linalg_on_tensors.py (100%)
 rename projects/{pt1/python => e2e}/torch_mlir_e2e_test/utils.py (100%)

diff --git a/projects/CMakeLists.txt b/projects/CMakeLists.txt
index 572c4535b7a5..affd2b3cfc8b 100644
--- a/projects/CMakeLists.txt
+++ b/projects/CMakeLists.txt
@@ -64,6 +64,11 @@ if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER OR TORCH_MLIR_ENABLE_LTC)
   message(STATUS "TORCH_LIBRARIES = ${TORCH_LIBRARIES}")
 endif()
+# Include e2e testing infra.
+if(NOT TORCH_MLIR_ENABLE_ONLY_MLIR_PYTHON_BINDINGS)
+  add_subdirectory(torch_mlir_e2e_test)
+endif()
+
 
 # Include jit_ir_common if the jit_ir importer or LTC is enabled,
 # since they both require it.
 if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER OR TORCH_MLIR_ENABLE_LTC)
diff --git a/projects/pt1/e2e_testing/main.py b/projects/e2e/e2e_testing/main.py
similarity index 100%
rename from projects/pt1/e2e_testing/main.py
rename to projects/e2e/e2e_testing/main.py
diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/e2e/e2e_testing/xfail_sets.py
similarity index 99%
rename from projects/pt1/e2e_testing/xfail_sets.py
rename to projects/e2e/e2e_testing/xfail_sets.py
index f494bc8574e6..e4a2e319d7fe 100644
--- a/projects/pt1/e2e_testing/xfail_sets.py
+++ b/projects/e2e/e2e_testing/xfail_sets.py
@@ -497,6 +497,7 @@
     "CrossEntropyLossModule_basic",
     "CrossEntropyLossNoReductionModule_basic",
     "IsInfiniteModule_basic",
+    "InterpolateDynamicModule_sizes_nearest",
     "IouOfModule_basic",
     "MeshgridIndexingIJ_basic",
     "MeshgridIndexingXY_basic",
@@ -914,12 +915,8 @@
     "TraceUnsignedIntModule_empty",
     "UnsafeIndexPutHackedTwin1DFloatNonAccumulateModule_basic",
     "UnsafeViewCollapseDynamicWithAtenSizeIntModule_basic",
-    "UpSampleNearest1dVecNoneScales_basic",
-    "UpSampleNearest1dVecNoneShape_basic",
     "UpSampleNearest2dBackwardScalesNone_basic",
     "UpSampleNearest2dBackward_basic",
-    "UpSampleNearest2dVecNoneScales_basic",
-    "UpSampleNearest2dVecNoneShape_basic",
     "ViewCollapseDynamicWithAtenSizeIntModule_basic",
     "ViewSizeFromOtherTensor_basic",
     # Error: `aten.as_strided` op is not supported
@@ -3044,8 +3041,6 @@
     "LogCumsumExpModule_basic",
     "LogCumsumExpStaticNegativeDimModule_basic",
     "LogCumsumExpStaticFloat64DtypeModule_basic",
-    "MaxPool1dWithIndicesModule_basic",
-    "MaxPool1dWithIndicesCeilModeModule_basic",
     "MaxPool1dCeilModeTrueModule_basic",
     "MaxPool1dModule_basic",
     "MaxPool2dCeilModeTrueModule_basic",
@@ -3808,8 +3803,6 @@
     "LogCumsumExpStaticNegativeDimModule_basic",
     "LogCumsumExpStaticFloat64DtypeModule_basic",
     "MaskedScatterStaticBasic_basic",
-    "MaxPool1dWithIndicesModule_basic",
-    "MaxPool1dWithIndicesCeilModeModule_basic",
     "MaxPool1dCeilModeTrueModule_basic",
     "MaxPool1dModule_basic",
     "MaxPool2dCeilModeTrueModule_basic",
@@ -3963,13 +3956,8 @@
     "TransposedConv2dNegativePadding_basic",
     "TransposedConv3dNegativePadding_basic",
     "UnsafeViewCollapseDynamicWithAtenSizeIntModule_basic",
-    "InterpolateDynamicModule_sizes_nearest",
-    "UpSampleNearest1dVecNoneScales_basic",
-    "UpSampleNearest1dVecNoneShape_basic",
     "UpSampleNearest2dBackwardScalesNone_basic",
     "UpSampleNearest2dBackward_basic",
-    "UpSampleNearest2dVecNoneScales_basic",
-    "UpSampleNearest2dVecNoneShape_basic",
     "ViewCollapseDynamicWithAtenSizeIntModule_basic",
     "ViewSizeFromOtherTensor_basic",
     "VisionTransformerModule_basic",
@@ -4668,8 +4656,6 @@
     "Matmul_4d",
     "Matmul_matvec",
     "Matmul_vecmat",
-    "MaxPool1dWithIndicesModule_basic",
-    "MaxPool1dWithIndicesCeilModeModule_basic",
     "MaxPool1dCeilModeTrueModule_basic",
     "MaxPool1dModule_basic",
     "MaxPool2dCeilModeTrueModule_basic",
diff --git a/projects/pt1/tools/e2e_test.sh b/projects/e2e/tools/e2e_test.sh
similarity index 100%
rename from projects/pt1/tools/e2e_test.sh
rename to projects/e2e/tools/e2e_test.sh
diff --git a/projects/pt1/python/torch_mlir_e2e_test/CMakeLists.txt b/projects/e2e/torch_mlir_e2e_test/CMakeLists.txt
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/CMakeLists.txt
rename to projects/e2e/torch_mlir_e2e_test/CMakeLists.txt
diff --git a/projects/pt1/python/torch_mlir_e2e_test/__init__.py b/projects/e2e/torch_mlir_e2e_test/__init__.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/__init__.py
rename to projects/e2e/torch_mlir_e2e_test/__init__.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/annotations.py b/projects/e2e/torch_mlir_e2e_test/annotations.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/annotations.py
rename to projects/e2e/torch_mlir_e2e_test/annotations.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/configs/__init__.py b/projects/e2e/torch_mlir_e2e_test/configs/__init__.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/configs/__init__.py
rename to projects/e2e/torch_mlir_e2e_test/configs/__init__.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/configs/fx_importer_backend.py b/projects/e2e/torch_mlir_e2e_test/configs/fx_importer_backend.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/configs/fx_importer_backend.py
rename to projects/e2e/torch_mlir_e2e_test/configs/fx_importer_backend.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/configs/jit_importer_backend.py b/projects/e2e/torch_mlir_e2e_test/configs/jit_importer_backend.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/configs/jit_importer_backend.py
rename to projects/e2e/torch_mlir_e2e_test/configs/jit_importer_backend.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/configs/lazy_tensor_core.py b/projects/e2e/torch_mlir_e2e_test/configs/lazy_tensor_core.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/configs/lazy_tensor_core.py
rename to projects/e2e/torch_mlir_e2e_test/configs/lazy_tensor_core.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/configs/native_torch.py b/projects/e2e/torch_mlir_e2e_test/configs/native_torch.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/configs/native_torch.py
rename to projects/e2e/torch_mlir_e2e_test/configs/native_torch.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/configs/onnx_backend.py b/projects/e2e/torch_mlir_e2e_test/configs/onnx_backend.py
similarity index 99%
rename from projects/pt1/python/torch_mlir_e2e_test/configs/onnx_backend.py
rename to projects/e2e/torch_mlir_e2e_test/configs/onnx_backend.py
index 5461dc04c0d1..207b8745d78e 100644
--- a/projects/pt1/python/torch_mlir_e2e_test/configs/onnx_backend.py
+++ b/projects/e2e/torch_mlir_e2e_test/configs/onnx_backend.py
@@ -10,7 +10,6 @@
 import onnx
 import torch
 from torch.onnx._constants import ONNX_TORCHSCRIPT_EXPORTER_MAX_OPSET as max_opset_ver
-import torch_mlir
 
 from torch_mlir_e2e_test.framework import TestConfig, Trace, TraceItem
 from torch_mlir_e2e_test.utils import convert_annotations_to_placeholders
diff --git a/projects/pt1/python/torch_mlir_e2e_test/configs/torchdynamo.py b/projects/e2e/torch_mlir_e2e_test/configs/torchdynamo.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/configs/torchdynamo.py
rename to projects/e2e/torch_mlir_e2e_test/configs/torchdynamo.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/configs/torchscript.py b/projects/e2e/torch_mlir_e2e_test/configs/torchscript.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/configs/torchscript.py
rename to projects/e2e/torch_mlir_e2e_test/configs/torchscript.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/configs/utils.py b/projects/e2e/torch_mlir_e2e_test/configs/utils.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/configs/utils.py
rename to projects/e2e/torch_mlir_e2e_test/configs/utils.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/debug/lockstep.py b/projects/e2e/torch_mlir_e2e_test/debug/lockstep.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/debug/lockstep.py
rename to projects/e2e/torch_mlir_e2e_test/debug/lockstep.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/framework.py b/projects/e2e/torch_mlir_e2e_test/framework.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/framework.py
rename to projects/e2e/torch_mlir_e2e_test/framework.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/__init__.py b/projects/e2e/torch_mlir_e2e_test/linalg_on_tensors_backends/__init__.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/__init__.py
rename to projects/e2e/torch_mlir_e2e_test/linalg_on_tensors_backends/__init__.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/abc.py b/projects/e2e/torch_mlir_e2e_test/linalg_on_tensors_backends/abc.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/abc.py
rename to projects/e2e/torch_mlir_e2e_test/linalg_on_tensors_backends/abc.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py b/projects/e2e/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py
rename to projects/e2e/torch_mlir_e2e_test/linalg_on_tensors_backends/refbackend.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/registry.py b/projects/e2e/torch_mlir_e2e_test/registry.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/registry.py
rename to projects/e2e/torch_mlir_e2e_test/registry.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/reporting.py b/projects/e2e/torch_mlir_e2e_test/reporting.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/reporting.py
rename to projects/e2e/torch_mlir_e2e_test/reporting.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/stablehlo_backends/__init__.py b/projects/e2e/torch_mlir_e2e_test/stablehlo_backends/__init__.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/stablehlo_backends/__init__.py
rename to projects/e2e/torch_mlir_e2e_test/stablehlo_backends/__init__.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/stablehlo_backends/abc.py b/projects/e2e/torch_mlir_e2e_test/stablehlo_backends/abc.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/stablehlo_backends/abc.py
rename to projects/e2e/torch_mlir_e2e_test/stablehlo_backends/abc.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/stablehlo_backends/linalg_on_tensors.py b/projects/e2e/torch_mlir_e2e_test/stablehlo_backends/linalg_on_tensors.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/stablehlo_backends/linalg_on_tensors.py
rename to projects/e2e/torch_mlir_e2e_test/stablehlo_backends/linalg_on_tensors.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/__init__.py b/projects/e2e/torch_mlir_e2e_test/test_suite/__init__.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/__init__.py
rename to projects/e2e/torch_mlir_e2e_test/test_suite/__init__.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/arange.py b/projects/e2e/torch_mlir_e2e_test/test_suite/arange.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/arange.py
rename to projects/e2e/torch_mlir_e2e_test/test_suite/arange.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/backprop.py b/projects/e2e/torch_mlir_e2e_test/test_suite/backprop.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/backprop.py
rename to projects/e2e/torch_mlir_e2e_test/test_suite/backprop.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/basic.py b/projects/e2e/torch_mlir_e2e_test/test_suite/basic.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/basic.py
rename to projects/e2e/torch_mlir_e2e_test/test_suite/basic.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/cast.py b/projects/e2e/torch_mlir_e2e_test/test_suite/cast.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/cast.py
rename to projects/e2e/torch_mlir_e2e_test/test_suite/cast.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/constant_alloc.py b/projects/e2e/torch_mlir_e2e_test/test_suite/constant_alloc.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/constant_alloc.py
rename to projects/e2e/torch_mlir_e2e_test/test_suite/constant_alloc.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/control_flow.py b/projects/e2e/torch_mlir_e2e_test/test_suite/control_flow.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/control_flow.py
rename to projects/e2e/torch_mlir_e2e_test/test_suite/control_flow.py
diff --git a/projects/e2e/torch_mlir_e2e_test/test_suite/conv.py b/projects/e2e/torch_mlir_e2e_test/test_suite/conv.py
new file mode 100644
index 000000000000..2ec87b9fee43
--- /dev/null
+++ b/projects/e2e/torch_mlir_e2e_test/test_suite/conv.py
@@ -0,0 +1,1885 @@
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+# Also available under a BSD-style license. See LICENSE.
+
+import torch
+from torch_mlir_e2e_test.framework import TestUtils
+from torch_mlir_e2e_test.registry import register_test_case
+from torch_mlir_e2e_test.annotations import annotate_args, export
+
+# ==============================================================================
+
+
+class Conv2dNoPaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        torch.manual_seed(0)
+        self.conv = torch.nn.Conv2d(2, 10, 3, bias=False)
+        self.train(False)
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.conv(x)
+
+
+@register_test_case(module_factory=lambda: Conv2dNoPaddingModule())
+def Conv2dNoPaddingModule_basic(module, tu: TestUtils):
+    t = tu.rand(5, 2, 10, 20)
+    module.forward(t)
+
+
+class Conv2dBiasNoPaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        torch.manual_seed(0)
+        self.conv = torch.nn.Conv2d(2, 10, 3, bias=True)
+        self.train(False)
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.conv(x)
+
+
+@register_test_case(module_factory=lambda: Conv2dBiasNoPaddingModule())
+def Conv2dBiasNoPaddingModule_basic(module, tu: TestUtils):
+    t = tu.rand(5, 2, 10, 20)
+    module.forward(t)
+
+
+class Conv2dWithPaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        torch.manual_seed(0)
+        self.conv = torch.nn.Conv2d(2, 10, 3, bias=False, padding=3)
+        self.train(False)
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.conv(x)
+
+
+@register_test_case(module_factory=lambda: Conv2dWithPaddingModule())
+def Conv2dWithPaddingModule_basic(module, tu: TestUtils):
+    t = tu.rand(5, 2, 10, 20)
+    module.forward(t)
+
+
+class Conv2dWithPaddingDilationStrideModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        torch.manual_seed(0)
+        self.conv = torch.nn.Conv2d(
+            in_channels=2,
+            out_channels=10,
+            kernel_size=3,
+            padding=3,
+            stride=2,
+            dilation=3,
+            bias=False,
+        )
+        self.train(False)
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.conv(x)
+
+
+@register_test_case(module_factory=lambda: Conv2dWithPaddingDilationStrideModule())
+def Conv2dWithPaddingDilationStrideModule_basic(module, tu: TestUtils):
+    t = tu.rand(5, 2, 10, 20)
+    module.forward(t)
+
+
+class Conv2dWithPaddingDilationStrideStaticModule(torch.nn.Module):
+    def __init__(self, out_channels, groups):
+        super().__init__()
+        torch.manual_seed(0)
+        self.conv = torch.nn.Conv2d(
+            in_channels=4,
+            out_channels=out_channels,
+            kernel_size=3,
+            padding=3,
+            stride=2,
+            dilation=3,
+            bias=False,
+            groups=groups,
+        )
+        self.train(False)
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([5, 4, 10, 20], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.conv(x)
+
+
+@register_test_case(
+    module_factory=lambda: Conv2dWithPaddingDilationStrideStaticModule(
+        out_channels=10, groups=1
+    )
+)
+def Conv2dWithPaddingDilationStrideStaticModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(5, 4, 10, 20))
+
+
+@register_test_case(
+    module_factory=lambda: Conv2dWithPaddingDilationStrideStaticModule(
+        out_channels=4, groups=4
+    )
+)
+def Conv2dWithPaddingDilationStrideStaticModule_depthwise(module, tu: TestUtils):
+    module.forward(tu.rand(5, 4, 10, 20))
+
+
+@register_test_case(
+    module_factory=lambda: Conv2dWithPaddingDilationStrideStaticModule(
+        out_channels=8, groups=4
+    )
+)
+def Conv2dWithPaddingDilationStrideStaticModule_depthwise_multiplier(
+    module, tu: TestUtils
+):
+    module.forward(tu.rand(5, 4, 10, 20))
+
+
+@register_test_case(
+    module_factory=lambda: Conv2dWithPaddingDilationStrideStaticModule(
+        out_channels=4, groups=2
+    )
+)
+def Conv2dWithPaddingDilationStrideStaticModule_grouped(module, tu: TestUtils):
+    module.forward(tu.rand(5, 4, 10, 20))
+
+
+@register_test_case(
+    module_factory=lambda: Conv2dWithPaddingDilationStrideStaticModule(
+        out_channels=8, groups=2
+    )
+)
+def Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier(
+    module, tu: TestUtils
+):
+    module.forward(tu.rand(5, 4, 10, 20))
+
+
+class Conv2dWithSamePaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        torch.manual_seed(0)
+        self.conv = torch.nn.Conv2d(2, 10, 3, bias=False, padding="same")
+        self.train(False)
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.conv(x)
+
+
+@register_test_case(module_factory=lambda: Conv2dWithSamePaddingModule())
+def Conv2dWithSamePaddingModule_basic(module, tu: TestUtils):
+    t = tu.rand(5, 2, 10, 20)
+    module.forward(t)
+
+
+class Conv2dWithValidPaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        torch.manual_seed(0)
+        self.conv = torch.nn.Conv2d(2, 10, 3, bias=False, padding="valid")
+        self.train(False)
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.conv(x)
+
+
+@register_test_case(module_factory=lambda: Conv2dWithValidPaddingModule())
+def Conv2dWithValidPaddingModule_basic(module, tu: TestUtils):
+    t = tu.rand(5, 2, 10, 20)
+    module.forward(t)
+
+
+# ==============================================================================
+
+
+class Convolution2DModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[1, 1],
+            padding=[0, 0],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Convolution2DModule())
+def Convolution2DModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2))
+
+
+class Convolution2DStaticModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([3, 3, 10, 10], torch.float32, True),
+            ([3, 3, 2, 2], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[1, 1],
+            padding=[0, 0],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Convolution2DStaticModule())
+def Convolution2DStaticModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2))
+
+
+class Convolution2DStridedModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[3, 3],
+            padding=[2, 2],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Convolution2DStridedModule())
+def Convolution2DStridedModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2))
+
+
+class _Convolution2DAllFalseModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten._convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[3, 3],
+            padding=[2, 2],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=1,
+            benchmark=False,
+            deterministic=False,
+            cudnn_enabled=False,
+            allow_tf32=False,
+        )
+
+
+@register_test_case(module_factory=lambda: _Convolution2DAllFalseModule())
+def _Convolution2DAllFalseModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2))
+
+
+class _Convolution2DBenchmarkModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten._convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[3, 3],
+            padding=[2, 2],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=1,
+            benchmark=True,
+            deterministic=False,
+            cudnn_enabled=False,
+            allow_tf32=False,
+        )
+
+
+@register_test_case(module_factory=lambda: _Convolution2DBenchmarkModule())
+def _Convolution2DBenchmarkModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2))
+
+
+class _Convolution2DDeterministicModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten._convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[3, 3],
+            padding=[2, 2],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=1,
+            benchmark=False,
+            deterministic=True,
+            cudnn_enabled=False,
+            allow_tf32=False,
+        )
+
+
+@register_test_case(module_factory=lambda: _Convolution2DDeterministicModule())
+def _Convolution2DDeterministicModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2))
+
+
+class _Convolution2DCudnnModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten._convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[3, 3],
+            padding=[2, 2],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=1,
+            benchmark=False,
+            deterministic=False,
+            cudnn_enabled=True,
+            allow_tf32=False,
+        )
+
+
+@register_test_case(module_factory=lambda: _Convolution2DCudnnModule())
+def _Convolution2DCudnnModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2))
+
+
+class _Convolution2DTF32Module(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten._convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[3, 3],
+            padding=[2, 2],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=1,
+            benchmark=False,
+            deterministic=False,
+            cudnn_enabled=False,
+            allow_tf32=True,
+        )
+
+
+@register_test_case(module_factory=lambda: _Convolution2DTF32Module())
+def _Convolution2DTF32Module_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2))
+
+
+class _ConvolutionDeprecated2DAllFalseModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten._convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[3, 3],
+            padding=[2, 2],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=1,
+            benchmark=False,
+            deterministic=False,
+            cudnn_enabled=False,
+        )
+
+
+@register_test_case(module_factory=lambda: _ConvolutionDeprecated2DAllFalseModule())
+def _ConvolutionDeprecated2DAllFalseModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2))
+
+
+class _ConvolutionDeprecated2DBenchmarkModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten._convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[3, 3],
+            padding=[2, 2],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=1,
+            benchmark=True,
+            deterministic=False,
+            cudnn_enabled=False,
+        )
+
+
+@register_test_case(module_factory=lambda: _ConvolutionDeprecated2DBenchmarkModule())
+def _ConvolutionDeprecated2DBenchmarkModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2))
+
+
+class _ConvolutionDeprecated2DDeterministicModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten._convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[3, 3],
+            padding=[2, 2],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=1,
+            benchmark=False,
+            deterministic=True,
+            cudnn_enabled=False,
+        )
+
+
+@register_test_case(
+    module_factory=lambda: _ConvolutionDeprecated2DDeterministicModule()
+)
+def _ConvolutionDeprecated2DDeterministicModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2))
+
+
+class _ConvolutionDeprecated2DCudnnModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten._convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[3, 3],
+            padding=[2, 2],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=1,
+            benchmark=False,
+            deterministic=False,
+            cudnn_enabled=True,
+        )
+
+
+@register_test_case(module_factory=lambda: _ConvolutionDeprecated2DCudnnModule())
+def _ConvolutionDeprecated2DCudnnModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2))
+
+
+class ConvolutionModule2DGroups(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[3, 3],
+            padding=[2, 2],
+            dilation=[1, 1],
+            transposed=False,
+            output_padding=[0, 0],
+            groups=4,
+        )
+
+
+@register_test_case(module_factory=lambda: ConvolutionModule2DGroups())
+def ConvolutionModule2DGroups_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 32, 4, 4), tu.rand(32, 8, 3, 3))
+
+
+# ==============================================================================
+
+
+class ConvolutionModule2DTranspose(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[1, 1],
+            padding=[1, 1],
+            dilation=[1, 1],
+            transposed=True,
+            output_padding=[0, 0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: ConvolutionModule2DTranspose())
+def ConvolutionModule2DTranspose_basic(module, tu: TestUtils):
+    module.forward(tu.rand(3, 3, 4, 4), tu.rand(3, 3, 2, 2))
+
+
+class ConvolutionModule2DTransposeStrided(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[2, 2],
+            padding=[1, 1],
+            dilation=[1, 1],
+            transposed=True,
+            output_padding=[0, 0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: ConvolutionModule2DTransposeStrided())
+def ConvolutionModule2DTransposeStrided_basic(module, tu: TestUtils):
+    module.forward(tu.rand(5, 2, 5, 6), tu.rand(2, 5, 2, 2))
+
+
+class ConvolutionModule2DTransposeStridedStatic(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([5, 2, 5, 6], torch.float32, True),
+            ([2, 5, 2, 2], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[2, 2],
+            padding=[1, 1],
+            dilation=[1, 1],
+            transposed=True,
+            output_padding=[0, 0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: ConvolutionModule2DTransposeStridedStatic())
+def ConvolutionModule2DTransposeStridedStatic_basic(module, tu: TestUtils):
+    module.forward(tu.rand(5, 2, 5, 6), tu.rand(2, 5, 2, 2))
+
+
+class ConvolutionModule2DTransposeNonUnitOutputPadding(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.convolution(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[2, 2],
+            padding=[1, 1],
+            dilation=[1, 1],
+            transposed=True,
+            output_padding=[1, 1],
+            groups=1,
+        )
+
+
+@register_test_case(
+    module_factory=lambda: ConvolutionModule2DTransposeNonUnitOutputPadding()
+)
+def ConvolutionModule2DTransposeNonUnitOutputPadding_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 2, 4, 4), tu.rand(2, 2, 3, 3))
+
+
+class Conv_Transpose1dModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1], torch.float32, True),
+            ([-1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.conv_transpose1d(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[2],
+            padding=[1],
+            dilation=[1],
+            output_padding=[0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Conv_Transpose1dModule())
+def Conv_Transpose1dModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(5, 2, 6), tu.rand(2, 5, 2))
+
+
+class Conv_Transpose1dStaticModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([5, 2, 6], torch.float32, True),
+            ([2, 5, 2], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.conv_transpose1d(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[2],
+            padding=[1],
+            dilation=[1],
+            output_padding=[0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Conv_Transpose1dStaticModule())
+def Conv_Transpose1dStaticModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(5, 2, 6), tu.rand(2, 5, 2))
+
+
+class Conv_Transpose2dModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.conv_transpose2d(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[2, 2],
+            padding=[1, 1],
+            dilation=[1, 1],
+            output_padding=[0, 0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Conv_Transpose2dModule())
+def Conv_Transpose2dModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(5, 2, 5, 6), tu.rand(2, 5, 2, 2))
+
+
+class Conv_Transpose2dStaticModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([5, 2, 5, 6], torch.float32, True),
+            ([2, 5, 2, 2], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.conv_transpose2d(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[2, 2],
+            padding=[1, 1],
+            dilation=[1, 1],
+            output_padding=[0, 0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Conv_Transpose2dStaticModule())
+def Conv_Transpose2dStaticModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(5, 2, 5, 6), tu.rand(2, 5, 2, 2))
+
+
+class Conv_Transpose3dModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.conv_transpose3d(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[2, 2, 2],
+            padding=[1, 1, 1],
+            dilation=[1, 1, 1],
+            output_padding=[0, 0, 0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Conv_Transpose3dModule())
+def Conv_Transpose3dModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(5, 2, 5, 6, 7), tu.rand(2, 5, 2, 2, 2))
+
+
+class Conv_Transpose3dStaticModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([5, 2, 5, 6, 7], torch.float32, True),
+            ([2, 5, 2, 2, 2], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.conv_transpose3d(
+            inputVec,
+            weight,
+            bias=None,
+            stride=[2, 2, 2],
+            padding=[1, 1, 1],
+            dilation=[1, 1, 1],
+            output_padding=[0, 0, 0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Conv_Transpose3dStaticModule())
+def Conv_Transpose3dStaticModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(5, 2, 5, 6, 7), tu.rand(2, 5, 2, 2, 2))
+
+
+class UpSampleNearest2d(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float64, True),
+        ]
+    )
+    def forward(self, input):
+        return torch.ops.aten.upsample_nearest2d(
+            input, output_size=[18, 48], scales_h=3.0, scales_w=4.0
+        )
+
+
+@register_test_case(module_factory=lambda: UpSampleNearest2d())
+def UpSampleNearest2d_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 1, 6, 12).to(torch.float64))
+
+
+class UpSampleNearest2dSameSize(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec):
+        return torch._C._nn.upsample_nearest2d(
+            inputVec, output_size=[11, 11], scales_h=None, scales_w=None
+        )
+
+
+@register_test_case(module_factory=lambda: UpSampleNearest2dSameSize())
+def UpSampleNearest2dStaticSize_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 1, 4, 4))
+
+
+class UpSampleNearest2dDiffSize(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)])
+    def forward(self, inputVec):
+        return torch._C._nn.upsample_nearest2d(
+            inputVec, output_size=[8, 11], scales_h=None, scales_w=None
+        )
+
+
+@register_test_case(module_factory=lambda: UpSampleNearest2dDiffSize())
+def UpSampleNearest2dDynamicSize_basic(module, tu: TestUtils):
+    module.forward(tu.rand(2, 3, 2, 2))
+
+
+class UpSampleNearest2dDiffFactor(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)])
+    def forward(self, inputVec):
+        return torch._C._nn.upsample_nearest2d(
+            inputVec, output_size=[6, 10], scales_h=2.3, scales_w=4.7
+        )
+
+
+@register_test_case(module_factory=lambda: UpSampleNearest2dDiffFactor())
+def UpSampleNearest2dDynamicFactor_basic(module, tu: TestUtils):
+    module.forward(tu.rand(2, 3, 2, 2))
+
+
+class UpSampleNearest2dSameFactor(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec):
+        return torch._C._nn.upsample_nearest2d(
+            inputVec, output_size=[8, 8], scales_h=2.0, scales_w=2.0
+        )
+
+
+@register_test_case(module_factory=lambda: UpSampleNearest2dSameFactor())
+def UpSampleNearest2dStaticFactor_basic(module, tu: TestUtils):
+    module.forward(tu.rand(2, 3, 4, 4))
+
+
+class Conv1dModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1], torch.float32, True),
+            ([-1, -1, -1], torch.float32, True),
+            ([-1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight, bias):
+        return torch.ops.aten.conv1d(
+            inputVec, weight, bias=bias, stride=[1], padding=[0], dilation=[1], groups=1
+        )
+
+
+@register_test_case(module_factory=lambda: Conv1dModule())
+def Conv1dModule_basic(module, tu: TestUtils):
+    inputVec = tu.rand(2, 2, 6)
+    weight = torch.randn(8, 2, 3)
+    bias = torch.randn(8)
+    module.forward(inputVec, weight, bias)
+
+
+class Conv1dDepthwiseWithPaddingDilationStrideStaticModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([2, 4, 6], torch.float32, True),
+            ([4, 1, 3], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.conv1d(
+            inputVec, weight, bias=None, stride=[1], padding=[4], dilation=[1], groups=4
+        )
+
+
+@register_test_case(
+    module_factory=lambda: Conv1dDepthwiseWithPaddingDilationStrideStaticModule()
+)
+def Conv1dDepthwiseWithPaddingDilationStrideStaticModule_basic(module, tu: TestUtils):
+    inputVec = tu.rand(2, 4, 6)
+    weight = torch.randn(4, 1, 3)
+    module.forward(inputVec, weight)
+
+
+class Conv1dWithSamePaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        torch.manual_seed(0)
+        self.conv = torch.nn.Conv1d(2, 10, 3, bias=False, padding="same")
+        self.train(False)
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.conv(x)
+
+
+@register_test_case(module_factory=lambda: Conv1dWithSamePaddingModule())
+def Conv1dWithSamePaddingModule_basic(module, tu: TestUtils):
+    t = tu.rand(5, 2, 10)
+    module.forward(t)
+
+
+class Conv1dWithValidPaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1], torch.float32, True),
+            ([-1, -1, -1], torch.float32, True),
+            ([-1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight, bias):
+        return torch.ops.aten.conv1d(
+            inputVec,
+            weight,
+            bias=bias,
+            stride=[1],
+            padding="valid",
+            dilation=[1],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Conv1dWithValidPaddingModule())
+def Conv1dWithValidPaddingModule_basic(module, tu: TestUtils):
+    inputVec = tu.rand(2, 2, 6)
+    weight = torch.randn(8, 2, 3)
+    bias = torch.randn(8)
+    module.forward(inputVec, weight, bias)
+
+
+class Conv1dGroupModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1], torch.float32, True),
+            ([-1, -1, -1], torch.float32, True),
+            ([-1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight, bias):
+        return torch.ops.aten.conv1d(
+            inputVec, weight, bias=bias, stride=[1], padding=[0], dilation=[1], groups=2
+        )
+
+
+@register_test_case(module_factory=lambda: Conv1dGroupModule())
+def Conv1dGroupModule_basic(module, tu: TestUtils):
+    inputVec = tu.rand(2, 4, 6)
+    weight = torch.randn(8, 2, 3)
+    bias = torch.randn(8)
+    module.forward(inputVec, weight, bias)
+
+
+class Conv2dModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1], torch.float32, True),
+            ([-1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight, bias):
+        return torch.ops.aten.conv2d(
+            inputVec,
+            weight,
+            bias=bias,
+            stride=[1, 1],
+            padding=[0, 0],
+            dilation=[1, 1],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Conv2dModule())
+def Conv2dModule_basic(module, tu: TestUtils):
+    inputVec = tu.rand(2, 2, 6, 6)
+    weight = torch.randn(8, 2, 3, 3)
+    bias = torch.randn(8)
+    module.forward(inputVec, weight, bias)
+
+
+class Conv2dFP16NoBiasModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float16, True),
+            ([-1, -1, -1, -1], torch.float16, True),
+        ]
+    )
+    def forward(self, inputVec, weight):
+        return torch.ops.aten.conv2d(
+            inputVec,
+            weight,
+            stride=[1, 1],
+            padding=[0, 0],
+            dilation=[1, 1],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Conv2dFP16NoBiasModule()) +def Conv2dFP16NoBiasModule_basic(module, tu: TestUtils): + inputVec = tu.rand(2, 2, 6, 6).to(torch.float16) + weight = torch.randn(8, 2, 3, 3).to(torch.float16) + module.forward(inputVec, weight) + + +class Conv3dModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ([-1, -1, -1, -1, -1], torch.float32, True), + ([-1], torch.float32, True), + ] + ) + def forward(self, inputVec, weight, bias): + return torch.ops.aten.conv3d( + inputVec, + weight, + bias=bias, + stride=[1, 1, 1], + padding=[0, 0, 0], + dilation=[1, 1, 1], + groups=1, + ) + + +@register_test_case(module_factory=lambda: Conv3dModule()) +def Conv3dModule_basic(module, tu: TestUtils): + inputVec = tu.rand(2, 2, 6, 6, 6) + weight = torch.randn(8, 2, 3, 3, 3) + bias = torch.randn(8) + module.forward(inputVec, weight, bias) + + +class Conv3dWithSamePaddingModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ([-1, -1, -1, -1, -1], torch.float32, True), + ([-1], torch.float32, True), + ] + ) + def forward(self, inputVec, weight, bias): + return torch.ops.aten.conv3d( + inputVec, + weight, + bias=bias, + stride=[1, 1, 1], + padding="same", + dilation=[1, 1, 1], + groups=1, + ) + + +@register_test_case(module_factory=lambda: Conv3dWithSamePaddingModule()) +def Conv3dWithSamePaddingModule_basic(module, tu: TestUtils): + inputVec = tu.rand(2, 2, 6, 6, 6) + weight = torch.randn(8, 2, 3, 3, 3) + bias = torch.randn(8) + module.forward(inputVec, weight, bias) + + +class Conv3dWithValidPaddingModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ([-1, -1, -1, -1, -1], torch.float32, True), + ([-1], torch.float32, True), + ] + ) + def forward(self, inputVec, weight, bias): + return torch.ops.aten.conv3d( + inputVec, + weight, + bias=bias, + stride=[1, 1, 1], + padding="valid", + dilation=[1, 1, 1], + groups=1, + ) + + +@register_test_case(module_factory=lambda: Conv3dWithValidPaddingModule()) +def Conv3dWithValidPaddingModule_basic(module, tu: TestUtils): + inputVec = tu.rand(2, 2, 6, 6, 6) + weight = torch.randn(8, 2, 3, 3, 3) + bias = torch.randn(8) + module.forward(inputVec, weight, bias) + + +class ConvTbcModule(torch.nn.Module): + def __init__(self): + super().__init__() + + # shapes from https://github.com/pytorch/pytorch/blob/3e8c8ce37bbfaafa8581fb48506c0a70ea54463d/test/nn/test_convolution.py#L623 + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.float32, True), + ([-1, -1, -1], torch.float32, True), + ([-1], torch.float32, True), + ] + ) + def forward(self, x, weight, bias): + return torch.conv_tbc(x, weight, bias) + + +@register_test_case(module_factory=lambda: ConvTbcModule()) +def ConvTbcModule_basic(module, tu: TestUtils): + module.forward(tu.rand(9, 4, 5), tu.rand(3, 5, 6), tu.rand(6)) + + +# For DQ-Q fake quantization ops +import torch.ao.quantization.fx._decomposed + + +class Conv2dQInt8ModuleBase(torch.nn.Module): + def __init__(self, groups=1): + self.groups = groups + super().__init__() + + def _forward(self, input, weight, bias): + input = torch.ops.quantized_decomposed.dequantize_per_tensor.default( + input, 0.01, 7, -128, 127, torch.int8 + ) + weight = torch.ops.quantized_decomposed.dequantize_per_tensor.default( + weight, 0.01, 
3, -128, 127, torch.int8 + ) + bias = torch.ops.quantized_decomposed.dequantize_per_tensor.default( + bias, 1, 0, -1000, 1000, torch.int32 + ) + + conv = torch.ops.aten.conv2d( + input, + weight, + bias=bias, + stride=[1, 1], + padding=[0, 0], + dilation=[1, 1], + groups=self.groups, + ) + + # Use int32 to avoid overflows + return torch.ops.quantized_decomposed.quantize_per_tensor.default( + conv, 1, 0, -(2**31), 2**31 - 1, torch.int32 + ) + + +class Conv2dQInt8ModuleDyn(Conv2dQInt8ModuleBase): + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.int8, True), + ([-1, -1, -1, -1], torch.int8, True), + ([-1], torch.int32, True), + ] + ) + def forward(self, inputVec, weight, bias): + return self._forward(inputVec, weight, bias) + + +class Conv2dQInt8ModuleStatic(Conv2dQInt8ModuleBase): + @export + @annotate_args( + [ + None, + ([2, 3, 12, 12], torch.int8, True), + ([3, 1, 5, 3], torch.int8, True), + ([3], torch.int32, True), + ] + ) + def forward(self, inputVec, weight, bias): + return self._forward(inputVec, weight, bias) + + +class Conv2dQInt8ModuleStatic_MoreOutChannels(Conv2dQInt8ModuleBase): + @export + @annotate_args( + [ + None, + ([2, 3, 12, 12], torch.int8, True), + ([6, 1, 5, 3], torch.int8, True), + ([6], torch.int32, True), + ] + ) + def forward(self, inputVec, weight, bias): + return self._forward(inputVec, weight, bias) + + +@register_test_case(module_factory=lambda: Conv2dQInt8ModuleDyn()) +def Conv2dQInt8Module_basic(module, tu: TestUtils): + inputVec = tu.randint(2, 4, 7, 8, low=-128, high=127).to(torch.int8) + weight = tu.randint(3, 4, 3, 2, low=-128, high=127).to(torch.int8) + bias = tu.randint(3, low=-1000, high=1000).to(torch.int32) + module.forward(inputVec, weight, bias) + + +@register_test_case(module_factory=lambda: Conv2dQInt8ModuleDyn(groups=2)) +def Conv2dQInt8Module_grouped(module, tu: TestUtils): + inputVec = tu.randint(2, 8, 7, 8, low=-128, high=127).to(torch.int8) + weight = tu.randint(6, 4, 3, 2, low=-128, high=127).to(torch.int8) + bias = tu.randint(6, low=-1000, high=1000).to(torch.int32) + module.forward(inputVec, weight, bias) + + +@register_test_case(module_factory=lambda: Conv2dQInt8ModuleStatic(groups=3)) +def Conv2dQInt8Module_depthwise(module, tu: TestUtils): + inputVec = tu.randint(2, 3, 12, 12, low=-128, high=127).to(torch.int8) + weight = tu.randint(3, 1, 5, 3, low=-128, high=127).to(torch.int8) + bias = tu.randint(3, low=-1000, high=1000).to(torch.int32) + module.forward(inputVec, weight, bias) + + +@register_test_case( + module_factory=lambda: Conv2dQInt8ModuleStatic_MoreOutChannels(groups=3) +) +def Conv2dQInt8Module_not_depthwise(module, tu: TestUtils): + inputVec = tu.randint(2, 3, 12, 12, low=-128, high=127).to(torch.int8) + weight = tu.randint(6, 1, 5, 3, low=-128, high=127).to(torch.int8) + bias = tu.randint(6, low=-1000, high=1000).to(torch.int32) + module.forward(inputVec, weight, bias) + + +class ConvTranspose2DQInt8Module(torch.nn.Module): + + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.int8, True), + ([-1, -1, -1, -1], torch.int8, True), + ([-1], torch.float, True), + ] + ) + def forward(self, input, weight, bias): + input = torch.ops.quantized_decomposed.dequantize_per_tensor.default( + input, 0.01, -25, -128, 127, torch.int8 + ) + weight = torch.ops.quantized_decomposed.dequantize_per_tensor.default( + weight, 0.01, 50, -128, 127, torch.int8 + ) + + res = torch.ops.aten.convolution( + input, + weight, + bias=bias, + stride=[2, 1], + padding=[1, 1], + 
dilation=[1, 1], + transposed=True, + output_padding=[0, 0], + groups=1, + ) + + # Use int32 to avoid overflows + return torch.ops.quantized_decomposed.quantize_per_tensor.default( + res, 1, 0, -(2**31), 2**31 - 1, torch.int32 + ) + + +@register_test_case(module_factory=lambda: ConvTranspose2DQInt8Module()) +def ConvTranspose2DQInt8_basic(module, tu: TestUtils): + N = 10 + Cin = 5 + Cout = 7 + Hin = 10 + Win = 8 + Hker = 3 + Wker = 2 + module.forward( + tu.randint(N, Cin, Hin, Win, low=-128, high=127).to(torch.int8), + tu.randint(Cin, Cout, Hker, Wker, low=-128, high=127).to(torch.int8), + torch.rand(Cout), + ) + + +class Conv2dQInt8PerChannelModuleBase(torch.nn.Module): + def __init__(self, groups=1): + self.groups = groups + super().__init__() + + def _forward(self, inputVec, weight, scales, zeropoints, bias): + inputVec = torch.ops.quantized_decomposed.dequantize_per_tensor.default( + inputVec, 0.01, 7, -128, 127, torch.int8 + ) + weight = torch.ops.quantized_decomposed.dequantize_per_channel.default( + weight, scales, zeropoints, 0, -128, 127, torch.int8 + ) + + conv = torch.ops.aten.conv2d( + inputVec, + weight, + bias=bias, + stride=[1, 1], + padding=[0, 0], + dilation=[1, 1], + groups=self.groups, + ) + + # Use int32 to avoid overflows + return torch.ops.quantized_decomposed.quantize_per_tensor.default( + conv, 1, 0, -(2**31), 2**31 - 1, torch.int32 + ) + + +class Conv2dQInt8PerChannelModuleDyn(Conv2dQInt8PerChannelModuleBase): + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.int8, True), + ([-1, -1, -1, -1], torch.int8, True), + ([-1], torch.float, True), + ([-1], torch.int8, True), + ([-1], torch.float, True), + ] + ) + def forward(self, inputVec, weight, scales, zeropoints, bias): + return self._forward(inputVec, weight, scales, zeropoints, bias) + + +class Conv2dQInt8PerChannelModuleStatic(Conv2dQInt8PerChannelModuleBase): + @export + @annotate_args( + [ + None, + ([2, 3, 12, 12], torch.int8, True), + ([3, 1, 5, 3], torch.int8, True), + ([3], torch.float, True), + ([3], torch.int8, True), + ([3], torch.float, True), + ] + ) + def forward(self, inputVec, weight, scales, zeropoints, bias): + return self._forward(inputVec, weight, scales, zeropoints, bias) + + +@register_test_case(module_factory=lambda: Conv2dQInt8PerChannelModuleDyn()) +def Conv2dQInt8PerChannelModule_basic(module, tu: TestUtils): + inputVec = tu.randint(2, 4, 7, 8, low=-128, high=127).to(torch.int8) + weight = tu.randint(3, 4, 3, 2, low=-128, high=127).to(torch.int8) + scales = tu.rand(3) + zeropoints = tu.rand(3).to(torch.int8) + bias = torch.rand(3) + module.forward(inputVec, weight, scales, zeropoints, bias) + + +@register_test_case(module_factory=lambda: Conv2dQInt8PerChannelModuleDyn(groups=2)) +def Conv2dQInt8PerChannelModule_grouped(module, tu: TestUtils): + inputVec = tu.randint(2, 8, 7, 8, low=-128, high=127).to(torch.int8) + weight = tu.randint(6, 4, 3, 2, low=-128, high=127).to(torch.int8) + scales = tu.rand(6) + zeropoints = tu.rand(6).to(torch.int8) + bias = torch.rand(6) + module.forward(inputVec, weight, scales, zeropoints, bias) + + +@register_test_case(module_factory=lambda: Conv2dQInt8PerChannelModuleStatic(groups=3)) +def Conv2dQInt8PerChannelModule_depthwise(module, tu: TestUtils): + inputVec = tu.randint(2, 3, 12, 12, low=-128, high=127).to(torch.int8) + weight = tu.randint(3, 1, 5, 3, low=-128, high=127).to(torch.int8) + scales = tu.rand(3) + zeropoints = tu.rand(3).to(torch.int8) + bias = torch.rand(3) + module.forward(inputVec, weight, scales, zeropoints, bias) + + +# 
torchvision.deform_conv2d + +import torchvision + +# This section defines a torch->onnx path for this torchvision op so we can test the onnx paths e2e. + +# Create symbolic function +from torch.onnx.symbolic_helper import parse_args, _get_tensor_sizes + + +@parse_args("v", "v", "v", "v", "v", "i", "i", "i", "i", "i", "i", "i", "i", "b") +def symbolic_deform_conv2d_forward( + g, + input, + weight, + offset, + mask, + bias, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + groups, + offset_groups, + use_mask, +): + args = [input, weight, offset, bias] + if use_mask: + args.append(mask) + weight_size = _get_tensor_sizes(weight) + kwargs = { + "dilations_i": [dilation_h, dilation_w], + "group_i": groups, + "kernel_shape_i": weight_size[2:], + "offset_group_i": offset_groups, + # NB: ONNX supports asymmetric padding, whereas PyTorch supports only + # symmetric padding + "pads_i": [pad_h, pad_w, pad_h, pad_w], + "strides_i": [stride_h, stride_w], + } + return g.op("DeformConv", *args, **kwargs) + + +# Register symbolic function +from torch.onnx import register_custom_op_symbolic + +register_custom_op_symbolic( + "torchvision::deform_conv2d", symbolic_deform_conv2d_forward, 19 +) + +N = 1 +Cin = 1 +Hin = 7 +Win = 6 +Cout = 1 +Hker = 2 +Wker = 2 +offset_groups = 1 +Hout = 6 +Wout = 5 +offset_dim1 = 2 * offset_groups * Hker * Wker + + +class DeformableConvModule(torch.nn.Module): + @export + @annotate_args( + [ + None, + ([N, Cin, Hin, Win], torch.float32, True), + ([N, offset_dim1, Hout, Wout], torch.float32, True), + ([Cout, Cin, Hker, Wker], torch.float32, True), + ] + ) + def forward(self, input, offset, weight): + return torchvision.ops.deform_conv2d(input, offset, weight) + + +@register_test_case(module_factory=lambda: DeformableConvModule()) +def DeformConv2D_basic(module, tu: TestUtils): + input = tu.rand(N, Cin, Hin, Win) + offset = tu.rand(N, offset_dim1, Hout, Wout) + weight = tu.rand(Cout, Cin, Hker, Wker) + module.forward(input, offset, weight) + + +class ConvolutionModule2DGroupedTranspose(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([1, 2, 5, 7], torch.float32, True), + ([2, 2, 3, 3], torch.float32, True), + ([4], torch.float32, True), + ] + ) + def forward(self, inputVec, weight, bias): + return torch.ops.aten.convolution( + inputVec, + weight, + bias=bias, + stride=[2, 2], + padding=[1, 1], + dilation=[1, 1], + transposed=True, + output_padding=[0, 0], + groups=2, + ) + + +@register_test_case(module_factory=lambda: ConvolutionModule2DGroupedTranspose()) +def ConvolutionModule2DGroupedTranspose_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 2, 5, 7), tu.rand(2, 2, 3, 3), tu.rand(4)) + + +class TransposedConv1dNegativePadding(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([1, 1, 7], torch.float32, True), + ([1, 2, 3], torch.float32, True), + ([2], torch.float32, True), + ] + ) + def forward(self, inputVec, weight, bias): + return torch.ops.aten.convolution( + inputVec, + weight, + bias=bias, + stride=[1], + padding=[3], + dilation=[1], + transposed=True, + output_padding=[0], + groups=1, + ) + + +@register_test_case(module_factory=lambda: TransposedConv1dNegativePadding()) +def TransposedConv1dNegativePadding_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 7), tu.rand(1, 2, 3), tu.rand(2)) + + +class TransposedConv2dNegativePadding(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( 
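+        # Note ("negative padding" in these transposed-conv tests): a transposed
+        # conv with padding p behaves like a direct conv on the upsampled input
+        # padded by dilation * (kernel - 1) - p, which is negative here
+        # (1 * (3 - 1) - 3 = -1 along W), so lowerings must crop rather than pad.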
+ [ + None, + ([1, 1, 4, 7], torch.float32, True), + ([1, 2, 3, 3], torch.float32, True), + ([2], torch.float32, True), + ] + ) + def forward(self, inputVec, weight, bias): + return torch.ops.aten.convolution( + inputVec, + weight, + bias=bias, + stride=[1, 1], + padding=[0, 3], + dilation=[1, 1], + transposed=True, + output_padding=[0, 0], + groups=1, + ) + + +@register_test_case(module_factory=lambda: TransposedConv2dNegativePadding()) +def TransposedConv2dNegativePadding_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 4, 7), tu.rand(1, 2, 3, 3), tu.rand(2)) + + +class TransposedConv3dNegativePadding(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([4, 1, 8, 13, 17], torch.float32, True), + ([1, 1, 3, 7, 3], torch.float32, True), + ([1], torch.float32, True), + ] + ) + def forward(self, inputVec, weight, bias): + return torch.ops.aten.convolution( + inputVec, + weight, + bias=bias, + stride=[1, 1, 1], + padding=[2, 1, 3], + dilation=[1, 1, 1], + transposed=True, + output_padding=[0, 0, 0], + groups=1, + ) + + +@register_test_case(module_factory=lambda: TransposedConv3dNegativePadding()) +def TransposedConv3dNegativePadding_basic(module, tu: TestUtils): + module.forward(tu.rand(4, 1, 8, 13, 17), tu.rand(1, 1, 3, 7, 3), tu.rand(1)) diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/custom_op_example.py b/projects/e2e/torch_mlir_e2e_test/test_suite/custom_op_example.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/custom_op_example.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/custom_op_example.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/diagonal.py b/projects/e2e/torch_mlir_e2e_test/test_suite/diagonal.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/diagonal.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/diagonal.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/elementwise.py b/projects/e2e/torch_mlir_e2e_test/test_suite/elementwise.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/elementwise.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/elementwise.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/elementwise_comparison.py b/projects/e2e/torch_mlir_e2e_test/test_suite/elementwise_comparison.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/elementwise_comparison.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/elementwise_comparison.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/gridsampler.py b/projects/e2e/torch_mlir_e2e_test/test_suite/gridsampler.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/gridsampler.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/gridsampler.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/histogram_binning_calibration.py b/projects/e2e/torch_mlir_e2e_test/test_suite/histogram_binning_calibration.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/histogram_binning_calibration.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/histogram_binning_calibration.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/index_select.py b/projects/e2e/torch_mlir_e2e_test/test_suite/index_select.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/index_select.py rename 
to projects/e2e/torch_mlir_e2e_test/test_suite/index_select.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/kl_div_loss.py b/projects/e2e/torch_mlir_e2e_test/test_suite/kl_div_loss.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/kl_div_loss.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/kl_div_loss.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/linalg_algorithms.py b/projects/e2e/torch_mlir_e2e_test/test_suite/linalg_algorithms.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/linalg_algorithms.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/linalg_algorithms.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/matmul.py b/projects/e2e/torch_mlir_e2e_test/test_suite/matmul.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/matmul.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/matmul.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/meshgrid.py b/projects/e2e/torch_mlir_e2e_test/test_suite/meshgrid.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/meshgrid.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/meshgrid.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/mlp.py b/projects/e2e/torch_mlir_e2e_test/test_suite/mlp.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/mlp.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/mlp.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/nll_loss.py b/projects/e2e/torch_mlir_e2e_test/test_suite/nll_loss.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/nll_loss.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/nll_loss.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/norm_like.py b/projects/e2e/torch_mlir_e2e_test/test_suite/norm_like.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/norm_like.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/norm_like.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/padding.py b/projects/e2e/torch_mlir_e2e_test/test_suite/padding.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/padding.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/padding.py diff --git a/projects/e2e/torch_mlir_e2e_test/test_suite/pooling.py b/projects/e2e/torch_mlir_e2e_test/test_suite/pooling.py new file mode 100644 index 000000000000..bb7f386f3708 --- /dev/null +++ b/projects/e2e/torch_mlir_e2e_test/test_suite/pooling.py @@ -0,0 +1,3038 @@ +# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# Also available under a BSD-style license. See LICENSE. 
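+
+# End-to-end tests for the pooling operators: adaptive avg/max pooling,
+# max pooling (with and without indices, plus the indices backward op),
+# and avg pooling, in 1d/2d/3d variants with static and dynamic shapes.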
+ +import torch + +from torch_mlir_e2e_test.framework import TestUtils +from torch_mlir_e2e_test.registry import register_test_case +from torch_mlir_e2e_test.annotations import annotate_args, export + +# ============================================================================== + + +class AdaptiveAvgPool2dNonUnitOutputSizeStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap2d = torch.nn.AdaptiveAvgPool2d((7, 7)) + + @export + @annotate_args( + [ + None, + ([1, 512, 7, 7], torch.float32, True), + ] + ) + def forward(self, x): + return self.aap2d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveAvgPool2dNonUnitOutputSizeStaticModule() +) +def AdaptiveAvgPool2dNonUnitOutputSizeStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 7, 7)) + + +class AdaptiveAvgPool2dNonUnitOutputSizeDynamicModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap2d = torch.nn.AdaptiveAvgPool2d((7, 7)) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.aap2d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveAvgPool2dNonUnitOutputSizeDynamicModule() +) +def AdaptiveAvgPool2dNonUnitOutputSizeDynamicModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 7, 7)) + + +class AdaptiveAvgPool2dOutputSizeDivisibleByInputDynamicModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap2d = torch.nn.AdaptiveAvgPool2d((5, 7)) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.aap2d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveAvgPool2dOutputSizeDivisibleByInputDynamicModule() +) +def AdaptiveAvgPool2dOutputSizeDivisibleByInputDynamicModule_basic( + module, tu: TestUtils +): + module.forward(tu.rand(1, 512, 15, 28)) + + +class AdaptiveAvgPool2dOutputSizeDivisibleByInputStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap2d = torch.nn.AdaptiveAvgPool2d((3, 7)) + + @export + @annotate_args( + [ + None, + ([1, 512, 15, 14], torch.float32, True), + ] + ) + def forward(self, x): + return self.aap2d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveAvgPool2dOutputSizeDivisibleByInputStaticModule() +) +def AdaptiveAvgPool2dOutputSizeDivisibleByInputStaticModule_basic( + module, tu: TestUtils +): + module.forward(tu.rand(1, 512, 15, 14)) + + +class AdaptiveAvgPool2dFixedKernelStrideSizeStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap2d = torch.nn.AdaptiveAvgPool2d((2, 2)) + + @export + @annotate_args( + [ + None, + ([1, 3, 7, 7], torch.float32, True), + ] + ) + def forward(self, x): + return self.aap2d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveAvgPool2dFixedKernelStrideSizeStaticModule() +) +def AdaptiveAvgPool2dFixedKernelStrideSizeStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 3, 7, 7)) + + +class AdaptiveAvgPool2dUnitOutputSizeStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap2d = torch.nn.AdaptiveAvgPool2d((1, 1)) + + @export + @annotate_args( + [ + None, + ([1, 512, 7, 7], torch.float32, True), + ] + ) + def forward(self, x): + return self.aap2d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveAvgPool2dUnitOutputSizeStaticModule() +) +def AdaptiveAvgPool2dUnitOutputSizeStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 7, 7)) 
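+
+
+# A quick sanity reference for the adaptive cases above (not part of the
+# test suite): when the input size divides evenly by the output size,
+# adaptive average pooling is a fixed-window average pool, and output_size
+# (1, 1) is just a spatial mean:
+#
+#   x = torch.rand(1, 512, 14, 14)
+#   assert torch.allclose(
+#       torch.nn.AdaptiveAvgPool2d((7, 7))(x),
+#       torch.nn.AvgPool2d(kernel_size=2, stride=2)(x),
+#   )
+#   assert torch.allclose(
+#       torch.nn.AdaptiveAvgPool2d((1, 1))(x),
+#       x.mean(dim=(-2, -1), keepdim=True),
+#   )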
+ + +class AdaptiveAvgPool2dUnitOutputSizeDynamicModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap2d = torch.nn.AdaptiveAvgPool2d((1, 1)) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.aap2d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveAvgPool2dUnitOutputSizeDynamicModule() +) +def AdaptiveAvgPool2dUnitOutputSizeDynamicModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 7, 7)) + + +# ============================================================================== + + +class MaxPool1dModule(torch.nn.Module): + + def __init__(self): + super().__init__() + self.mp1d = torch.nn.MaxPool1d( + kernel_size=[6], stride=[2], padding=[3], dilation=2 + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp1d(x) + + +@register_test_case(module_factory=lambda: MaxPool1dModule()) +def MaxPool1dModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 20, low=-1)) + + +class MaxPool1dEmptyStrideStaticModule(torch.nn.Module): + + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([1, 1, 20], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool1d(x, kernel_size=2, stride=[]) + + +@register_test_case(module_factory=lambda: MaxPool1dEmptyStrideStaticModule()) +def MaxPool1dEmptyStrideStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 20, low=-1)) + + +class MaxPool1dStaticModule(torch.nn.Module): + + def __init__(self): + super().__init__() + self.mp1d = torch.nn.MaxPool1d( + kernel_size=[3], stride=[2], padding=[1], dilation=[1] + ) + + @export + @annotate_args( + [ + None, + ([1, 64, 112], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp1d(x) + + +@register_test_case(module_factory=lambda: MaxPool1dStaticModule()) +def MaxPool1dStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 64, 112)) + + +class MaxPool1dStaticCeilModeTrueModule(torch.nn.Module): + + def __init__(self): + super().__init__() + self.mp1d = torch.nn.MaxPool1d( + kernel_size=[3], stride=[2], padding=[1], dilation=[1], ceil_mode=True + ) + + @export + @annotate_args( + [ + None, + ([1, 64, 112], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp1d(x) + + +@register_test_case(module_factory=lambda: MaxPool1dStaticCeilModeTrueModule()) +def MaxPool1dStaticCeilModeTrueModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 64, 112)) + + +class MaxPool1dCeilModeTrueModule(torch.nn.Module): + + def __init__(self): + super().__init__() + self.mp1d = torch.nn.MaxPool1d( + kernel_size=[6], stride=[2], padding=[3], dilation=2, ceil_mode=True + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp1d(x) + + +@register_test_case(module_factory=lambda: MaxPool1dCeilModeTrueModule()) +def MaxPool1dCeilModeTrueModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 20, low=0.5, high=1.0)) + + +# ============================================================================== + + +class MaxPool2dModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp2d = torch.nn.MaxPool2d( + kernel_size=[6, 8], stride=[2, 2], padding=[3, 4], dilation=2 + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + 
return self.mp2d(x) + + +@register_test_case(module_factory=lambda: MaxPool2dModule()) +def MaxPool2dModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 20, 20, low=-1)) + + +class MaxPool2dEmptyStrideStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([1, 1, 20, 20], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool2d(x, kernel_size=2, stride=[]) + + +@register_test_case(module_factory=lambda: MaxPool2dEmptyStrideStaticModule()) +def MaxPool2dEmptyStrideStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 20, 20, low=-1)) + + +class MaxPool2dStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp2d = torch.nn.MaxPool2d( + kernel_size=[3, 3], stride=[2, 2], padding=[1, 1], dilation=[1, 1] + ) + + @export + @annotate_args( + [ + None, + ([1, 64, 112, 112], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp2d(x) + + +@register_test_case(module_factory=lambda: MaxPool2dStaticModule()) +def MaxPool2dStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 64, 112, 112)) + + +class MaxPool2dStaticCeilModeTrueModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp2d = torch.nn.MaxPool2d( + kernel_size=[3, 3], + stride=[2, 2], + padding=[1, 1], + dilation=[1, 1], + ceil_mode=True, + ) + + @export + @annotate_args( + [ + None, + ([1, 64, 112, 112], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp2d(x) + + +@register_test_case(module_factory=lambda: MaxPool2dStaticCeilModeTrueModule()) +def MaxPool2dStaticCeilModeTrueModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 64, 112, 112)) + + +class MaxPool2dCeilModeTrueModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp2d = torch.nn.MaxPool2d( + kernel_size=[6, 8], + stride=[2, 2], + padding=[3, 4], + dilation=2, + ceil_mode=True, + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp2d(x) + + +@register_test_case(module_factory=lambda: MaxPool2dCeilModeTrueModule()) +def MaxPool2dCeilModeTrueModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 20, 20, low=0.5, high=1.0)) + + +class MaxPool2dStaticCeilModeTrueReduceOutputModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp2d = torch.nn.MaxPool2d( + kernel_size=6, + stride=6, + padding=3, + dilation=1, + ceil_mode=True, + ) + + @export + @annotate_args( + [ + None, + ([2, 6, 20, 10], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp2d(x) + + +@register_test_case( + module_factory=lambda: MaxPool2dStaticCeilModeTrueReduceOutputModule() +) +def MaxPool2dStaticCeilModeTrueReduceOutputModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 6, 20, 10, low=0.5, high=1.0)) + + +class MaxPool2dWithoutPadFullDimIndivisibleByStrideModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp2d = torch.nn.MaxPool2d( + kernel_size=[3, 3], stride=[2, 2], padding=[0, 0] + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp2d(x) + + +@register_test_case( + module_factory=lambda: MaxPool2dWithoutPadFullDimIndivisibleByStrideModule() +) +def MaxPool2dWithoutPadFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 56, 56, low=-1)) + + +class 
MaxPool2dWithPadFullDimIndivisibleByStrideModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp2d = torch.nn.MaxPool2d( + kernel_size=[3, 3], stride=[2, 2], padding=[1, 1] + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp2d(x) + + +@register_test_case( + module_factory=lambda: MaxPool2dWithPadFullDimIndivisibleByStrideModule() +) +def MaxPool2dWithPadFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 112, 112, low=-1)) + + +class MaxPool2dFullDimIndivisibleByStrideModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp2d = torch.nn.MaxPool2d( + kernel_size=[3, 3], stride=[3, 3], padding=[1, 1] + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp2d(x) + + +@register_test_case(module_factory=lambda: MaxPool2dFullDimIndivisibleByStrideModule()) +def MaxPool2dFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 75, 75, low=-1)) + + +class MaxPool2dCeilModeFullDimIndivisibleByStrideModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp2d = torch.nn.MaxPool2d( + kernel_size=[3, 3], + stride=[3, 3], + padding=[1, 1], + ceil_mode=True, + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp2d(x) + + +@register_test_case( + module_factory=lambda: MaxPool2dCeilModeFullDimIndivisibleByStrideModule() +) +def MaxPool2dCeilModeFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 75, 75, low=-1)) + + +# ============================================================================== + + +class MaxPool3dModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp3d = torch.nn.MaxPool3d( + kernel_size=[4, 4, 4], stride=[2, 2, 2], padding=[1, 1, 1], dilation=1 + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp3d(x) + + +@register_test_case(module_factory=lambda: MaxPool3dModule()) +def MaxPool3dModule_basic(module, tu: TestUtils): + module.forward(torch.arange(8 * 8 * 8).view(1, 1, 8, 8, 8).float()) + + +class MaxPool3dRandomSimpleModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp3d = torch.nn.MaxPool3d( + kernel_size=[4, 4, 4], stride=[2, 2, 2], padding=[1, 1, 1], dilation=1 + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp3d(x) + + +@register_test_case(module_factory=lambda: MaxPool3dRandomSimpleModule()) +def MaxPool3dModuleRandomSimple_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 20, 20, 20, low=-1)) + + +class MaxPool3dLargeDataModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp3d = torch.nn.MaxPool3d( + kernel_size=[6, 8, 8], stride=[2, 2, 2], padding=[3, 4, 4], dilation=2 + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp3d(x) + + +@register_test_case(module_factory=lambda: MaxPool3dLargeDataModule()) +def MaxPool3dLargeDatadModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 20, 20, 20, low=-1)) + + +class MaxPool3dEmptyStrideStaticModule(torch.nn.Module): + def __init__(self): + 
super().__init__() + + @export + @annotate_args( + [ + None, + ([1, 1, 20, 20, 20], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool3d(x, kernel_size=2, stride=[]) + + +@register_test_case(module_factory=lambda: MaxPool3dEmptyStrideStaticModule()) +def MaxPool3dEmptyStrideStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 20, 20, 20, low=-1)) + + +class MaxPool3dStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp3d = torch.nn.MaxPool3d( + kernel_size=[3, 3, 3], + stride=[2, 2, 2], + padding=[1, 1, 1], + dilation=[1, 1, 1], + ) + + @export + @annotate_args( + [ + None, + ([1, 64, 112, 112, 112], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp3d(x) + + +@register_test_case(module_factory=lambda: MaxPool3dStaticModule()) +def MaxPool3dStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 64, 112, 112, 112)) + + +class MaxPool3dStaticCeilModeTrueModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp3d = torch.nn.MaxPool3d( + kernel_size=[3, 3, 3], + stride=[2, 2, 2], + padding=[1, 1, 1], + dilation=[1, 1, 1], + ceil_mode=True, + ) + + @export + @annotate_args( + [ + None, + ([1, 64, 112, 112, 112], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp3d(x) + + +@register_test_case(module_factory=lambda: MaxPool3dStaticCeilModeTrueModule()) +def MaxPool3dStaticCeilModeTrueModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 64, 112, 112, 112)) + + +class MaxPool3dCeilModeTrueModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.mp3d = torch.nn.MaxPool3d( + kernel_size=[6, 8, 8], + stride=[2, 2, 2], + padding=[3, 4, 4], + dilation=2, + ceil_mode=True, + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.mp3d(x) + + +@register_test_case(module_factory=lambda: MaxPool3dCeilModeTrueModule()) +def MaxPool3dCeilModeTrueModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 20, 20, 20, low=0.5, high=1.0)) + + +# ============================================================================== + + +class MaxPool2dWithIndicesModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool2d_with_indices( + x, kernel_size=[2, 2], stride=[1, 1], padding=[0, 0], dilation=[1, 1] + ) + + +@register_test_case(module_factory=lambda: MaxPool2dWithIndicesModule()) +def MaxPool2dWithIndicesModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 8, 8, low=0.5, high=1.0)) + + +class MaxPool2dWithIndicesFullSizeKernelModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool2d_with_indices( + x, kernel_size=[4, 4], stride=1, padding=0, dilation=1 + ) + + +@register_test_case(module_factory=lambda: MaxPool2dWithIndicesFullSizeKernelModule()) +def MaxPool2dWithIndicesFullSizeKernelModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 3, 4, 4, low=0.5, high=1.0)) + + +class MaxPool2dWithIndicesNonDefaultPaddingModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + 
return torch.ops.aten.max_pool2d_with_indices( + x, kernel_size=[4, 8], stride=[1, 1], padding=[2, 4], dilation=1 + ) + + +@register_test_case( + module_factory=lambda: MaxPool2dWithIndicesNonDefaultPaddingModule() +) +def MaxPool2dWithIndicesNonDefaultPaddingModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 4, 16, 16, low=-1.5, high=1.0)) + + +class MaxPool2dWithIndicesNonDefaultStrideModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool2d_with_indices( + x, kernel_size=[4, 4], stride=[1, 2], padding=0, dilation=1 + ) + + +@register_test_case(module_factory=lambda: MaxPool2dWithIndicesNonDefaultStrideModule()) +def MaxPool2dWithIndicesNonDefaultStrideModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 4, 16, 80, low=0.5, high=2.0)) + + +class MaxPool2dWithIndicesNonDefaultDilationModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool2d_with_indices( + x, kernel_size=[4, 4], stride=[1, 1], padding=0, dilation=[2, 2] + ) + + +@register_test_case( + module_factory=lambda: MaxPool2dWithIndicesNonDefaultDilationModule() +) +def MaxPool2dWithIndicesNonDefaultDilationModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 4, 16, 80, low=0.5, high=2.0)) + + +class MaxPool2dWithIndicesNonDefaultParamsModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool2d_with_indices( + x, kernel_size=[8, 4], stride=[2, 2], padding=[1, 2], dilation=[2, 2] + ) + + +@register_test_case(module_factory=lambda: MaxPool2dWithIndicesNonDefaultParamsModule()) +def MaxPool2dWithIndicesNonDefaultParamsModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 4, 16, 80, low=-0.5, high=4.0)) + + +class MaxPool2dWithIndicesAllNegativeValuesModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool2d_with_indices( + x, kernel_size=[4, 8], stride=[1, 1], padding=[2, 4], dilation=1 + ) + + +@register_test_case( + module_factory=lambda: MaxPool2dWithIndicesAllNegativeValuesModule() +) +def MaxPool2dWithIndicesAllNegativeValuesModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 4, 16, 16, low=-4.5, high=-1.0)) + + +class MaxPool2dWithIndicesStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([2, 4, 16, 16], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool2d_with_indices( + x, kernel_size=[4, 8], stride=[1, 1], padding=[2, 4], dilation=1 + ) + + +@register_test_case(module_factory=lambda: MaxPool2dWithIndicesStaticModule()) +def MaxPool2dWithIndicesStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 4, 16, 16, low=-4.5, high=-1.0)) + + +class MaxPool2dWithIndicesAllOnesModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool2d_with_indices( + x, 
kernel_size=[2, 2], stride=[1, 1], padding=[0, 0], dilation=[1, 1] + ) + + +@register_test_case(module_factory=lambda: MaxPool2dWithIndicesAllOnesModule()) +def MaxPool2dWithIndicesAllOnesModule_basic(module, tu: TestUtils): + module.forward(torch.ones(1, 1, 8, 8)) + + +class MaxPool2dWithIndicesCeilModeTrueModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool2d_with_indices( + x, + kernel_size=[2, 2], + stride=[1, 1], + padding=[0, 0], + dilation=[1, 1], + ceil_mode=True, + ) + + +@register_test_case(module_factory=lambda: MaxPool2dWithIndicesCeilModeTrueModule()) +def MaxPool2dWithIndicesCeilModeTrueModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 8, 8, low=0.5, high=1.0)) + + +# ============================================================================== + + +class MaxPool2dWithIndicesBackwardStatic4DModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([2, 4, 7, 6], torch.float32, True), + ([2, 4, 6, 5], torch.float32, True), + ([2, 4, 7, 6], torch.int64, True), + ] + ) + def forward(self, output, input, indices): + kernel_size = [2, 2] + stride = [1, 1] + padding = [1, 1] + dilation = [1, 1] + ceil_mode = False + return torch.ops.aten.max_pool2d_with_indices_backward( + output, input, kernel_size, stride, padding, dilation, ceil_mode, indices + ) + + +@register_test_case(module_factory=lambda: MaxPool2dWithIndicesBackwardStatic4DModule()) +def MaxPool2dWithIndicesBackwardStatic4DModule_basic(module, tu: TestUtils): + module.forward( + tu.rand(2, 4, 7, 6), tu.rand(2, 4, 6, 5), tu.randint(2, 4, 7, 6, high=16) + ) + + +class MaxPool2dWithIndicesBackwardStatic3DModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([4, 7, 6], torch.float32, True), + ([4, 6, 5], torch.float32, True), + ([4, 7, 6], torch.int64, True), + ] + ) + def forward(self, output, input, indices): + kernel_size = [2, 2] + stride = [1, 1] + padding = [1, 1] + dilation = [1, 1] + ceil_mode = False + return torch.ops.aten.max_pool2d_with_indices_backward( + output, input, kernel_size, stride, padding, dilation, ceil_mode, indices + ) + + +@register_test_case(module_factory=lambda: MaxPool2dWithIndicesBackwardStatic3DModule()) +def MaxPool2dWithIndicesBackwardStatic3DModule_basic(module, tu: TestUtils): + module.forward(tu.rand(4, 7, 6), tu.rand(4, 6, 5), tu.randint(4, 7, 6, high=16)) + + +class MaxPool2dWithIndicesBackwardDynamic4DModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ([-1, -1, -1, -1], torch.float32, True), + ([-1, -1, -1, -1], torch.int64, True), + ] + ) + def forward(self, output, input, indices): + kernel_size = [2, 2] + stride = [1, 1] + padding = [1, 1] + dilation = [1, 1] + ceil_mode = False + return torch.ops.aten.max_pool2d_with_indices_backward( + output, input, kernel_size, stride, padding, dilation, ceil_mode, indices + ) + + +@register_test_case( + module_factory=lambda: MaxPool2dWithIndicesBackwardDynamic4DModule() +) +def MaxPool2dWithIndicesBackwardDynamic4DModule_basic(module, tu: TestUtils): + module.forward( + tu.rand(2, 4, 7, 6), tu.rand(2, 4, 6, 5), tu.randint(2, 4, 7, 6, high=16) + ) + + +class MaxPool2dWithIndicesBackwardDynamic3DModule(torch.nn.Module): + def __init__(self): + 
super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.float32, True), + ([-1, -1, -1], torch.float32, True), + ([-1, -1, -1], torch.int64, True), + ] + ) + def forward(self, output, input, indices): + kernel_size = [2, 2] + stride = [1, 1] + padding = [1, 1] + dilation = [1, 1] + ceil_mode = False + return torch.ops.aten.max_pool2d_with_indices_backward( + output, input, kernel_size, stride, padding, dilation, ceil_mode, indices + ) + + +@register_test_case( + module_factory=lambda: MaxPool2dWithIndicesBackwardDynamic3DModule() +) +def MaxPool2dWithIndicesBackwardDynamic3DModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 7, 6), tu.rand(2, 6, 5), tu.randint(2, 7, 6, high=16)) + + +# ============================================================================== + + +class MaxPool3dWithIndicesModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool3d_with_indices( + x, + kernel_size=[2, 2, 2], + stride=[1, 1, 1], + padding=[0, 0, 0], + dilation=[1, 1, 1], + ) + + +@register_test_case(module_factory=lambda: MaxPool3dWithIndicesModule()) +def MaxPool3dWithIndicesModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 8, 8, 8, low=0.5, high=1.0)) + + +class MaxPool3dWithIndicesFullSizeKernelModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool3d_with_indices( + x, kernel_size=[4, 4, 4], stride=1, padding=0, dilation=1 + ) + + +@register_test_case(module_factory=lambda: MaxPool3dWithIndicesFullSizeKernelModule()) +def MaxPool3dWithIndicesFullSizeKernelModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 3, 4, 4, 4, low=0.5, high=1.0)) + + +class MaxPool3dWithIndicesNonDefaultPaddingModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool3d_with_indices( + x, kernel_size=[4, 8, 4], stride=[1, 1, 1], padding=[2, 4, 2], dilation=1 + ) + + +@register_test_case( + module_factory=lambda: MaxPool3dWithIndicesNonDefaultPaddingModule() +) +def MaxPool3dWithIndicesNonDefaultPaddingModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 4, 16, 16, 16, low=-1.5, high=1.0)) + + +class MaxPool3dWithIndicesNonDefaultStrideModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool3d_with_indices( + x, kernel_size=[4, 4, 4], stride=[1, 2, 1], padding=0, dilation=1 + ) + + +@register_test_case(module_factory=lambda: MaxPool3dWithIndicesNonDefaultStrideModule()) +def MaxPool3dWithIndicesNonDefaultStrideModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 4, 16, 80, 16, low=0.5, high=2.0)) + + +class MaxPool3dWithIndicesNonDefaultDilationModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool3d_with_indices( + x, kernel_size=[4, 4, 4], stride=[1, 1, 1], padding=0, dilation=[2, 2, 2] + ) + + +@register_test_case( + 
module_factory=lambda: MaxPool3dWithIndicesNonDefaultDilationModule() +) +def MaxPool3dWithIndicesNonDefaultDilationModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 4, 16, 80, 16, low=0.5, high=2.0)) + + +class MaxPool3dWithIndicesNonDefaultParamsModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool3d_with_indices( + x, + kernel_size=[8, 4, 8], + stride=[2, 2, 2], + padding=[1, 2, 1], + dilation=[2, 2, 2], + ) + + +@register_test_case(module_factory=lambda: MaxPool3dWithIndicesNonDefaultParamsModule()) +def MaxPool3dWithIndicesNonDefaultParamsModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 4, 16, 80, 16, low=-0.5, high=4.0)) + + +class MaxPool3dWithIndicesAllNegativeValuesModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool3d_with_indices( + x, kernel_size=[4, 8, 4], stride=[1, 1, 1], padding=[2, 4, 2], dilation=1 + ) + + +@register_test_case( + module_factory=lambda: MaxPool3dWithIndicesAllNegativeValuesModule() +) +def MaxPool3dWithIndicesAllNegativeValuesModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 4, 16, 16, 16, low=-4.5, high=-1.0)) + + +class MaxPool3dWithIndicesStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([2, 4, 16, 16, 16], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool3d_with_indices( + x, kernel_size=[4, 8, 4], stride=[1, 1, 1], padding=[2, 4, 2], dilation=1 + ) + + +@register_test_case(module_factory=lambda: MaxPool3dWithIndicesStaticModule()) +def MaxPool3dWithIndicesStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 4, 16, 16, 16, low=-4.5, high=-1.0)) + + +class MaxPool3dWithIndicesAllOnesModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool3d_with_indices( + x, + kernel_size=[2, 2, 2], + stride=[1, 1, 1], + padding=[0, 0, 0], + dilation=[1, 1, 1], + ) + + +@register_test_case(module_factory=lambda: MaxPool3dWithIndicesAllOnesModule()) +def MaxPool3dWithIndicesAllOnesModule_basic(module, tu: TestUtils): + module.forward(torch.ones(1, 1, 8, 8, 8)) + + +class MaxPool3dWithIndicesCeilModeTrueModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool3d_with_indices( + x, + kernel_size=[2, 2, 2], + stride=[1, 1, 1], + padding=[0, 0, 0], + dilation=[1, 1, 1], + ceil_mode=True, + ) + + +@register_test_case(module_factory=lambda: MaxPool3dWithIndicesCeilModeTrueModule()) +def MaxPool3dWithIndicesCeilModeTrueModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 8, 8, 8, low=0.5, high=1.0)) + + +# ============================================================================== + + +class AvgPool2dFloatModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[6, 8], + stride=[2, 2], + padding=[3, 4], + ceil_mode=False, + count_include_pad=True, + divisor_override=None, + ) + + 
@export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap2d(x)
+
+
+@register_test_case(module_factory=lambda: AvgPool2dFloatModule())
+def AvgPool2dFloatModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(2, 4, 20, 20, low=-1))
+
+
+class AvgPool2dIntModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.ap2d = torch.nn.AvgPool2d(
+            kernel_size=[6, 8],
+            stride=[2, 2],
+            padding=[3, 4],
+            ceil_mode=False,
+            count_include_pad=True,
+            divisor_override=None,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.int64, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap2d(x)
+
+
+@register_test_case(module_factory=lambda: AvgPool2dIntModule())
+def AvgPool2dIntModule_basic(module, tu: TestUtils):
+    module.forward(tu.randint(2, 4, 20, 20, high=100))
+
+
+class AvgPool2dStaticModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.ap2d = torch.nn.AvgPool2d(
+            kernel_size=[6, 8],
+            stride=[2, 2],
+            padding=[3, 4],
+            ceil_mode=False,
+            count_include_pad=True,
+            divisor_override=None,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([2, 2, 10, 20], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap2d(x)
+
+
+@register_test_case(module_factory=lambda: AvgPool2dStaticModule())
+def AvgPool2dStaticModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(2, 2, 10, 20, low=-1))
+
+
+class AvgPool2dCountIncludePadFalseStaticModule(torch.nn.Module):
+
+    def __init__(self):
+        super().__init__()
+        self.ap2d = torch.nn.AvgPool2d(
+            kernel_size=[3, 3],
+            stride=[1, 1],
+            padding=[1, 1],
+            ceil_mode=False,
+            count_include_pad=False,
+            divisor_override=None,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([32, 384, 25, 25], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap2d(x)
+
+
+@register_test_case(module_factory=lambda: AvgPool2dCountIncludePadFalseStaticModule())
+def AvgPool2dCountIncludePadFalseStaticModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(32, 384, 25, 25, low=-1))
+
+
+class AvgPool2dDivisorOverrideModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.ap2d = torch.nn.AvgPool2d(
+            kernel_size=[4, 8],
+            stride=[2, 3],
+            padding=[2, 4],
+            ceil_mode=False,
+            count_include_pad=True,
+            divisor_override=22,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([4, 4, 20, 20], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap2d(x)
+
+
+@register_test_case(module_factory=lambda: AvgPool2dDivisorOverrideModule())
+def AvgPool2dDivisorOverrideModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(4, 4, 20, 20, low=-1))
+
+
+class AvgPool2dCeilModeTrueModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.ap2d = torch.nn.AvgPool2d(
+            kernel_size=[6, 8],
+            stride=[2, 2],
+            padding=[3, 4],
+            ceil_mode=True,
+            count_include_pad=True,
+            divisor_override=None,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap2d(x)
+
+
+@register_test_case(module_factory=lambda: AvgPool2dCeilModeTrueModule())
+def AvgPool2dCeilModeTrueModule_basic(module, tu: TestUtils):
+    module.forward(tu.rand(2, 4, 20, 20, low=0.5, high=1.0))
+
+
+class AvgPool2dWithoutPadModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.ap2d = torch.nn.AvgPool2d(
+            kernel_size=[6, 8],
+            stride=[2, 2],
+            padding=[0, 0],
+            ceil_mode=False,
+            count_include_pad=False,
+
divisor_override=None, + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case(module_factory=lambda: AvgPool2dWithoutPadModule()) +def AvgPool2dWithoutPadModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 4, 20, 20, low=0.5, high=1.0)) + + +class AvgPool2dCHWModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[6, 8], + stride=[2, 2], + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case(module_factory=lambda: AvgPool2dCHWModule()) +def AvgPool2dCHWModule_basic(module, tu: TestUtils): + module.forward(tu.rand(4, 20, 20, low=0.5, high=1.0)) + + +class AvgPool2dSingleIntTupleParamsModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=(6,), + stride=(2,), + padding=(1,), + count_include_pad=False, + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case(module_factory=lambda: AvgPool2dSingleIntTupleParamsModule()) +def AvgPool2dSingleIntTupleParamsModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 4, 20, 20, low=0.5, high=1.0)) + + +class AvgPool2dSingleIntTupleParamsIncludePadModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=(6,), + stride=(2,), + padding=(1,), + count_include_pad=True, + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case( + module_factory=lambda: AvgPool2dSingleIntTupleParamsIncludePadModule() +) +def AvgPool2dSingleIntTupleParamsIncludePadModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 4, 20, 20, low=0.5, high=1.0)) + + +class AvgPool2dWithoutPadFullDimIndivisibleByStrideModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[3, 3], + stride=[2, 2], + padding=[0, 0], + count_include_pad=False, + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case( + module_factory=lambda: AvgPool2dWithoutPadFullDimIndivisibleByStrideModule() +) +def AvgPool2dWithoutPadFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 56, 56, low=-1)) + + +class AvgPool2dWithPadFullDimIndivisibleByStrideModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[3, 3], + stride=[2, 2], + padding=[1, 1], + count_include_pad=False, + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case( + module_factory=lambda: AvgPool2dWithPadFullDimIndivisibleByStrideModule() +) +def AvgPool2dWithPadFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 112, 112, low=-1)) + + +class AvgPool2dFullDimIndivisibleByStrideModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[3, 3], + stride=[3, 3], + padding=[1, 1], + count_include_pad=False, + ) + + @export + @annotate_args( 
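+        # For this test and the ceil-mode variant below (input 75, kernel 3,
+        # stride 3, padding 1): floor mode gives floor((75 + 2 - 3) / 3) + 1 = 25
+        # outputs per spatial dim, while ceil_mode=True gives ceil(74 / 3) + 1 = 26,
+        # with the last window still required to start inside the padded input.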
+ [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case(module_factory=lambda: AvgPool2dFullDimIndivisibleByStrideModule()) +def AvgPool2dFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 75, 75, low=-1)) + + +class AvgPool2dCeilModeFullDimIndivisibleByStrideModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[3, 3], + stride=[3, 3], + padding=[1, 1], + ceil_mode=True, + count_include_pad=False, + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case( + module_factory=lambda: AvgPool2dCeilModeFullDimIndivisibleByStrideModule() +) +def AvgPool2dCeilModeFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 75, 75, low=-1)) + + +# ============================================================================== + + +class AvgPool3dStaticModule(torch.nn.Module): + + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool3d( + kernel_size=[2, 2, 2], + stride=[2, 2, 2], + padding=[0, 0, 0], + ceil_mode=False, + count_include_pad=True, + divisor_override=None, + ) + + @export + @annotate_args( + [ + None, + ([2, 2, 4, 4, 4], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case(module_factory=lambda: AvgPool3dStaticModule()) +def AvgPool3dStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 2, 4, 4, 4, low=-1)) + + +class AvgPool3dCountIncludePadFalse(torch.nn.Module): + + def __init__(self): + super().__init__() + self.ap3d = torch.nn.AvgPool3d( + kernel_size=[3, 3, 3], + stride=[1, 1, 1], + padding=[1, 1, 1], + ceil_mode=False, + count_include_pad=False, + divisor_override=None, + ) + + @export + @annotate_args( + [ + None, + ([3, 3, 12, 12, 12], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap3d(x) + + +@register_test_case(module_factory=lambda: AvgPool3dCountIncludePadFalse()) +def AvgPool3dCountIncludePadFalse_basic(module, tu: TestUtils): + module.forward(tu.rand(3, 3, 12, 12, 12, low=-1)) + + +class AvgPool3dCountIncludePadFalseWithoutPadding(torch.nn.Module): + + def __init__(self): + super().__init__() + self.ap3d = torch.nn.AvgPool3d( + kernel_size=[3, 3, 3], + stride=[1, 1, 1], + padding=[0, 0, 0], + ceil_mode=False, + count_include_pad=False, + divisor_override=None, + ) + + @export + @annotate_args( + [ + None, + ([3, 3, 12, 12, 12], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap3d(x) + + +@register_test_case( + module_factory=lambda: AvgPool3dCountIncludePadFalseWithoutPadding() +) +def AvgPool3dCountIncludePadFalseWithoutPadding_basic(module, tu: TestUtils): + module.forward(tu.rand(3, 3, 12, 12, 12, low=-1)) + + +# ============================================================================== + + +class AvgPool1dFloatModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap1d = torch.nn.AvgPool1d( + kernel_size=6, stride=2, padding=3, ceil_mode=False, count_include_pad=True + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap1d(x) + + +@register_test_case(module_factory=lambda: AvgPool1dFloatModule()) +def AvgPool1dFloatModule_basic(module, tu: TestUtils): + module.forward(tu.rand(2, 4, 20, low=-1)) + + +class 
AvgPool1dIntModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap1d = torch.nn.AvgPool1d( + kernel_size=6, stride=2, padding=3, ceil_mode=False, count_include_pad=True + ) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.int64, True), + ] + ) + def forward(self, x): + return self.ap1d(x) + + +@register_test_case(module_factory=lambda: AvgPool1dIntModule()) +def AvgPool1dIntModule_basic(module, tu: TestUtils): + module.forward(tu.randint(2, 4, 20, high=100)) + + +class AvgPool1dStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap1d = torch.nn.AvgPool1d( + kernel_size=6, stride=2, padding=3, ceil_mode=False, count_include_pad=True + ) + + @export + @annotate_args( + [ + None, + ([2, 4, 20], torch.int64, True), + ] + ) + def forward(self, x): + return self.ap1d(x) + + +@register_test_case(module_factory=lambda: AvgPool1dStaticModule()) +def AvgPool1dStaticModule_basic(module, tu: TestUtils): + module.forward(tu.randint(2, 4, 20, high=100)) + + +class AvgPool1dCountIncludePadFalseWithoutPadding(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap1d = torch.nn.AvgPool1d( + kernel_size=3, stride=1, padding=0, ceil_mode=False, count_include_pad=False + ) + + @export + @annotate_args( + [ + None, + ([3, 4, 20], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap1d(x) + + +@register_test_case( + module_factory=lambda: AvgPool1dCountIncludePadFalseWithoutPadding() +) +def AvgPool1dCountIncludePadFalseWithoutPadding_basic(module, tu: TestUtils): + module.forward(tu.rand(3, 4, 20)) + + +class AvgPool1dCountIncludePadFalse(torch.nn.Module): + def __init__(self): + super().__init__() + self.ap1d = torch.nn.AvgPool1d( + kernel_size=3, stride=1, padding=1, ceil_mode=False, count_include_pad=False + ) + + @export + @annotate_args( + [ + None, + ([3, 4, 20], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap1d(x) + + +@register_test_case(module_factory=lambda: AvgPool1dCountIncludePadFalse()) +def AvgPool1dCountIncludePadFalse_basic(module, tu: TestUtils): + module.forward(tu.rand(3, 4, 20)) + + +# ============================================================================== + + +class AdaptiveAvgPool1dStaticLargerOutput(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=13) + + @export + @annotate_args([None, ([5, 512, 7], torch.float32, True)]) + def forward(self, x): + return self.aap1d(x) + + +@register_test_case(module_factory=lambda: AdaptiveAvgPool1dStaticLargerOutput()) +def AdaptiveAvgPool1dStaticLargerOutput_basic(module, tu: TestUtils): + module.forward(tu.rand(5, 512, 7)) + + +class AdaptiveAvgPool1dStaticEvenMultiple(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7) + + @export + @annotate_args([None, ([5, 512, 147], torch.float32, True)]) + def forward(self, x): + return self.aap1d(x) + + +@register_test_case(module_factory=lambda: AdaptiveAvgPool1dStaticEvenMultiple()) +def AdaptiveAvgPool1dStaticEvenMultiple_basic(module, tu: TestUtils): + module.forward(tu.rand(5, 512, 147)) + + +class AdaptiveAvgPool1dGeneralDynamic(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7) + + @export + @annotate_args([None, ([-1, -1, -1], torch.float32, True)]) + def forward(self, x): + return self.aap1d(x) + + +@register_test_case(module_factory=lambda: AdaptiveAvgPool1dGeneralDynamic()) 
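+# The general (non-divisible) adaptive case exercised here computes, per output
+# index i over length L: start = floor(i * L / out), end = ceil((i + 1) * L / out);
+# for L = 10, out = 7 the window widths vary between 2 and 3.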
+def AdaptiveAvgPool1dGeneralDynamic_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 10)) + + +class AdaptiveAvgPool1dGeneralDynamicNoBatches(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7) + + @export + @annotate_args([None, ([-1, -1], torch.float32, True)]) + def forward(self, x): + return self.aap1d(x) + + +@register_test_case(module_factory=lambda: AdaptiveAvgPool1dGeneralDynamicNoBatches()) +def AdaptiveAvgPool1dGeneralDynamicNoBatches_basic(module, tu: TestUtils): + module.forward(tu.rand(512, 10)) + + +class AdaptiveAvgPool1dNonUnitOutputSizeStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7) + + @export + @annotate_args( + [ + None, + ([1, 512, 7], torch.float32, True), + ] + ) + def forward(self, x): + return self.aap1d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveAvgPool1dNonUnitOutputSizeStaticModule() +) +def AdaptiveAvgPool1dNonUnitOutputSizeStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 7)) + + +class AdaptiveAvgPool1dNonUnitOutputSizeDynamicModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.aap1d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveAvgPool1dNonUnitOutputSizeDynamicModule() +) +def AdaptiveAvgPool1dNonUnitOutputSizeDynamicModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 7)) + + +class AdaptiveAvgPool1dUnitOutputSizeStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=1) + + @export + @annotate_args( + [ + None, + ([1, 512, 7], torch.float32, True), + ] + ) + def forward(self, x): + return self.aap1d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveAvgPool1dUnitOutputSizeStaticModule() +) +def AdaptiveAvgPool1dUnitOutputSizeStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 7)) + + +class AdaptiveAvgPool1dUnitOutputSizeDynamicModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=1) + + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return self.aap1d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveAvgPool1dUnitOutputSizeDynamicModule() +) +def AdaptiveAvgPool1dUnitOutputSizeDynamicModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 7)) + + +# AdaptiveAvgPool2d + + +class AdaptiveAvgPool2dDynamic(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap2d = torch.nn.AdaptiveAvgPool2d(output_size=(7, 13)) + + @export + @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)]) + def forward(self, x): + return self.aap2d(x) + + +@register_test_case(module_factory=lambda: AdaptiveAvgPool2dDynamic()) +def AdaptiveAvgPool2dDynamic_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 10, 16)) + + +class AdaptiveAvgPool2dDynamicNoBatch(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap2d = torch.nn.AdaptiveAvgPool2d(output_size=(7, 13)) + + @export + @annotate_args([None, ([-1, -1, -1], torch.float32, True)]) + def forward(self, x): + return self.aap2d(x) + + +@register_test_case(module_factory=lambda: 
AdaptiveAvgPool2dDynamicNoBatch()) +def AdaptiveAvgPool2dDynamicNoBatch_basic(module, tu: TestUtils): + module.forward(tu.rand(512, 10, 16)) + + +# AdaptiveAvgPool3d + + +class AdaptiveAvgPool3dDynamic(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap3d = torch.nn.AdaptiveAvgPool3d(output_size=(7, 13, 15)) + + @export + @annotate_args([None, ([-1, -1, -1, -1, -1], torch.float32, True)]) + def forward(self, x): + return self.aap3d(x) + + +@register_test_case(module_factory=lambda: AdaptiveAvgPool3dDynamic()) +def AdaptiveAvgPool3dDynamic_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 10, 16, 17)) + + +class AdaptiveAvgPool3dDynamicNoBatch(torch.nn.Module): + def __init__(self): + super().__init__() + self.aap3d = torch.nn.AdaptiveAvgPool3d(output_size=(7, 13, 15)) + + @export + @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)]) + def forward(self, x): + return self.aap3d(x) + + +@register_test_case(module_factory=lambda: AdaptiveAvgPool3dDynamicNoBatch()) +def AdaptiveAvgPool3dDynamicNoBatch_basic(module, tu: TestUtils): + module.forward(tu.rand(512, 10, 16, 17)) + + +# AdaptiveMaxPool1d + + +class AdaptiveMaxPool1dDynamic(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp1d = torch.nn.AdaptiveMaxPool1d(output_size=(7), return_indices=False) + + @export + @annotate_args([None, ([-1, -1, -1], torch.float32, True)]) + def forward(self, x): + return self.amp1d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool1dDynamic()) +def AdaptiveMaxPool1dDynamic_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 10)) + + +class AdaptiveMaxPool1dDynamicNoBatch(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp1d = torch.nn.AdaptiveMaxPool1d(output_size=(7), return_indices=False) + + @export + @annotate_args([None, ([-1, -1], torch.float32, True)]) + def forward(self, x): + return self.amp1d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool1dDynamicNoBatch()) +def AdaptiveMaxPool1dDynamicNoBatch_basic(module, tu: TestUtils): + module.forward(tu.rand(512, 10)) + + +class AdaptiveMaxPool1dStatic(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp1d = torch.nn.AdaptiveMaxPool1d(output_size=(7), return_indices=False) + + @export + @annotate_args([None, ([1, 512, 10], torch.float32, True)]) + def forward(self, x): + return self.amp1d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool1dStatic()) +def AdaptiveMaxPool1dStatic_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 10)) + + +class AdaptiveMaxPool1dDimOneStatic(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp1d = torch.nn.AdaptiveMaxPool1d(output_size=(1), return_indices=False) + + @export + @annotate_args([None, ([1, 512, 7], torch.float32, True)]) + def forward(self, x): + return self.amp1d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool1dDimOneStatic()) +def AdaptiveMaxPool1dDimOneStatic_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 7)) + + +# AdaptiveMaxPool2d + + +class AdaptiveMaxPool2dDynamic(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp2d = torch.nn.AdaptiveMaxPool2d( + output_size=(7, 13), return_indices=False + ) + + @export + @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)]) + def forward(self, x): + return self.amp2d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool2dDynamic()) +def AdaptiveMaxPool2dDynamic_basic(module, tu: TestUtils): + 
module.forward(tu.rand(1, 512, 10, 16)) + + +class AdaptiveMaxPool2dDynamicNoBatch(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp2d = torch.nn.AdaptiveMaxPool2d( + output_size=(7, 13), return_indices=False + ) + + @export + @annotate_args([None, ([-1, -1, -1], torch.float32, True)]) + def forward(self, x): + return self.amp2d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool2dDynamicNoBatch()) +def AdaptiveMaxPool2dDynamicNoBatch_basic(module, tu: TestUtils): + module.forward(tu.rand(512, 10, 16)) + + +class AdaptiveMaxPool2dDynamicWithIndices(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp2d = torch.nn.AdaptiveMaxPool2d( + output_size=(7, 13), return_indices=True + ) + + @export + @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)]) + def forward(self, x): + return self.amp2d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool2dDynamicWithIndices()) +def AdaptiveMaxPool2dDynamicWithIndices_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 10, 16)) + + +class AdaptiveMaxPool2dStatic(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp2d = torch.nn.AdaptiveMaxPool2d( + output_size=(7, 13), return_indices=False + ) + + @export + @annotate_args([None, ([1, 512, 10, 9], torch.float32, True)]) + def forward(self, x): + return self.amp2d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool2dStatic()) +def AdaptiveMaxPool2dStatic_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 10, 9)) + + +class AdaptiveMaxPool2dStaticWithIndices(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp2d = torch.nn.AdaptiveMaxPool2d( + output_size=(7, 13), return_indices=True + ) + + @export + @annotate_args([None, ([1, 512, 10, 16], torch.float32, True)]) + def forward(self, x): + return self.amp2d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool2dStaticWithIndices()) +def AdaptiveMaxPool2dStaticWithIndices_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 10, 16)) + + +class AdaptiveMaxPool2dFixedKernelStrideSizeStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp2d = torch.nn.AdaptiveMaxPool2d((2, 2)) + + @export + @annotate_args( + [ + None, + ([1, 3, 7, 7], torch.float32, True), + ] + ) + def forward(self, x): + return self.amp2d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveMaxPool2dFixedKernelStrideSizeStaticModule() +) +def AdaptiveMaxPool2dFixedKernelStrideSizeStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 3, 7, 7)) + + +class AdaptiveMaxPool2dUnitOutputSizeStaticModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp2d = torch.nn.AdaptiveMaxPool2d((1, 1)) + + @export + @annotate_args( + [ + None, + ([1, 512, 7, 7], torch.float32, True), + ] + ) + def forward(self, x): + return self.amp2d(x) + + +@register_test_case( + module_factory=lambda: AdaptiveMaxPool2dUnitOutputSizeStaticModule() +) +def AdaptiveMaxPool2dUnitOutputSizeStaticModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 7, 7)) + + +# AdaptiveMaxPool3d + + +class AdaptiveMaxPool3dDynamic(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp3d = torch.nn.AdaptiveMaxPool3d( + output_size=(7, 13, 15), return_indices=False + ) + + @export + @annotate_args([None, ([-1, -1, -1, -1, -1], torch.float32, True)]) + def forward(self, x): + return self.amp3d(x) + + +@register_test_case(module_factory=lambda: 
AdaptiveMaxPool3dDynamic()) +def AdaptiveMaxPool3dDynamic_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 10, 16, 17)) + + +class AdaptiveMaxPool3dDynamicNoBatch(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp3d = torch.nn.AdaptiveMaxPool3d( + output_size=(7, 13, 15), return_indices=False + ) + + @export + @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)]) + def forward(self, x): + return self.amp3d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool3dDynamicNoBatch()) +def AdaptiveMaxPool3dDynamicNoBatch_basic(module, tu: TestUtils): + module.forward(tu.rand(512, 10, 16, 17)) + + +class AdaptiveMaxPool3dDynamicWithIndices(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp3d = torch.nn.AdaptiveMaxPool3d( + output_size=(7, 13, 15), return_indices=True + ) + + @export + @annotate_args([None, ([-1, -1, -1, -1, -1], torch.float32, True)]) + def forward(self, x): + return self.amp3d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool3dDynamicWithIndices()) +def AdaptiveMaxPool3dDynamicWithIndices_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 10, 16, 17)) + + +class AdaptiveMaxPool3dStatic(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp3d = torch.nn.AdaptiveMaxPool3d( + output_size=(7, 13, 15), return_indices=False + ) + + @export + @annotate_args([None, ([1, 512, 10, 9, 5], torch.float32, True)]) + def forward(self, x): + return self.amp3d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool3dStatic()) +def AdaptiveMaxPool3dStatic_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 10, 9, 5)) + + +class AdaptiveMaxPool3dStaticWithIndices(torch.nn.Module): + def __init__(self): + super().__init__() + self.amp3d = torch.nn.AdaptiveMaxPool3d( + output_size=(7, 13, 15), return_indices=True + ) + + @export + @annotate_args([None, ([1, 512, 10, 16, 17], torch.float32, True)]) + def forward(self, x): + return self.amp3d(x) + + +@register_test_case(module_factory=lambda: AdaptiveMaxPool3dStaticWithIndices()) +def AdaptiveMaxPool3dStaticWithIndices_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 512, 10, 16, 17)) + + +# ============================================================================== + + +class MaxUnpool2dModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([2, 2, 2, 4], torch.float32, True), + ([2, 2, 2, 4], torch.int64, True), + ] + ) + def forward(self, x, indices): + return torch.ops.aten.max_unpool2d(x, indices, (4, 8)) + + +@register_test_case(module_factory=lambda: MaxUnpool2dModule()) +def MaxUnpool2dModule_basic(module, tu: TestUtils): + input = tu.rand(2, 2, 4, 8) + pool = torch.nn.MaxPool2d(kernel_size=(2, 2), return_indices=True) + output, indices = pool(input) + + module.forward(output, indices) + + +# ============================================================================== + + +class MaxUnpool2dModule_3dInput(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([2, 2, 4], torch.float32, True), + ([2, 2, 4], torch.int64, True), + ] + ) + def forward(self, x, indices): + return torch.ops.aten.max_unpool2d(x, indices, (4, 8)) + + +@register_test_case(module_factory=lambda: MaxUnpool2dModule_3dInput()) +def MaxUnpool2dModule_3dInput_basic(module, tu: TestUtils): + input = tu.rand(2, 4, 8) + pool = torch.nn.MaxPool2d(kernel_size=(2, 2), return_indices=True) + output, indices = pool(input) 
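+    # Sketch of the semantics assumed here: max_unpool2d scatters each pooled
+    # value back to the flat position recorded in `indices` and zero-fills
+    # everything else; the (4, 8) passed in forward() is the spatial size of
+    # the tensor being reconstructed.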
+ + module.forward(output, indices) + + +# ============================================================================== + + +class MaxUnpool3dModule(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, 2, 2, 4], torch.float32, True), + ([-1, -1, 2, 2, 4], torch.int64, True), + ] + ) + def forward(self, x, indices): + return torch.ops.aten.max_unpool3d(x, indices, (4, 5, 6), (2, 3, 2), (0, 0, 1)) + + +@register_test_case(module_factory=lambda: MaxUnpool3dModule()) +def MaxUnpool3dModule_basic(module, tu: TestUtils): + input = tu.rand(2, 2, 4, 5, 6) + pool = torch.nn.MaxPool3d( + kernel_size=(2, 2, 2), stride=(2, 3, 2), padding=(0, 0, 1), return_indices=True + ) + output, indices = pool(input) + + module.forward(output, indices) + + +# We have a special case for all-zeros padding, test it too. +class MaxUnpool3dModulePad0(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, 2, 2, 3], torch.float32, True), + ([-1, -1, 2, 2, 3], torch.int64, True), + ] + ) + def forward(self, x, indices): + return torch.ops.aten.max_unpool3d(x, indices, (4, 5, 6), (2, 3, 2), (0, 0, 0)) + + +@register_test_case(module_factory=lambda: MaxUnpool3dModulePad0()) +def MaxUnpool3dModulePad0_basic(module, tu: TestUtils): + input = tu.rand(2, 2, 4, 5, 6) + pool = torch.nn.MaxPool3d( + kernel_size=(2, 2, 2), stride=(2, 3, 2), padding=(0, 0, 0), return_indices=True + ) + output, indices = pool(input) + + module.forward(output, indices) + + +class AvgPool2dCeilNoPadUnitaryStrides(torch.nn.Module): + + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[3, 3], + stride=[1, 1], + padding=[0, 0], + ceil_mode=True, + count_include_pad=False, + divisor_override=None, + ) + + @export + @annotate_args( + [ + None, + ([1, 1, 4, 4], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case(module_factory=lambda: AvgPool2dCeilNoPadUnitaryStrides()) +def AvgPool2dCeilNoPadUnitaryStrides_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 4, 4, low=-1)) + + +class AvgPool2dCeilPadNonUnitaryStrides(torch.nn.Module): + + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[3, 3], + stride=[2, 2], + padding=[1, 1], + ceil_mode=True, + count_include_pad=False, + divisor_override=None, + ) + + @export + @annotate_args( + [ + None, + ([1, 1, 4, 4], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case(module_factory=lambda: AvgPool2dCeilPadNonUnitaryStrides()) +def AvgPool2dCeilPadNonUnitaryStrides_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 4, 4, low=-1)) + + +class AvgPool2dCeilNoPadStridedIncludePadding(torch.nn.Module): + + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[3, 3], + stride=[2, 2], + padding=[0, 0], + ceil_mode=True, + count_include_pad=True, + divisor_override=None, + ) + + @export + @annotate_args( + [ + None, + ([1, 1, 4, 4], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case(module_factory=lambda: AvgPool2dCeilNoPadStridedIncludePadding()) +def AvgPool2dCeilNoPadStridedIncludePadding_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 4, 4, low=-1)) + + +class AvgPool2dCeilNoPadUnitaryStrideIncludePadding(torch.nn.Module): + + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + 
kernel_size=[3, 3], + stride=[1, 1], + padding=[0, 0], + ceil_mode=True, + count_include_pad=True, + divisor_override=None, + ) + + @export + @annotate_args( + [ + None, + ([1, 1, 4, 4], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case( + module_factory=lambda: AvgPool2dCeilNoPadUnitaryStrideIncludePadding() +) +def AvgPool2dCeilNoPadUnitaryStrideIncludePadding_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 4, 4, low=-1)) + + +class AvgPool2dCeilPaddingUnitaryStrideIncludePaddingFalse(torch.nn.Module): + + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[3, 3], + stride=[1, 1], + padding=[1, 1], + ceil_mode=True, + count_include_pad=False, + divisor_override=None, + ) + + @export + @annotate_args( + [ + None, + ([1, 1, 4, 4], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case( + module_factory=lambda: AvgPool2dCeilPaddingUnitaryStrideIncludePaddingFalse() +) +def AvgPool2dCeilPaddingUnitaryStrideIncludePaddingFalse_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 4, 4, low=-1)) + + +class AvgPool2dFloorNoPadUnitaryStrideIncludePadding(torch.nn.Module): + + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[3, 3], + stride=[1, 1], + padding=[0, 0], + ceil_mode=False, + count_include_pad=True, + divisor_override=None, + ) + + @export + @annotate_args( + [ + None, + ([1, 1, 4, 4], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case( + module_factory=lambda: AvgPool2dFloorNoPadUnitaryStrideIncludePadding() +) +def AvgPool2dFloorNoPadUnitaryStrideIncludePadding_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 4, 4, low=-1)) + + +class AvgPool2dFloorPaddingUnitaryStrideIncludePadding(torch.nn.Module): + + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[3, 3], + stride=[1, 1], + padding=[1, 1], + ceil_mode=False, + count_include_pad=True, + divisor_override=None, + ) + + @export + @annotate_args( + [ + None, + ([1, 1, 4, 4], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case( + module_factory=lambda: AvgPool2dFloorPaddingUnitaryStrideIncludePadding() +) +def AvgPool2dFloorPaddingUnitaryStrideIncludePadding_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 4, 4, low=-1)) + + +class AvgPool2dCeilPaddingUnitaryStrideIncludePadding(torch.nn.Module): + + def __init__(self): + super().__init__() + self.ap2d = torch.nn.AvgPool2d( + kernel_size=[3, 3], + stride=[1, 1], + padding=[1, 1], + ceil_mode=True, + count_include_pad=True, + divisor_override=None, + ) + + @export + @annotate_args( + [ + None, + ([1, 1, 4, 4], torch.float32, True), + ] + ) + def forward(self, x): + return self.ap2d(x) + + +@register_test_case( + module_factory=lambda: AvgPool2dCeilPaddingUnitaryStrideIncludePadding() +) +def AvgPool2dCeilPaddingUnitaryStrideIncludePadding_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 4, 4, low=-1)) + + +class AvgPool2dCeilPaddingStridedIncludePadding(torch.nn.Module): + # Note that in this case the kernel window center will go into the padding. + # When this happens the padding elements are counted in the divisor, but + # the out of bound elements from the ceiling are not counted + # (i.e., clamped from the divisor count). 
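+    # A worked illustration of the note above (editor's arithmetic, assuming
+    # the clamping just described): with the 4x4 input used below (kernel 3x3,
+    # stride 2, padding 1, ceil_mode=True), the output is 3x3 and the last
+    # window starts at index 3, covering positions {3, 4, 5}; 3 is real data,
+    # 4 is padding, and 5 exists only because of ceil_mode. The bottom-right
+    # output is therefore x[3, 3] / 4 (a 2x2 counted region: one real element
+    # plus three counted padding zeros).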
+
+    def __init__(self):
+        super().__init__()
+        self.ap2d = torch.nn.AvgPool2d(
+            kernel_size=[3, 3],
+            stride=[2, 2],
+            padding=[1, 1],
+            ceil_mode=True,
+            count_include_pad=True,
+            divisor_override=None,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([1, 1, 4, 4], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap2d(x)
+
+
+@register_test_case(module_factory=lambda: AvgPool2dCeilPaddingStridedIncludePadding())
+def AvgPool2dCeilPaddingStridedIncludePadding_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 1, 4, 4, low=-1))
+
+
+class AvgPool2dDiffKernelsStridesNoPadCeilPadNotIncluded(torch.nn.Module):
+    # This test captures the torch-mlir issue reported here:
+    # https://github.com/llvm/torch-mlir/issues/4079
+    # The issue was caused by combining ceil_mode=True with
+    # count_include_pad=False. The kernel and stride sizes are also
+    # different in this test, to make sure that they are processed in
+    # the right order.
+
+    def __init__(self):
+        super().__init__()
+        self.ap2d = torch.nn.AvgPool2d(
+            kernel_size=[3, 2],
+            stride=[2, 3],
+            padding=[0, 0],
+            ceil_mode=True,
+            count_include_pad=False,
+            divisor_override=None,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([1, 1, 3, 4], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap2d(x)
+
+
+@register_test_case(
+    module_factory=lambda: AvgPool2dDiffKernelsStridesNoPadCeilPadNotIncluded()
+)
+def AvgPool2dDiffKernelsStridesNoPadCeilPadNotIncluded_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 1, 3, 4, low=-1))
+
+
+class AvgPool2dDiffKernelsStridesPadCeilPadNotIncluded(torch.nn.Module):
+    # Different sizes used for each kernel, stride, and padding dimension.
+
+    def __init__(self):
+        super().__init__()
+        self.ap2d = torch.nn.AvgPool2d(
+            kernel_size=[3, 4],
+            stride=[2, 3],
+            padding=[1, 2],
+            ceil_mode=True,
+            count_include_pad=False,
+            divisor_override=None,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([1, 1, 3, 4], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap2d(x)
+
+
+@register_test_case(
+    module_factory=lambda: AvgPool2dDiffKernelsStridesPadCeilPadNotIncluded()
+)
+def AvgPool2dDiffKernelsStridesPadCeilPadNotIncluded_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 1, 3, 4, low=-1))
+
+
+class AvgPool3dDiffKernelsStridesNoPadCeilPadNotIncluded(torch.nn.Module):
+    # 3D version of AvgPool2dDiffKernelsStridesNoPadCeilPadNotIncluded.
+
+    def __init__(self):
+        super().__init__()
+        self.ap3d = torch.nn.AvgPool3d(
+            kernel_size=[3, 2, 4],
+            stride=[3, 2, 5],
+            padding=[0, 0, 0],
+            ceil_mode=True,
+            count_include_pad=False,
+            divisor_override=None,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([1, 1, 4, 5, 7], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap3d(x)
+
+
+@register_test_case(
+    module_factory=lambda: AvgPool3dDiffKernelsStridesNoPadCeilPadNotIncluded()
+)
+def AvgPool3dDiffKernelsStridesNoPadCeilPadNotIncluded_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 1, 4, 5, 7, low=-1))
+
+
+class AvgPool3dDiffKernelsStridesPadCeilPadNotIncluded(torch.nn.Module):
+    # 3D version of AvgPool2dDiffKernelsStridesPadCeilPadNotIncluded.
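+    # Shape arithmetic for the 1x1x3x4x7 input below (editor's sketch,
+    # assuming the usual ceil-mode formula ceil((L + 2 * pad - k) / stride) + 1
+    # per dimension): D: ceil((3 + 2 - 3) / 2) + 1 = 2,
+    # H: ceil((4 + 4 - 4) / 3) + 1 = 3, W: ceil((7 + 6 - 7) / 4) + 1 = 3,
+    # giving a 1x1x2x3x3 result.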
+
+    def __init__(self):
+        super().__init__()
+        self.ap3d = torch.nn.AvgPool3d(
+            kernel_size=[3, 4, 7],
+            stride=[2, 3, 4],
+            padding=[1, 2, 3],
+            ceil_mode=True,
+            count_include_pad=False,
+            divisor_override=None,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([1, 1, 3, 4, 7], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap3d(x)
+
+
+@register_test_case(
+    module_factory=lambda: AvgPool3dDiffKernelsStridesPadCeilPadNotIncluded()
+)
+def AvgPool3dDiffKernelsStridesPadCeilPadNotIncluded_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 1, 3, 4, 7, low=-1))
+
+
+class AvgPool1dNoPadCeilPadNotIncluded(torch.nn.Module):
+    # 1D version of AvgPool2dDiffKernelsStridesNoPadCeilPadNotIncluded.
+
+    def __init__(self):
+        super().__init__()
+        self.ap1d = torch.nn.AvgPool1d(
+            kernel_size=[2],
+            stride=[2],
+            padding=[0],
+            ceil_mode=True,
+            count_include_pad=False,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([1, 1, 5], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap1d(x)
+
+
+@register_test_case(module_factory=lambda: AvgPool1dNoPadCeilPadNotIncluded())
+def AvgPool1dNoPadCeilPadNotIncluded_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 1, 5, low=-1))
+
+
+class AvgPool1dPadCeilPadNotIncluded(torch.nn.Module):
+    # 1D version of AvgPool2dDiffKernelsStridesPadCeilPadNotIncluded.
+
+    def __init__(self):
+        super().__init__()
+        self.ap1d = torch.nn.AvgPool1d(
+            kernel_size=[2],
+            stride=[2],
+            padding=[1],
+            ceil_mode=True,
+            count_include_pad=False,
+        )
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([1, 1, 3], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.ap1d(x)
+
+
+@register_test_case(module_factory=lambda: AvgPool1dPadCeilPadNotIncluded())
+def AvgPool1dPadCeilPadNotIncluded_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 1, 3, low=-1))
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/quantized_models.py b/projects/e2e/torch_mlir_e2e_test/test_suite/quantized_models.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/quantized_models.py
rename to projects/e2e/torch_mlir_e2e_test/test_suite/quantized_models.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/reduction.py b/projects/e2e/torch_mlir_e2e_test/test_suite/reduction.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/reduction.py
rename to projects/e2e/torch_mlir_e2e_test/test_suite/reduction.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/reshape_like.py b/projects/e2e/torch_mlir_e2e_test/test_suite/reshape_like.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/reshape_like.py
rename to projects/e2e/torch_mlir_e2e_test/test_suite/reshape_like.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/return_types.py b/projects/e2e/torch_mlir_e2e_test/test_suite/return_types.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/return_types.py
rename to projects/e2e/torch_mlir_e2e_test/test_suite/return_types.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/rng.py b/projects/e2e/torch_mlir_e2e_test/test_suite/rng.py
similarity index 100%
rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/rng.py
rename to projects/e2e/torch_mlir_e2e_test/test_suite/rng.py
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/scalar.py b/projects/e2e/torch_mlir_e2e_test/test_suite/scalar.py
similarity index 100%
rename from 
projects/pt1/python/torch_mlir_e2e_test/test_suite/scalar.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/scalar.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/scalar_comparison.py b/projects/e2e/torch_mlir_e2e_test/test_suite/scalar_comparison.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/scalar_comparison.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/scalar_comparison.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/scatter.py b/projects/e2e/torch_mlir_e2e_test/test_suite/scatter.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/scatter.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/scatter.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/slice_like.py b/projects/e2e/torch_mlir_e2e_test/test_suite/slice_like.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/slice_like.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/slice_like.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/spectral.py b/projects/e2e/torch_mlir_e2e_test/test_suite/spectral.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/spectral.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/spectral.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/squeeze.py b/projects/e2e/torch_mlir_e2e_test/test_suite/squeeze.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/squeeze.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/squeeze.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/stats.py b/projects/e2e/torch_mlir_e2e_test/test_suite/stats.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/stats.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/stats.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/threshold.py b/projects/e2e/torch_mlir_e2e_test/test_suite/threshold.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/threshold.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/threshold.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/timeout.py b/projects/e2e/torch_mlir_e2e_test/test_suite/timeout.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/timeout.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/timeout.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/type_conversion.py b/projects/e2e/torch_mlir_e2e_test/test_suite/type_conversion.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/type_conversion.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/type_conversion.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/type_promotion.py b/projects/e2e/torch_mlir_e2e_test/test_suite/type_promotion.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/type_promotion.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/type_promotion.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/vision_models.py b/projects/e2e/torch_mlir_e2e_test/test_suite/vision_models.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/test_suite/vision_models.py rename to projects/e2e/torch_mlir_e2e_test/test_suite/vision_models.py diff --git 
a/projects/pt1/python/torch_mlir_e2e_test/tosa_backends/__init__.py b/projects/e2e/torch_mlir_e2e_test/tosa_backends/__init__.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/tosa_backends/__init__.py rename to projects/e2e/torch_mlir_e2e_test/tosa_backends/__init__.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/tosa_backends/abc.py b/projects/e2e/torch_mlir_e2e_test/tosa_backends/abc.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/tosa_backends/abc.py rename to projects/e2e/torch_mlir_e2e_test/tosa_backends/abc.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/tosa_backends/linalg_on_tensors.py b/projects/e2e/torch_mlir_e2e_test/tosa_backends/linalg_on_tensors.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/tosa_backends/linalg_on_tensors.py rename to projects/e2e/torch_mlir_e2e_test/tosa_backends/linalg_on_tensors.py diff --git a/projects/pt1/python/torch_mlir_e2e_test/utils.py b/projects/e2e/torch_mlir_e2e_test/utils.py similarity index 100% rename from projects/pt1/python/torch_mlir_e2e_test/utils.py rename to projects/e2e/torch_mlir_e2e_test/utils.py diff --git a/projects/pt1/python/CMakeLists.txt b/projects/pt1/python/CMakeLists.txt index c86f8e52c881..ca0febf011e4 100644 --- a/projects/pt1/python/CMakeLists.txt +++ b/projects/pt1/python/CMakeLists.txt @@ -41,7 +41,6 @@ endif() if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER) add_subdirectory(torch_mlir/jit_ir_importer) add_subdirectory(torch_mlir/csrc/jit_ir_importer) - add_subdirectory(torch_mlir_e2e_test) endif() ################################################################################ diff --git a/projects/pt1/python/torch_mlir/torchscript.py b/projects/pt1/python/torch_mlir/torchscript.py index cf979838f0f0..cc2163c7ac8d 100644 --- a/projects/pt1/python/torch_mlir/torchscript.py +++ b/projects/pt1/python/torch_mlir/torchscript.py @@ -5,6 +5,7 @@ from typing import Optional, Sequence, Union, List, Dict, Tuple, Callable, Iterable from enum import Enum +from warnings import warn import sys from io import StringIO @@ -21,9 +22,11 @@ lower_mlir_module, TensorPlaceholder, ) -from torch_mlir.jit_ir_importer import ClassAnnotator, ImportOptions, ModuleBuilder -from torch_mlir.jit_ir_importer.build_tools.library_generator import generate_library - +try: + from torch_mlir.jit_ir_importer import ClassAnnotator, ImportOptions, ModuleBuilder + from torch_mlir.jit_ir_importer.build_tools.library_generator import generate_library +except ModuleNotFoundError as e: + warn("torch_mlir.torchscript.compile relies on jit_ir_importer. Please build with `TORCH_MLIR_ENABLE_JIT_IR_IMPORTER=ON`.") _example_arg = Union[TensorPlaceholder, torch.Tensor] _example_args_for_one_method = Union[_example_arg, Sequence[_example_arg]] From 8e90b648a8e16d838abb4ad89011c978c7da2580 Mon Sep 17 00:00:00 2001 From: zjgarvey Date: Thu, 16 Oct 2025 14:28:04 -0700 Subject: [PATCH 02/15] Disable JIT_IR_IMPORTER by default. update build_posix script and cmake lists. Signed-off-by: zjgarvey --- CMakeLists.txt | 2 +- build_tools/ci/build_posix.sh | 2 +- projects/CMakeLists.txt | 2 +- projects/e2e/CMakeLists.txt | 7 +++++++ 4 files changed, 10 insertions(+), 3 deletions(-) create mode 100644 projects/e2e/CMakeLists.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index 5413da9d9bab..ac162a2b23c5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -74,7 +74,7 @@ endif() # Turning this off disables the old TorchScript path, leaving FX based import as the current supported option. 
 # The option will be retained for a time, and if a maintainer is interested in setting up testing for it,
 # please reach out on the list and speak up for it. It will only be enabled in CI for test usage.
-cmake_dependent_option(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER "Enables JIT IR Importer" ON TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS OFF)
+cmake_dependent_option(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER "Enables JIT IR Importer" OFF TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS OFF)
 cmake_dependent_option(TORCH_MLIR_ENABLE_LTC "Enables LTC backend" OFF TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS OFF)
 option(TORCH_MLIR_ENABLE_ONNX_C_IMPORTER "Enables the ONNX C importer" OFF)
diff --git a/build_tools/ci/build_posix.sh b/build_tools/ci/build_posix.sh
index b9bb122acd37..dde181206ead 100755
--- a/build_tools/ci/build_posix.sh
+++ b/build_tools/ci/build_posix.sh
@@ -51,7 +51,7 @@ cmake -S "$repo_root/externals/llvm-project/llvm" -B "$build_dir" \
   -DLLVM_TARGETS_TO_BUILD=host \
   -DMLIR_ENABLE_BINDINGS_PYTHON=ON \
   -DTORCH_MLIR_ENABLE_LTC=OFF \
-  -DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS=ON
+  -DTORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS=OFF
 echo "::endgroup::"
 
 echo "::group::Build"
diff --git a/projects/CMakeLists.txt b/projects/CMakeLists.txt
index affd2b3cfc8b..e6b6ed6e9c50 100644
--- a/projects/CMakeLists.txt
+++ b/projects/CMakeLists.txt
@@ -66,7 +66,7 @@ endif()
 
 # Include e2e testing infra.
 if(NOT TORCH_MLIR_ENABLE_ONLY_MLIR_PYTHON_BINDINGS)
-  add_subdirectory(torch_mlir_e2e_test)
+  add_subdirectory(e2e)
 endif()
 
 # Include jit_ir_common if the jit_ir importer or LTC is enabled,
diff --git a/projects/e2e/CMakeLists.txt b/projects/e2e/CMakeLists.txt
new file mode 100644
index 000000000000..1fdd60f89e1d
--- /dev/null
+++ b/projects/e2e/CMakeLists.txt
@@ -0,0 +1,7 @@
+message(STATUS "Building e2e testing project")
+
+################################################################################
+# Setup python.
+################################################################################
+
+add_subdirectory(torch_mlir_e2e_test)

From be3b6fdc080860b2e747db16542ca8fd57545804 Mon Sep 17 00:00:00 2001
From: zjgarvey
Date: Thu, 16 Oct 2025 14:32:58 -0700
Subject: [PATCH 03/15] Temporarily remove checking generated sources.

Signed-off-by: zjgarvey
---
 .github/workflows/ci.yml | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 9a5ffb648783..78b078fcdbec 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -75,8 +75,3 @@ jobs:
       - name: Integration tests (torch-${{ matrix.torch-version }})
         run: |
           bash build_tools/ci/test_posix.sh ${{ matrix.torch-version }}
-
-      - name: Check generated sources (torch-nightly only)
-        if: ${{ matrix.torch-version == 'nightly' }}
-        run: |
-          bash build_tools/ci/check_generated_sources.sh

From b88dbcf33deec27f334079ba966381586b50031d Mon Sep 17 00:00:00 2001
From: zjgarvey
Date: Thu, 16 Oct 2025 15:34:25 -0700
Subject: [PATCH 04/15] Update test script with new PYTHONPATH. Fix imports in
 various files. Make config loading localized to prevent errors.
Signed-off-by: zjgarvey --- build_tools/ci/test_posix.sh | 2 +- projects/e2e/e2e_testing/main.py | 34 +++++++------------ projects/e2e/e2e_testing/xfail_sets.py | 2 +- .../torch_mlir_e2e_test/configs/__init__.py | 26 ++++++++++---- .../test_suite/__init__.py | 2 -- projects/e2e/torch_mlir_e2e_test/utils.py | 8 +++++ .../python/test/torchscript_e2e_test/basic.py | 2 +- .../compilation_failure.py | 2 +- .../torchscript_e2e_test/error_reports.py | 2 +- .../torchscript_e2e_test/non_tensor_values.py | 2 +- .../torchscript_e2e_test/runtime_failure.py | 2 +- .../test/torchscript_e2e_test/submodule.py | 2 +- projects/pt1/python/torch_mlir/torchscript.py | 7 ++-- 13 files changed, 50 insertions(+), 43 deletions(-) diff --git a/build_tools/ci/test_posix.sh b/build_tools/ci/test_posix.sh index bdeae5c7dfe4..25de2c1a7368 100755 --- a/build_tools/ci/test_posix.sh +++ b/build_tools/ci/test_posix.sh @@ -6,7 +6,7 @@ this_dir="$(cd $(dirname $0) && pwd)" repo_root="$(cd $this_dir/../.. && pwd)" torch_version="${1:-unknown}" -export PYTHONPATH="$repo_root/build/tools/torch-mlir/python_packages/torch_mlir:$repo_root/projects/pt1" +export PYTHONPATH="$repo_root/build/tools/torch-mlir/python_packages/torch_mlir:$repo_root/projects/e2e" echo "::group::Run ONNX e2e integration tests" python3 -m e2e_testing.main --config=onnx -v diff --git a/projects/e2e/e2e_testing/main.py b/projects/e2e/e2e_testing/main.py index ae4986b7f96c..e85dc041eb85 100644 --- a/projects/e2e/e2e_testing/main.py +++ b/projects/e2e/e2e_testing/main.py @@ -17,15 +17,7 @@ # Available test configs. -from torch_mlir_e2e_test.configs import ( - LazyTensorCoreTestConfig, - NativeTorchTestConfig, - OnnxBackendTestConfig, - TorchScriptTestConfig, - TorchDynamoTestConfig, - JITImporterTestConfig, - FxImporterTestConfig, -) +from torch_mlir_e2e_test.configs import load_config from torch_mlir_e2e_test.linalg_on_tensors_backends.refbackend import ( RefBackendLinalgOnTensorsBackend, @@ -150,54 +142,54 @@ def main(): # Find the selected config. 
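+    # Each branch below now fetches its TestConfig class lazily through
+    # load_config, so importing main.py does not pull in every backend's
+    # dependencies; a config module is only imported once its --config
+    # value is actually selected.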
     if args.config == "linalg":
-        config = JITImporterTestConfig(RefBackendLinalgOnTensorsBackend())
+        config = load_config("JITImporterTestConfig")(RefBackendLinalgOnTensorsBackend())
         xfail_set = LINALG_XFAIL_SET
         crashing_set = LINALG_CRASHING_SET
     elif args.config == "stablehlo":
-        config = JITImporterTestConfig(LinalgOnTensorsStablehloBackend(), "stablehlo")
+        config = load_config("JITImporterTestConfig")(LinalgOnTensorsStablehloBackend(), "stablehlo")
         xfail_set = all_test_unique_names - STABLEHLO_PASS_SET
         crashing_set = STABLEHLO_CRASHING_SET
     elif args.config == "tosa":
-        config = JITImporterTestConfig(LinalgOnTensorsTosaBackend(), "tosa")
+        config = load_config("JITImporterTestConfig")(LinalgOnTensorsTosaBackend(), "tosa")
         xfail_set = all_test_unique_names - TOSA_PASS_SET
         crashing_set = TOSA_CRASHING_SET
     elif args.config == "native_torch":
-        config = NativeTorchTestConfig()
+        config = load_config("NativeTorchTestConfig")()
         xfail_set = set()
         crashing_set = set()
     elif args.config == "torchscript":
-        config = TorchScriptTestConfig()
+        config = load_config("TorchScriptTestConfig")()
         xfail_set = set()
         crashing_set = set()
     elif args.config == "lazy_tensor_core":
-        config = LazyTensorCoreTestConfig()
+        config = load_config("LazyTensorCoreTestConfig")()
         xfail_set = LTC_XFAIL_SET
         crashing_set = LTC_CRASHING_SET
     elif args.config == "fx_importer":
-        config = FxImporterTestConfig(RefBackendLinalgOnTensorsBackend())
+        config = load_config("FxImporterTestConfig")(RefBackendLinalgOnTensorsBackend())
         xfail_set = FX_IMPORTER_XFAIL_SET
         crashing_set = FX_IMPORTER_CRASHING_SET
     elif args.config == "fx_importer_stablehlo":
-        config = FxImporterTestConfig(LinalgOnTensorsStablehloBackend(), "stablehlo")
+        config = load_config("FxImporterTestConfig")(LinalgOnTensorsStablehloBackend(), "stablehlo")
         xfail_set = FX_IMPORTER_STABLEHLO_XFAIL_SET
         crashing_set = FX_IMPORTER_STABLEHLO_CRASHING_SET
     elif args.config == "fx_importer_tosa":
-        config = FxImporterTestConfig(LinalgOnTensorsTosaBackend(), "tosa")
+        config = load_config("FxImporterTestConfig")(LinalgOnTensorsTosaBackend(), "tosa")
         xfail_set = FX_IMPORTER_TOSA_XFAIL_SET
         crashing_set = FX_IMPORTER_TOSA_CRASHING_SET
     elif args.config == "torchdynamo":
         # TODO: Enable runtime verification and extend crashing set.
-        config = TorchDynamoTestConfig(
+        config = load_config("TorchDynamoTestConfig")(
             RefBackendLinalgOnTensorsBackend(generate_runtime_verification=False)
         )
         xfail_set = TORCHDYNAMO_XFAIL_SET
         crashing_set = TORCHDYNAMO_CRASHING_SET
     elif args.config == "onnx":
-        config = OnnxBackendTestConfig(RefBackendLinalgOnTensorsBackend())
+        config = load_config("OnnxBackendTestConfig")(RefBackendLinalgOnTensorsBackend())
         xfail_set = ONNX_XFAIL_SET
         crashing_set = ONNX_CRASHING_SET
     elif args.config == "onnx_tosa":
-        config = OnnxBackendTestConfig(LinalgOnTensorsTosaBackend(), output_type="tosa")
+        config = load_config("OnnxBackendTestConfig")(LinalgOnTensorsTosaBackend(), output_type="tosa")
         xfail_set = ONNX_TOSA_XFAIL_SET
         crashing_set = ONNX_TOSA_CRASHING_SET
diff --git a/projects/e2e/e2e_testing/xfail_sets.py b/projects/e2e/e2e_testing/xfail_sets.py
index e4a2e319d7fe..1f735f30bd28 100644
--- a/projects/e2e/e2e_testing/xfail_sets.py
+++ b/projects/e2e/e2e_testing/xfail_sets.py
@@ -11,7 +11,7 @@
 # might be used to keep more elaborate sets of testing configurations).
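+# (The version helpers are re-imported from torch_mlir_e2e_test.utils below,
+# presumably so the xfail sets no longer depend on the pt1-only
+# torch_mlir._version module.)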
 from torch_mlir_e2e_test.test_suite import COMMON_TORCH_MLIR_LOWERING_XFAILS
-from torch_mlir._version import torch_version_for_comparison, version
+from torch_mlir_e2e_test.utils import torch_version_for_comparison, version
 
 print(f"TORCH_VERSION_FOR_COMPARISON =", torch_version_for_comparison())
 
diff --git a/projects/e2e/torch_mlir_e2e_test/configs/__init__.py b/projects/e2e/torch_mlir_e2e_test/configs/__init__.py
index 62e731349a95..08433c7f0380 100644
--- a/projects/e2e/torch_mlir_e2e_test/configs/__init__.py
+++ b/projects/e2e/torch_mlir_e2e_test/configs/__init__.py
@@ -3,10 +3,22 @@
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 # Also available under a BSD-style license. See LICENSE.
 
-from .lazy_tensor_core import LazyTensorCoreTestConfig
-from .native_torch import NativeTorchTestConfig
-from .onnx_backend import OnnxBackendTestConfig
-from .torchscript import TorchScriptTestConfig
-from .torchdynamo import TorchDynamoTestConfig
-from .jit_importer_backend import JITImporterTestConfig
-from .fx_importer_backend import FxImporterTestConfig
+__all__ = ["load_config"]
+
+from importlib import import_module
+
+CONFIG_LOCATIONS = {
+    "LazyTensorCoreTestConfig": "lazy_tensor_core",
+    "NativeTorchTestConfig": "native_torch",
+    "OnnxBackendTestConfig": "onnx_backend",
+    "TorchScriptTestConfig": "torchscript",
+    "TorchDynamoTestConfig": "torchdynamo",
+    "JITImporterTestConfig": "jit_importer_backend",
+    "FxImporterTestConfig": "fx_importer_backend",
+}
+
+def load_config(name: str) -> type:
+    source = CONFIG_LOCATIONS.get(name)
+    assert source is not None, f"Could not find TestConfig named {name}."
+    module = import_module(f".{source}", __package__)
+    return getattr(module, name)
diff --git a/projects/e2e/torch_mlir_e2e_test/test_suite/__init__.py b/projects/e2e/torch_mlir_e2e_test/test_suite/__init__.py
index bd82cd1c11b0..93cdc1560673 100644
--- a/projects/e2e/torch_mlir_e2e_test/test_suite/__init__.py
+++ b/projects/e2e/torch_mlir_e2e_test/test_suite/__init__.py
@@ -7,8 +7,6 @@
 # These represent further work needed in torch-mlir to lower them properly
 # to the backend contract.
 
-from torch_mlir._version import torch_version_for_comparison, version
-
 COMMON_TORCH_MLIR_LOWERING_XFAILS = {
     "NativeGroupNormBackwardModule_basic",
     "QuantizedMLP_basic",
diff --git a/projects/e2e/torch_mlir_e2e_test/utils.py b/projects/e2e/torch_mlir_e2e_test/utils.py
index 0ab47efa9284..b3a144f37387 100644
--- a/projects/e2e/torch_mlir_e2e_test/utils.py
+++ b/projects/e2e/torch_mlir_e2e_test/utils.py
@@ -3,6 +3,9 @@
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 # Also available under a BSD-style license. See LICENSE.
 
+from packaging import version
+import torch
+
 from torch_mlir.compiler_utils import TensorPlaceholder
 from torch_mlir_e2e_test.annotations import TORCH_MLIR_ARG_ANNOTATIONS_ATTR_NAME
 
@@ -22,3 +25,8 @@ def convert_annotations_to_placeholders(forward_method):
     )
     placeholders.append(TensorPlaceholder(annotation[0], annotation[1]))
     return placeholders
+
+
+def torch_version_for_comparison():
+    # Ignore +cpu, +cu117m, etc. 
in comparisons + return version.parse(torch.__version__.split("+", 1)[0]) diff --git a/projects/pt1/python/test/torchscript_e2e_test/basic.py b/projects/pt1/python/test/torchscript_e2e_test/basic.py index 83a73900d6b8..54361f244b21 100644 --- a/projects/pt1/python/test/torchscript_e2e_test/basic.py +++ b/projects/pt1/python/test/torchscript_e2e_test/basic.py @@ -10,7 +10,7 @@ from torch_mlir_e2e_test.framework import run_tests, TestUtils from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY -from torch_mlir_e2e_test.configs import TorchScriptTestConfig +from torch_mlir_e2e_test.configs.torchscript import TorchScriptTestConfig class MmModule(torch.nn.Module): diff --git a/projects/pt1/python/test/torchscript_e2e_test/compilation_failure.py b/projects/pt1/python/test/torchscript_e2e_test/compilation_failure.py index 4aac65df8aae..0d715b5a65a3 100644 --- a/projects/pt1/python/test/torchscript_e2e_test/compilation_failure.py +++ b/projects/pt1/python/test/torchscript_e2e_test/compilation_failure.py @@ -10,7 +10,7 @@ from torch_mlir_e2e_test.framework import run_tests, TestUtils from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY -from torch_mlir_e2e_test.configs import TorchScriptTestConfig +from torch_mlir_e2e_test.configs.torchscript import TorchScriptTestConfig class MmModule(torch.nn.Module): diff --git a/projects/pt1/python/test/torchscript_e2e_test/error_reports.py b/projects/pt1/python/test/torchscript_e2e_test/error_reports.py index f6c949c3d828..4aa203f09ef7 100644 --- a/projects/pt1/python/test/torchscript_e2e_test/error_reports.py +++ b/projects/pt1/python/test/torchscript_e2e_test/error_reports.py @@ -12,7 +12,7 @@ from torch_mlir_e2e_test.framework import run_tests, TestUtils from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY -from torch_mlir_e2e_test.configs import TorchScriptTestConfig +from torch_mlir_e2e_test.configs.torchscript import TorchScriptTestConfig # CHECK: Unexpected outcome summary: # CHECK: FAIL - "ErroneousModule_basic" diff --git a/projects/pt1/python/test/torchscript_e2e_test/non_tensor_values.py b/projects/pt1/python/test/torchscript_e2e_test/non_tensor_values.py index 8991229f0a29..f84dc112224c 100644 --- a/projects/pt1/python/test/torchscript_e2e_test/non_tensor_values.py +++ b/projects/pt1/python/test/torchscript_e2e_test/non_tensor_values.py @@ -12,7 +12,7 @@ from torch_mlir_e2e_test.framework import run_tests, TestUtils from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY -from torch_mlir_e2e_test.configs import TorchScriptTestConfig +from torch_mlir_e2e_test.configs.torchscript import TorchScriptTestConfig class NonTensorValuesModule(torch.nn.Module): diff --git a/projects/pt1/python/test/torchscript_e2e_test/runtime_failure.py b/projects/pt1/python/test/torchscript_e2e_test/runtime_failure.py index b7609bc2c0e2..7bd17139d5f9 100644 --- a/projects/pt1/python/test/torchscript_e2e_test/runtime_failure.py +++ b/projects/pt1/python/test/torchscript_e2e_test/runtime_failure.py @@ -10,7 +10,7 @@ from torch_mlir_e2e_test.framework import run_tests, TestUtils from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY -from 
torch_mlir_e2e_test.configs import TorchScriptTestConfig +from torch_mlir_e2e_test.configs.torchscript import TorchScriptTestConfig class MmModule(torch.nn.Module): diff --git a/projects/pt1/python/test/torchscript_e2e_test/submodule.py b/projects/pt1/python/test/torchscript_e2e_test/submodule.py index ae7ee878bf36..4a882a0270cd 100644 --- a/projects/pt1/python/test/torchscript_e2e_test/submodule.py +++ b/projects/pt1/python/test/torchscript_e2e_test/submodule.py @@ -10,7 +10,7 @@ from torch_mlir_e2e_test.framework import run_tests, TestUtils from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY -from torch_mlir_e2e_test.configs import TorchScriptTestConfig +from torch_mlir_e2e_test.configs.torchscript import TorchScriptTestConfig class Submodule2(torch.nn.Module): diff --git a/projects/pt1/python/torch_mlir/torchscript.py b/projects/pt1/python/torch_mlir/torchscript.py index cc2163c7ac8d..d0392879bf53 100644 --- a/projects/pt1/python/torch_mlir/torchscript.py +++ b/projects/pt1/python/torch_mlir/torchscript.py @@ -22,11 +22,8 @@ lower_mlir_module, TensorPlaceholder, ) -try: - from torch_mlir.jit_ir_importer import ClassAnnotator, ImportOptions, ModuleBuilder - from torch_mlir.jit_ir_importer.build_tools.library_generator import generate_library -except ModuleNotFoundError as e: - warn("torch_mlir.torchscript.compile relies on jit_ir_importer. Please build with `TORCH_MLIR_ENABLE_JIT_IR_IMPORTER=ON`.") +from torch_mlir.jit_ir_importer import ClassAnnotator, ImportOptions, ModuleBuilder +from torch_mlir.jit_ir_importer.build_tools.library_generator import generate_library _example_arg = Union[TensorPlaceholder, torch.Tensor] _example_args_for_one_method = Union[_example_arg, Sequence[_example_arg]] From df8814ce01840ee0fc36931f66a82e916bf1c493 Mon Sep 17 00:00:00 2001 From: zjgarvey Date: Thu, 16 Oct 2025 15:50:14 -0700 Subject: [PATCH 05/15] Sync xfails Signed-off-by: zjgarvey --- projects/e2e/e2e_testing/xfail_sets.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/projects/e2e/e2e_testing/xfail_sets.py b/projects/e2e/e2e_testing/xfail_sets.py index 1f735f30bd28..b9a505a1efa2 100644 --- a/projects/e2e/e2e_testing/xfail_sets.py +++ b/projects/e2e/e2e_testing/xfail_sets.py @@ -497,7 +497,6 @@ "CrossEntropyLossModule_basic", "CrossEntropyLossNoReductionModule_basic", "IsInfiniteModule_basic", - "InterpolateDynamicModule_sizes_nearest", "IouOfModule_basic", "MeshgridIndexingIJ_basic", "MeshgridIndexingXY_basic", @@ -915,8 +914,12 @@ "TraceUnsignedIntModule_empty", "UnsafeIndexPutHackedTwin1DFloatNonAccumulateModule_basic", "UnsafeViewCollapseDynamicWithAtenSizeIntModule_basic", + "UpSampleNearest1dVecNoneScales_basic", + "UpSampleNearest1dVecNoneShape_basic", "UpSampleNearest2dBackwardScalesNone_basic", "UpSampleNearest2dBackward_basic", + "UpSampleNearest2dVecNoneScales_basic", + "UpSampleNearest2dVecNoneShape_basic", "ViewCollapseDynamicWithAtenSizeIntModule_basic", "ViewSizeFromOtherTensor_basic", # Error: `aten.as_strided` op is not supported @@ -3041,6 +3044,8 @@ "LogCumsumExpModule_basic", "LogCumsumExpStaticNegativeDimModule_basic", "LogCumsumExpStaticFloat64DtypeModule_basic", + "MaxPool1dWithIndicesModule_basic", + "MaxPool1dWithIndicesCeilModeModule_basic", "MaxPool1dCeilModeTrueModule_basic", "MaxPool1dModule_basic", "MaxPool2dCeilModeTrueModule_basic", @@ -3803,6 +3808,8 @@ "LogCumsumExpStaticNegativeDimModule_basic", "LogCumsumExpStaticFloat64DtypeModule_basic", 
"MaskedScatterStaticBasic_basic", + "MaxPool1dWithIndicesModule_basic", + "MaxPool1dWithIndicesCeilModeModule_basic", "MaxPool1dCeilModeTrueModule_basic", "MaxPool1dModule_basic", "MaxPool2dCeilModeTrueModule_basic", @@ -3956,8 +3963,13 @@ "TransposedConv2dNegativePadding_basic", "TransposedConv3dNegativePadding_basic", "UnsafeViewCollapseDynamicWithAtenSizeIntModule_basic", + "InterpolateDynamicModule_sizes_nearest", + "UpSampleNearest1dVecNoneScales_basic", + "UpSampleNearest1dVecNoneShape_basic", "UpSampleNearest2dBackwardScalesNone_basic", "UpSampleNearest2dBackward_basic", + "UpSampleNearest2dVecNoneScales_basic", + "UpSampleNearest2dVecNoneShape_basic", "ViewCollapseDynamicWithAtenSizeIntModule_basic", "ViewSizeFromOtherTensor_basic", "VisionTransformerModule_basic", @@ -4656,6 +4668,8 @@ "Matmul_4d", "Matmul_matvec", "Matmul_vecmat", + "MaxPool1dWithIndicesModule_basic", + "MaxPool1dWithIndicesCeilModeModule_basic", "MaxPool1dCeilModeTrueModule_basic", "MaxPool1dModule_basic", "MaxPool2dCeilModeTrueModule_basic", From 66c52695983f0e06a8a74996918282a26077a7b5 Mon Sep 17 00:00:00 2001 From: zjgarvey Date: Thu, 16 Oct 2025 15:51:25 -0700 Subject: [PATCH 06/15] Fix a few test files Signed-off-by: zjgarvey --- .../torch_mlir_e2e_test/test_suite/conv.py | 88 + .../torch_mlir_e2e_test/test_suite/pooling.py | 49 + .../torch_mlir_e2e_test/test_suite/conv.py | 1973 ----------- .../torch_mlir_e2e_test/test_suite/pooling.py | 3087 ----------------- 4 files changed, 137 insertions(+), 5060 deletions(-) delete mode 100644 projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py delete mode 100644 projects/pt1/python/torch_mlir_e2e_test/test_suite/pooling.py diff --git a/projects/e2e/torch_mlir_e2e_test/test_suite/conv.py b/projects/e2e/torch_mlir_e2e_test/test_suite/conv.py index 2ec87b9fee43..b9dc855b7c0a 100644 --- a/projects/e2e/torch_mlir_e2e_test/test_suite/conv.py +++ b/projects/e2e/torch_mlir_e2e_test/test_suite/conv.py @@ -1088,6 +1088,94 @@ def UpSampleNearest2dStaticFactor_basic(module, tu: TestUtils): module.forward(tu.rand(2, 3, 4, 4)) +class UpSampleNearest2dVecNoneShape(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float64, True), + ] + ) + def forward(self, input): + return torch.ops.aten.upsample_nearest2d.vec( + input, output_size=None, scale_factors=[3.66, 4.2] + ) + + +@register_test_case(module_factory=lambda: UpSampleNearest2dVecNoneShape()) +def UpSampleNearest2dVecNoneShape_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 6, 12).to(torch.float64)) + + +class UpSampleNearest2dVecNoneScales(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1, -1], torch.float64, True), + ] + ) + def forward(self, input): + return torch.ops.aten.upsample_nearest2d.vec( + input, + output_size=[18, 48], + scale_factors=None, + ) + + +@register_test_case(module_factory=lambda: UpSampleNearest2dVecNoneScales()) +def UpSampleNearest2dVecNoneScales_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 6, 12).to(torch.float64)) + + +class UpSampleNearest1dVecNoneShape(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.float64, True), + ] + ) + def forward(self, input): + return torch.ops.aten.upsample_nearest1d.vec( + input, output_size=None, scale_factors=[3.0] + ) + + +@register_test_case(module_factory=lambda: UpSampleNearest1dVecNoneShape()) +def 
UpSampleNearest1dVecNoneShape_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 6).to(torch.float64)) + + +class UpSampleNearest1dVecNoneScales(torch.nn.Module): + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.float64, True), + ] + ) + def forward(self, input): + return torch.ops.aten.upsample_nearest1d.vec(input, [18], None) + + +@register_test_case(module_factory=lambda: UpSampleNearest1dVecNoneScales()) +def UpSampleNearest1dVecNoneScales_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 1, 6).to(torch.float64)) + + class Conv1dModule(torch.nn.Module): def __init__(self): super().__init__() diff --git a/projects/e2e/torch_mlir_e2e_test/test_suite/pooling.py b/projects/e2e/torch_mlir_e2e_test/test_suite/pooling.py index bb7f386f3708..b31090538aa5 100644 --- a/projects/e2e/torch_mlir_e2e_test/test_suite/pooling.py +++ b/projects/e2e/torch_mlir_e2e_test/test_suite/pooling.py @@ -180,6 +180,55 @@ def AdaptiveAvgPool2dUnitOutputSizeDynamicModule_basic(module, tu: TestUtils): # ============================================================================== +class MaxPool1dWithIndicesModule(torch.nn.Module): + + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool1d_with_indices( + x, kernel_size=[6], stride=[2], padding=[3], dilation=2, ceil_mode=False + ) + + +@register_test_case(module_factory=lambda: MaxPool1dWithIndicesModule()) +def MaxPool1dWithIndicesModule_basic(module, tu: TestUtils): + module.forward(tu.rand(1, 64, 112, low=-1)) + + +class MaxPool1dWithIndicesCeilModeModule(torch.nn.Module): + + def __init__(self): + super().__init__() + + @export + @annotate_args( + [ + None, + ([-1, -1, -1], torch.float32, True), + ] + ) + def forward(self, x): + return torch.ops.aten.max_pool1d_with_indices( + x, kernel_size=[4], stride=[2], padding=[2], dilation=2, ceil_mode=True + ) + + +@register_test_case(module_factory=lambda: MaxPool1dWithIndicesCeilModeModule()) +def MaxPool1dWithIndicesCeilModeModule_basic(module, tu: TestUtils): + module.forward(tu.rand(3, 25, 37, low=-1)) + + +# ============================================================================== + + class MaxPool1dModule(torch.nn.Module): def __init__(self): diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py deleted file mode 100644 index b9dc855b7c0a..000000000000 --- a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py +++ /dev/null @@ -1,1973 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# Also available under a BSD-style license. See LICENSE. 
- -import torch -from torch_mlir_e2e_test.framework import TestUtils -from torch_mlir_e2e_test.registry import register_test_case -from torch_mlir_e2e_test.annotations import annotate_args, export - -# ============================================================================== - - -class Conv2dNoPaddingModule(torch.nn.Module): - def __init__(self): - super().__init__() - torch.manual_seed(0) - self.conv = torch.nn.Conv2d(2, 10, 3, bias=False) - self.train(False) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.conv(x) - - -@register_test_case(module_factory=lambda: Conv2dNoPaddingModule()) -def Conv2dNoPaddingModule_basic(module, tu: TestUtils): - t = tu.rand(5, 2, 10, 20) - module.forward(t) - - -class Conv2dBiasNoPaddingModule(torch.nn.Module): - def __init__(self): - super().__init__() - torch.manual_seed(0) - self.conv = torch.nn.Conv2d(2, 10, 3, bias=True) - self.train(False) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.conv(x) - - -@register_test_case(module_factory=lambda: Conv2dBiasNoPaddingModule()) -def Conv2dBiasNoPaddingModule_basic(module, tu: TestUtils): - t = tu.rand(5, 2, 10, 20) - module.forward(t) - - -class Conv2dWithPaddingModule(torch.nn.Module): - def __init__(self): - super().__init__() - torch.manual_seed(0) - self.conv = torch.nn.Conv2d(2, 10, 3, bias=False, padding=3) - self.train(False) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.conv(x) - - -@register_test_case(module_factory=lambda: Conv2dWithPaddingModule()) -def Conv2dWithPaddingModule_basic(module, tu: TestUtils): - t = tu.rand(5, 2, 10, 20) - module.forward(t) - - -class Conv2dWithPaddingDilationStrideModule(torch.nn.Module): - def __init__(self): - super().__init__() - torch.manual_seed(0) - self.conv = torch.nn.Conv2d( - in_channels=2, - out_channels=10, - kernel_size=3, - padding=3, - stride=2, - dilation=3, - bias=False, - ) - self.train(False) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.conv(x) - - -@register_test_case(module_factory=lambda: Conv2dWithPaddingDilationStrideModule()) -def Conv2dWithPaddingDilationStrideModule_basic(module, tu: TestUtils): - t = tu.rand(5, 2, 10, 20) - module.forward(t) - - -class Conv2dWithPaddingDilationStrideStaticModule(torch.nn.Module): - def __init__(self, out_channels, groups): - super().__init__() - torch.manual_seed(0) - self.conv = torch.nn.Conv2d( - in_channels=4, - out_channels=out_channels, - kernel_size=3, - padding=3, - stride=2, - dilation=3, - bias=False, - groups=groups, - ) - self.train(False) - - @export - @annotate_args( - [ - None, - ([5, 4, 10, 20], torch.float32, True), - ] - ) - def forward(self, x): - return self.conv(x) - - -@register_test_case( - module_factory=lambda: Conv2dWithPaddingDilationStrideStaticModule( - out_channels=10, groups=1 - ) -) -def Conv2dWithPaddingDilationStrideStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(5, 4, 10, 20)) - - -@register_test_case( - module_factory=lambda: Conv2dWithPaddingDilationStrideStaticModule( - out_channels=4, groups=4 - ) -) -def Conv2dWithPaddingDilationStrideStaticModule_depthwise(module, tu: TestUtils): - module.forward(tu.rand(5, 4, 10, 20)) - - -@register_test_case( - module_factory=lambda: Conv2dWithPaddingDilationStrideStaticModule( 
- out_channels=8, groups=4 - ) -) -def Conv2dWithPaddingDilationStrideStaticModule_depthwise_multiplier( - module, tu: TestUtils -): - module.forward(tu.rand(5, 4, 10, 20)) - - -@register_test_case( - module_factory=lambda: Conv2dWithPaddingDilationStrideStaticModule( - out_channels=4, groups=2 - ) -) -def Conv2dWithPaddingDilationStrideStaticModule_grouped(module, tu: TestUtils): - module.forward(tu.rand(5, 4, 10, 20)) - - -@register_test_case( - module_factory=lambda: Conv2dWithPaddingDilationStrideStaticModule( - out_channels=8, groups=2 - ) -) -def Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier( - module, tu: TestUtils -): - module.forward(tu.rand(5, 4, 10, 20)) - - -class Conv2dWithSamePaddingModule(torch.nn.Module): - def __init__(self): - super().__init__() - torch.manual_seed(0) - self.conv = torch.nn.Conv2d(2, 10, 3, bias=False, padding="same") - self.train(False) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.conv(x) - - -@register_test_case(module_factory=lambda: Conv2dWithSamePaddingModule()) -def Conv2dWithSamePaddingModule_basic(module, tu: TestUtils): - t = tu.rand(5, 2, 10, 20) - module.forward(t) - - -class Conv2dWithValidPaddingModule(torch.nn.Module): - def __init__(self): - super().__init__() - torch.manual_seed(0) - self.conv = torch.nn.Conv2d(2, 10, 3, bias=False, padding="valid") - self.train(False) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.conv(x) - - -@register_test_case(module_factory=lambda: Conv2dWithValidPaddingModule()) -def Conv2dWithValidPaddingModule_basic(module, tu: TestUtils): - t = tu.rand(5, 2, 10, 20) - module.forward(t) - - -# ============================================================================== - - -class Convolution2DModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.convolution( - inputVec, - weight, - bias=None, - stride=[1, 1], - padding=[0, 0], - dilation=[1, 1], - transposed=False, - output_padding=[0, 0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Convolution2DModule()) -def Convolution2DModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2)) - - -class Convolution2DStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([3, 3, 10, 10], torch.float32, True), - ([3, 3, 2, 2], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.convolution( - inputVec, - weight, - bias=None, - stride=[1, 1], - padding=[0, 0], - dilation=[1, 1], - transposed=False, - output_padding=[0, 0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Convolution2DStaticModule()) -def Convolution2DStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2)) - - -class Convolution2DStridedModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.convolution( - inputVec, - weight, - bias=None, - stride=[3, 3], - padding=[2, 2], - dilation=[1, 1], 
- transposed=False, - output_padding=[0, 0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Convolution2DStridedModule()) -def Convolution2DStridedModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2)) - - -class _Convolution2DAllFalseModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten._convolution( - inputVec, - weight, - bias=None, - stride=[3, 3], - padding=[2, 2], - dilation=[1, 1], - transposed=False, - output_padding=[0, 0], - groups=1, - benchmark=False, - deterministic=False, - cudnn_enabled=False, - allow_tf32=False, - ) - - -@register_test_case(module_factory=lambda: _Convolution2DAllFalseModule()) -def _Convolution2DAllFalseModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2)) - - -class _Convolution2DBenchmarkModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten._convolution( - inputVec, - weight, - bias=None, - stride=[3, 3], - padding=[2, 2], - dilation=[1, 1], - transposed=False, - output_padding=[0, 0], - groups=1, - benchmark=True, - deterministic=False, - cudnn_enabled=False, - allow_tf32=False, - ) - - -@register_test_case(module_factory=lambda: _Convolution2DBenchmarkModule()) -def _Convolution2DBenchmarkModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2)) - - -class _Convolution2DDeterministicModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten._convolution( - inputVec, - weight, - bias=None, - stride=[3, 3], - padding=[2, 2], - dilation=[1, 1], - transposed=False, - output_padding=[0, 0], - groups=1, - benchmark=False, - deterministic=True, - cudnn_enabled=False, - allow_tf32=False, - ) - - -@register_test_case(module_factory=lambda: _Convolution2DDeterministicModule()) -def _Convolution2DDeterministicModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2)) - - -class _Convolution2DCudnnModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten._convolution( - inputVec, - weight, - bias=None, - stride=[3, 3], - padding=[2, 2], - dilation=[1, 1], - transposed=False, - output_padding=[0, 0], - groups=1, - benchmark=False, - deterministic=False, - cudnn_enabled=True, - allow_tf32=False, - ) - - -@register_test_case(module_factory=lambda: _Convolution2DCudnnModule()) -def _Convolution2DCudnnModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2)) - - -class _Convolution2DTF32Module(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, 
weight): - return torch.ops.aten._convolution( - inputVec, - weight, - bias=None, - stride=[3, 3], - padding=[2, 2], - dilation=[1, 1], - transposed=False, - output_padding=[0, 0], - groups=1, - benchmark=False, - deterministic=False, - cudnn_enabled=False, - allow_tf32=True, - ) - - -@register_test_case(module_factory=lambda: _Convolution2DTF32Module()) -def _Convolution2DTF32Module_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2)) - - -class _ConvolutionDeprecated2DAllFalseModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten._convolution( - inputVec, - weight, - bias=None, - stride=[3, 3], - padding=[2, 2], - dilation=[1, 1], - transposed=False, - output_padding=[0, 0], - groups=1, - benchmark=False, - deterministic=False, - cudnn_enabled=False, - ) - - -@register_test_case(module_factory=lambda: _ConvolutionDeprecated2DAllFalseModule()) -def _ConvolutionDeprecated2DAllFalseModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2)) - - -class _ConvolutionDeprecated2DBenchmarkModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten._convolution( - inputVec, - weight, - bias=None, - stride=[3, 3], - padding=[2, 2], - dilation=[1, 1], - transposed=False, - output_padding=[0, 0], - groups=1, - benchmark=True, - deterministic=False, - cudnn_enabled=False, - ) - - -@register_test_case(module_factory=lambda: _ConvolutionDeprecated2DBenchmarkModule()) -def _ConvolutionDeprecated2DBenchmarkModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2)) - - -class _ConvolutionDeprecated2DDeterministicModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten._convolution( - inputVec, - weight, - bias=None, - stride=[3, 3], - padding=[2, 2], - dilation=[1, 1], - transposed=False, - output_padding=[0, 0], - groups=1, - benchmark=False, - deterministic=True, - cudnn_enabled=False, - ) - - -@register_test_case( - module_factory=lambda: _ConvolutionDeprecated2DDeterministicModule() -) -def _ConvolutionDeprecated2DDeterministicModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2)) - - -class _ConvolutionDeprecated2DCudnnModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten._convolution( - inputVec, - weight, - bias=None, - stride=[3, 3], - padding=[2, 2], - dilation=[1, 1], - transposed=False, - output_padding=[0, 0], - groups=1, - benchmark=False, - deterministic=False, - cudnn_enabled=True, - ) - - -@register_test_case(module_factory=lambda: _ConvolutionDeprecated2DCudnnModule()) -def _ConvolutionDeprecated2DCudnnModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 10, 10), tu.rand(3, 3, 2, 2)) - - 
-class ConvolutionModule2DGroups(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.convolution( - inputVec, - weight, - bias=None, - stride=[3, 3], - padding=[2, 2], - dilation=[1, 1], - transposed=False, - output_padding=[0, 0], - groups=4, - ) - - -@register_test_case(module_factory=lambda: ConvolutionModule2DGroups()) -def ConvolutionModule2DGroups_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 32, 4, 4), tu.rand(32, 8, 3, 3)) - - -# ============================================================================== - - -class ConvolutionModule2DTranspose(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.convolution( - inputVec, - weight, - bias=None, - stride=[1, 1], - padding=[1, 1], - dilation=[1, 1], - transposed=True, - output_padding=[0, 0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: ConvolutionModule2DTranspose()) -def ConvolutionModule2DTranspose_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 4, 4), tu.rand(3, 3, 2, 2)) - - -class ConvolutionModule2DTransposeStrided(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.convolution( - inputVec, - weight, - bias=None, - stride=[2, 2], - padding=[1, 1], - dilation=[1, 1], - transposed=True, - output_padding=[0, 0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: ConvolutionModule2DTransposeStrided()) -def ConvolutionModule2DTransposeStrided_basic(module, tu: TestUtils): - module.forward(tu.rand(5, 2, 5, 6), tu.rand(2, 5, 2, 2)) - - -class ConvolutionModule2DTransposeStridedStatic(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([5, 2, 5, 6], torch.float32, True), - ([2, 5, 2, 2], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.convolution( - inputVec, - weight, - bias=None, - stride=[2, 2], - padding=[1, 1], - dilation=[1, 1], - transposed=True, - output_padding=[0, 0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: ConvolutionModule2DTransposeStridedStatic()) -def ConvolutionModule2DTransposeStridedStatic_basic(module, tu: TestUtils): - module.forward(tu.rand(5, 2, 5, 6), tu.rand(2, 5, 2, 2)) - - -class ConvolutionModule2DTransposeNonUnitOutputPadding(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.convolution( - inputVec, - weight, - bias=None, - stride=[2, 2], - padding=[1, 1], - dilation=[1, 1], - transposed=True, - output_padding=[1, 1], - groups=1, - ) - - -@register_test_case( - module_factory=lambda: ConvolutionModule2DTransposeNonUnitOutputPadding() -) -def ConvolutionModule2DTransposeNonUnitOutputPadding_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 2, 4, 4), tu.rand(2, 2, 3, 3)) - - -class 
Conv_Transpose1dModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ([-1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.conv_transpose1d( - inputVec, - weight, - bias=None, - stride=[2], - padding=[1], - dilation=[1], - output_padding=[0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Conv_Transpose1dModule()) -def Conv_Transpose1dModule_basic(module, tu: TestUtils): - module.forward(tu.rand(5, 2, 6), tu.rand(2, 5, 2)) - - -class Conv_Transpose1dStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([5, 2, 6], torch.float32, True), - ([2, 5, 2], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.conv_transpose1d( - inputVec, - weight, - bias=None, - stride=[2], - padding=[1], - dilation=[1], - output_padding=[0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Conv_Transpose1dStaticModule()) -def Conv_Transpose1dStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(5, 2, 6), tu.rand(2, 5, 2)) - - -class Conv_Transpose2dModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.conv_transpose2d( - inputVec, - weight, - bias=None, - stride=[2, 2], - padding=[1, 1], - dilation=[1, 1], - output_padding=[0, 0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Conv_Transpose2dModule()) -def Conv_Transpose2dModule_basic(module, tu: TestUtils): - module.forward(tu.rand(5, 2, 5, 6), tu.rand(2, 5, 2, 2)) - - -class Conv_Transpose2dStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([5, 2, 5, 6], torch.float32, True), - ([2, 5, 2, 2], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.conv_transpose2d( - inputVec, - weight, - bias=None, - stride=[2, 2], - padding=[1, 1], - dilation=[1, 1], - output_padding=[0, 0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Conv_Transpose2dStaticModule()) -def Conv_Transpose2dStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(5, 2, 5, 6), tu.rand(2, 5, 2, 2)) - - -class Conv_Transpose3dModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.conv_transpose3d( - inputVec, - weight, - bias=None, - stride=[2, 2, 2], - padding=[1, 1, 1], - dilation=[1, 1, 1], - output_padding=[0, 0, 0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Conv_Transpose3dModule()) -def Conv_Transpose3dModule_basic(module, tu: TestUtils): - module.forward(tu.rand(5, 2, 5, 6, 7), tu.rand(2, 5, 2, 2, 2)) - - -class Conv_Transpose3dStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([5, 2, 5, 6, 7], torch.float32, True), - ([2, 5, 2, 2, 2], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.conv_transpose3d( - inputVec, - weight, - bias=None, - stride=[2, 2, 2], - padding=[1, 1, 1], - 
dilation=[1, 1, 1], - output_padding=[0, 0, 0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Conv_Transpose3dStaticModule()) -def Conv_Transpose3dStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(5, 2, 5, 6, 7), tu.rand(2, 5, 2, 2, 2)) - - -class UpSampleNearest2d(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float64, True), - ] - ) - def forward(self, input): - return torch.ops.aten.upsample_nearest2d( - input, output_size=[18, 48], scales_h=3.0, scales_w=4.0 - ) - - -@register_test_case(module_factory=lambda: UpSampleNearest2d()) -def UpSampleNearest2d_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 6, 12).to(torch.float64)) - - -class UpSampleNearest2dSameSize(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec): - return torch._C._nn.upsample_nearest2d( - inputVec, output_size=[11, 11], scales_h=None, scales_w=None - ) - - -@register_test_case(module_factory=lambda: UpSampleNearest2dSameSize()) -def UpSampleNearest2dStaticSize_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 4, 4)) - - -class UpSampleNearest2dDiffSize(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)]) - def forward(self, inputVec): - return torch._C._nn.upsample_nearest2d( - inputVec, output_size=[8, 11], scales_h=None, scales_w=None - ) - - -@register_test_case(module_factory=lambda: UpSampleNearest2dDiffSize()) -def UpSampleNearest2dDynamicSize_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 3, 2, 2)) - - -class UpSampleNearest2dDiffFactor(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)]) - def forward(self, inputVec): - return torch._C._nn.upsample_nearest2d( - inputVec, output_size=[6, 10], scales_h=2.3, scales_w=4.7 - ) - - -@register_test_case(module_factory=lambda: UpSampleNearest2dDiffFactor()) -def UpSampleNearest2dDynamicFactor_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 3, 2, 2)) - - -class UpSampleNearest2dSameFactor(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, inputVec): - return torch._C._nn.upsample_nearest2d( - inputVec, output_size=[8, 8], scales_h=2.0, scales_w=2.0 - ) - - -@register_test_case(module_factory=lambda: UpSampleNearest2dSameFactor()) -def UpSampleNearest2dStaticFactor_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 3, 4, 4)) - - -class UpSampleNearest2dVecNoneShape(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float64, True), - ] - ) - def forward(self, input): - return torch.ops.aten.upsample_nearest2d.vec( - input, output_size=None, scale_factors=[3.66, 4.2] - ) - - -@register_test_case(module_factory=lambda: UpSampleNearest2dVecNoneShape()) -def UpSampleNearest2dVecNoneShape_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 6, 12).to(torch.float64)) - - -class UpSampleNearest2dVecNoneScales(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float64, True), - ] - ) - def forward(self, input): - return 
torch.ops.aten.upsample_nearest2d.vec( - input, - output_size=[18, 48], - scale_factors=None, - ) - - -@register_test_case(module_factory=lambda: UpSampleNearest2dVecNoneScales()) -def UpSampleNearest2dVecNoneScales_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 6, 12).to(torch.float64)) - - -class UpSampleNearest1dVecNoneShape(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float64, True), - ] - ) - def forward(self, input): - return torch.ops.aten.upsample_nearest1d.vec( - input, output_size=None, scale_factors=[3.0] - ) - - -@register_test_case(module_factory=lambda: UpSampleNearest1dVecNoneShape()) -def UpSampleNearest1dVecNoneShape_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 6).to(torch.float64)) - - -class UpSampleNearest1dVecNoneScales(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float64, True), - ] - ) - def forward(self, input): - return torch.ops.aten.upsample_nearest1d.vec(input, [18], None) - - -@register_test_case(module_factory=lambda: UpSampleNearest1dVecNoneScales()) -def UpSampleNearest1dVecNoneScales_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 6).to(torch.float64)) - - -class Conv1dModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ([-1, -1, -1], torch.float32, True), - ([-1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return torch.ops.aten.conv1d( - inputVec, weight, bias=bias, stride=[1], padding=[0], dilation=[1], groups=1 - ) - - -@register_test_case(module_factory=lambda: Conv1dModule()) -def Conv1dModule_basic(module, tu: TestUtils): - inputVec = tu.rand(2, 2, 6) - weight = torch.randn(8, 2, 3) - bias = torch.randn(8) - module.forward(inputVec, weight, bias) - - -class Conv1dDepthwiseWithPaddingDilationStrideStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([2, 4, 6], torch.float32, True), - ([4, 1, 3], torch.float32, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.conv1d( - inputVec, weight, bias=None, stride=[1], padding=[4], dilation=[1], groups=4 - ) - - -@register_test_case( - module_factory=lambda: Conv1dDepthwiseWithPaddingDilationStrideStaticModule() -) -def Conv1dDepthwiseWithPaddingDilationStrideStaticModule_basic(module, tu: TestUtils): - inputVec = tu.rand(2, 4, 6) - weight = torch.randn(4, 1, 3) - module.forward(inputVec, weight) - - -class Conv1dWithSamePaddingModule(torch.nn.Module): - def __init__(self): - super().__init__() - torch.manual_seed(0) - self.conv = torch.nn.Conv1d(2, 10, 3, bias=False, padding="same") - self.train(False) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.conv(x) - - -@register_test_case(module_factory=lambda: Conv1dWithSamePaddingModule()) -def Conv1dWithSamePaddingModule_basic(module, tu: TestUtils): - t = tu.rand(5, 2, 10) - module.forward(t) - - -class Conv1dWithValidPaddingModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ([-1, -1, -1], torch.float32, True), - ([-1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return torch.ops.aten.conv1d( - inputVec, - weight, - bias=bias, - 
stride=[1], - padding="valid", - dilation=[1], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Conv1dWithValidPaddingModule()) -def Conv1dWithValidPaddingModule_basic(module, tu: TestUtils): - inputVec = tu.rand(2, 2, 6) - weight = torch.randn(8, 2, 3) - bias = torch.randn(8) - module.forward(inputVec, weight, bias) - - -class Conv1dGroupModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ([-1, -1, -1], torch.float32, True), - ([-1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return torch.ops.aten.conv1d( - inputVec, weight, bias=bias, stride=[1], padding=[0], dilation=[1], groups=2 - ) - - -@register_test_case(module_factory=lambda: Conv1dGroupModule()) -def Conv1dGroupModule_basic(module, tu: TestUtils): - inputVec = tu.rand(2, 4, 6) - weight = torch.randn(8, 2, 3) - bias = torch.randn(8) - module.forward(inputVec, weight, bias) - - -class Conv2dModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ([-1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return torch.ops.aten.conv2d( - inputVec, - weight, - bias=bias, - stride=[1, 1], - padding=[0, 0], - dilation=[1, 1], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Conv2dModule()) -def Conv2dModule_basic(module, tu: TestUtils): - inputVec = tu.rand(2, 2, 6, 6) - weight = torch.randn(8, 2, 3, 3) - bias = torch.randn(8) - module.forward(inputVec, weight, bias) - - -class Conv2dFP16NoBiasModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float16, True), - ([-1, -1, -1, -1], torch.float16, True), - ] - ) - def forward(self, inputVec, weight): - return torch.ops.aten.conv2d( - inputVec, - weight, - stride=[1, 1], - padding=[0, 0], - dilation=[1, 1], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Conv2dFP16NoBiasModule()) -def Conv2dFP16NoBiasModule_basic(module, tu: TestUtils): - inputVec = tu.rand(2, 2, 6, 6).to(torch.float16) - weight = torch.randn(8, 2, 3, 3).to(torch.float16) - module.forward(inputVec, weight) - - -class Conv3dModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1, -1], torch.float32, True), - ([-1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return torch.ops.aten.conv3d( - inputVec, - weight, - bias=bias, - stride=[1, 1, 1], - padding=[0, 0, 0], - dilation=[1, 1, 1], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Conv3dModule()) -def Conv3dModule_basic(module, tu: TestUtils): - inputVec = tu.rand(2, 2, 6, 6, 6) - weight = torch.randn(8, 2, 3, 3, 3) - bias = torch.randn(8) - module.forward(inputVec, weight, bias) - - -class Conv3dWithSamePaddingModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1, -1], torch.float32, True), - ([-1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return torch.ops.aten.conv3d( - inputVec, - weight, - bias=bias, - stride=[1, 1, 1], - padding="same", - dilation=[1, 1, 1], - groups=1, - ) - - -@register_test_case(module_factory=lambda: 
Conv3dWithSamePaddingModule()) -def Conv3dWithSamePaddingModule_basic(module, tu: TestUtils): - inputVec = tu.rand(2, 2, 6, 6, 6) - weight = torch.randn(8, 2, 3, 3, 3) - bias = torch.randn(8) - module.forward(inputVec, weight, bias) - - -class Conv3dWithValidPaddingModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1, -1], torch.float32, True), - ([-1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return torch.ops.aten.conv3d( - inputVec, - weight, - bias=bias, - stride=[1, 1, 1], - padding="valid", - dilation=[1, 1, 1], - groups=1, - ) - - -@register_test_case(module_factory=lambda: Conv3dWithValidPaddingModule()) -def Conv3dWithValidPaddingModule_basic(module, tu: TestUtils): - inputVec = tu.rand(2, 2, 6, 6, 6) - weight = torch.randn(8, 2, 3, 3, 3) - bias = torch.randn(8) - module.forward(inputVec, weight, bias) - - -class ConvTbcModule(torch.nn.Module): - def __init__(self): - super().__init__() - - # shapes from https://github.com/pytorch/pytorch/blob/3e8c8ce37bbfaafa8581fb48506c0a70ea54463d/test/nn/test_convolution.py#L623 - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ([-1, -1, -1], torch.float32, True), - ([-1], torch.float32, True), - ] - ) - def forward(self, x, weight, bias): - return torch.conv_tbc(x, weight, bias) - - -@register_test_case(module_factory=lambda: ConvTbcModule()) -def ConvTbcModule_basic(module, tu: TestUtils): - module.forward(tu.rand(9, 4, 5), tu.rand(3, 5, 6), tu.rand(6)) - - -# For DQ-Q fake quantization ops -import torch.ao.quantization.fx._decomposed - - -class Conv2dQInt8ModuleBase(torch.nn.Module): - def __init__(self, groups=1): - self.groups = groups - super().__init__() - - def _forward(self, input, weight, bias): - input = torch.ops.quantized_decomposed.dequantize_per_tensor.default( - input, 0.01, 7, -128, 127, torch.int8 - ) - weight = torch.ops.quantized_decomposed.dequantize_per_tensor.default( - weight, 0.01, 3, -128, 127, torch.int8 - ) - bias = torch.ops.quantized_decomposed.dequantize_per_tensor.default( - bias, 1, 0, -1000, 1000, torch.int32 - ) - - conv = torch.ops.aten.conv2d( - input, - weight, - bias=bias, - stride=[1, 1], - padding=[0, 0], - dilation=[1, 1], - groups=self.groups, - ) - - # Use int32 to avoid overflows - return torch.ops.quantized_decomposed.quantize_per_tensor.default( - conv, 1, 0, -(2**31), 2**31 - 1, torch.int32 - ) - - -class Conv2dQInt8ModuleDyn(Conv2dQInt8ModuleBase): - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.int8, True), - ([-1, -1, -1, -1], torch.int8, True), - ([-1], torch.int32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return self._forward(inputVec, weight, bias) - - -class Conv2dQInt8ModuleStatic(Conv2dQInt8ModuleBase): - @export - @annotate_args( - [ - None, - ([2, 3, 12, 12], torch.int8, True), - ([3, 1, 5, 3], torch.int8, True), - ([3], torch.int32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return self._forward(inputVec, weight, bias) - - -class Conv2dQInt8ModuleStatic_MoreOutChannels(Conv2dQInt8ModuleBase): - @export - @annotate_args( - [ - None, - ([2, 3, 12, 12], torch.int8, True), - ([6, 1, 5, 3], torch.int8, True), - ([6], torch.int32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return self._forward(inputVec, weight, bias) - - -@register_test_case(module_factory=lambda: Conv2dQInt8ModuleDyn()) -def Conv2dQInt8Module_basic(module, tu: 
TestUtils): - inputVec = tu.randint(2, 4, 7, 8, low=-128, high=127).to(torch.int8) - weight = tu.randint(3, 4, 3, 2, low=-128, high=127).to(torch.int8) - bias = tu.randint(3, low=-1000, high=1000).to(torch.int32) - module.forward(inputVec, weight, bias) - - -@register_test_case(module_factory=lambda: Conv2dQInt8ModuleDyn(groups=2)) -def Conv2dQInt8Module_grouped(module, tu: TestUtils): - inputVec = tu.randint(2, 8, 7, 8, low=-128, high=127).to(torch.int8) - weight = tu.randint(6, 4, 3, 2, low=-128, high=127).to(torch.int8) - bias = tu.randint(6, low=-1000, high=1000).to(torch.int32) - module.forward(inputVec, weight, bias) - - -@register_test_case(module_factory=lambda: Conv2dQInt8ModuleStatic(groups=3)) -def Conv2dQInt8Module_depthwise(module, tu: TestUtils): - inputVec = tu.randint(2, 3, 12, 12, low=-128, high=127).to(torch.int8) - weight = tu.randint(3, 1, 5, 3, low=-128, high=127).to(torch.int8) - bias = tu.randint(3, low=-1000, high=1000).to(torch.int32) - module.forward(inputVec, weight, bias) - - -@register_test_case( - module_factory=lambda: Conv2dQInt8ModuleStatic_MoreOutChannels(groups=3) -) -def Conv2dQInt8Module_not_depthwise(module, tu: TestUtils): - inputVec = tu.randint(2, 3, 12, 12, low=-128, high=127).to(torch.int8) - weight = tu.randint(6, 1, 5, 3, low=-128, high=127).to(torch.int8) - bias = tu.randint(6, low=-1000, high=1000).to(torch.int32) - module.forward(inputVec, weight, bias) - - -class ConvTranspose2DQInt8Module(torch.nn.Module): - - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.int8, True), - ([-1, -1, -1, -1], torch.int8, True), - ([-1], torch.float, True), - ] - ) - def forward(self, input, weight, bias): - input = torch.ops.quantized_decomposed.dequantize_per_tensor.default( - input, 0.01, -25, -128, 127, torch.int8 - ) - weight = torch.ops.quantized_decomposed.dequantize_per_tensor.default( - weight, 0.01, 50, -128, 127, torch.int8 - ) - - res = torch.ops.aten.convolution( - input, - weight, - bias=bias, - stride=[2, 1], - padding=[1, 1], - dilation=[1, 1], - transposed=True, - output_padding=[0, 0], - groups=1, - ) - - # Use int32 to avoid overflows - return torch.ops.quantized_decomposed.quantize_per_tensor.default( - res, 1, 0, -(2**31), 2**31 - 1, torch.int32 - ) - - -@register_test_case(module_factory=lambda: ConvTranspose2DQInt8Module()) -def ConvTranspose2DQInt8_basic(module, tu: TestUtils): - N = 10 - Cin = 5 - Cout = 7 - Hin = 10 - Win = 8 - Hker = 3 - Wker = 2 - module.forward( - tu.randint(N, Cin, Hin, Win, low=-128, high=127).to(torch.int8), - tu.randint(Cin, Cout, Hker, Wker, low=-128, high=127).to(torch.int8), - torch.rand(Cout), - ) - - -class Conv2dQInt8PerChannelModuleBase(torch.nn.Module): - def __init__(self, groups=1): - self.groups = groups - super().__init__() - - def _forward(self, inputVec, weight, scales, zeropoints, bias): - inputVec = torch.ops.quantized_decomposed.dequantize_per_tensor.default( - inputVec, 0.01, 7, -128, 127, torch.int8 - ) - weight = torch.ops.quantized_decomposed.dequantize_per_channel.default( - weight, scales, zeropoints, 0, -128, 127, torch.int8 - ) - - conv = torch.ops.aten.conv2d( - inputVec, - weight, - bias=bias, - stride=[1, 1], - padding=[0, 0], - dilation=[1, 1], - groups=self.groups, - ) - - # Use int32 to avoid overflows - return torch.ops.quantized_decomposed.quantize_per_tensor.default( - conv, 1, 0, -(2**31), 2**31 - 1, torch.int32 - ) - - -class Conv2dQInt8PerChannelModuleDyn(Conv2dQInt8PerChannelModuleBase): - @export - 
@annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.int8, True), - ([-1, -1, -1, -1], torch.int8, True), - ([-1], torch.float, True), - ([-1], torch.int8, True), - ([-1], torch.float, True), - ] - ) - def forward(self, inputVec, weight, scales, zeropoints, bias): - return self._forward(inputVec, weight, scales, zeropoints, bias) - - -class Conv2dQInt8PerChannelModuleStatic(Conv2dQInt8PerChannelModuleBase): - @export - @annotate_args( - [ - None, - ([2, 3, 12, 12], torch.int8, True), - ([3, 1, 5, 3], torch.int8, True), - ([3], torch.float, True), - ([3], torch.int8, True), - ([3], torch.float, True), - ] - ) - def forward(self, inputVec, weight, scales, zeropoints, bias): - return self._forward(inputVec, weight, scales, zeropoints, bias) - - -@register_test_case(module_factory=lambda: Conv2dQInt8PerChannelModuleDyn()) -def Conv2dQInt8PerChannelModule_basic(module, tu: TestUtils): - inputVec = tu.randint(2, 4, 7, 8, low=-128, high=127).to(torch.int8) - weight = tu.randint(3, 4, 3, 2, low=-128, high=127).to(torch.int8) - scales = tu.rand(3) - zeropoints = tu.rand(3).to(torch.int8) - bias = torch.rand(3) - module.forward(inputVec, weight, scales, zeropoints, bias) - - -@register_test_case(module_factory=lambda: Conv2dQInt8PerChannelModuleDyn(groups=2)) -def Conv2dQInt8PerChannelModule_grouped(module, tu: TestUtils): - inputVec = tu.randint(2, 8, 7, 8, low=-128, high=127).to(torch.int8) - weight = tu.randint(6, 4, 3, 2, low=-128, high=127).to(torch.int8) - scales = tu.rand(6) - zeropoints = tu.rand(6).to(torch.int8) - bias = torch.rand(6) - module.forward(inputVec, weight, scales, zeropoints, bias) - - -@register_test_case(module_factory=lambda: Conv2dQInt8PerChannelModuleStatic(groups=3)) -def Conv2dQInt8PerChannelModule_depthwise(module, tu: TestUtils): - inputVec = tu.randint(2, 3, 12, 12, low=-128, high=127).to(torch.int8) - weight = tu.randint(3, 1, 5, 3, low=-128, high=127).to(torch.int8) - scales = tu.rand(3) - zeropoints = tu.rand(3).to(torch.int8) - bias = torch.rand(3) - module.forward(inputVec, weight, scales, zeropoints, bias) - - -# torchvision.deform_conv2d - -import torchvision - -# This section defines a torch->onnx path for this torchvision op so we can test the onnx paths e2e. 
- -# Create symbolic function -from torch.onnx.symbolic_helper import parse_args, _get_tensor_sizes - - -@parse_args("v", "v", "v", "v", "v", "i", "i", "i", "i", "i", "i", "i", "i", "b") -def symbolic_deform_conv2d_forward( - g, - input, - weight, - offset, - mask, - bias, - stride_h, - stride_w, - pad_h, - pad_w, - dilation_h, - dilation_w, - groups, - offset_groups, - use_mask, -): - args = [input, weight, offset, bias] - if use_mask: - args.append(mask) - weight_size = _get_tensor_sizes(weight) - kwargs = { - "dilations_i": [dilation_h, dilation_w], - "group_i": groups, - "kernel_shape_i": weight_size[2:], - "offset_group_i": offset_groups, - # NB: ONNX supports asymmetric padding, whereas PyTorch supports only - # symmetric padding - "pads_i": [pad_h, pad_w, pad_h, pad_w], - "strides_i": [stride_h, stride_w], - } - return g.op("DeformConv", *args, **kwargs) - - -# Register symbolic function -from torch.onnx import register_custom_op_symbolic - -register_custom_op_symbolic( - "torchvision::deform_conv2d", symbolic_deform_conv2d_forward, 19 -) - -N = 1 -Cin = 1 -Hin = 7 -Win = 6 -Cout = 1 -Hker = 2 -Wker = 2 -offset_groups = 1 -Hout = 6 -Wout = 5 -offset_dim1 = 2 * offset_groups * Hker * Wker - - -class DeformableConvModule(torch.nn.Module): - @export - @annotate_args( - [ - None, - ([N, Cin, Hin, Win], torch.float32, True), - ([N, offset_dim1, Hout, Wout], torch.float32, True), - ([Cout, Cin, Hker, Wker], torch.float32, True), - ] - ) - def forward(self, input, offset, weight): - return torchvision.ops.deform_conv2d(input, offset, weight) - - -@register_test_case(module_factory=lambda: DeformableConvModule()) -def DeformConv2D_basic(module, tu: TestUtils): - input = tu.rand(N, Cin, Hin, Win) - offset = tu.rand(N, offset_dim1, Hout, Wout) - weight = tu.rand(Cout, Cin, Hker, Wker) - module.forward(input, offset, weight) - - -class ConvolutionModule2DGroupedTranspose(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([1, 2, 5, 7], torch.float32, True), - ([2, 2, 3, 3], torch.float32, True), - ([4], torch.float32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return torch.ops.aten.convolution( - inputVec, - weight, - bias=bias, - stride=[2, 2], - padding=[1, 1], - dilation=[1, 1], - transposed=True, - output_padding=[0, 0], - groups=2, - ) - - -@register_test_case(module_factory=lambda: ConvolutionModule2DGroupedTranspose()) -def ConvolutionModule2DGroupedTranspose_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 2, 5, 7), tu.rand(2, 2, 3, 3), tu.rand(4)) - - -class TransposedConv1dNegativePadding(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([1, 1, 7], torch.float32, True), - ([1, 2, 3], torch.float32, True), - ([2], torch.float32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return torch.ops.aten.convolution( - inputVec, - weight, - bias=bias, - stride=[1], - padding=[3], - dilation=[1], - transposed=True, - output_padding=[0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: TransposedConv1dNegativePadding()) -def TransposedConv1dNegativePadding_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 7), tu.rand(1, 2, 3), tu.rand(2)) - - -class TransposedConv2dNegativePadding(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([1, 1, 4, 7], torch.float32, True), - ([1, 2, 3, 3], torch.float32, True), - ([2], torch.float32, True), - ] - ) - def forward(self, 
inputVec, weight, bias): - return torch.ops.aten.convolution( - inputVec, - weight, - bias=bias, - stride=[1, 1], - padding=[0, 3], - dilation=[1, 1], - transposed=True, - output_padding=[0, 0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: TransposedConv2dNegativePadding()) -def TransposedConv2dNegativePadding_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 4, 7), tu.rand(1, 2, 3, 3), tu.rand(2)) - - -class TransposedConv3dNegativePadding(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([4, 1, 8, 13, 17], torch.float32, True), - ([1, 1, 3, 7, 3], torch.float32, True), - ([1], torch.float32, True), - ] - ) - def forward(self, inputVec, weight, bias): - return torch.ops.aten.convolution( - inputVec, - weight, - bias=bias, - stride=[1, 1, 1], - padding=[2, 1, 3], - dilation=[1, 1, 1], - transposed=True, - output_padding=[0, 0, 0], - groups=1, - ) - - -@register_test_case(module_factory=lambda: TransposedConv3dNegativePadding()) -def TransposedConv3dNegativePadding_basic(module, tu: TestUtils): - module.forward(tu.rand(4, 1, 8, 13, 17), tu.rand(1, 1, 3, 7, 3), tu.rand(1)) diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/pooling.py b/projects/pt1/python/torch_mlir_e2e_test/test_suite/pooling.py deleted file mode 100644 index b31090538aa5..000000000000 --- a/projects/pt1/python/torch_mlir_e2e_test/test_suite/pooling.py +++ /dev/null @@ -1,3087 +0,0 @@ -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# Also available under a BSD-style license. See LICENSE. - -import torch - -from torch_mlir_e2e_test.framework import TestUtils -from torch_mlir_e2e_test.registry import register_test_case -from torch_mlir_e2e_test.annotations import annotate_args, export - -# ============================================================================== - - -class AdaptiveAvgPool2dNonUnitOutputSizeStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap2d = torch.nn.AdaptiveAvgPool2d((7, 7)) - - @export - @annotate_args( - [ - None, - ([1, 512, 7, 7], torch.float32, True), - ] - ) - def forward(self, x): - return self.aap2d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveAvgPool2dNonUnitOutputSizeStaticModule() -) -def AdaptiveAvgPool2dNonUnitOutputSizeStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 7, 7)) - - -class AdaptiveAvgPool2dNonUnitOutputSizeDynamicModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap2d = torch.nn.AdaptiveAvgPool2d((7, 7)) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.aap2d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveAvgPool2dNonUnitOutputSizeDynamicModule() -) -def AdaptiveAvgPool2dNonUnitOutputSizeDynamicModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 7, 7)) - - -class AdaptiveAvgPool2dOutputSizeDivisibleByInputDynamicModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap2d = torch.nn.AdaptiveAvgPool2d((5, 7)) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.aap2d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveAvgPool2dOutputSizeDivisibleByInputDynamicModule() -) -def 
AdaptiveAvgPool2dOutputSizeDivisibleByInputDynamicModule_basic( - module, tu: TestUtils -): - module.forward(tu.rand(1, 512, 15, 28)) - - -class AdaptiveAvgPool2dOutputSizeDivisibleByInputStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap2d = torch.nn.AdaptiveAvgPool2d((3, 7)) - - @export - @annotate_args( - [ - None, - ([1, 512, 15, 14], torch.float32, True), - ] - ) - def forward(self, x): - return self.aap2d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveAvgPool2dOutputSizeDivisibleByInputStaticModule() -) -def AdaptiveAvgPool2dOutputSizeDivisibleByInputStaticModule_basic( - module, tu: TestUtils -): - module.forward(tu.rand(1, 512, 15, 14)) - - -class AdaptiveAvgPool2dFixedKernelStrideSizeStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap2d = torch.nn.AdaptiveAvgPool2d((2, 2)) - - @export - @annotate_args( - [ - None, - ([1, 3, 7, 7], torch.float32, True), - ] - ) - def forward(self, x): - return self.aap2d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveAvgPool2dFixedKernelStrideSizeStaticModule() -) -def AdaptiveAvgPool2dFixedKernelStrideSizeStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 3, 7, 7)) - - -class AdaptiveAvgPool2dUnitOutputSizeStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap2d = torch.nn.AdaptiveAvgPool2d((1, 1)) - - @export - @annotate_args( - [ - None, - ([1, 512, 7, 7], torch.float32, True), - ] - ) - def forward(self, x): - return self.aap2d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveAvgPool2dUnitOutputSizeStaticModule() -) -def AdaptiveAvgPool2dUnitOutputSizeStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 7, 7)) - - -class AdaptiveAvgPool2dUnitOutputSizeDynamicModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap2d = torch.nn.AdaptiveAvgPool2d((1, 1)) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.aap2d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveAvgPool2dUnitOutputSizeDynamicModule() -) -def AdaptiveAvgPool2dUnitOutputSizeDynamicModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 7, 7)) - - -# ============================================================================== - - -class MaxPool1dWithIndicesModule(torch.nn.Module): - - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool1d_with_indices( - x, kernel_size=[6], stride=[2], padding=[3], dilation=2, ceil_mode=False - ) - - -@register_test_case(module_factory=lambda: MaxPool1dWithIndicesModule()) -def MaxPool1dWithIndicesModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 64, 112, low=-1)) - - -class MaxPool1dWithIndicesCeilModeModule(torch.nn.Module): - - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool1d_with_indices( - x, kernel_size=[4], stride=[2], padding=[2], dilation=2, ceil_mode=True - ) - - -@register_test_case(module_factory=lambda: MaxPool1dWithIndicesCeilModeModule()) -def MaxPool1dWithIndicesCeilModeModule_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 25, 37, low=-1)) - - -# ============================================================================== - - -class 
MaxPool1dModule(torch.nn.Module): - - def __init__(self): - super().__init__() - self.mp1d = torch.nn.MaxPool1d( - kernel_size=[6], stride=[2], padding=[3], dilation=2 - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp1d(x) - - -@register_test_case(module_factory=lambda: MaxPool1dModule()) -def MaxPool1dModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 20, low=-1)) - - -class MaxPool1dEmptyStrideStaticModule(torch.nn.Module): - - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([1, 1, 20], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool1d(x, kernel_size=2, stride=[]) - - -@register_test_case(module_factory=lambda: MaxPool1dEmptyStrideStaticModule()) -def MaxPool1dEmptyStrideStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 20, low=-1)) - - -class MaxPool1dStaticModule(torch.nn.Module): - - def __init__(self): - super().__init__() - self.mp1d = torch.nn.MaxPool1d( - kernel_size=[3], stride=[2], padding=[1], dilation=[1] - ) - - @export - @annotate_args( - [ - None, - ([1, 64, 112], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp1d(x) - - -@register_test_case(module_factory=lambda: MaxPool1dStaticModule()) -def MaxPool1dStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 64, 112)) - - -class MaxPool1dStaticCeilModeTrueModule(torch.nn.Module): - - def __init__(self): - super().__init__() - self.mp1d = torch.nn.MaxPool1d( - kernel_size=[3], stride=[2], padding=[1], dilation=[1], ceil_mode=True - ) - - @export - @annotate_args( - [ - None, - ([1, 64, 112], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp1d(x) - - -@register_test_case(module_factory=lambda: MaxPool1dStaticCeilModeTrueModule()) -def MaxPool1dStaticCeilModeTrueModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 64, 112)) - - -class MaxPool1dCeilModeTrueModule(torch.nn.Module): - - def __init__(self): - super().__init__() - self.mp1d = torch.nn.MaxPool1d( - kernel_size=[6], stride=[2], padding=[3], dilation=2, ceil_mode=True - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp1d(x) - - -@register_test_case(module_factory=lambda: MaxPool1dCeilModeTrueModule()) -def MaxPool1dCeilModeTrueModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 20, low=0.5, high=1.0)) - - -# ============================================================================== - - -class MaxPool2dModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp2d = torch.nn.MaxPool2d( - kernel_size=[6, 8], stride=[2, 2], padding=[3, 4], dilation=2 - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp2d(x) - - -@register_test_case(module_factory=lambda: MaxPool2dModule()) -def MaxPool2dModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 20, 20, low=-1)) - - -class MaxPool2dEmptyStrideStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([1, 1, 20, 20], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool2d(x, kernel_size=2, stride=[]) - - -@register_test_case(module_factory=lambda: MaxPool2dEmptyStrideStaticModule()) -def MaxPool2dEmptyStrideStaticModule_basic(module, tu: TestUtils): - 
module.forward(tu.rand(1, 1, 20, 20, low=-1)) - - -class MaxPool2dStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp2d = torch.nn.MaxPool2d( - kernel_size=[3, 3], stride=[2, 2], padding=[1, 1], dilation=[1, 1] - ) - - @export - @annotate_args( - [ - None, - ([1, 64, 112, 112], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp2d(x) - - -@register_test_case(module_factory=lambda: MaxPool2dStaticModule()) -def MaxPool2dStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 64, 112, 112)) - - -class MaxPool2dStaticCeilModeTrueModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp2d = torch.nn.MaxPool2d( - kernel_size=[3, 3], - stride=[2, 2], - padding=[1, 1], - dilation=[1, 1], - ceil_mode=True, - ) - - @export - @annotate_args( - [ - None, - ([1, 64, 112, 112], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp2d(x) - - -@register_test_case(module_factory=lambda: MaxPool2dStaticCeilModeTrueModule()) -def MaxPool2dStaticCeilModeTrueModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 64, 112, 112)) - - -class MaxPool2dCeilModeTrueModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp2d = torch.nn.MaxPool2d( - kernel_size=[6, 8], - stride=[2, 2], - padding=[3, 4], - dilation=2, - ceil_mode=True, - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp2d(x) - - -@register_test_case(module_factory=lambda: MaxPool2dCeilModeTrueModule()) -def MaxPool2dCeilModeTrueModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 20, 20, low=0.5, high=1.0)) - - -class MaxPool2dStaticCeilModeTrueReduceOutputModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp2d = torch.nn.MaxPool2d( - kernel_size=6, - stride=6, - padding=3, - dilation=1, - ceil_mode=True, - ) - - @export - @annotate_args( - [ - None, - ([2, 6, 20, 10], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp2d(x) - - -@register_test_case( - module_factory=lambda: MaxPool2dStaticCeilModeTrueReduceOutputModule() -) -def MaxPool2dStaticCeilModeTrueReduceOutputModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 6, 20, 10, low=0.5, high=1.0)) - - -class MaxPool2dWithoutPadFullDimIndivisibleByStrideModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp2d = torch.nn.MaxPool2d( - kernel_size=[3, 3], stride=[2, 2], padding=[0, 0] - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp2d(x) - - -@register_test_case( - module_factory=lambda: MaxPool2dWithoutPadFullDimIndivisibleByStrideModule() -) -def MaxPool2dWithoutPadFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 56, 56, low=-1)) - - -class MaxPool2dWithPadFullDimIndivisibleByStrideModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp2d = torch.nn.MaxPool2d( - kernel_size=[3, 3], stride=[2, 2], padding=[1, 1] - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp2d(x) - - -@register_test_case( - module_factory=lambda: MaxPool2dWithPadFullDimIndivisibleByStrideModule() -) -def MaxPool2dWithPadFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 112, 112, low=-1)) - - -class 
MaxPool2dFullDimIndivisibleByStrideModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp2d = torch.nn.MaxPool2d( - kernel_size=[3, 3], stride=[3, 3], padding=[1, 1] - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp2d(x) - - -@register_test_case(module_factory=lambda: MaxPool2dFullDimIndivisibleByStrideModule()) -def MaxPool2dFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 75, 75, low=-1)) - - -class MaxPool2dCeilModeFullDimIndivisibleByStrideModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp2d = torch.nn.MaxPool2d( - kernel_size=[3, 3], - stride=[3, 3], - padding=[1, 1], - ceil_mode=True, - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp2d(x) - - -@register_test_case( - module_factory=lambda: MaxPool2dCeilModeFullDimIndivisibleByStrideModule() -) -def MaxPool2dCeilModeFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 75, 75, low=-1)) - - -# ============================================================================== - - -class MaxPool3dModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp3d = torch.nn.MaxPool3d( - kernel_size=[4, 4, 4], stride=[2, 2, 2], padding=[1, 1, 1], dilation=1 - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp3d(x) - - -@register_test_case(module_factory=lambda: MaxPool3dModule()) -def MaxPool3dModule_basic(module, tu: TestUtils): - module.forward(torch.arange(8 * 8 * 8).view(1, 1, 8, 8, 8).float()) - - -class MaxPool3dRandomSimpleModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp3d = torch.nn.MaxPool3d( - kernel_size=[4, 4, 4], stride=[2, 2, 2], padding=[1, 1, 1], dilation=1 - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp3d(x) - - -@register_test_case(module_factory=lambda: MaxPool3dRandomSimpleModule()) -def MaxPool3dModuleRandomSimple_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 20, 20, 20, low=-1)) - - -class MaxPool3dLargeDataModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp3d = torch.nn.MaxPool3d( - kernel_size=[6, 8, 8], stride=[2, 2, 2], padding=[3, 4, 4], dilation=2 - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp3d(x) - - -@register_test_case(module_factory=lambda: MaxPool3dLargeDataModule()) -def MaxPool3dLargeDatadModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 20, 20, 20, low=-1)) - - -class MaxPool3dEmptyStrideStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([1, 1, 20, 20, 20], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool3d(x, kernel_size=2, stride=[]) - - -@register_test_case(module_factory=lambda: MaxPool3dEmptyStrideStaticModule()) -def MaxPool3dEmptyStrideStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 20, 20, 20, low=-1)) - - -class MaxPool3dStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp3d = torch.nn.MaxPool3d( - kernel_size=[3, 3, 3], - stride=[2, 2, 2], - padding=[1, 1, 1], - 
dilation=[1, 1, 1], - ) - - @export - @annotate_args( - [ - None, - ([1, 64, 112, 112, 112], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp3d(x) - - -@register_test_case(module_factory=lambda: MaxPool3dStaticModule()) -def MaxPool3dStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 64, 112, 112, 112)) - - -class MaxPool3dStaticCeilModeTrueModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp3d = torch.nn.MaxPool3d( - kernel_size=[3, 3, 3], - stride=[2, 2, 2], - padding=[1, 1, 1], - dilation=[1, 1, 1], - ceil_mode=True, - ) - - @export - @annotate_args( - [ - None, - ([1, 64, 112, 112, 112], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp3d(x) - - -@register_test_case(module_factory=lambda: MaxPool3dStaticCeilModeTrueModule()) -def MaxPool3dStaticCeilModeTrueModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 64, 112, 112, 112)) - - -class MaxPool3dCeilModeTrueModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.mp3d = torch.nn.MaxPool3d( - kernel_size=[6, 8, 8], - stride=[2, 2, 2], - padding=[3, 4, 4], - dilation=2, - ceil_mode=True, - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.mp3d(x) - - -@register_test_case(module_factory=lambda: MaxPool3dCeilModeTrueModule()) -def MaxPool3dCeilModeTrueModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 20, 20, 20, low=0.5, high=1.0)) - - -# ============================================================================== - - -class MaxPool2dWithIndicesModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool2d_with_indices( - x, kernel_size=[2, 2], stride=[1, 1], padding=[0, 0], dilation=[1, 1] - ) - - -@register_test_case(module_factory=lambda: MaxPool2dWithIndicesModule()) -def MaxPool2dWithIndicesModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 8, 8, low=0.5, high=1.0)) - - -class MaxPool2dWithIndicesFullSizeKernelModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool2d_with_indices( - x, kernel_size=[4, 4], stride=1, padding=0, dilation=1 - ) - - -@register_test_case(module_factory=lambda: MaxPool2dWithIndicesFullSizeKernelModule()) -def MaxPool2dWithIndicesFullSizeKernelModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 3, 4, 4, low=0.5, high=1.0)) - - -class MaxPool2dWithIndicesNonDefaultPaddingModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool2d_with_indices( - x, kernel_size=[4, 8], stride=[1, 1], padding=[2, 4], dilation=1 - ) - - -@register_test_case( - module_factory=lambda: MaxPool2dWithIndicesNonDefaultPaddingModule() -) -def MaxPool2dWithIndicesNonDefaultPaddingModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 4, 16, 16, low=-1.5, high=1.0)) - - -class MaxPool2dWithIndicesNonDefaultStrideModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return 
torch.ops.aten.max_pool2d_with_indices( - x, kernel_size=[4, 4], stride=[1, 2], padding=0, dilation=1 - ) - - -@register_test_case(module_factory=lambda: MaxPool2dWithIndicesNonDefaultStrideModule()) -def MaxPool2dWithIndicesNonDefaultStrideModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 4, 16, 80, low=0.5, high=2.0)) - - -class MaxPool2dWithIndicesNonDefaultDilationModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool2d_with_indices( - x, kernel_size=[4, 4], stride=[1, 1], padding=0, dilation=[2, 2] - ) - - -@register_test_case( - module_factory=lambda: MaxPool2dWithIndicesNonDefaultDilationModule() -) -def MaxPool2dWithIndicesNonDefaultDilationModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 4, 16, 80, low=0.5, high=2.0)) - - -class MaxPool2dWithIndicesNonDefaultParamsModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool2d_with_indices( - x, kernel_size=[8, 4], stride=[2, 2], padding=[1, 2], dilation=[2, 2] - ) - - -@register_test_case(module_factory=lambda: MaxPool2dWithIndicesNonDefaultParamsModule()) -def MaxPool2dWithIndicesNonDefaultParamsModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 4, 16, 80, low=-0.5, high=4.0)) - - -class MaxPool2dWithIndicesAllNegativeValuesModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool2d_with_indices( - x, kernel_size=[4, 8], stride=[1, 1], padding=[2, 4], dilation=1 - ) - - -@register_test_case( - module_factory=lambda: MaxPool2dWithIndicesAllNegativeValuesModule() -) -def MaxPool2dWithIndicesAllNegativeValuesModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 4, 16, 16, low=-4.5, high=-1.0)) - - -class MaxPool2dWithIndicesStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([2, 4, 16, 16], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool2d_with_indices( - x, kernel_size=[4, 8], stride=[1, 1], padding=[2, 4], dilation=1 - ) - - -@register_test_case(module_factory=lambda: MaxPool2dWithIndicesStaticModule()) -def MaxPool2dWithIndicesStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 4, 16, 16, low=-4.5, high=-1.0)) - - -class MaxPool2dWithIndicesAllOnesModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool2d_with_indices( - x, kernel_size=[2, 2], stride=[1, 1], padding=[0, 0], dilation=[1, 1] - ) - - -@register_test_case(module_factory=lambda: MaxPool2dWithIndicesAllOnesModule()) -def MaxPool2dWithIndicesAllOnesModule_basic(module, tu: TestUtils): - module.forward(torch.ones(1, 1, 8, 8)) - - -class MaxPool2dWithIndicesCeilModeTrueModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool2d_with_indices( - x, - kernel_size=[2, 2], - stride=[1, 1], - padding=[0, 0], - 
dilation=[1, 1], - ceil_mode=True, - ) - - -@register_test_case(module_factory=lambda: MaxPool2dWithIndicesCeilModeTrueModule()) -def MaxPool2dWithIndicesCeilModeTrueModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 8, 8, low=0.5, high=1.0)) - - -# ============================================================================== - - -class MaxPool2dWithIndicesBackwardStatic4DModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([2, 4, 7, 6], torch.float32, True), - ([2, 4, 6, 5], torch.float32, True), - ([2, 4, 7, 6], torch.int64, True), - ] - ) - def forward(self, output, input, indices): - kernel_size = [2, 2] - stride = [1, 1] - padding = [1, 1] - dilation = [1, 1] - ceil_mode = False - return torch.ops.aten.max_pool2d_with_indices_backward( - output, input, kernel_size, stride, padding, dilation, ceil_mode, indices - ) - - -@register_test_case(module_factory=lambda: MaxPool2dWithIndicesBackwardStatic4DModule()) -def MaxPool2dWithIndicesBackwardStatic4DModule_basic(module, tu: TestUtils): - module.forward( - tu.rand(2, 4, 7, 6), tu.rand(2, 4, 6, 5), tu.randint(2, 4, 7, 6, high=16) - ) - - -class MaxPool2dWithIndicesBackwardStatic3DModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([4, 7, 6], torch.float32, True), - ([4, 6, 5], torch.float32, True), - ([4, 7, 6], torch.int64, True), - ] - ) - def forward(self, output, input, indices): - kernel_size = [2, 2] - stride = [1, 1] - padding = [1, 1] - dilation = [1, 1] - ceil_mode = False - return torch.ops.aten.max_pool2d_with_indices_backward( - output, input, kernel_size, stride, padding, dilation, ceil_mode, indices - ) - - -@register_test_case(module_factory=lambda: MaxPool2dWithIndicesBackwardStatic3DModule()) -def MaxPool2dWithIndicesBackwardStatic3DModule_basic(module, tu: TestUtils): - module.forward(tu.rand(4, 7, 6), tu.rand(4, 6, 5), tu.randint(4, 7, 6, high=16)) - - -class MaxPool2dWithIndicesBackwardDynamic4DModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.float32, True), - ([-1, -1, -1, -1], torch.int64, True), - ] - ) - def forward(self, output, input, indices): - kernel_size = [2, 2] - stride = [1, 1] - padding = [1, 1] - dilation = [1, 1] - ceil_mode = False - return torch.ops.aten.max_pool2d_with_indices_backward( - output, input, kernel_size, stride, padding, dilation, ceil_mode, indices - ) - - -@register_test_case( - module_factory=lambda: MaxPool2dWithIndicesBackwardDynamic4DModule() -) -def MaxPool2dWithIndicesBackwardDynamic4DModule_basic(module, tu: TestUtils): - module.forward( - tu.rand(2, 4, 7, 6), tu.rand(2, 4, 6, 5), tu.randint(2, 4, 7, 6, high=16) - ) - - -class MaxPool2dWithIndicesBackwardDynamic3DModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ([-1, -1, -1], torch.float32, True), - ([-1, -1, -1], torch.int64, True), - ] - ) - def forward(self, output, input, indices): - kernel_size = [2, 2] - stride = [1, 1] - padding = [1, 1] - dilation = [1, 1] - ceil_mode = False - return torch.ops.aten.max_pool2d_with_indices_backward( - output, input, kernel_size, stride, padding, dilation, ceil_mode, indices - ) - - -@register_test_case( - module_factory=lambda: MaxPool2dWithIndicesBackwardDynamic3DModule() -) -def 
MaxPool2dWithIndicesBackwardDynamic3DModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 7, 6), tu.rand(2, 6, 5), tu.randint(2, 7, 6, high=16)) - - -# ============================================================================== - - -class MaxPool3dWithIndicesModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool3d_with_indices( - x, - kernel_size=[2, 2, 2], - stride=[1, 1, 1], - padding=[0, 0, 0], - dilation=[1, 1, 1], - ) - - -@register_test_case(module_factory=lambda: MaxPool3dWithIndicesModule()) -def MaxPool3dWithIndicesModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 8, 8, 8, low=0.5, high=1.0)) - - -class MaxPool3dWithIndicesFullSizeKernelModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool3d_with_indices( - x, kernel_size=[4, 4, 4], stride=1, padding=0, dilation=1 - ) - - -@register_test_case(module_factory=lambda: MaxPool3dWithIndicesFullSizeKernelModule()) -def MaxPool3dWithIndicesFullSizeKernelModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 3, 4, 4, 4, low=0.5, high=1.0)) - - -class MaxPool3dWithIndicesNonDefaultPaddingModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool3d_with_indices( - x, kernel_size=[4, 8, 4], stride=[1, 1, 1], padding=[2, 4, 2], dilation=1 - ) - - -@register_test_case( - module_factory=lambda: MaxPool3dWithIndicesNonDefaultPaddingModule() -) -def MaxPool3dWithIndicesNonDefaultPaddingModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 4, 16, 16, 16, low=-1.5, high=1.0)) - - -class MaxPool3dWithIndicesNonDefaultStrideModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool3d_with_indices( - x, kernel_size=[4, 4, 4], stride=[1, 2, 1], padding=0, dilation=1 - ) - - -@register_test_case(module_factory=lambda: MaxPool3dWithIndicesNonDefaultStrideModule()) -def MaxPool3dWithIndicesNonDefaultStrideModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 4, 16, 80, 16, low=0.5, high=2.0)) - - -class MaxPool3dWithIndicesNonDefaultDilationModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool3d_with_indices( - x, kernel_size=[4, 4, 4], stride=[1, 1, 1], padding=0, dilation=[2, 2, 2] - ) - - -@register_test_case( - module_factory=lambda: MaxPool3dWithIndicesNonDefaultDilationModule() -) -def MaxPool3dWithIndicesNonDefaultDilationModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 4, 16, 80, 16, low=0.5, high=2.0)) - - -class MaxPool3dWithIndicesNonDefaultParamsModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool3d_with_indices( - x, - kernel_size=[8, 4, 8], - stride=[2, 2, 2], - padding=[1, 2, 1], - 
dilation=[2, 2, 2], - ) - - -@register_test_case(module_factory=lambda: MaxPool3dWithIndicesNonDefaultParamsModule()) -def MaxPool3dWithIndicesNonDefaultParamsModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 4, 16, 80, 16, low=-0.5, high=4.0)) - - -class MaxPool3dWithIndicesAllNegativeValuesModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool3d_with_indices( - x, kernel_size=[4, 8, 4], stride=[1, 1, 1], padding=[2, 4, 2], dilation=1 - ) - - -@register_test_case( - module_factory=lambda: MaxPool3dWithIndicesAllNegativeValuesModule() -) -def MaxPool3dWithIndicesAllNegativeValuesModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 4, 16, 16, 16, low=-4.5, high=-1.0)) - - -class MaxPool3dWithIndicesStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([2, 4, 16, 16, 16], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool3d_with_indices( - x, kernel_size=[4, 8, 4], stride=[1, 1, 1], padding=[2, 4, 2], dilation=1 - ) - - -@register_test_case(module_factory=lambda: MaxPool3dWithIndicesStaticModule()) -def MaxPool3dWithIndicesStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 4, 16, 16, 16, low=-4.5, high=-1.0)) - - -class MaxPool3dWithIndicesAllOnesModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool3d_with_indices( - x, - kernel_size=[2, 2, 2], - stride=[1, 1, 1], - padding=[0, 0, 0], - dilation=[1, 1, 1], - ) - - -@register_test_case(module_factory=lambda: MaxPool3dWithIndicesAllOnesModule()) -def MaxPool3dWithIndicesAllOnesModule_basic(module, tu: TestUtils): - module.forward(torch.ones(1, 1, 8, 8, 8)) - - -class MaxPool3dWithIndicesCeilModeTrueModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return torch.ops.aten.max_pool3d_with_indices( - x, - kernel_size=[2, 2, 2], - stride=[1, 1, 1], - padding=[0, 0, 0], - dilation=[1, 1, 1], - ceil_mode=True, - ) - - -@register_test_case(module_factory=lambda: MaxPool3dWithIndicesCeilModeTrueModule()) -def MaxPool3dWithIndicesCeilModeTrueModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 8, 8, 8, low=0.5, high=1.0)) - - -# ============================================================================== - - -class AvgPool2dFloatModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[6, 8], - stride=[2, 2], - padding=[3, 4], - ceil_mode=False, - count_include_pad=True, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dFloatModule()) -def AvgPool2dFloatModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 4, 20, 20, low=-1)) - - -class AvgPool2dIntModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[6, 8], - stride=[2, 2], - padding=[3, 4], - ceil_mode=False, - count_include_pad=True, - divisor_override=None, - ) - - 
@export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.int64, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dIntModule()) -def AvgPool2dIntModule_basic(module, tu: TestUtils): - module.forward(tu.randint(2, 4, 20, 20, high=100)) - - -class AvgPool2dStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[6, 8], - stride=[2, 2], - padding=[3, 4], - ceil_mode=False, - count_include_pad=True, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([2, 2, 10, 20], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dStaticModule()) -def AvgPool2dStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 2, 10, 20, low=-1)) - - -class AvgPool2dCountIncludePadFalseStaticModule(torch.nn.Module): - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[1, 1], - padding=[1, 1], - ceil_mode=False, - count_include_pad=False, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([32, 384, 25, 25], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dCountIncludePadFalseStaticModule()) -def AvgPool2dCountIncludePadFalseStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(32, 384, 25, 25, low=-1)) - - -class AvgPool2dDivisorOverrideModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[4, 8], - stride=[2, 3], - padding=[2, 4], - ceil_mode=False, - count_include_pad=True, - divisor_override=22, - ) - - @export - @annotate_args( - [ - None, - ([4, 4, 20, 20], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dDivisorOverrideModule()) -def AvgPool2dDivisorOverrideModule_basic(module, tu: TestUtils): - module.forward(tu.rand(4, 4, 20, 20, low=-1)) - - -class AvgPool2dCeilModeTrueModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[6, 8], - stride=[2, 2], - padding=[3, 4], - ceil_mode=True, - count_include_pad=True, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dCeilModeTrueModule()) -def AvgPool2dCeilModeTrueModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 4, 20, 20, low=0.5, high=1.0)) - - -class AvgPool2dWithoutPadModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[6, 8], - stride=[2, 2], - padding=[0, 0], - ceil_mode=False, - count_include_pad=False, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dWithoutPadModule()) -def AvgPool2dWithoutPadModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 4, 20, 20, low=0.5, high=1.0)) - - -class AvgPool2dCHWModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[6, 8], - stride=[2, 2], - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, 
-1], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dCHWModule()) -def AvgPool2dCHWModule_basic(module, tu: TestUtils): - module.forward(tu.rand(4, 20, 20, low=0.5, high=1.0)) - - -class AvgPool2dSingleIntTupleParamsModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=(6,), - stride=(2,), - padding=(1,), - count_include_pad=False, - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dSingleIntTupleParamsModule()) -def AvgPool2dSingleIntTupleParamsModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 4, 20, 20, low=0.5, high=1.0)) - - -class AvgPool2dSingleIntTupleParamsIncludePadModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=(6,), - stride=(2,), - padding=(1,), - count_include_pad=True, - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool2dSingleIntTupleParamsIncludePadModule() -) -def AvgPool2dSingleIntTupleParamsIncludePadModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 4, 20, 20, low=0.5, high=1.0)) - - -class AvgPool2dWithoutPadFullDimIndivisibleByStrideModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[2, 2], - padding=[0, 0], - count_include_pad=False, - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool2dWithoutPadFullDimIndivisibleByStrideModule() -) -def AvgPool2dWithoutPadFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 56, 56, low=-1)) - - -class AvgPool2dWithPadFullDimIndivisibleByStrideModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[2, 2], - padding=[1, 1], - count_include_pad=False, - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool2dWithPadFullDimIndivisibleByStrideModule() -) -def AvgPool2dWithPadFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 112, 112, low=-1)) - - -class AvgPool2dFullDimIndivisibleByStrideModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[3, 3], - padding=[1, 1], - count_include_pad=False, - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dFullDimIndivisibleByStrideModule()) -def AvgPool2dFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 75, 75, low=-1)) - - -class AvgPool2dCeilModeFullDimIndivisibleByStrideModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[3, 3], - padding=[1, 1], - ceil_mode=True, - count_include_pad=False, 
- ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool2dCeilModeFullDimIndivisibleByStrideModule() -) -def AvgPool2dCeilModeFullDimIndivisibleByStrideModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 75, 75, low=-1)) - - -# ============================================================================== - - -class AvgPool3dStaticModule(torch.nn.Module): - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool3d( - kernel_size=[2, 2, 2], - stride=[2, 2, 2], - padding=[0, 0, 0], - ceil_mode=False, - count_include_pad=True, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([2, 2, 4, 4, 4], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool3dStaticModule()) -def AvgPool3dStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 2, 4, 4, 4, low=-1)) - - -class AvgPool3dCountIncludePadFalse(torch.nn.Module): - - def __init__(self): - super().__init__() - self.ap3d = torch.nn.AvgPool3d( - kernel_size=[3, 3, 3], - stride=[1, 1, 1], - padding=[1, 1, 1], - ceil_mode=False, - count_include_pad=False, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([3, 3, 12, 12, 12], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap3d(x) - - -@register_test_case(module_factory=lambda: AvgPool3dCountIncludePadFalse()) -def AvgPool3dCountIncludePadFalse_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 12, 12, 12, low=-1)) - - -class AvgPool3dCountIncludePadFalseWithoutPadding(torch.nn.Module): - - def __init__(self): - super().__init__() - self.ap3d = torch.nn.AvgPool3d( - kernel_size=[3, 3, 3], - stride=[1, 1, 1], - padding=[0, 0, 0], - ceil_mode=False, - count_include_pad=False, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([3, 3, 12, 12, 12], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap3d(x) - - -@register_test_case( - module_factory=lambda: AvgPool3dCountIncludePadFalseWithoutPadding() -) -def AvgPool3dCountIncludePadFalseWithoutPadding_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 3, 12, 12, 12, low=-1)) - - -# ============================================================================== - - -class AvgPool1dFloatModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap1d = torch.nn.AvgPool1d( - kernel_size=6, stride=2, padding=3, ceil_mode=False, count_include_pad=True - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap1d(x) - - -@register_test_case(module_factory=lambda: AvgPool1dFloatModule()) -def AvgPool1dFloatModule_basic(module, tu: TestUtils): - module.forward(tu.rand(2, 4, 20, low=-1)) - - -class AvgPool1dIntModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap1d = torch.nn.AvgPool1d( - kernel_size=6, stride=2, padding=3, ceil_mode=False, count_include_pad=True - ) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.int64, True), - ] - ) - def forward(self, x): - return self.ap1d(x) - - -@register_test_case(module_factory=lambda: AvgPool1dIntModule()) -def AvgPool1dIntModule_basic(module, tu: TestUtils): - module.forward(tu.randint(2, 4, 20, high=100)) - - -class AvgPool1dStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - 
self.ap1d = torch.nn.AvgPool1d( - kernel_size=6, stride=2, padding=3, ceil_mode=False, count_include_pad=True - ) - - @export - @annotate_args( - [ - None, - ([2, 4, 20], torch.int64, True), - ] - ) - def forward(self, x): - return self.ap1d(x) - - -@register_test_case(module_factory=lambda: AvgPool1dStaticModule()) -def AvgPool1dStaticModule_basic(module, tu: TestUtils): - module.forward(tu.randint(2, 4, 20, high=100)) - - -class AvgPool1dCountIncludePadFalseWithoutPadding(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap1d = torch.nn.AvgPool1d( - kernel_size=3, stride=1, padding=0, ceil_mode=False, count_include_pad=False - ) - - @export - @annotate_args( - [ - None, - ([3, 4, 20], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap1d(x) - - -@register_test_case( - module_factory=lambda: AvgPool1dCountIncludePadFalseWithoutPadding() -) -def AvgPool1dCountIncludePadFalseWithoutPadding_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 4, 20)) - - -class AvgPool1dCountIncludePadFalse(torch.nn.Module): - def __init__(self): - super().__init__() - self.ap1d = torch.nn.AvgPool1d( - kernel_size=3, stride=1, padding=1, ceil_mode=False, count_include_pad=False - ) - - @export - @annotate_args( - [ - None, - ([3, 4, 20], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap1d(x) - - -@register_test_case(module_factory=lambda: AvgPool1dCountIncludePadFalse()) -def AvgPool1dCountIncludePadFalse_basic(module, tu: TestUtils): - module.forward(tu.rand(3, 4, 20)) - - -# ============================================================================== - - -class AdaptiveAvgPool1dStaticLargerOutput(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=13) - - @export - @annotate_args([None, ([5, 512, 7], torch.float32, True)]) - def forward(self, x): - return self.aap1d(x) - - -@register_test_case(module_factory=lambda: AdaptiveAvgPool1dStaticLargerOutput()) -def AdaptiveAvgPool1dStaticLargerOutput_basic(module, tu: TestUtils): - module.forward(tu.rand(5, 512, 7)) - - -class AdaptiveAvgPool1dStaticEvenMultiple(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7) - - @export - @annotate_args([None, ([5, 512, 147], torch.float32, True)]) - def forward(self, x): - return self.aap1d(x) - - -@register_test_case(module_factory=lambda: AdaptiveAvgPool1dStaticEvenMultiple()) -def AdaptiveAvgPool1dStaticEvenMultiple_basic(module, tu: TestUtils): - module.forward(tu.rand(5, 512, 147)) - - -class AdaptiveAvgPool1dGeneralDynamic(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7) - - @export - @annotate_args([None, ([-1, -1, -1], torch.float32, True)]) - def forward(self, x): - return self.aap1d(x) - - -@register_test_case(module_factory=lambda: AdaptiveAvgPool1dGeneralDynamic()) -def AdaptiveAvgPool1dGeneralDynamic_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 10)) - - -class AdaptiveAvgPool1dGeneralDynamicNoBatches(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7) - - @export - @annotate_args([None, ([-1, -1], torch.float32, True)]) - def forward(self, x): - return self.aap1d(x) - - -@register_test_case(module_factory=lambda: AdaptiveAvgPool1dGeneralDynamicNoBatches()) -def AdaptiveAvgPool1dGeneralDynamicNoBatches_basic(module, tu: TestUtils): - module.forward(tu.rand(512, 10)) - - 
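# The adaptive pooling cases in this section all exercise the same windowing
# rule: for input length L and output length O, output index i averages
# input[start:end] with start = floor(i * L / O) and end = ceil((i + 1) * L / O).
# Below is a minimal sketch of that rule for readers of these tests; it is an
# illustration only, and nothing in the suite calls this helper.


def _adaptive_pool_window(i: int, in_len: int, out_len: int):
    start = (i * in_len) // out_len  # floor division
    end = ((i + 1) * in_len + out_len - 1) // out_len  # ceiling division
    return start, end


# For example, in_len=10 with out_len=7 (the AdaptiveAvgPool1dGeneralDynamic
# case above) produces overlapping windows of 2 or 3 elements, so the op
# cannot be expressed as a single pooling with one fixed kernel and stride.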
-class AdaptiveAvgPool1dNonUnitOutputSizeStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7) - - @export - @annotate_args( - [ - None, - ([1, 512, 7], torch.float32, True), - ] - ) - def forward(self, x): - return self.aap1d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveAvgPool1dNonUnitOutputSizeStaticModule() -) -def AdaptiveAvgPool1dNonUnitOutputSizeStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 7)) - - -class AdaptiveAvgPool1dNonUnitOutputSizeDynamicModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.aap1d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveAvgPool1dNonUnitOutputSizeDynamicModule() -) -def AdaptiveAvgPool1dNonUnitOutputSizeDynamicModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 7)) - - -class AdaptiveAvgPool1dUnitOutputSizeStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=1) - - @export - @annotate_args( - [ - None, - ([1, 512, 7], torch.float32, True), - ] - ) - def forward(self, x): - return self.aap1d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveAvgPool1dUnitOutputSizeStaticModule() -) -def AdaptiveAvgPool1dUnitOutputSizeStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 7)) - - -class AdaptiveAvgPool1dUnitOutputSizeDynamicModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=1) - - @export - @annotate_args( - [ - None, - ([-1, -1, -1], torch.float32, True), - ] - ) - def forward(self, x): - return self.aap1d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveAvgPool1dUnitOutputSizeDynamicModule() -) -def AdaptiveAvgPool1dUnitOutputSizeDynamicModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 7)) - - -# AdaptiveAvgPool2d - - -class AdaptiveAvgPool2dDynamic(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap2d = torch.nn.AdaptiveAvgPool2d(output_size=(7, 13)) - - @export - @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)]) - def forward(self, x): - return self.aap2d(x) - - -@register_test_case(module_factory=lambda: AdaptiveAvgPool2dDynamic()) -def AdaptiveAvgPool2dDynamic_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 10, 16)) - - -class AdaptiveAvgPool2dDynamicNoBatch(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap2d = torch.nn.AdaptiveAvgPool2d(output_size=(7, 13)) - - @export - @annotate_args([None, ([-1, -1, -1], torch.float32, True)]) - def forward(self, x): - return self.aap2d(x) - - -@register_test_case(module_factory=lambda: AdaptiveAvgPool2dDynamicNoBatch()) -def AdaptiveAvgPool2dDynamicNoBatch_basic(module, tu: TestUtils): - module.forward(tu.rand(512, 10, 16)) - - -# AdaptiveAvgPool3d - - -class AdaptiveAvgPool3dDynamic(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap3d = torch.nn.AdaptiveAvgPool3d(output_size=(7, 13, 15)) - - @export - @annotate_args([None, ([-1, -1, -1, -1, -1], torch.float32, True)]) - def forward(self, x): - return self.aap3d(x) - - -@register_test_case(module_factory=lambda: AdaptiveAvgPool3dDynamic()) -def AdaptiveAvgPool3dDynamic_basic(module, tu: TestUtils): - 
module.forward(tu.rand(1, 512, 10, 16, 17)) - - -class AdaptiveAvgPool3dDynamicNoBatch(torch.nn.Module): - def __init__(self): - super().__init__() - self.aap3d = torch.nn.AdaptiveAvgPool3d(output_size=(7, 13, 15)) - - @export - @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)]) - def forward(self, x): - return self.aap3d(x) - - -@register_test_case(module_factory=lambda: AdaptiveAvgPool3dDynamicNoBatch()) -def AdaptiveAvgPool3dDynamicNoBatch_basic(module, tu: TestUtils): - module.forward(tu.rand(512, 10, 16, 17)) - - -# AdaptiveMaxPool1d - - -class AdaptiveMaxPool1dDynamic(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp1d = torch.nn.AdaptiveMaxPool1d(output_size=(7), return_indices=False) - - @export - @annotate_args([None, ([-1, -1, -1], torch.float32, True)]) - def forward(self, x): - return self.amp1d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool1dDynamic()) -def AdaptiveMaxPool1dDynamic_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 10)) - - -class AdaptiveMaxPool1dDynamicNoBatch(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp1d = torch.nn.AdaptiveMaxPool1d(output_size=(7), return_indices=False) - - @export - @annotate_args([None, ([-1, -1], torch.float32, True)]) - def forward(self, x): - return self.amp1d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool1dDynamicNoBatch()) -def AdaptiveMaxPool1dDynamicNoBatch_basic(module, tu: TestUtils): - module.forward(tu.rand(512, 10)) - - -class AdaptiveMaxPool1dStatic(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp1d = torch.nn.AdaptiveMaxPool1d(output_size=(7), return_indices=False) - - @export - @annotate_args([None, ([1, 512, 10], torch.float32, True)]) - def forward(self, x): - return self.amp1d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool1dStatic()) -def AdaptiveMaxPool1dStatic_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 10)) - - -class AdaptiveMaxPool1dDimOneStatic(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp1d = torch.nn.AdaptiveMaxPool1d(output_size=(1), return_indices=False) - - @export - @annotate_args([None, ([1, 512, 7], torch.float32, True)]) - def forward(self, x): - return self.amp1d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool1dDimOneStatic()) -def AdaptiveMaxPool1dDimOneStatic_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 7)) - - -# AdaptiveMaxPool2d - - -class AdaptiveMaxPool2dDynamic(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp2d = torch.nn.AdaptiveMaxPool2d( - output_size=(7, 13), return_indices=False - ) - - @export - @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)]) - def forward(self, x): - return self.amp2d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool2dDynamic()) -def AdaptiveMaxPool2dDynamic_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 10, 16)) - - -class AdaptiveMaxPool2dDynamicNoBatch(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp2d = torch.nn.AdaptiveMaxPool2d( - output_size=(7, 13), return_indices=False - ) - - @export - @annotate_args([None, ([-1, -1, -1], torch.float32, True)]) - def forward(self, x): - return self.amp2d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool2dDynamicNoBatch()) -def AdaptiveMaxPool2dDynamicNoBatch_basic(module, tu: TestUtils): - module.forward(tu.rand(512, 10, 16)) - - -class 
AdaptiveMaxPool2dDynamicWithIndices(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp2d = torch.nn.AdaptiveMaxPool2d( - output_size=(7, 13), return_indices=True - ) - - @export - @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)]) - def forward(self, x): - return self.amp2d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool2dDynamicWithIndices()) -def AdaptiveMaxPool2dDynamicWithIndices_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 10, 16)) - - -class AdaptiveMaxPool2dStatic(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp2d = torch.nn.AdaptiveMaxPool2d( - output_size=(7, 13), return_indices=False - ) - - @export - @annotate_args([None, ([1, 512, 10, 9], torch.float32, True)]) - def forward(self, x): - return self.amp2d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool2dStatic()) -def AdaptiveMaxPool2dStatic_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 10, 9)) - - -class AdaptiveMaxPool2dStaticWithIndices(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp2d = torch.nn.AdaptiveMaxPool2d( - output_size=(7, 13), return_indices=True - ) - - @export - @annotate_args([None, ([1, 512, 10, 16], torch.float32, True)]) - def forward(self, x): - return self.amp2d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool2dStaticWithIndices()) -def AdaptiveMaxPool2dStaticWithIndices_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 10, 16)) - - -class AdaptiveMaxPool2dFixedKernelStrideSizeStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp2d = torch.nn.AdaptiveMaxPool2d((2, 2)) - - @export - @annotate_args( - [ - None, - ([1, 3, 7, 7], torch.float32, True), - ] - ) - def forward(self, x): - return self.amp2d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveMaxPool2dFixedKernelStrideSizeStaticModule() -) -def AdaptiveMaxPool2dFixedKernelStrideSizeStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 3, 7, 7)) - - -class AdaptiveMaxPool2dUnitOutputSizeStaticModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp2d = torch.nn.AdaptiveMaxPool2d((1, 1)) - - @export - @annotate_args( - [ - None, - ([1, 512, 7, 7], torch.float32, True), - ] - ) - def forward(self, x): - return self.amp2d(x) - - -@register_test_case( - module_factory=lambda: AdaptiveMaxPool2dUnitOutputSizeStaticModule() -) -def AdaptiveMaxPool2dUnitOutputSizeStaticModule_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 7, 7)) - - -# AdaptiveMaxPool3d - - -class AdaptiveMaxPool3dDynamic(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp3d = torch.nn.AdaptiveMaxPool3d( - output_size=(7, 13, 15), return_indices=False - ) - - @export - @annotate_args([None, ([-1, -1, -1, -1, -1], torch.float32, True)]) - def forward(self, x): - return self.amp3d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool3dDynamic()) -def AdaptiveMaxPool3dDynamic_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 10, 16, 17)) - - -class AdaptiveMaxPool3dDynamicNoBatch(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp3d = torch.nn.AdaptiveMaxPool3d( - output_size=(7, 13, 15), return_indices=False - ) - - @export - @annotate_args([None, ([-1, -1, -1, -1], torch.float32, True)]) - def forward(self, x): - return self.amp3d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool3dDynamicNoBatch()) -def 
AdaptiveMaxPool3dDynamicNoBatch_basic(module, tu: TestUtils): - module.forward(tu.rand(512, 10, 16, 17)) - - -class AdaptiveMaxPool3dDynamicWithIndices(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp3d = torch.nn.AdaptiveMaxPool3d( - output_size=(7, 13, 15), return_indices=True - ) - - @export - @annotate_args([None, ([-1, -1, -1, -1, -1], torch.float32, True)]) - def forward(self, x): - return self.amp3d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool3dDynamicWithIndices()) -def AdaptiveMaxPool3dDynamicWithIndices_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 10, 16, 17)) - - -class AdaptiveMaxPool3dStatic(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp3d = torch.nn.AdaptiveMaxPool3d( - output_size=(7, 13, 15), return_indices=False - ) - - @export - @annotate_args([None, ([1, 512, 10, 9, 5], torch.float32, True)]) - def forward(self, x): - return self.amp3d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool3dStatic()) -def AdaptiveMaxPool3dStatic_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 10, 9, 5)) - - -class AdaptiveMaxPool3dStaticWithIndices(torch.nn.Module): - def __init__(self): - super().__init__() - self.amp3d = torch.nn.AdaptiveMaxPool3d( - output_size=(7, 13, 15), return_indices=True - ) - - @export - @annotate_args([None, ([1, 512, 10, 16, 17], torch.float32, True)]) - def forward(self, x): - return self.amp3d(x) - - -@register_test_case(module_factory=lambda: AdaptiveMaxPool3dStaticWithIndices()) -def AdaptiveMaxPool3dStaticWithIndices_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 512, 10, 16, 17)) - - -# ============================================================================== - - -class MaxUnpool2dModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([2, 2, 2, 4], torch.float32, True), - ([2, 2, 2, 4], torch.int64, True), - ] - ) - def forward(self, x, indices): - return torch.ops.aten.max_unpool2d(x, indices, (4, 8)) - - -@register_test_case(module_factory=lambda: MaxUnpool2dModule()) -def MaxUnpool2dModule_basic(module, tu: TestUtils): - input = tu.rand(2, 2, 4, 8) - pool = torch.nn.MaxPool2d(kernel_size=(2, 2), return_indices=True) - output, indices = pool(input) - - module.forward(output, indices) - - -# ============================================================================== - - -class MaxUnpool2dModule_3dInput(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([2, 2, 4], torch.float32, True), - ([2, 2, 4], torch.int64, True), - ] - ) - def forward(self, x, indices): - return torch.ops.aten.max_unpool2d(x, indices, (4, 8)) - - -@register_test_case(module_factory=lambda: MaxUnpool2dModule_3dInput()) -def MaxUnpool2dModule_3dInput_basic(module, tu: TestUtils): - input = tu.rand(2, 4, 8) - pool = torch.nn.MaxPool2d(kernel_size=(2, 2), return_indices=True) - output, indices = pool(input) - - module.forward(output, indices) - - -# ============================================================================== - - -class MaxUnpool3dModule(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, 2, 2, 4], torch.float32, True), - ([-1, -1, 2, 2, 4], torch.int64, True), - ] - ) - def forward(self, x, indices): - return torch.ops.aten.max_unpool3d(x, indices, (4, 5, 6), (2, 3, 2), (0, 0, 1)) - - -@register_test_case(module_factory=lambda: MaxUnpool3dModule()) -def 
MaxUnpool3dModule_basic(module, tu: TestUtils): - input = tu.rand(2, 2, 4, 5, 6) - pool = torch.nn.MaxPool3d( - kernel_size=(2, 2, 2), stride=(2, 3, 2), padding=(0, 0, 1), return_indices=True - ) - output, indices = pool(input) - - module.forward(output, indices) - - -# We have a special case for all-zeros padding, test it too. -class MaxUnpool3dModulePad0(torch.nn.Module): - def __init__(self): - super().__init__() - - @export - @annotate_args( - [ - None, - ([-1, -1, 2, 2, 3], torch.float32, True), - ([-1, -1, 2, 2, 3], torch.int64, True), - ] - ) - def forward(self, x, indices): - return torch.ops.aten.max_unpool3d(x, indices, (4, 5, 6), (2, 3, 2), (0, 0, 0)) - - -@register_test_case(module_factory=lambda: MaxUnpool3dModulePad0()) -def MaxUnpool3dModulePad0_basic(module, tu: TestUtils): - input = tu.rand(2, 2, 4, 5, 6) - pool = torch.nn.MaxPool3d( - kernel_size=(2, 2, 2), stride=(2, 3, 2), padding=(0, 0, 0), return_indices=True - ) - output, indices = pool(input) - - module.forward(output, indices) - - -class AvgPool2dCeilNoPadUnitaryStrides(torch.nn.Module): - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[1, 1], - padding=[0, 0], - ceil_mode=True, - count_include_pad=False, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 4, 4], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dCeilNoPadUnitaryStrides()) -def AvgPool2dCeilNoPadUnitaryStrides_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 4, 4, low=-1)) - - -class AvgPool2dCeilPadNonUnitaryStrides(torch.nn.Module): - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[2, 2], - padding=[1, 1], - ceil_mode=True, - count_include_pad=False, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 4, 4], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dCeilPadNonUnitaryStrides()) -def AvgPool2dCeilPadNonUnitaryStrides_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 4, 4, low=-1)) - - -class AvgPool2dCeilNoPadStridedIncludePadding(torch.nn.Module): - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[2, 2], - padding=[0, 0], - ceil_mode=True, - count_include_pad=True, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 4, 4], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dCeilNoPadStridedIncludePadding()) -def AvgPool2dCeilNoPadStridedIncludePadding_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 4, 4, low=-1)) - - -class AvgPool2dCeilNoPadUnitaryStrideIncludePadding(torch.nn.Module): - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[1, 1], - padding=[0, 0], - ceil_mode=True, - count_include_pad=True, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 4, 4], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool2dCeilNoPadUnitaryStrideIncludePadding() -) -def AvgPool2dCeilNoPadUnitaryStrideIncludePadding_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 4, 4, low=-1)) - - -class 
AvgPool2dCeilPaddingUnitaryStrideIncludePaddingFalse(torch.nn.Module): - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[1, 1], - padding=[1, 1], - ceil_mode=True, - count_include_pad=False, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 4, 4], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool2dCeilPaddingUnitaryStrideIncludePaddingFalse() -) -def AvgPool2dCeilPaddingUnitaryStrideIncludePaddingFalse_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 4, 4, low=-1)) - - -class AvgPool2dFloorNoPadUnitaryStrideIncludePadding(torch.nn.Module): - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[1, 1], - padding=[0, 0], - ceil_mode=False, - count_include_pad=True, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 4, 4], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool2dFloorNoPadUnitaryStrideIncludePadding() -) -def AvgPool2dFloorNoPadUnitaryStrideIncludePadding_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 4, 4, low=-1)) - - -class AvgPool2dFloorPaddingUnitaryStrideIncludePadding(torch.nn.Module): - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[1, 1], - padding=[1, 1], - ceil_mode=False, - count_include_pad=True, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 4, 4], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool2dFloorPaddingUnitaryStrideIncludePadding() -) -def AvgPool2dFloorPaddingUnitaryStrideIncludePadding_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 4, 4, low=-1)) - - -class AvgPool2dCeilPaddingUnitaryStrideIncludePadding(torch.nn.Module): - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[1, 1], - padding=[1, 1], - ceil_mode=True, - count_include_pad=True, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 4, 4], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool2dCeilPaddingUnitaryStrideIncludePadding() -) -def AvgPool2dCeilPaddingUnitaryStrideIncludePadding_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 4, 4, low=-1)) - - -class AvgPool2dCeilPaddingStridedIncludePadding(torch.nn.Module): - # Note that in this case the kernel window center will go into the padding. - # When this happens the padding elements are counted in the divisor, but - # the out of bound elements from the ceiling are not counted - # (i.e., clamped from the divisor count). 
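# Worked numbers for this configuration (4x4 input, 3x3 kernel, stride 2,
# padding 1, ceil_mode=True, count_include_pad=True), following PyTorch's
# documented output-size rule: each spatial dim yields
# ceil((4 + 2*1 - 3) / 2) + 1 = 3 outputs. The bottom-right window covers
# only a 2x2 block of real elements plus 5 padding cells, yet divides by the
# full 9, because padding counts toward the divisor and this window does not
# run past the padded bounds (so nothing is clamped in this particular case).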
- - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 3], - stride=[2, 2], - padding=[1, 1], - ceil_mode=True, - count_include_pad=True, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 4, 4], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool2dCeilPaddingStridedIncludePadding()) -def AvgPool2dCeilPaddingStridedIncludePadding_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 4, 4, low=-1)) - - -class AvgPool2dDiffKernelsStridesNoPadCeilPadNotIncluded(torch.nn.Module): - # This test captures the torch-mlir issue reported here: - # https://github.com/llvm/torch-mlir/issues/4079 - # The issue was caused by having the ceil_mode = true and - # count_include_pad = false. Also the kernel and stride sizes are - # different in this test to make sure that they are processed in - # the right order. - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 2], - stride=[2, 3], - padding=[0, 0], - ceil_mode=True, - count_include_pad=False, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 3, 4], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool2dDiffKernelsStridesNoPadCeilPadNotIncluded() -) -def AvgPool2dDiffKernelsStridesNoPadCeilPadNotIncluded_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 3, 4, low=-1)) - - -class AvgPool2dDiffKernelsStridesPadCeilPadNotIncluded(torch.nn.Module): - # Different sizes used for each kernel, stride, and padding.dimensions. - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool2d( - kernel_size=[3, 4], - stride=[2, 3], - padding=[1, 2], - ceil_mode=True, - count_include_pad=False, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 3, 4], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool2dDiffKernelsStridesPadCeilPadNotIncluded() -) -def AvgPool2dDiffKernelsStridesPadCeilPadNotIncluded_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 3, 4, low=-1)) - - -class AvgPool3dDiffKernelsStridesNoPadCeilPadNotIncluded(torch.nn.Module): - # 3D version of AvgPool2dDiffKernelsStridesNoPadCeilPadNotIncluded. - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool3d( - kernel_size=[3, 2, 4], - stride=[3, 2, 5], - padding=[0, 0, 0], - ceil_mode=True, - count_include_pad=False, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 4, 5, 7], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool3dDiffKernelsStridesNoPadCeilPadNotIncluded() -) -def AvgPool3dDiffKernelsStridesNoPadCeilPadNotIncluded_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 4, 5, 7, low=-1)) - - -class AvgPool3dDiffKernelsStridesPadCeilPadNotIncluded(torch.nn.Module): - # 3-D version of AvgPool2dDiffKernelsStridesPadCeilPadNotIncluded. 
- - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool3d( - kernel_size=[3, 4, 7], - stride=[2, 3, 4], - padding=[1, 2, 3], - ceil_mode=True, - count_include_pad=False, - divisor_override=None, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 3, 4, 7], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case( - module_factory=lambda: AvgPool3dDiffKernelsStridesPadCeilPadNotIncluded() -) -def AvgPool3dDiffKernelsStridesPadCeilPadNotIncluded_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 3, 4, 7, low=-1)) - - -class AvgPool1dNoPadCeilPadNotIncluded(torch.nn.Module): - # 1D version of AvgPool2dDiffKernelsStridesNoPadCeilPadNotIncluded. - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool1d( - kernel_size=[2], - stride=[2], - padding=[1], - ceil_mode=True, - count_include_pad=False, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 5], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool1dNoPadCeilPadNotIncluded()) -def AvgPool1dNoPadCeilPadNotIncluded_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 5, low=-1)) - - -class AvgPool1dPadCeilPadNotIncluded(torch.nn.Module): - # 1-D version of AvgPool2dDiffKernelsStridesPadCeilPadNotIncluded. - - def __init__(self): - super().__init__() - self.ap2d = torch.nn.AvgPool1d( - kernel_size=[2], - stride=[2], - padding=[1], - ceil_mode=True, - count_include_pad=False, - ) - - @export - @annotate_args( - [ - None, - ([1, 1, 3], torch.float32, True), - ] - ) - def forward(self, x): - return self.ap2d(x) - - -@register_test_case(module_factory=lambda: AvgPool1dPadCeilPadNotIncluded()) -def AvgPool1dPadCeilPadNotIncluded_basic(module, tu: TestUtils): - module.forward(tu.rand(1, 1, 3, low=-1)) From ee73b8dd3bc8280f91f288d0080546ab37b2ba92 Mon Sep 17 00:00:00 2001 From: zjgarvey Date: Thu, 16 Oct 2025 16:00:41 -0700 Subject: [PATCH 07/15] update nightly pins Signed-off-by: zjgarvey --- pytorch-hash.txt | 2 +- pytorch-requirements.txt | 2 +- torchvision-requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pytorch-hash.txt b/pytorch-hash.txt index b1886c1abddd..582695ddc3c7 100644 --- a/pytorch-hash.txt +++ b/pytorch-hash.txt @@ -1 +1 @@ -7956a1d1d0dc7cdaaaa42d0863eebb1b1e75eb65 +0dfcb1a118dd45c544a156e1d86566368e528e69 diff --git a/pytorch-requirements.txt b/pytorch-requirements.txt index 87cbf28f5a98..ac81781a6b2b 100644 --- a/pytorch-requirements.txt +++ b/pytorch-requirements.txt @@ -1,3 +1,3 @@ -f https://download.pytorch.org/whl/nightly/cpu/torch/ --pre -torch==2.9.0.dev20250820 +torch==2.10.0.dev20251016 diff --git a/torchvision-requirements.txt b/torchvision-requirements.txt index 68c96010c96f..546bfb138e43 100644 --- a/torchvision-requirements.txt +++ b/torchvision-requirements.txt @@ -1,3 +1,3 @@ -f https://download.pytorch.org/whl/nightly/cpu/torchvision/ --pre -torchvision==0.24.0.dev20250820 +torchvision==0.25.0.dev20251016 From e2d772ceb9a25094ad02ec2732b8af521e6a288b Mon Sep 17 00:00:00 2001 From: zjgarvey Date: Thu, 16 Oct 2025 16:02:34 -0700 Subject: [PATCH 08/15] lint Signed-off-by: zjgarvey --- .../e2e/torch_mlir_e2e_test/configs/__init__.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/projects/e2e/torch_mlir_e2e_test/configs/__init__.py b/projects/e2e/torch_mlir_e2e_test/configs/__init__.py index 08433c7f0380..38d6b41e461e 100644 --- 
a/projects/e2e/torch_mlir_e2e_test/configs/__init__.py +++ b/projects/e2e/torch_mlir_e2e_test/configs/__init__.py @@ -8,17 +8,17 @@ from importlib import import_module CONFIG_LOCATIONS = { - "LazyTensorCoreTestConfig" : "lazy_tensor_core", - "NativeTorchTestConfig" : "native_torch", - "OnnxBackendTestConfig" : "onnx_backend", - "TorchScriptTestConfig" : "torchscript", - "TorchDynamoTestConfig" : "torchdynamo", - "JITImporterTestConfig" : "jit_importer_backend", - "FxImporterTestConfig" : "fx_importer_backend", + "LazyTensorCoreTestConfig": "lazy_tensor_core", + "NativeTorchTestConfig": "native_torch", + "OnnxBackendTestConfig": "onnx_backend", + "TorchScriptTestConfig": "torchscript", + "TorchDynamoTestConfig": "torchdynamo", + "JITImporterTestConfig": "jit_importer_backend", + "FxImporterTestConfig": "fx_importer_backend", } def load_config(name: str) -> type: source = CONFIG_LOCATIONS.get(name) assert source is not None, f"Could not find TestConfig named {name}." - module = import_module(f'.{source}', __package__) + module = import_module(f".{source}", __package__) return getattr(module, name) From f5831a2518a6960172e2dcf18cab9a5e0a3c8292 Mon Sep 17 00:00:00 2001 From: zjgarvey Date: Fri, 17 Oct 2025 10:51:50 -0700 Subject: [PATCH 09/15] With the PyTorch update, dynamo=True is the default for ONNX export. Explicitly set dynamo=False for now. Signed-off-by: zjgarvey --- projects/e2e/torch_mlir_e2e_test/configs/onnx_backend.py | 1 + 1 file changed, 1 insertion(+) diff --git a/projects/e2e/torch_mlir_e2e_test/configs/onnx_backend.py b/projects/e2e/torch_mlir_e2e_test/configs/onnx_backend.py index 207b8745d78e..44e9801ecda7 100644 --- a/projects/e2e/torch_mlir_e2e_test/configs/onnx_backend.py +++ b/projects/e2e/torch_mlir_e2e_test/configs/onnx_backend.py @@ -81,6 +81,7 @@ def convert_onnx(model, inputs): model, examples, buffer, + dynamo=False, input_names=input_names, dynamic_axes=dynamic_tensors, opset_version=max_opset_ver, From 95d0eb0f842a932719c590c745805bd9cdeb856e Mon Sep 17 00:00:00 2001 From: zjgarvey Date: Fri, 17 Oct 2025 11:06:18 -0700 Subject: [PATCH 10/15] lint Signed-off-by: zjgarvey --- projects/e2e/e2e_testing/main.py | 28 ++++++++++++++----- .../torch_mlir_e2e_test/configs/__init__.py | 1 + 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/projects/e2e/e2e_testing/main.py b/projects/e2e/e2e_testing/main.py index e85dc041eb85..2e7540270ab0 100644 --- a/projects/e2e/e2e_testing/main.py +++ b/projects/e2e/e2e_testing/main.py @@ -142,15 +142,21 @@ def main(): # Find the selected config.
if args.config == "linalg": - config = load_config("JITImporterTestConfig")(RefBackendLinalgOnTensorsBackend()) + config = load_config("JITImporterTestConfig")( + RefBackendLinalgOnTensorsBackend() + ) xfail_set = LINALG_XFAIL_SET crashing_set = LINALG_CRASHING_SET elif args.config == "stablehlo": - config = load_config("JITImporterTestConfig")(LinalgOnTensorsStablehloBackend(), "stablehlo") + config = load_config("JITImporterTestConfig")( + LinalgOnTensorsStablehloBackend(), "stablehlo" + ) xfail_set = all_test_unique_names - STABLEHLO_PASS_SET crashing_set = STABLEHLO_CRASHING_SET elif args.config == "tosa": - config = load_config("JITImporterTestConfig")(LinalgOnTensorsTosaBackend(), "tosa") + config = load_config("JITImporterTestConfig")( + LinalgOnTensorsTosaBackend(), "tosa" + ) xfail_set = all_test_unique_names - TOSA_PASS_SET crashing_set = TOSA_CRASHING_SET elif args.config == "native_torch": @@ -170,11 +176,15 @@ def main(): xfail_set = FX_IMPORTER_XFAIL_SET crashing_set = FX_IMPORTER_CRASHING_SET elif args.config == "fx_importer_stablehlo": - config = load_config("FxImporterTestConfig")(LinalgOnTensorsStablehloBackend(), "stablehlo") + config = load_config("FxImporterTestConfig")( + LinalgOnTensorsStablehloBackend(), "stablehlo" + ) xfail_set = FX_IMPORTER_STABLEHLO_XFAIL_SET crashing_set = FX_IMPORTER_STABLEHLO_CRASHING_SET elif args.config == "fx_importer_tosa": - config = load_config("FxImporterTestConfig")(LinalgOnTensorsTosaBackend(), "tosa") + config = load_config("FxImporterTestConfig")( + LinalgOnTensorsTosaBackend(), "tosa" + ) xfail_set = FX_IMPORTER_TOSA_XFAIL_SET crashing_set = FX_IMPORTER_TOSA_CRASHING_SET elif args.config == "torchdynamo": @@ -185,11 +195,15 @@ def main(): xfail_set = TORCHDYNAMO_XFAIL_SET crashing_set = TORCHDYNAMO_CRASHING_SET elif args.config == "onnx": - config = load_config("OnnxBackendTestConfig")(RefBackendLinalgOnTensorsBackend()) + config = load_config("OnnxBackendTestConfig")( + RefBackendLinalgOnTensorsBackend() + ) xfail_set = ONNX_XFAIL_SET crashing_set = ONNX_CRASHING_SET elif args.config == "onnx_tosa": - config = load_config("OnnxBackendTestConfig")(LinalgOnTensorsTosaBackend(), output_type="tosa") + config = load_config("OnnxBackendTestConfig")( + LinalgOnTensorsTosaBackend(), output_type="tosa" + ) xfail_set = ONNX_TOSA_XFAIL_SET crashing_set = ONNX_TOSA_CRASHING_SET diff --git a/projects/e2e/torch_mlir_e2e_test/configs/__init__.py b/projects/e2e/torch_mlir_e2e_test/configs/__init__.py index 38d6b41e461e..1ecd8e1728af 100644 --- a/projects/e2e/torch_mlir_e2e_test/configs/__init__.py +++ b/projects/e2e/torch_mlir_e2e_test/configs/__init__.py @@ -17,6 +17,7 @@ "FxImporterTestConfig": "fx_importer_backend", } + def load_config(name: str) -> type: source = CONFIG_LOCATIONS.get(name) assert source is not None, f"Could not find TestConfig named {name}." From 236c07d6c95ecbb2893d4be195e76ea872a575f3 Mon Sep 17 00:00:00 2001 From: zjgarvey Date: Mon, 20 Oct 2025 10:56:10 -0700 Subject: [PATCH 11/15] Fix BatchNorm tests by passing relevant input buffers. 
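Previously, every tensor returned by artifact.named_buffers() was flattened into the parameter list handed to the compiled module, including buffers the exported graph never declares as placeholder inputs, which left the flattened values out of sync with the graph signature for BatchNorm modules. This change filters the buffers through prog.graph_signature.inputs_to_buffers so the params match what the graph actually consumes. A minimal sketch of the distinction this relies on (illustrative only; BNModule is a hypothetical example, not part of the test suite):

    import torch

    class BNModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.bn = torch.nn.BatchNorm1d(4)

        def forward(self, x):
            return self.bn(x)

    m = BNModule().eval()
    prog = torch.export.export(m, (torch.rand(2, 4),))
    # Every registered buffer (running_mean, running_var, num_batches_tracked),
    # whether or not the traced graph reads it:
    print([name for name, _ in m.named_buffers(remove_duplicate=False)])
    # Only the buffers that appear as inputs in the export graph signature;
    # e.g. num_batches_tracked may be absent here for an eval-mode graph:
    print(set(prog.graph_signature.inputs_to_buffers.values()))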
Signed-off-by: zjgarvey --- .../e2e/torch_mlir_e2e_test/configs/fx_importer_backend.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/projects/e2e/torch_mlir_e2e_test/configs/fx_importer_backend.py b/projects/e2e/torch_mlir_e2e_test/configs/fx_importer_backend.py index 396d43638a42..a116a94dabd3 100644 --- a/projects/e2e/torch_mlir_e2e_test/configs/fx_importer_backend.py +++ b/projects/e2e/torch_mlir_e2e_test/configs/fx_importer_backend.py @@ -149,9 +149,12 @@ def _export_run(self, artifact: torch.nn.Module, trace: Trace) -> Trace: ) module = self._backend.compile(module) backend_module = self._backend.load(module) + input_buffers = prog.graph_signature.inputs_to_buffers.values() params = { # **dict(artifact.named_parameters(remove_duplicate=False)), - **dict(artifact.named_buffers(remove_duplicate=False)), + name: value + for (name, value) in artifact.named_buffers(remove_duplicate=False) + if name in input_buffers } params_flat, params_spec = pytree.tree_flatten(params) params_flat = list(params_flat) From c3bf1a3eb35b23b64d32e98d333ad7f6fcd9195a Mon Sep 17 00:00:00 2001 From: zjgarvey Date: Mon, 20 Oct 2025 13:58:23 -0700 Subject: [PATCH 12/15] Make the test config imports a bit less hacky. Signed-off-by: zjgarvey --- projects/e2e/e2e_testing/main.py | 216 ++++++++++-------- .../torch_mlir_e2e_test/configs/__init__.py | 23 +- .../pt1_configs/__init__.py | 9 + .../jit_importer_backend.py | 2 +- .../lazy_tensor_core.py | 0 .../{configs => pt1_configs}/torchdynamo.py | 0 .../{configs => pt1_configs}/torchscript.py | 0 7 files changed, 138 insertions(+), 112 deletions(-) create mode 100644 projects/e2e/torch_mlir_e2e_test/pt1_configs/__init__.py rename projects/e2e/torch_mlir_e2e_test/{configs => pt1_configs}/jit_importer_backend.py (98%) rename projects/e2e/torch_mlir_e2e_test/{configs => pt1_configs}/lazy_tensor_core.py (100%) rename projects/e2e/torch_mlir_e2e_test/{configs => pt1_configs}/torchdynamo.py (100%) rename projects/e2e/torch_mlir_e2e_test/{configs => pt1_configs}/torchscript.py (100%) diff --git a/projects/e2e/e2e_testing/main.py b/projects/e2e/e2e_testing/main.py index 2e7540270ab0..a47f724571a6 100644 --- a/projects/e2e/e2e_testing/main.py +++ b/projects/e2e/e2e_testing/main.py @@ -11,13 +11,16 @@ torch.device("cpu") -from torch_mlir_e2e_test.framework import run_tests +from torch_mlir_e2e_test.framework import run_tests, TestConfig from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import GLOBAL_TEST_REGISTRY -# Available test configs. 
-from torch_mlir_e2e_test.configs import load_config +from torch_mlir_e2e_test.configs import ( + FxImporterTestConfig, + NativeTorchTestConfig, + OnnxBackendTestConfig, +) from torch_mlir_e2e_test.linalg_on_tensors_backends.refbackend import ( RefBackendLinalgOnTensorsBackend, @@ -57,30 +60,42 @@ register_all_tests() +DEPRECATED_CONFIGS = [ + "torchscript", + "linalg", + "stablehlo", + "tosa", + "lazy_tensor_core", + "torchdynamo", +] + +CONFIGS = [ + "native_torch", + "onnx", + "onnx_tosa", + "fx_importer", + "fx_importer_stablehlo", + "fx_importer_tosa", +] + def _get_argparse(): - config_choices = [ - "native_torch", - "torchscript", - "linalg", - "stablehlo", - "tosa", - "lazy_tensor_core", - "torchdynamo", - "onnx", - "onnx_tosa", - "fx_importer", - "fx_importer_stablehlo", - "fx_importer_tosa", - ] + config_choices = CONFIGS + DEPRECATED_CONFIGS parser = argparse.ArgumentParser(description="Run torchscript e2e tests.") parser.add_argument( "-c", "--config", choices=config_choices, - default="linalg", + default="fx_importer", help=f""" Meaning of options: +"onnx": export the model via onnx and reimport using the torch-onnx-to-torch path. +"fx_importer": run the model through the fx importer frontend and execute the graph using Linalg-on-Tensors. +"fx_importer_stablehlo": run the model through the fx importer frontend and execute the graph using Stablehlo backend. +"fx_importer_tosa": run the model through the fx importer frontend and execute the graph using the TOSA backend. +"onnx_tosa": Import ONNX to Torch via the torch-onnx-to-torch path and execute the graph using the TOSA backend. + +The following options are deprecated: "linalg": run through torch-mlir"s default Linalg-on-Tensors backend. "tosa": run through torch-mlir"s default TOSA backend. "stablehlo": run through torch-mlir"s default Stablehlo backend. @@ -88,11 +103,6 @@ def _get_argparse(): "torchscript": compile the model to a torch.jit.ScriptModule, and then run that as-is (useful for verifying TorchScript is modeling the program correctly). "lazy_tensor_core": run the model through the Lazy Tensor Core frontend and execute the traced graph. "torchdynamo": run the model through the TorchDynamo frontend and execute the graph using Linalg-on-Tensors. -"onnx": export to the model via onnx and reimport using the torch-onnx-to-torch path. -"fx_importer": run the model through the fx importer frontend and execute the graph using Linalg-on-Tensors. -"fx_importer_stablehlo": run the model through the fx importer frontend and execute the graph using Stablehlo backend. -"fx_importer_tosa": run the model through the fx importer frontend and execute the graph using the TOSA backend. -"onnx_tosa": Import ONNX to Torch via the torch-onnx-to-torch path and execute the graph using the TOSA backend. """, ) parser.add_argument( @@ -135,77 +145,106 @@ def _get_argparse(): return parser -def main(): - args = _get_argparse().parse_args() - - all_test_unique_names = set(test.unique_name for test in GLOBAL_TEST_REGISTRY) - - # Find the selected config.
- if args.config == "linalg": - config = load_config("JITImporterTestConfig")( - RefBackendLinalgOnTensorsBackend() +def _setup_config( + config: str, all_test_unique_names: set[str] +) -> tuple[TestConfig, set[str], set[str]]: + if config in DEPRECATED_CONFIGS: + return _setup_deprecated_config(config, all_test_unique_names) + if config == "native_torch": + return ( + NativeTorchTestConfig(), + set(), + set(), + ) + if config == "fx_importer": + return ( + FxImporterTestConfig(RefBackendLinalgOnTensorsBackend()), + FX_IMPORTER_XFAIL_SET, + FX_IMPORTER_CRASHING_SET, + ) + if config == "fx_importer_stablehlo": + return ( + FxImporterTestConfig(LinalgOnTensorsStablehloBackend(), "stablehlo"), + FX_IMPORTER_STABLEHLO_XFAIL_SET, + FX_IMPORTER_STABLEHLO_CRASHING_SET, + ) + if config == "fx_importer_tosa": + return ( + FxImporterTestConfig(LinalgOnTensorsTosaBackend(), "tosa"), + FX_IMPORTER_TOSA_XFAIL_SET, + FX_IMPORTER_TOSA_CRASHING_SET, ) - xfail_set = LINALG_XFAIL_SET - crashing_set = LINALG_CRASHING_SET - elif args.config == "stablehlo": - config = load_config("JITImporterTestConfig")( - LinalgOnTensorsStablehloBackend(), "stablehlo" + if config == "onnx": + return ( + OnnxBackendTestConfig(RefBackendLinalgOnTensorsBackend()), + ONNX_XFAIL_SET, + ONNX_CRASHING_SET, ) - xfail_set = all_test_unique_names - STABLEHLO_PASS_SET - crashing_set = STABLEHLO_CRASHING_SET - elif args.config == "tosa": - config = load_config("JITImporterTestConfig")( - LinalgOnTensorsTosaBackend(), "tosa" + if config == "onnx_tosa": + return ( + OnnxBackendTestConfig(LinalgOnTensorsTosaBackend(), output_type="tosa"), + ONNX_TOSA_XFAIL_SET, + ONNX_TOSA_CRASHING_SET, + ) + raise ValueError(f'Got invalid config, "{config}". Choices: {CONFIGS}') + + +def _setup_deprecated_config( + config: str, all_test_unique_names: set[str] +) -> tuple[TestConfig, set[str], set[str]]: + print(f"Warning: the selected config, '{config}', is not actively supported.") + import torch_mlir_e2e_test.pt1_configs as _configs + + if config == "linalg": + return ( + _configs.JITImporterTestConfig(RefBackendLinalgOnTensorsBackend()), + LINALG_XFAIL_SET, + LINALG_CRASHING_SET, ) - xfail_set = all_test_unique_names - TOSA_PASS_SET - crashing_set = TOSA_CRASHING_SET - elif args.config == "native_torch": - config = load_config("NativeTorchTestConfig")() - xfail_set = set() - crashing_set = set() - elif args.config == "torchscript": - config = load_config("TorchScriptTestConfig")() - xfail_set = set() - crashing_set = set() - elif args.config == "lazy_tensor_core": - config = load_config("LazyTensorCoreTestConfig")() - xfail_set = LTC_XFAIL_SET - crashing_set = LTC_CRASHING_SET - elif args.config == "fx_importer": - config = load_config("FxImporterTestConfig")(RefBackendLinalgOnTensorsBackend()) - xfail_set = FX_IMPORTER_XFAIL_SET - crashing_set = FX_IMPORTER_CRASHING_SET - elif args.config == "fx_importer_stablehlo": - config = load_config("FxImporterTestConfig")( - LinalgOnTensorsStablehloBackend(), "stablehlo" + if config == "stablehlo": + return ( + _configs.JITImporterTestConfig( + LinalgOnTensorsStablehloBackend(), "stablehlo" + ), + all_test_unique_names - STABLEHLO_PASS_SET, + STABLEHLO_CRASHING_SET, ) - xfail_set = FX_IMPORTER_STABLEHLO_XFAIL_SET - crashing_set = FX_IMPORTER_STABLEHLO_CRASHING_SET - elif args.config == "fx_importer_tosa": - config = load_config("FxImporterTestConfig")( - LinalgOnTensorsTosaBackend(), "tosa" + if config == "tosa": + return ( + _configs.JITImporterTestConfig(LinalgOnTensorsTosaBackend(), "tosa"), + 
all_test_unique_names - TOSA_PASS_SET, + TOSA_CRASHING_SET, ) - xfail_set = FX_IMPORTER_TOSA_XFAIL_SET - crashing_set = FX_IMPORTER_TOSA_CRASHING_SET - elif args.config == "torchdynamo": - # TODO: Enanble runtime verification and extend crashing set. - config = load_config("TorchDynamoTestConfig")( - RefBackendLinalgOnTensorsBackend(generate_runtime_verification=False) + if config == "torchscript": + return ( + _configs.TorchScriptTestConfig(), + set(), + set(), ) - xfail_set = TORCHDYNAMO_XFAIL_SET - crashing_set = TORCHDYNAMO_CRASHING_SET - elif args.config == "onnx": - config = load_config("OnnxBackendTestConfig")( - RefBackendLinalgOnTensorsBackend() + if config == "lazy_tensor_core": + return ( + _configs.LazyTensorCoreTestConfig(), + LTC_XFAIL_SET, + LTC_CRASHING_SET, ) - xfail_set = ONNX_XFAIL_SET - crashing_set = ONNX_CRASHING_SET - elif args.config == "onnx_tosa": - config = load_config("OnnxBackendTestConfig")( - LinalgOnTensorsTosaBackend(), output_type="tosa" + if config == "torchdynamo": + return ( + _configs.TorchDynamoTestConfig( + RefBackendLinalgOnTensorsBackend(generate_runtime_verification=False) + ), + TORCHDYNAMO_XFAIL_SET, + TORCHDYNAMO_CRASHING_SET, ) - xfail_set = ONNX_TOSA_XFAIL_SET - crashing_set = ONNX_TOSA_CRASHING_SET + raise ValueError(f"Unhandled config {config}.") + + +def main(): + args = _get_argparse().parse_args() + + all_test_unique_names = set(test.unique_name for test in GLOBAL_TEST_REGISTRY) + + # Find the selected config. + config, xfail_set, crashing_set = _setup_config(args.config, all_test_unique_names) do_not_attempt = set( args.crashing_tests_to_not_attempt_to_run_and_a_bug_is_filed or [] @@ -237,11 +276,6 @@ def main(): # Report the test results. failed = report_results(results, xfail_set, args.verbose, args.config) - if args.config == "torchdynamo": - print( - "\033[91mWarning: the TorchScript based dynamo support is deprecated. " - "The config for torchdynamo is planned to be removed in the future.\033[0m" - ) if args.ignore_failures: sys.exit(0) sys.exit(1 if failed else 0) diff --git a/projects/e2e/torch_mlir_e2e_test/configs/__init__.py b/projects/e2e/torch_mlir_e2e_test/configs/__init__.py index 1ecd8e1728af..77a874b357d5 100644 --- a/projects/e2e/torch_mlir_e2e_test/configs/__init__.py +++ b/projects/e2e/torch_mlir_e2e_test/configs/__init__.py @@ -3,23 +3,6 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # Also available under a BSD-style license. See LICENSE. -__all__ = ["load_config"] - -from importlib import import_module - -CONFIG_LOCATIONS = { - "LazyTensorCoreTestConfig": "lazy_tensor_core", - "NativeTorchTestConfig": "native_torch", - "OnnxBackendTestConfig": "onnx_backend", - "TorchScriptTestConfig": "torchscript", - "TorchDynamoTestConfig": "torchdynamo", - "JITImporterTestConfig": "jit_importer_backend", - "FxImporterTestConfig": "fx_importer_backend", -} - - -def load_config(name: str) -> type: - source = CONFIG_LOCATIONS.get(name) - assert source is not None, f"Could not find TestConfig named {name}." 
- module = import_module(f".{source}", __package__) - return getattr(module, name) +from .fx_importer_backend import FxImporterTestConfig +from .native_torch import NativeTorchTestConfig +from .onnx_backend import OnnxBackendTestConfig diff --git a/projects/e2e/torch_mlir_e2e_test/pt1_configs/__init__.py b/projects/e2e/torch_mlir_e2e_test/pt1_configs/__init__.py new file mode 100644 index 000000000000..0f88ab202de5 --- /dev/null +++ b/projects/e2e/torch_mlir_e2e_test/pt1_configs/__init__.py @@ -0,0 +1,9 @@ +# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# Also available under a BSD-style license. See LICENSE. + +from .jit_importer_backend import JITImporterTestConfig +from .lazy_tensor_core import LazyTensorCoreTestConfig +from .torchdynamo import TorchDynamoTestConfig +from .torchscript import TorchScriptTestConfig diff --git a/projects/e2e/torch_mlir_e2e_test/configs/jit_importer_backend.py b/projects/e2e/torch_mlir_e2e_test/pt1_configs/jit_importer_backend.py similarity index 98% rename from projects/e2e/torch_mlir_e2e_test/configs/jit_importer_backend.py rename to projects/e2e/torch_mlir_e2e_test/pt1_configs/jit_importer_backend.py index 4f547d531294..4377f9b4b616 100644 --- a/projects/e2e/torch_mlir_e2e_test/configs/jit_importer_backend.py +++ b/projects/e2e/torch_mlir_e2e_test/pt1_configs/jit_importer_backend.py @@ -12,7 +12,7 @@ from torch_mlir_e2e_test.framework import TestConfig, Trace, TraceItem from torch_mlir_e2e_test.utils import convert_annotations_to_placeholders -from .utils import ( +from torch_mlir_e2e_test.configs.utils import ( recursively_convert_to_numpy, recursively_convert_from_numpy, ) diff --git a/projects/e2e/torch_mlir_e2e_test/configs/lazy_tensor_core.py b/projects/e2e/torch_mlir_e2e_test/pt1_configs/lazy_tensor_core.py similarity index 100% rename from projects/e2e/torch_mlir_e2e_test/configs/lazy_tensor_core.py rename to projects/e2e/torch_mlir_e2e_test/pt1_configs/lazy_tensor_core.py diff --git a/projects/e2e/torch_mlir_e2e_test/configs/torchdynamo.py b/projects/e2e/torch_mlir_e2e_test/pt1_configs/torchdynamo.py similarity index 100% rename from projects/e2e/torch_mlir_e2e_test/configs/torchdynamo.py rename to projects/e2e/torch_mlir_e2e_test/pt1_configs/torchdynamo.py diff --git a/projects/e2e/torch_mlir_e2e_test/configs/torchscript.py b/projects/e2e/torch_mlir_e2e_test/pt1_configs/torchscript.py similarity index 100% rename from projects/e2e/torch_mlir_e2e_test/configs/torchscript.py rename to projects/e2e/torch_mlir_e2e_test/pt1_configs/torchscript.py From 1e1ca24bb474960b0bc3fa9b7d8777c13e71b39b Mon Sep 17 00:00:00 2001 From: zjgarvey Date: Mon, 20 Oct 2025 14:04:22 -0700 Subject: [PATCH 13/15] Fix imports in pt1 tests with config changes Signed-off-by: zjgarvey --- projects/pt1/python/test/torchscript_e2e_test/basic.py | 2 +- .../pt1/python/test/torchscript_e2e_test/compilation_failure.py | 2 +- projects/pt1/python/test/torchscript_e2e_test/error_reports.py | 2 +- .../pt1/python/test/torchscript_e2e_test/non_tensor_values.py | 2 +- .../pt1/python/test/torchscript_e2e_test/runtime_failure.py | 2 +- projects/pt1/python/test/torchscript_e2e_test/submodule.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/projects/pt1/python/test/torchscript_e2e_test/basic.py b/projects/pt1/python/test/torchscript_e2e_test/basic.py index 54361f244b21..096855e77f06 100644 --- 
a/projects/pt1/python/test/torchscript_e2e_test/basic.py +++ b/projects/pt1/python/test/torchscript_e2e_test/basic.py @@ -10,7 +10,7 @@ from torch_mlir_e2e_test.framework import run_tests, TestUtils from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY -from torch_mlir_e2e_test.configs.torchscript import TorchScriptTestConfig +from torch_mlir_e2e_test.pt1_configs import TorchScriptTestConfig class MmModule(torch.nn.Module): diff --git a/projects/pt1/python/test/torchscript_e2e_test/compilation_failure.py b/projects/pt1/python/test/torchscript_e2e_test/compilation_failure.py index 0d715b5a65a3..7f1c8d6424a5 100644 --- a/projects/pt1/python/test/torchscript_e2e_test/compilation_failure.py +++ b/projects/pt1/python/test/torchscript_e2e_test/compilation_failure.py @@ -10,7 +10,7 @@ from torch_mlir_e2e_test.framework import run_tests, TestUtils from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY -from torch_mlir_e2e_test.configs.torchscript import TorchScriptTestConfig +from torch_mlir_e2e_test.pt1_configs import TorchScriptTestConfig class MmModule(torch.nn.Module): diff --git a/projects/pt1/python/test/torchscript_e2e_test/error_reports.py b/projects/pt1/python/test/torchscript_e2e_test/error_reports.py index 4aa203f09ef7..62062203ce73 100644 --- a/projects/pt1/python/test/torchscript_e2e_test/error_reports.py +++ b/projects/pt1/python/test/torchscript_e2e_test/error_reports.py @@ -12,7 +12,7 @@ from torch_mlir_e2e_test.framework import run_tests, TestUtils from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY -from torch_mlir_e2e_test.configs.torchscript import TorchScriptTestConfig +from torch_mlir_e2e_test.pt1_configs import TorchScriptTestConfig # CHECK: Unexpected outcome summary: # CHECK: FAIL - "ErroneousModule_basic" diff --git a/projects/pt1/python/test/torchscript_e2e_test/non_tensor_values.py b/projects/pt1/python/test/torchscript_e2e_test/non_tensor_values.py index f84dc112224c..b3c09efc4276 100644 --- a/projects/pt1/python/test/torchscript_e2e_test/non_tensor_values.py +++ b/projects/pt1/python/test/torchscript_e2e_test/non_tensor_values.py @@ -12,7 +12,7 @@ from torch_mlir_e2e_test.framework import run_tests, TestUtils from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY -from torch_mlir_e2e_test.configs.torchscript import TorchScriptTestConfig +from torch_mlir_e2e_test.pt1_configs import TorchScriptTestConfig class NonTensorValuesModule(torch.nn.Module): diff --git a/projects/pt1/python/test/torchscript_e2e_test/runtime_failure.py b/projects/pt1/python/test/torchscript_e2e_test/runtime_failure.py index 7bd17139d5f9..415785472316 100644 --- a/projects/pt1/python/test/torchscript_e2e_test/runtime_failure.py +++ b/projects/pt1/python/test/torchscript_e2e_test/runtime_failure.py @@ -10,7 +10,7 @@ from torch_mlir_e2e_test.framework import run_tests, TestUtils from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY -from torch_mlir_e2e_test.configs.torchscript import TorchScriptTestConfig +from torch_mlir_e2e_test.pt1_configs import TorchScriptTestConfig class MmModule(torch.nn.Module): diff --git a/projects/pt1/python/test/torchscript_e2e_test/submodule.py 
b/projects/pt1/python/test/torchscript_e2e_test/submodule.py index 4a882a0270cd..6656da1b61c5 100644 --- a/projects/pt1/python/test/torchscript_e2e_test/submodule.py +++ b/projects/pt1/python/test/torchscript_e2e_test/submodule.py @@ -10,7 +10,7 @@ from torch_mlir_e2e_test.framework import run_tests, TestUtils from torch_mlir_e2e_test.reporting import report_results from torch_mlir_e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY -from torch_mlir_e2e_test.configs.torchscript import TorchScriptTestConfig +from torch_mlir_e2e_test.pt1_configs import TorchScriptTestConfig class Submodule2(torch.nn.Module): From f6c0a144902e10d94d04f63c435757018bef8128 Mon Sep 17 00:00:00 2001 From: zjgarvey Date: Mon, 20 Oct 2025 14:13:36 -0700 Subject: [PATCH 14/15] update developer docs Signed-off-by: zjgarvey --- docs/adding_an_e2e_test.md | 2 +- docs/development.md | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/adding_an_e2e_test.md b/docs/adding_an_e2e_test.md index 91eee0520f56..99df4fd216fe 100644 --- a/docs/adding_an_e2e_test.md +++ b/docs/adding_an_e2e_test.md @@ -5,7 +5,7 @@ Adding support for a Torch operator in Torch-MLIR should always be accompanied by at least one end-to-end test to make sure the implementation of the op matches the behavior of PyTorch. The tests live in the -`torch-mlir/projects/pt1/python/torch_mlir_e2e_test/test_suite` directory. When adding a new +`torch-mlir/projects/e2e/torch_mlir_e2e_test/test_suite` directory. When adding a new test, choose a file that best matches the op you're testing, and if there is no file that best matches add a new file for your op. diff --git a/docs/development.md b/docs/development.md index 360dff8f9df8..6c947985be0f 100644 --- a/docs/development.md +++ b/docs/development.md @@ -463,10 +463,10 @@ Torch-MLIR has two types of tests: 1. End-to-end execution tests. These compile and run a program and check the result against the expected output from execution on native Torch. These use a homegrown testing framework (see - `projects/pt1/python/torch_mlir_e2e_test/framework.py`) and the test suite - lives at `projects/pt1/python/torch_mlir_e2e_test/test_suite/__init__.py`. - The tests require to build with `TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS` (and - the dependent option `TORCH_MLIR_ENABLE_JIT_IR_IMPORTER`) set to `ON`. + `projects/e2e/torch_mlir_e2e_test/framework.py`) and the test suite + lives at `projects/e2e/torch_mlir_e2e_test/test_suite/__init__.py`. + Some old configs require building with `TORCH_MLIR_ENABLE_PYTORCH_EXTENSIONS` + (and the dependent option `TORCH_MLIR_ENABLE_JIT_IR_IMPORTER`) set to `ON`. 2. Compiler and Python API unit tests. These use LLVM's `lit` testing framework. For example, these might involve using `torch-mlir-opt` to run a pass and @@ -482,7 +482,7 @@ Torch-MLIR has two types of tests: > An `.env` file must be generated via `build_tools/write_env_file.sh` before these commands can be run. -The following assumes you are in the `projects/pt1` directory: +The following assumes you are in the `projects/e2e` directory: ```shell # Run all tests on the reference backend @@ -496,7 +496,7 @@ The following assumes you are in the `projects/pt1` directory: Alternatively, you can run the tests via Python directly: ```shell -cd projects/pt1 +cd projects/e2e python -m e2e_testing.main -f 'AtenEmbeddingBag' ``` @@ -621,10 +621,10 @@ Here are some examples of PRs updating the LLVM and MLIR-HLO submodules: To enable ASAN, pass `-DLLVM_USE_SANITIZER=Address` to CMake. 
This should "just work" with all C++ tools like `torch-mlir-opt`. When running a Python script -such as through `./projects/pt1/tools/e2e_test.sh`, you will need to do: +such as through `./projects/e2e/tools/e2e_test.sh`, you will need to do: ``` -LD_PRELOAD="$(clang -print-file-name=libclang_rt.asan-x86_64.so)" ./projects/pt1/tools/e2e_test.sh -s +LD_PRELOAD="$(clang -print-file-name=libclang_rt.asan-x86_64.so)" ./projects/e2e/tools/e2e_test.sh -s # See instructions here for how to get the libasan path for GCC: # https://stackoverflow.com/questions/48833176/get-location-of-libasan-from-gcc-clang ``` From d84b51dd037ab120977073c1a097d5d0f1ae8f7f Mon Sep 17 00:00:00 2001 From: zjgarvey Date: Mon, 20 Oct 2025 14:22:10 -0700 Subject: [PATCH 15/15] Small nit fixes Signed-off-by: zjgarvey --- projects/e2e/CMakeLists.txt | 2 +- projects/pt1/python/torch_mlir/torchscript.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/projects/e2e/CMakeLists.txt b/projects/e2e/CMakeLists.txt index 1fdd60f89e1d..25b51aedfe7a 100644 --- a/projects/e2e/CMakeLists.txt +++ b/projects/e2e/CMakeLists.txt @@ -1,4 +1,4 @@ -message(STATUS "Building PyTorch1 compatibility project") +message(STATUS "Building end-to-end testing package.") ################################################################################ # Setup python. diff --git a/projects/pt1/python/torch_mlir/torchscript.py b/projects/pt1/python/torch_mlir/torchscript.py index d0392879bf53..cf979838f0f0 100644 --- a/projects/pt1/python/torch_mlir/torchscript.py +++ b/projects/pt1/python/torch_mlir/torchscript.py @@ -5,7 +5,6 @@ from typing import Optional, Sequence, Union, List, Dict, Tuple, Callable, Iterable from enum import Enum -from warnings import warn import sys from io import StringIO @@ -25,6 +24,7 @@ from torch_mlir.jit_ir_importer import ClassAnnotator, ImportOptions, ModuleBuilder from torch_mlir.jit_ir_importer.build_tools.library_generator import generate_library + _example_arg = Union[TensorPlaceholder, torch.Tensor] _example_args_for_one_method = Union[_example_arg, Sequence[_example_arg]] _example_args = Union[_example_args_for_one_method, "ExampleArgs"]
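As a usage reference for the relocated suite (a sketch based on the updated docs above; the test filter and config names are only examples of the flags defined in projects/e2e/e2e_testing/main.py):

cd projects/e2e
# The default config is now fx_importer; -f filters tests by name.
python -m e2e_testing.main -f 'AtenEmbeddingBag'
# Deprecated configs such as torchdynamo still run, but print a warning first.
python -m e2e_testing.main -c torchdynamo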