From 3616eefeddf319aa9b4f8a80c62a5e0f5135058c Mon Sep 17 00:00:00 2001
From: Hariprasad Ravishankar
Date: Fri, 7 Nov 2025 15:54:20 -0500
Subject: [PATCH 1/8] Fix out-of-bounds indexing with transposed conv negative
 padding

---
 lib/Conversion/TorchToLinalg/Linear.cpp       | 73 ++++++++++++++-----
 .../torch_mlir_e2e_test/test_suite/conv.py    | 36 ++++++++-
 .../Conversion/TorchToLinalg/convolution.mlir | 45 ++++++++++--
 3 files changed, 128 insertions(+), 26 deletions(-)

diff --git a/lib/Conversion/TorchToLinalg/Linear.cpp b/lib/Conversion/TorchToLinalg/Linear.cpp
index a3a486d14136..50c5e92f0ee3 100644
--- a/lib/Conversion/TorchToLinalg/Linear.cpp
+++ b/lib/Conversion/TorchToLinalg/Linear.cpp
@@ -1583,7 +1583,6 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
   SmallVector<Value> insertSliceOffsets{c0, c0};
   SmallVector<Value> inputSizes = getTensorSizes(rewriter, loc, input);
-  SmallVector<Value> sliceSizes{inputSizes[0], inputSizes[1]};
 
   // For the case in which the padding dimension value is negative,
   // we will need to shrink the dimension. Note in the PyTorch
@@ -1597,10 +1596,12 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
   Value c2 = rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(2));
   for (size_t i = 0; i < numSpatialDims; i++) {
+    // Calculate inner size: (input_size - 1) * stride + 1
     Value innerSize = rewriter.createOrFold<arith::SubIOp>(loc, inDims[i], c1);
     innerSize = rewriter.createOrFold<arith::MulIOp>(
         loc, innerSize, castIntToIndex(rewriter, loc, strideIntValues[i]));
     innerSize = rewriter.createOrFold<arith::AddIOp>(loc, innerSize, c1);
+    innerSizes.push_back(innerSize);
     Value offset = rewriter.createOrFold<arith::SubIOp>(loc, weightDims[i], c1);
     offset = rewriter.createOrFold<arith::MulIOp>(
         loc, offset, castIntToIndex(rewriter, loc, dilationIntValues[i]));
@@ -1608,8 +1609,14 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
     offset = rewriter.createOrFold<arith::SubIOp>(
         loc, offset, castIntToIndex(rewriter, loc, paddingIntValues[i]));
 
+    // We need to crop or pad from two sides - top & bottom or left & right.
+    // Therefore multiply by 2.
     Value outerSize = rewriter.createOrFold<arith::MulIOp>(loc, offset, c2);
+
+    // Crop or pad based on the sign of offset
     outerSize = rewriter.createOrFold<arith::AddIOp>(loc, outerSize, innerSize);
+
+    // Add the optional output padding
     outerSize = rewriter.createOrFold<arith::AddIOp>(
         loc, outerSize, castIntToIndex(rewriter, loc, outputPaddingIntValues[i]));
@@ -1624,39 +1631,69 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
       auto posOffset = rewriter.createOrFold<arith::MulIOp>(loc, offset, negOneConst);
 
-      // Compute the reduced dimension size due to negative padding.
-      auto sizeReduction =
-          rewriter.createOrFold<arith::MulIOp>(loc, posOffset, c2);
-      sliceSizes.push_back(rewriter.createOrFold<arith::SubIOp>(
-          loc, inputSizes[i + 2], sizeReduction));
-
       extractSliceOffsets.push_back(posOffset);
       insertSliceOffsets.push_back(c0);
     } else {
-      sliceSizes.push_back(inputSizes[i + 2]);
       extractSliceOffsets.push_back(c0);
       insertSliceOffsets.push_back(offset);
     }
   }
 
-  Value initTensor = createInitTensor(rewriter, loc, outerSizes, inputDTy, pad);
 
   // Insert input into allocated tensor
   SmallVector<Value> strideIndexValues{c1, c1};
   for (auto stride : strideIntValues)
     strideIndexValues.push_back(castIntToIndex(rewriter, loc, stride));
 
-  auto insertSliceOpInput = input;
   if (anyDimensionPaddingIsNegative) {
-    insertSliceOpInput = rewriter.create<tensor::ExtractSliceOp>(
+
+    // Some dimensions may need padding and some dimensions need cropping:
+    // 1. Allocate a maxSizes buffer (max of inner and outer for each dim)
+    // 2. Insert the input into the maxSizes buffer at the appropriate offsets
+    //    (a positive insertSliceOffset pads the dimension; zero means no
+    //    padding) and stride
+    // 3. Extract the final outerSizes from the maxSizes buffer
+
+    // Create the "max size" tensor to accommodate both padding and cropping
+    SmallVector<Value> maxSizes{inBatch, inChannels};
+    for (size_t i = 0; i < numSpatialDims; ++i) {
+      Value innerDim = innerSizes[i + 2];
+      Value outerDim = outerSizes[i + 2];
+      Value isPadding = rewriter.create<arith::CmpIOp>(
+          loc, arith::CmpIPredicate::ugt, outerDim, innerDim);
+      Value maxDim =
+          rewriter.create<arith::SelectOp>(loc, isPadding, outerDim, innerDim);
+      maxSizes.push_back(maxDim);
+    }
+
+    Value initMaxTensor =
+        createInitTensor(rewriter, loc, maxSizes, inputDTy, pad);
+
+    // Insert input
+    auto paddedTensor = rewriter.create<tensor::InsertSliceOp>(
         loc, torch_to_linalg::removeSizeInformation(rewriter, loc, input),
-        extractSliceOffsets, sliceSizes, strideIndexValues);
-  }
+        initMaxTensor, insertSliceOffsets, inputSizes, strideIndexValues);
 
-  auto paddedInput = rewriter.create<tensor::InsertSliceOp>(
-      loc,
-      torch_to_linalg::removeSizeInformation(rewriter, loc, insertSliceOpInput),
-      initTensor, insertSliceOffsets, sliceSizes, strideIndexValues);
-  return paddedInput;
+    SmallVector<Value> allOnesStrides(inputSizes.size(), c1);
+
+    // Crop. Extract the final tensor from the "max" tensor
+    auto finalTensor = rewriter.create<tensor::ExtractSliceOp>(
+        loc,
+        torch_to_linalg::removeSizeInformation(rewriter, loc, paddedTensor),
+        extractSliceOffsets, outerSizes, allOnesStrides);
+
+    return finalTensor;
+
+  } else {
+
+    Value initPaddedTensor =
+        createInitTensor(rewriter, loc, outerSizes, inputDTy, pad);
+
+    // Insert the original input into the outer tensor with calculated offsets
+    auto paddedInput = rewriter.create<tensor::InsertSliceOp>(
+        loc, torch_to_linalg::removeSizeInformation(rewriter, loc, input),
+        initPaddedTensor, insertSliceOffsets, inputSizes, strideIndexValues);
+    return paddedInput;
+  }
 }
 
 namespace {
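[Editor's note - not part of the patch] The inner/outer size arithmetic above can be sanity-checked by hand. A minimal sketch in Python, using the shapes from the new 2-D test in this series (the helper name and layout are illustrative, not from the patch):

    # Mirrors the size computation in createTransposedInputPadding.
    def emulated_sizes(in_dim, kernel, stride, padding, dilation=1, out_pad=0):
        inner = (in_dim - 1) * stride + 1            # input dilated by stride
        offset = (kernel - 1) * dilation - padding   # >= 0 pads, < 0 crops
        outer = inner + 2 * offset + out_pad         # final buffer size
        return inner, offset, outer, max(inner, outer)

    # Height: in=4, kernel=3, stride=4, padding=0 -> offset +2, pure padding
    print(emulated_sizes(4, 3, 4, 0))   # (13, 2, 17, 17)
    # Width:  in=7, kernel=3, stride=4, padding=3 -> offset -1, cropping
    print(emulated_sizes(7, 3, 4, 3))   # (25, -1, 23, 25)

The per-dimension max of inner and outer, (17, 25), is the "max size" buffer, and (17, 23) is what the final extract_slice yields - matching the tensor<1x1x17x25xf32> and tensor<1x1x17x23xf32> shapes in the FileCheck test added below.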
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
index 2a1c627f6ee5..cc073db586fd 100644
--- a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
+++ b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
@@ -1988,7 +1988,7 @@ def forward(self, inputVec, weight, bias):
             inputVec,
             weight,
             bias=bias,
-            stride=[1],
+            stride=[4],
             padding=[3],
             dilation=[1],
             transposed=True,
@@ -2034,6 +2034,38 @@ def TransposedConv2dNegativePadding_basic(module, tu: TestUtils):
     module.forward(tu.rand(1, 1, 4, 7), tu.rand(1, 2, 3, 3), tu.rand(2))
 
 
+class TransposedConv2dPositiveAndNegativePadding(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([1, 1, 4, 7], torch.float32, True),
+            ([1, 2, 3, 3], torch.float32, True),
+            ([2], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight, bias):
+        return torch.ops.aten.convolution(
+            inputVec,
+            weight,
+            bias=bias,
+            stride=[4, 4],
+            padding=[0, 3],
+            dilation=[1, 1],
+            transposed=True,
+            output_padding=[0, 0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: TransposedConv2dPositiveAndNegativePadding())
+def TransposedConv2dPositiveAndNegativePadding_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 1, 4, 7), tu.rand(1, 2, 3, 3), tu.rand(2))
+
+
 class TransposedConv3dNegativePadding(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -2052,7 +2084,7 @@ def forward(self, inputVec, weight, bias):
             inputVec,
             weight,
             bias=bias,
-            stride=[1, 1, 1],
+            stride=[4, 4, 4],
             padding=[2, 1, 3],
             dilation=[1, 1, 1],
             transposed=True,
diff --git a/test/Conversion/TorchToLinalg/convolution.mlir b/test/Conversion/TorchToLinalg/convolution.mlir
index 88627c166877..523e93effb35 100644
--- a/test/Conversion/TorchToLinalg/convolution.mlir
+++ b/test/Conversion/TorchToLinalg/convolution.mlir
@@ -152,12 +152,17 @@ func.func @transposedGroupedConvolution2D(%arg0: !torch.vtensor<[1,2,5,7],f32>)
 }
 
 // CHECK-LABEL: func.func @tranConv2dNegativePadding(
-// CHECK-SAME: %[[INPUT_VTENSOR:.*]]: !torch.vtensor<[1,1,4,7],f32>) -> !torch.vtensor<[1,2,6,3],f32>
-// CHECK: %[[IN_TENSOR:.*]] = torch_c.to_builtin_tensor %[[INPUT_VTENSOR]] : !torch.vtensor<[1,1,4,7],f32> -> tensor<1x1x4x7xf32>
-// CHECK: %[[EXTRACTED_SLICE:.*]] = tensor.extract_slice %[[IN_TENSOR]][0, 0, 0, 1] [1, 1, 4, 5] [1, 1, 1, 1] : tensor<1x1x4x7xf32> to tensor<1x1x4x5xf32>
-// CHECK: %[[INSERTED_SLICE:.*]] = tensor.insert_slice %[[EXTRACTED_SLICE]] into %[[INIT_TENSOR:.*]][0, 0, 2, 0] [1, 1, 4, 5] [1, 1, 1, 1] : tensor<1x1x4x5xf32> into tensor<1x1x8x5xf32>
-// CHECK: %[[OUT_TENSOR:.*]] = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%[[INSERTED_SLICE]], %[[WEIGHTS:.*]] : tensor<1x1x8x5xf32>, tensor<2x1x3x3xf32>) outs(%[[INIT_OUT_TENSOR:.*]] : tensor<1x2x6x3xf32>) -> tensor<1x2x6x3xf32>
-// CHECK: %[[OUT_VTENSOR:.*]] = torch_c.from_builtin_tensor %[[OUT_TENSOR]] : tensor<1x2x6x3xf32> -> !torch.vtensor<[1,2,6,3],f32>
+// CHECK-SAME: %[[INPUT_VTENSOR:.*]]: !torch.vtensor<[1,1,4,7],f32>) -> !torch.vtensor<[1,2,6,3],f32> attributes {torch.assume_strict_symbolic_shapes} {
+// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C0F:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[INPUT_TENSOR:.*]] = torch_c.to_builtin_tensor %[[INPUT_VTENSOR]] : !torch.vtensor<[1,1,4,7],f32> -> tensor<1x1x4x7xf32>
+// CHECK: %[[EMPTY_UNSTRIDED_TENSOR:.*]] = tensor.empty() : tensor<1x1x8x7xf32>
+// CHECK: %[[ZEROS_UNSTRIDED_TENSOR:.*]] = linalg.fill ins(%[[C0F]] : f32) outs(%[[EMPTY_UNSTRIDED_TENSOR]] : tensor<1x1x8x7xf32>) -> tensor<1x1x8x7xf32>
+// CHECK: %[[INPUT_UNSTRIDED_TENSOR:.*]] = tensor.insert_slice %[[INPUT_TENSOR]] into %[[ZEROS_UNSTRIDED_TENSOR]][0, 0, 2, 0] [1, 1, 4, 7] [1, 1, 1, 1] : tensor<1x1x4x7xf32> into tensor<1x1x8x7xf32>
+// CHECK: %[[CROPPED_UNSTRIDED_TENSOR:.*]] = tensor.extract_slice %[[INPUT_UNSTRIDED_TENSOR]][0, 0, 0, 1] [1, 1, 8, 5] [1, 1, 1, 1] : tensor<1x1x8x7xf32> to tensor<1x1x8x5xf32>
+// CHECK: %[[OUT_TENSOR:.*]] = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%[[CROPPED_UNSTRIDED_TENSOR]], %[[WEIGHTS:.*]] : tensor<1x1x8x5xf32>, tensor<2x1x3x3xf32>) outs(%[[INIT_OUT_TENSOR:.*]] : tensor<1x2x6x3xf32>) -> tensor<1x2x6x3xf32>
+// CHECK: %[[OUT_VTENSOR:.*]] = torch_c.from_builtin_tensor %[[OUT_TENSOR]] : tensor<1x2x6x3xf32> -> !torch.vtensor<[1,2,6,3],f32>
 func.func @tranConv2dNegativePadding(%arg0: !torch.vtensor<[1, 1, 4, 7],f32>) -> !torch.vtensor<[1, 2, 6, 3],f32> attributes {torch.assume_strict_symbolic_shapes} {
   %int0 = torch.constant.int 0
   %true = torch.constant.bool true
@@ -174,3 +179,31 @@ func.func @tranConv2dNegativePadding(%arg0: !torch.vtensor<[1, 1, 4, 7],f32>) ->
   %6 = torch.aten.convolution %arg0, %0, %1, %2, %3, %4, %true, %5, %int1 : !torch.vtensor<[1, 1, 4, 7],f32>, !torch.vtensor<[1,2,3,3],f32>, !torch.vtensor<[2],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1, 2, 6, 3],f32>
   return %6 : !torch.vtensor<[1, 2, 6, 3],f32>
 }
+
+// CHECK-LABEL: func.func @tranConv2dNegativeAndPositivePadding(
+// CHECK-SAME: %[[INPUT_VTENSOR:.*]]: !torch.vtensor<[1,1,4,7],f32>,
+// CHECK-SAME: %[[WEIGHTS_VTENSOR:.*]]: !torch.vtensor<[1,2,3,3],f32>,
+// CHECK-SAME: %[[BIAS_VTENSOR:.*]]: !torch.vtensor<[2],f32>) -> !torch.vtensor<[1,2,15,21],f32> {
+// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C0F:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[INPUT_TENSOR:.*]] = torch_c.to_builtin_tensor %[[INPUT_VTENSOR]] : !torch.vtensor<[1,1,4,7],f32> -> tensor<1x1x4x7xf32>
+// CHECK: %[[EMPTY_UNSTRIDED_TENSOR:.*]] = tensor.empty() : tensor<1x1x17x25xf32>
+// CHECK: %[[ZEROS_UNSTRIDED_TENSOR:.*]] = linalg.fill ins(%[[C0F]] : f32) outs(%[[EMPTY_UNSTRIDED_TENSOR]] : tensor<1x1x17x25xf32>) -> tensor<1x1x17x25xf32>
+// CHECK: %[[INPUT_UNSTRIDED_TENSOR:.*]] = tensor.insert_slice %[[INPUT_TENSOR]] into %[[ZEROS_UNSTRIDED_TENSOR]][0, 0, 2, 0] [1, 1, 4, 7] [1, 1, 4, 4] : tensor<1x1x4x7xf32> into tensor<1x1x17x25xf32>
+// CHECK: %[[CROPPED_UNSTRIDED_TENSOR:.*]] = tensor.extract_slice %[[INPUT_UNSTRIDED_TENSOR]][0, 0, 0, 1] [1, 1, 17, 23] [1, 1, 1, 1] : tensor<1x1x17x25xf32> to tensor<1x1x17x23xf32>
+// CHECK: %[[OUT_TENSOR:.*]] = linalg.conv_2d_nchw_fchw {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%[[CROPPED_UNSTRIDED_TENSOR]], %[[WEIGHTS:.*]] : tensor<1x1x17x23xf32>, tensor<2x1x3x3xf32>) outs(%[[INIT_OUT_TENSOR:.*]] : tensor<1x2x15x21xf32>) -> tensor<1x2x15x21xf32>
+// CHECK: %[[OUT_VTENSOR:.*]] = torch_c.from_builtin_tensor %[[OUT_TENSOR]] : tensor<1x2x15x21xf32> -> !torch.vtensor<[1,2,15,21],f32>
+func.func @tranConv2dNegativeAndPositivePadding(%arg0: !torch.vtensor<[1,1,4,7],f32>, %arg1: !torch.vtensor<[1,2,3,3],f32>, %arg2: !torch.vtensor<[2],f32>) -> !torch.vtensor<[1,2,15,21],f32> {
+  %int1 = torch.constant.int 1
+  %int3 = torch.constant.int 3
+  %int0 = torch.constant.int 0
+  %int4 = torch.constant.int 4
+  %true = torch.constant.bool true
+  %0 = torch.prim.ListConstruct %int4, %int4 : (!torch.int, !torch.int) -> !torch.list<int>
+  %1 = torch.prim.ListConstruct %int0, %int3 : (!torch.int, !torch.int) -> !torch.list<int>
+  %2 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
+  %3 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
+  %4 = torch.aten.convolution %arg0, %arg1, %arg2, %0, %1, %2, %true, %3, %int1 : !torch.vtensor<[1,1,4,7],f32>, !torch.vtensor<[1,2,3,3],f32>, !torch.vtensor<[2],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,2,15,21],f32>
+  return %4 : !torch.vtensor<[1,2,15,21],f32>
+}
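[Editor's note - not part of the patch] The result types in the new FileCheck test can be cross-checked against stock PyTorch; a minimal sketch:

    import torch
    import torch.nn.functional as F

    # Same configuration as tranConv2dNegativeAndPositivePadding: padding 0
    # on H exercises the pure-padding path, padding 3 on W the cropping path.
    out = F.conv_transpose2d(
        torch.rand(1, 1, 4, 7), torch.rand(1, 2, 3, 3), bias=torch.rand(2),
        stride=(4, 4), padding=(0, 3))
    assert out.shape == (1, 2, 15, 21)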
From 3b8a12d1d6c036e9f06654f188a981bc1e8024dc Mon Sep 17 00:00:00 2001
From: Hariprasad Ravishankar
Date: Fri, 7 Nov 2025 18:28:44 -0500
Subject: [PATCH 2/8] Added additional e2e test for transposedConv1d

---
 .../torch_mlir_e2e_test/test_suite/conv.py    | 32 +++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
index cc073db586fd..c7e78c4131a9 100644
--- a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
+++ b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
@@ -2002,6 +2002,38 @@ def TransposedConv1dNegativePadding_basic(module, tu: TestUtils):
     module.forward(tu.rand(1, 1, 7), tu.rand(1, 2, 3), tu.rand(2))
 
 
+class ConvTranspose1dNegativePaddingLarge(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([1, 17, 5], torch.float32, True),
+            ([17, 6, 3], torch.float32, True),
+            ([6], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight, bias):
+        return torch.ops.aten.convolution(
+            inputVec,
+            weight,
+            bias=bias,
+            stride=[7],
+            padding=[10],
+            dilation=[4],
+            transposed=True,
+            output_padding=[0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: ConvTranspose1dNegativePaddingLarge())
+def ConvTranspose1dNegativePaddingLarge_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 17, 5), tu.rand(17, 6, 3), tu.rand(6))
+
+
 class TransposedConv2dNegativePadding(torch.nn.Module):
     def __init__(self):
         super().__init__()
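[Editor's note - not part of the patch] Worked numbers for this test: with in=5, kernel=3, stride=7, padding=10, dilation=4, the offset is (3 - 1) * 4 - 10 = -2, so both ends of the stride-dilated input (length (5 - 1) * 7 + 1 = 29) are cropped, giving a buffer of 29 - 4 = 25; a forward conv with effective kernel (3 - 1) * 4 + 1 = 9 then yields 25 - 9 + 1 = 17 samples. A quick cross-check against stock PyTorch:

    import torch
    import torch.nn.functional as F

    out = F.conv_transpose1d(torch.rand(1, 17, 5), torch.rand(17, 6, 3),
                             bias=torch.rand(6), stride=7, padding=10,
                             dilation=4)
    assert out.shape == (1, 6, 17)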
From da0f39ccc611aaec3023e9d3743c26d71339e29f Mon Sep 17 00:00:00 2001
From: Hariprasad Ravishankar
Date: Fri, 7 Nov 2025 20:17:04 -0500
Subject: [PATCH 3/8] Filter tests for TOSA and ONNX. Fix deprecation warnings

---
 lib/Conversion/TorchToLinalg/Linear.cpp       | 10 +++++-----
 projects/pt1/e2e_testing/xfail_sets.py        |  4 ++++
 .../torch_mlir_e2e_test/test_suite/conv.py    |  6 +++---
 3 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/lib/Conversion/TorchToLinalg/Linear.cpp b/lib/Conversion/TorchToLinalg/Linear.cpp
index ab9619b26ab8..3f4c63422e14 100644
--- a/lib/Conversion/TorchToLinalg/Linear.cpp
+++ b/lib/Conversion/TorchToLinalg/Linear.cpp
@@ -1594,8 +1594,8 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
       // Make the negative value positive by multiplying by -1.
       anyDimensionPaddingIsNegative = true;
       auto offsetType = offset.getType();
-      auto negOneConst = rewriter.createOrFold<arith::ConstantOp>(
-          loc, offsetType, rewriter.getIntegerAttr(offsetType, -1));
+      auto negOneConst = arith::ConstantOp::create(
+          rewriter, loc, rewriter.getIntegerAttr(offsetType, -1));
       auto posOffset = rewriter.createOrFold<arith::MulIOp>(loc, offset, negOneConst);
 
@@ -1626,10 +1626,10 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
     for (size_t i = 0; i < numSpatialDims; ++i) {
       Value innerDim = innerSizes[i + 2];
       Value outerDim = outerSizes[i + 2];
-      Value isPadding = rewriter.create<arith::CmpIOp>(
+      Value isPadding = rewriter.createOrFold<arith::CmpIOp>(
           loc, arith::CmpIPredicate::ugt, outerDim, innerDim);
-      Value maxDim =
-          rewriter.create<arith::SelectOp>(loc, isPadding, outerDim, innerDim);
+      Value maxDim = rewriter.createOrFold<arith::SelectOp>(loc, isPadding,
+                                                            outerDim, innerDim);
       maxSizes.push_back(maxDim);
     }
 
diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py
index 0e1b3b67d102..87c455011120 100644
--- a/projects/pt1/e2e_testing/xfail_sets.py
+++ b/projects/pt1/e2e_testing/xfail_sets.py
@@ -3961,7 +3961,9 @@
     "TraceModule_empty",
     "TraceUnsignedIntModule_empty",
     "TransposedConv1dNegativePadding_basic",
+    "TransposedConv1dNegativePaddingLarge_basic",
     "TransposedConv2dNegativePadding_basic",
+    "TransposedConv2dPositiveAndNegativePadding_basic",
     "TransposedConv3dNegativePadding_basic",
     "UnsafeViewCollapseDynamicWithAtenSizeIntModule_basic",
     "InterpolateDynamicModule_sizes_nearest",
@@ -5039,7 +5041,9 @@
     "TraceUnsignedIntModule_basic",
     "TraceUnsignedIntModule_empty",
     "TransposedConv1dNegativePadding_basic",
+    "TransposedConv1dNegativePaddingLarge_basic",
     "TransposedConv2dNegativePadding_basic",
+    "TransposedConv2dPositiveAndNegativePadding_basic",
     "TransposedConv3dNegativePadding_basic",
     "TupleModule_basic",
     "TypeAsDifferentModule_basic",
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
index c7e78c4131a9..f3fd695bbeed 100644
--- a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
+++ b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
@@ -2002,7 +2002,7 @@ def TransposedConv1dNegativePadding_basic(module, tu: TestUtils):
     module.forward(tu.rand(1, 1, 7), tu.rand(1, 2, 3), tu.rand(2))
 
 
-class ConvTranspose1dNegativePaddingLarge(torch.nn.Module):
+class TransposedConv1dNegativePaddingLarge(torch.nn.Module):
     def __init__(self):
         super().__init__()
 
@@ -2029,8 +2029,8 @@ def forward(self, inputVec, weight, bias):
     )
 
 
-@register_test_case(module_factory=lambda: ConvTranspose1dNegativePaddingLarge())
-def ConvTranspose1dNegativePaddingLarge_basic(module, tu: TestUtils):
+@register_test_case(module_factory=lambda: TransposedConv1dNegativePaddingLarge())
+def TransposedConv1dNegativePaddingLarge_basic(module, tu: TestUtils):
     module.forward(tu.rand(1, 17, 5), tu.rand(17, 6, 3), tu.rand(6))
From e5f1fe5cd0cb3245cb3c68437ec5be972e0c15fb Mon Sep 17 00:00:00 2001
From: Hariprasad Ravishankar
Date: Fri, 7 Nov 2025 15:54:20 -0500
Subject: [PATCH 4/8] Fix deprecated rewriter.create usages

---
 lib/Conversion/TorchToLinalg/Linear.cpp | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/Conversion/TorchToLinalg/Linear.cpp b/lib/Conversion/TorchToLinalg/Linear.cpp
index 3f4c63422e14..6856db3cab30 100644
--- a/lib/Conversion/TorchToLinalg/Linear.cpp
+++ b/lib/Conversion/TorchToLinalg/Linear.cpp
@@ -1628,8 +1628,8 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
       Value outerDim = outerSizes[i + 2];
       Value isPadding = rewriter.createOrFold<arith::CmpIOp>(
           loc, arith::CmpIPredicate::ugt, outerDim, innerDim);
-      Value maxDim = rewriter.createOrFold<arith::SelectOp>(loc, isPadding,
-                                                            outerDim, innerDim);
+      Value maxDim =
+          rewriter.createOrFold<arith::SelectOp>(loc, isPadding, outerDim, innerDim);
       maxSizes.push_back(maxDim);
     }
 
@@ -1637,14 +1637,14 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
         createInitTensor(rewriter, loc, maxSizes, inputDTy, pad);
 
     // Insert input
-    auto paddedTensor = rewriter.create<tensor::InsertSliceOp>(
+    auto paddedTensor = tensor::InsertSliceOp::create(rewriter,
         loc, torch_to_linalg::removeSizeInformation(rewriter, loc, input),
         initMaxTensor, insertSliceOffsets, inputSizes, strideIndexValues);
 
     SmallVector<Value> allOnesStrides(inputSizes.size(), c1);
 
    // Crop. Extract the final tensor from the "max" tensor
-    auto finalTensor = rewriter.create<tensor::ExtractSliceOp>(
+    auto finalTensor = tensor::ExtractSliceOp::create(rewriter,
         loc,
         torch_to_linalg::removeSizeInformation(rewriter, loc, paddedTensor),
         extractSliceOffsets, outerSizes, allOnesStrides);
@@ -1657,7 +1657,7 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
         createInitTensor(rewriter, loc, outerSizes, inputDTy, pad);
 
     // Insert the original input into the outer tensor with calculated offsets
-    auto paddedInput = rewriter.create<tensor::InsertSliceOp>(
+    auto paddedInput = tensor::InsertSliceOp::create(rewriter,
         loc, torch_to_linalg::removeSizeInformation(rewriter, loc, input),
         initPaddedTensor, insertSliceOffsets, inputSizes, strideIndexValues);
     return paddedInput;
From b5ec9a4aec0791b215d62f10640c23d83c77793e Mon Sep 17 00:00:00 2001
From: Hariprasad Ravishankar
Date: Mon, 10 Nov 2025 17:12:33 -0500
Subject: [PATCH 5/8] Fix pre-commit failure

---
 lib/Conversion/TorchToLinalg/Linear.cpp | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/lib/Conversion/TorchToLinalg/Linear.cpp b/lib/Conversion/TorchToLinalg/Linear.cpp
index 6856db3cab30..8c6f2c3efa28 100644
--- a/lib/Conversion/TorchToLinalg/Linear.cpp
+++ b/lib/Conversion/TorchToLinalg/Linear.cpp
@@ -1628,8 +1628,8 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
       Value outerDim = outerSizes[i + 2];
       Value isPadding = rewriter.createOrFold<arith::CmpIOp>(
           loc, arith::CmpIPredicate::ugt, outerDim, innerDim);
-      Value maxDim =
-          rewriter.createOrFold<arith::SelectOp>(loc, isPadding, outerDim, innerDim);
+      Value maxDim = rewriter.createOrFold<arith::SelectOp>(loc, isPadding,
+                                                            outerDim, innerDim);
       maxSizes.push_back(maxDim);
     }
 
@@ -1637,15 +1637,16 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
         createInitTensor(rewriter, loc, maxSizes, inputDTy, pad);
 
     // Insert input
-    auto paddedTensor = tensor::InsertSliceOp::create(rewriter,
-        loc, torch_to_linalg::removeSizeInformation(rewriter, loc, input),
+    auto paddedTensor = tensor::InsertSliceOp::create(
+        rewriter, loc,
+        torch_to_linalg::removeSizeInformation(rewriter, loc, input),
         initMaxTensor, insertSliceOffsets, inputSizes, strideIndexValues);
 
     SmallVector<Value> allOnesStrides(inputSizes.size(), c1);
 
     // Crop. Extract the final tensor from the "max" tensor
-    auto finalTensor = tensor::ExtractSliceOp::create(rewriter,
-        loc,
+    auto finalTensor = tensor::ExtractSliceOp::create(
+        rewriter, loc,
         torch_to_linalg::removeSizeInformation(rewriter, loc, paddedTensor),
         extractSliceOffsets, outerSizes, allOnesStrides);
 
@@ -1657,8 +1658,9 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
         createInitTensor(rewriter, loc, outerSizes, inputDTy, pad);
 
     // Insert the original input into the outer tensor with calculated offsets
-    auto paddedInput = tensor::InsertSliceOp::create(rewriter,
-        loc, torch_to_linalg::removeSizeInformation(rewriter, loc, input),
+    auto paddedInput = tensor::InsertSliceOp::create(
+        rewriter, loc,
+        torch_to_linalg::removeSizeInformation(rewriter, loc, input),
         initPaddedTensor, insertSliceOffsets, inputSizes, strideIndexValues);
     return paddedInput;
   }
From 6bffa30ac94ae29085001569ae67ec68ccd1e610 Mon Sep 17 00:00:00 2001
From: Hariprasad Ravishankar
Date: Tue, 11 Nov 2025 12:45:47 -0500
Subject: [PATCH 6/8] Addressed Ivan's feedback

---
 lib/Conversion/TorchToLinalg/Linear.cpp       | 25 +++++++++----
 projects/pt1/e2e_testing/xfail_sets.py        |  2 ++
 .../torch_mlir_e2e_test/test_suite/conv.py    | 36 +++++++++++++++++--
 3 files changed, 55 insertions(+), 8 deletions(-)

diff --git a/lib/Conversion/TorchToLinalg/Linear.cpp b/lib/Conversion/TorchToLinalg/Linear.cpp
index 8c6f2c3efa28..9c3da6405d59 100644
--- a/lib/Conversion/TorchToLinalg/Linear.cpp
+++ b/lib/Conversion/TorchToLinalg/Linear.cpp
@@ -1538,6 +1538,25 @@ class ConvertAtenConvolutionOp : public OpConversionPattern<AtenConvolutionOp> {
 };
 } // namespace
 
+/*
+ * Calculates the dimensions and offsets needed to emulate a transposed
+ * convolution (like PyTorch's ConvTranspose2d) using a standard
+ * forward convolution.
+ *
+ * This involves creating a new tensor by:
+ * 1. Calculating `innerSizes`: the input size after dilation by `stride`.
+ *    innerSize[i] = (inDim[i] - 1) * stride[i] + 1
+ *
+ * 2. Calculating `outerSizes`: the final padded tensor size.
+ *    offset[i] = (weightDim[i] - 1) * dilation[i] - padding[i]
+ *    outerSize[i] = innerSize[i] + (2 * offset[i]) + outputPadding[i]
+ *
+ * If `offset[i]` is negative, this is treated as *cropping* the
+ * `innerSizes` tensor. This function calculates the
+ * `insertSliceOffsets` (padding) and `extractSliceOffsets` (cropping)
+ * to correctly place the (potentially cropped) inner tensor within the
+ * new outer tensor.
+ */
 Value ConvertAtenConvolutionOp::createTransposedInputPadding(
     Value inBatch, Value inChannels, SmallVector<Value> &inDims,
     SmallVector<Value> &weightDims, SmallVector<Value> &paddingIntValues,
@@ -1552,12 +1571,6 @@ Value ConvertAtenConvolutionOp::createTransposedInputPadding(
 
   SmallVector<Value> inputSizes = getTensorSizes(rewriter, loc, input);
 
-  // For the case in which the padding dimension value is negative,
-  // we will need to shrink the dimension. Note in the PyTorch
-  // ConvTranspose2d operator documentation that the padding is
-  // defined by dilation * (kernel_size - 1) - padding. If the
-  // resulting padding is negative, PyTorch will extract elements
-  // from both sides of the dimension.
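[Editor's note - not part of the patch] The new unit-stride test below isolates the cropping logic: with stride=1 the inner size equals the input length (7), the offset is (3 - 1) * 1 - 3 = -1, the outer size is 7 - 2 = 5, and the output length 5 - 3 + 1 = 3, exactly the formulas in the doc comment added above. A quick cross-check:

    import torch
    import torch.nn.functional as F

    out = F.conv_transpose1d(torch.rand(1, 1, 7), torch.rand(1, 2, 3),
                             bias=torch.rand(2), stride=1, padding=3)
    assert out.shape == (1, 2, 3)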
   SmallVector<Value> extractSliceOffsets{c0, c0};
   bool anyDimensionPaddingIsNegative = false;
diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py
index 87c455011120..952d09620477 100644
--- a/projects/pt1/e2e_testing/xfail_sets.py
+++ b/projects/pt1/e2e_testing/xfail_sets.py
@@ -3961,6 +3961,7 @@
     "TraceModule_empty",
     "TraceUnsignedIntModule_empty",
     "TransposedConv1dNegativePadding_basic",
+    "TransposedConv1dNegativePaddingUnitStride_basic",
     "TransposedConv1dNegativePaddingLarge_basic",
     "TransposedConv2dNegativePadding_basic",
     "TransposedConv2dPositiveAndNegativePadding_basic",
@@ -5042,6 +5042,7 @@
     "TraceUnsignedIntModule_basic",
     "TraceUnsignedIntModule_empty",
     "TransposedConv1dNegativePadding_basic",
+    "TransposedConv1dNegativePaddingUnitStride_basic",
     "TransposedConv1dNegativePaddingLarge_basic",
     "TransposedConv2dNegativePadding_basic",
     "TransposedConv2dPositiveAndNegativePadding_basic",
     "TransposedConv3dNegativePadding_basic",
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
index f3fd695bbeed..16b62901d137 100644
--- a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
+++ b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
@@ -2002,6 +2002,38 @@ def TransposedConv1dNegativePadding_basic(module, tu: TestUtils):
     module.forward(tu.rand(1, 1, 7), tu.rand(1, 2, 3), tu.rand(2))
 
 
+class TransposedConv1dNegativePaddingUnitStride(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([1, 1, 7], torch.float32, True),
+            ([1, 2, 3], torch.float32, True),
+            ([2], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight, bias):
+        return torch.ops.aten.convolution(
+            inputVec,
+            weight,
+            bias=bias,
+            stride=[1],
+            padding=[3],
+            dilation=[1],
+            transposed=True,
+            output_padding=[0],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: TransposedConv1dNegativePaddingUnitStride())
+def TransposedConv1dNegativePaddingUnitStride_basic(module, tu: TestUtils):
+    module.forward(tu.rand(1, 1, 7), tu.rand(1, 2, 3), tu.rand(2))
+
+
 class TransposedConv1dNegativePaddingLarge(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -2116,9 +2148,9 @@ def forward(self, inputVec, weight, bias):
             inputVec,
             weight,
             bias=bias,
-            stride=[4, 4, 4],
+            stride=[1, 5, 3],
             padding=[2, 1, 3],
-            dilation=[1, 1, 1],
+            dilation=[1, 2, 1],
             transposed=True,
             output_padding=[0, 0, 0],
             groups=1,

From 6fbd5510dc8e5dddcacc599684839c7f63107dc3 Mon Sep 17 00:00:00 2001
From: Hariprasad Ravishankar
Date: Tue, 11 Nov 2025 16:21:21 -0500
Subject: [PATCH 7/8] Add dynamic dims testpoint

---
 projects/pt1/e2e_testing/xfail_sets.py        |  4 ++--
 .../torch_mlir_e2e_test/test_suite/conv.py    | 10 ++++++----
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py
index 952d09620477..a02de3fa195b 100644
--- a/projects/pt1/e2e_testing/xfail_sets.py
+++ b/projects/pt1/e2e_testing/xfail_sets.py
@@ -3961,7 +3961,7 @@
     "TraceModule_empty",
     "TraceUnsignedIntModule_empty",
     "TransposedConv1dNegativePadding_basic",
-    "TransposedConv1dNegativePaddingUnitStride_basic",
+    "TransposedConv1dNegativePaddingUnitStrideDyn_basic",
     "TransposedConv1dNegativePaddingLarge_basic",
     "TransposedConv2dNegativePadding_basic",
     "TransposedConv2dPositiveAndNegativePadding_basic",
@@ -5042,7 +5042,7 @@
     "TraceUnsignedIntModule_basic",
     "TraceUnsignedIntModule_empty",
     "TransposedConv1dNegativePadding_basic",
"TransposedConv1dNegativePaddingUnitStride_basic", + "TransposedConv1dNegativePaddingUnitStrideDyn_basic", "TransposedConv1dNegativePaddingLarge_basic", "TransposedConv2dNegativePadding_basic", "TransposedConv2dPositiveAndNegativePadding_basic", diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py index 16b62901d137..c11e9b7470c1 100644 --- a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py +++ b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py @@ -2002,7 +2002,7 @@ def TransposedConv1dNegativePadding_basic(module, tu: TestUtils): module.forward(tu.rand(1, 1, 7), tu.rand(1, 2, 3), tu.rand(2)) -class TransposedConv1dNegativePaddingUnitStride(torch.nn.Module): +class TransposedConv1dNegativePaddingUnitStrideDyn(torch.nn.Module): def __init__(self): super().__init__() @@ -2010,7 +2010,7 @@ def __init__(self): @annotate_args( [ None, - ([1, 1, 7], torch.float32, True), + ([1, 1, -1], torch.float32, True), ([1, 2, 3], torch.float32, True), ([2], torch.float32, True), ] @@ -2029,8 +2029,10 @@ def forward(self, inputVec, weight, bias): ) -@register_test_case(module_factory=lambda: TransposedConv1dNegativePaddingUnitStride()) -def TransposedConv1dNegativePaddingUnitStride_basic(module, tu: TestUtils): +@register_test_case( + module_factory=lambda: TransposedConv1dNegativePaddingUnitStrideDyn() +) +def TransposedConv1dNegativePaddingUnitStrideDyn_basic(module, tu: TestUtils): module.forward(tu.rand(1, 1, 7), tu.rand(1, 2, 3), tu.rand(2)) From aa843509479076815511f6cfca7a0a489535c3da Mon Sep 17 00:00:00 2001 From: Hariprasad Ravishankar Date: Tue, 11 Nov 2025 16:53:51 -0500 Subject: [PATCH 8/8] Filter dynamic dims test for ONNX backend --- projects/pt1/e2e_testing/xfail_sets.py | 1 + projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py index a02de3fa195b..356ab8dec558 100644 --- a/projects/pt1/e2e_testing/xfail_sets.py +++ b/projects/pt1/e2e_testing/xfail_sets.py @@ -3267,6 +3267,7 @@ "TraceSignedIntModule_basic", "TraceUnsignedIntModule_basic", "TraceUnsignedIntModule_empty", + "TransposedConv1dNegativePaddingUnitStrideDyn_basic", "UniformModule_basic", "UniformNoCorrelationModule_basic", "UniformStaticShapeModule_basic", diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py index c11e9b7470c1..25c6b03f5424 100644 --- a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py +++ b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py @@ -2010,7 +2010,7 @@ def __init__(self): @annotate_args( [ None, - ([1, 1, -1], torch.float32, True), + ([-1, -1, -1], torch.float32, True), ([1, 2, 3], torch.float32, True), ([2], torch.float32, True), ]