 // RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.matmul tile-sizes=2,4,8 vectorize vectorize-contraction-to=matrixintrinsics unroll-vector-transfers=true" -split-input-file | FileCheck %s --check-prefix=CHECK-INTRINSIC
 // RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.matmul tile-sizes=16,32,64 promote promote-full-tile-pad register-tile-sizes=2,4,8 vectorize vectorize-contraction-to=outerproduct split-transfers=true unroll-vector-transfers=false" -split-input-file | FileCheck %s --check-prefix=CHECK-OUTER
 // RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.matmul tile-sizes=16,32,64 tile-interchange=1,2,0 generalize iterator-interchange=0,2,1" -split-input-file | FileCheck %s --check-prefix=CHECK-INTERCHANGE
-// RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.matmul tile-sizes=16,32,64 pad pack-paddings=1,1,0 hoist-paddings=3,3,0" -split-input-file | FileCheck %s --check-prefix=CHECK-PAD
-// RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.matmul tile-sizes=16,32,64 fuse pad vectorize" -split-input-file | FileCheck %s --check-prefix=CHECK-FUSE
-// RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=conv anchor-op=linalg.conv_2d_nhwc_hwcf tile-sizes=1,1,8,32,1,1,8 fuse pad decompose vectorize vectorize-padding" -split-input-file | FileCheck %s --check-prefix=CHECK-DECOMP
+// RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.matmul tile-sizes=16,32,64 pad padding-values=0.:f32,0.:f32,0.:f32 pack-paddings=1,1,0 hoist-paddings=3,3,0" -split-input-file | FileCheck %s --check-prefix=CHECK-PAD
+// RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.matmul tile-sizes=16,32,64 fuse pad padding-values=0.:f32,0.:f32,0.:f32 vectorize" -split-input-file | FileCheck %s --check-prefix=CHECK-FUSE
+// RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=conv anchor-op=linalg.conv_2d_nhwc_hwcf tile-sizes=1,1,8,32,1,1,8 fuse pad padding-values=0.:f32,0.:f32,0.:f32 decompose vectorize vectorize-padding" -split-input-file | FileCheck %s --check-prefix=CHECK-DECOMP
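 // Each RUN line exercises the test strategy pass with a different option set and
 // verifies the output under its own FileCheck prefix; -split-input-file runs the
 // cases separated by // ----- below independently.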
 
 // CHECK-INTRINSIC: func @matmul(
 // CHECK-OUTER: func @matmul(
-func @matmul(%arg0: memref<72x72xf32>, %arg1: memref<72x72xf32>, %arg2: memref<72x72xf32>) {
+func.func @matmul(%arg0: memref<72x72xf32>, %arg1: memref<72x72xf32>, %arg2: memref<72x72xf32>) {
 
   // Check the matrix intrinsic lowering is triggered.
   // CHECK-INTRINSIC: vector.matrix_multiply
@@ -17,13 +17,13 @@ func @matmul(%arg0: memref<72x72xf32>, %arg1: memref<72x72xf32>, %arg2: memref<7
   // Check the outer product lowering is triggered.
   // CHECK-OUTER: vector.outerproduct {{.*}} : vector<2xf32>, vector<4xf32>
   linalg.matmul ins(%arg0, %arg1: memref<72x72xf32>, memref<72x72xf32>) outs(%arg2: memref<72x72xf32>)
-  return
+  func.return
 }
 
 // -----
 
 // CHECK-INTERCHANGE: func @matmul(
-func @matmul(%arg0: tensor<72x72xf32>, %arg1: tensor<72x72xf32>, %arg2: tensor<72x72xf32>) -> tensor<72x72xf32> {
+func.func @matmul(%arg0: tensor<72x72xf32>, %arg1: tensor<72x72xf32>, %arg2: tensor<72x72xf32>) -> tensor<72x72xf32> {
   // CHECK-INTERCHANGE-DAG: %[[C16:.*]] = arith.constant 16
   // CHECK-INTERCHANGE-DAG: %[[C32:.*]] = arith.constant 32
   // CHECK-INTERCHANGE-DAG: %[[C64:.*]] = arith.constant 64
@@ -37,15 +37,15 @@ func @matmul(%arg0: tensor<72x72xf32>, %arg1: tensor<72x72xf32>, %arg2: tensor<7
   // CHECK-INTERCHANGE: linalg.generic
   // CHECK-INTERCHANGE-SAME: iterator_types = ["parallel", "reduction", "parallel"]
   %0 = linalg.matmul ins(%arg0, %arg1: tensor<72x72xf32>, tensor<72x72xf32>) outs(%arg2: tensor<72x72xf32>) -> tensor<72x72xf32>
-  return %0 : tensor<72x72xf32>
+  func.return %0 : tensor<72x72xf32>
 }
 
 // -----
 
 // CHECK-PAD-DAG: #[[MAP0:[0-9a-z]+]] = affine_map<(d0) -> (-d0 + 72, 16)>
 
 // CHECK-PAD: func @matmul(
-func @matmul(%arg0: tensor<72x72xf32>, %arg1: tensor<72x72xf32>, %arg2: tensor<72x72xf32>) -> tensor<72x72xf32> {
+func.func @matmul(%arg0: tensor<72x72xf32>, %arg1: tensor<72x72xf32>, %arg2: tensor<72x72xf32>) -> tensor<72x72xf32> {
 
   // Check the padding of the input operands has been hoisted out of the tile loop nest.
   // CHECK-PAD-COUNT-2: tensor.pad %{{.*}} nofold
@@ -56,13 +56,13 @@ func @matmul(%arg0: tensor<72x72xf32>, %arg1: tensor<72x72xf32>, %arg2: tensor<7
   // CHECK-PAD-COUNT-2: scf.for
   // CHECK-PAD: linalg.matmul
   %0 = linalg.matmul ins(%arg0, %arg1: tensor<72x72xf32>, tensor<72x72xf32>) outs(%arg2: tensor<72x72xf32>) -> tensor<72x72xf32>
-  return %0 : tensor<72x72xf32>
+  func.return %0 : tensor<72x72xf32>
 }
 
 // -----
 
 // CHECK-FUSE: func @matmul(
-func @matmul(%arg0: tensor<72x72xf32>, %arg1: tensor<72x72xf32>, %arg2: tensor<72x72xf32>) -> tensor<72x72xf32> {
+func.func @matmul(%arg0: tensor<72x72xf32>, %arg1: tensor<72x72xf32>, %arg2: tensor<72x72xf32>) -> tensor<72x72xf32> {
 
   // Check that the padding and vectorization apply to the fill operation due to the empty anchor op string.
   // CHECK-FUSE: %[[CST:.*]] = arith.constant dense<0.000000e+00>
@@ -73,13 +73,13 @@ func @matmul(%arg0: tensor<72x72xf32>, %arg1: tensor<72x72xf32>, %arg2: tensor<7
   // Check the matmul is padded and vectorized despite the empty anchor op string.
   // CHECK-FUSE: vector.outerproduct
   %1 = linalg.matmul ins(%arg0, %arg1: tensor<72x72xf32>, tensor<72x72xf32>) outs(%0: tensor<72x72xf32>) -> tensor<72x72xf32>
-  return %1 : tensor<72x72xf32>
+  func.return %1 : tensor<72x72xf32>
 }
 
 // -----
 
 // CHECK-DECOMP: func @conv(
-func @conv(%arg0: tensor<8x18x17x32xf32>, %arg1: tensor<3x3x32x64xf32>, %arg2: tensor<8x16x15x64xf32>) -> tensor<8x16x15x64xf32> {
+func.func @conv(%arg0: tensor<8x18x17x32xf32>, %arg1: tensor<3x3x32x64xf32>, %arg2: tensor<8x16x15x64xf32>) -> tensor<8x16x15x64xf32> {
   %cst = arith.constant 0.000000e+00 : f32
   %0 = linalg.fill ins(%cst : f32) outs(%arg2 : tensor<8x16x15x64xf32>) -> tensor<8x16x15x64xf32>
 
@@ -88,5 +88,5 @@ func @conv(%arg0: tensor<8x18x17x32xf32>, %arg1: tensor<3x3x32x64xf32>, %arg2: t
   // CHECK-DECOMP: vector.outerproduct
   // CHECK-DECOMP: vector.transfer_write {{.*}}: vector<1x8x32xf32>, tensor<1x1x?x32xf32>
   %1 = linalg.conv_2d_nhwc_hwcf {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<8x18x17x32xf32>, tensor<3x3x32x64xf32>) outs(%0 : tensor<8x16x15x64xf32>) -> tensor<8x16x15x64xf32>
-  return %1 : tensor<8x16x15x64xf32>
+  func.return %1 : tensor<8x16x15x64xf32>
 }
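
The new padding-values option makes the padding constant for each operand an explicit pass option, one value per operand (here 0.:f32 for the two inputs and the output). That constant surfaces as the tensor.yield value of the tensor.pad nofold ops checked above. A minimal sketch of the resulting IR shape, with hypothetical SSA names (%slice, %h0, %h1) and tile sizes:

  %cst = arith.constant 0.000000e+00 : f32
  // Pad a dynamically sized tile up to a static 16x64 tile shape; the
  // yielded %cst is the 0.:f32 supplied via padding-values.
  %padded = tensor.pad %slice nofold low[0, 0] high[%h0, %h1] {
  ^bb0(%i: index, %j: index):
    tensor.yield %cst : f32
  } : tensor<?x?xf32> to tensor<16x64xf32>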