#include "mlir/IR/TypeUtilities.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "triton/Dialect/Triton/IR/Dialect.h"
#include "triton/Dialect/Triton/IR/Types.h"
#include "triton/Dialect/TritonGPU/IR/Attributes.h"
#include "triton/Dialect/TritonGPU/IR/Dialect.h"
#include "triton/Dialect/TritonGPU/IR/TritonGPUInterfaces.h"
#include "triton/Dialect/TritonGPU/Transforms/Utility.h"
#include "triton/Dialect/TritonNvidiaGPU/IR/Dialect.h"
#include "triton/Dialect/TritonNvidiaGPU/Transforms/Passes.h"
#include "triton/Dialect/TritonNvidiaGPU/Transforms/TMAUtilities.h"

namespace {

using namespace mlir;

namespace ttng = triton::nvidia_gpu;
namespace ttg = triton::gpu;
namespace tt = triton;

#define GEN_PASS_CLASSES
#include "triton/Dialect/TritonNvidiaGPU/Transforms/Passes.h.inc"

// If we don't know the effects of the op, we add all possible effects.
static void addAllValuelessEffects(
    SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
  effects.emplace_back(MemoryEffects::Effect::get<MemoryEffects::Read>());
  effects.emplace_back(MemoryEffects::Effect::get<MemoryEffects::Write>());
  effects.emplace_back(MemoryEffects::Effect::get<MemoryEffects::Allocate>());
  effects.emplace_back(MemoryEffects::Effect::get<MemoryEffects::Free>());
}

static bool
collectEffects(Operation *op,
               SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
  // Collect the effect instances of the operation. Note that the
  // implementation of getEffects erases all effect instances that have a type
  // other than the template parameter, so we collect them first in a local
  // buffer and then copy.
  if (auto iface = dyn_cast<MemoryEffectOpInterface>(op)) {
    SmallVector<MemoryEffects::EffectInstance> localEffects;
    iface.getEffects(localEffects);
    llvm::append_range(effects, localEffects);
    return true;
  }
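  // Ops with recursive memory effects derive their effects from the ops
  // nested in their regions, so aggregate the effects of every nested op.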
  if (op->hasTrait<OpTrait::HasRecursiveMemoryEffects>()) {
    for (auto &region : op->getRegions()) {
      for (auto &block : region) {
        for (auto &innerOp : block)
          if (!collectEffects(&innerOp, effects))
            return false;
      }
    }
    return true;
  }

  // If the op does not implement the interface, be conservative and assume it
  // can have any possible effect.
  addAllValuelessEffects(effects);
  return false;
}

// Sink tmem_loads as close to their use as possible to reduce register
// pressure.
static void sinkLoad(ttng::TMEMLoadOp load, Operation *cvt) {
  Operation *insertBefore = nullptr;
  Operation *next = cvt->getNextNode();
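  // Walk forward from the convert op and sink the load and convert just
  // before the first op that either uses the converted value or has a
  // conflicting memory effect.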
  while (next && !next->hasTrait<OpTrait::IsTerminator>()) {
    insertBefore = next;
    bool dep = false;
    for (auto operand : getNestedOperands(next)) {
      if (operand == cvt->getResult(0)) {
        dep = true;
        break;
      }
    }
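    // Conservatively treat any write or allocation on the default resource or
    // on tensor memory as a dependency that blocks further sinking.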
    if (!isMemoryEffectFree(next)) {
      SmallVector<MemoryEffects::EffectInstance> effects;
      collectEffects(next, effects);
      for (auto effect : effects) {
        if (effect.getEffect() ==
                MemoryEffects::Effect::get<MemoryEffects::Write>() ||
            effect.getEffect() ==
                MemoryEffects::Effect::get<MemoryEffects::Allocate>()) {
          if (effect.getResource() ==
                  mlir::SideEffects::DefaultResource::get() ||
              effect.getResource() ==
                  mlir::triton::nvidia_gpu::TensorMemory::get()) {
            dep = true;
            break;
          }
        }
      }
    }
    if (dep)
      break;
    next = next->getNextNode();
  }
  if (insertBefore) {
    load->moveBefore(insertBefore);
    cvt->moveBefore(insertBefore);
  }
}

// clang-format off
// Converts:
// %l = ttng.tmem_load %o : !ttg.memdesc<128x256xf32, #tmem, #ttng.tensor_memory, mutable> -> tensor<128x256xf32, #blocked>
// %r = tt.reshape %l : tensor<128x256xf32, #blocked> -> tensor<128x2x128xf32, #blocked4>
// %t = tt.trans %r {order = array<i32: 0, 2, 1>} : tensor<128x2x128xf32, #blocked4> -> tensor<128x128x2xf32, #blocked5>
// %outLHS, %outRHS = tt.split %t : tensor<128x128x2xf32, #blocked5> -> tensor<128x128xf32, #blocked2>
// To:
// %o0 = ttng.tmem_subslice %o { N = 0 } : !ttg.memdesc<128x256xf32, #tmem, #ttng.tensor_memory, mutable> -> !ttg.memdesc<128x128xf32, #tmem, #ttng.tensor_memory, mutable>
// %outLHS = ttng.tmem_load %o0 : !ttg.memdesc<128x128xf32, #tmem, #ttng.tensor_memory, mutable> -> tensor<128x128xf32, #blocked>
// %o1 = ttng.tmem_subslice %o { N = 128 } : !ttg.memdesc<128x256xf32, #tmem, #ttng.tensor_memory, mutable> -> !ttg.memdesc<128x128xf32, #tmem, #ttng.tensor_memory, mutable>
// %outRHS = ttng.tmem_load %o1 : !ttg.memdesc<128x128xf32, #tmem, #ttng.tensor_memory, mutable> -> tensor<128x128xf32, #blocked>
// clang-format on
// This changes the layout of the destination tensor so that each slice is
// distributed across warps. It currently only supports simple cases where the
// tmem allocation can be sliced easily; it could be extended with more general
// tmem slicing support if needed.
class TMemSplitLoadPattern : public OpRewritePattern<tt::SplitOp> {
public:
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(tt::SplitOp splitOp,
                                PatternRewriter &rewriter) const override {
    auto src = splitOp.getSrc();
    // Skip convert layout ops.
    while (auto cvt = src.getDefiningOp<ttg::ConvertLayoutOp>()) {
      src = cvt.getSrc();
    }
    // Only support splitting the N dimension at the outermost level.
    auto transOp = src.getDefiningOp<tt::TransOp>();
    if (!transOp || transOp.getOrder() != ArrayRef<int>({0, 2, 1}))
      return failure();
    auto reshapeOp = transOp.getSrc().getDefiningOp<tt::ReshapeOp>();
    if (!reshapeOp)
      return failure();
    auto shape = reshapeOp.getResult().getType().getShape();
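    // The reshape must keep the M dimension intact and only split N.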
    if (shape[0] != reshapeOp.getSrc().getType().getShape()[0])
      return failure();
    auto tmemLoad = reshapeOp.getSrc().getDefiningOp<ttng::TMEMLoadOp>();
    if (!tmemLoad)
      return failure();
    // We found a tmem_load that is split on the N dimension. We can split it
    // into multiple tmem_loads.
    int mDim = getShapePerCTA(tmemLoad.getSrc().getType())[0];
    // TODO: enable other M cases. (the layout is a bit more complex).
    if (mDim != 128)
      return failure();
    int splitNSize = shape[2];
    if (splitNSize < 8)
      return failure();
    Value tmem = tmemLoad.getSrc();
    int numWarps = ttg::lookupNumWarps(tmemLoad);
    rewriter.setInsertionPoint(tmemLoad);
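    // Replace the single load with two tmem subslices: load each half with a
    // tmem-compatible layout, then convert to the layouts expected by the
    // split results.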
    // First slice.
    Value subSlice0 = rewriter.create<ttng::TMEMSubSliceOp>(
        tmemLoad.getLoc(), tmem, 0, splitNSize);
    Attribute distLayout = ttng::getTmemCompatibleLayout(
        mDim, splitNSize, splitOp.getOutLHS().getType(), numWarps);
    RankedTensorType newLoadType = RankedTensorType::get(
        splitOp.getOutLHS().getType().getShape(),
        splitOp.getOutLHS().getType().getElementType(), distLayout);
    auto load0 = rewriter.create<ttng::TMEMLoadOp>(tmemLoad.getLoc(),
                                                   newLoadType, subSlice0);
    auto cvt0 = rewriter.create<ttg::ConvertLayoutOp>(
        tmemLoad.getLoc(), splitOp.getOutLHS().getType(), load0);
    // Second slice.
    Value subSlice1 = rewriter.create<ttng::TMEMSubSliceOp>(
        tmemLoad.getLoc(), tmem, splitNSize, splitNSize);
    auto load1 = rewriter.create<ttng::TMEMLoadOp>(tmemLoad.getLoc(),
                                                   newLoadType, subSlice1);
    auto cvt1 = rewriter.create<ttg::ConvertLayoutOp>(
        tmemLoad.getLoc(), splitOp.getOutRHS().getType(), load1);
    rewriter.replaceOp(splitOp, {cvt0, cvt1});
    sinkLoad(load0, cvt0);
    sinkLoad(load1, cvt1);
    return success();
  }
};

class TritonNvidiaGPUOptimizeTMemSubtilingPass
    : public TritonNvidiaGPUOptimizeTMemSubtilingPassBase<
          TritonNvidiaGPUOptimizeTMemSubtilingPass> {
public:
  using BaseT = TritonNvidiaGPUOptimizeTMemSubtilingPassBase<
      TritonNvidiaGPUOptimizeTMemSubtilingPass>;
  using BaseT::BaseT;

  void runOnOperation() override {
    MLIRContext *context = &getContext();
    ModuleOp m = getOperation();

    mlir::RewritePatternSet patterns(context);
    patterns.add<TMemSplitLoadPattern>(context);
    if (failed(applyPatternsGreedily(m, std::move(patterns))))
      signalPassFailure();
  }
};

} // namespace

std::unique_ptr<Pass> mlir::createTritonNvidiaGPUOptimizeTMemSubtilingPass() {
  return std::make_unique<TritonNvidiaGPUOptimizeTMemSubtilingPass>();
}