diff --git a/mlir/include/mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h b/mlir/include/mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h
--- a/mlir/include/mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h
+++ b/mlir/include/mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h
@@ -19,45 +19,6 @@
 namespace mlir {
 namespace edsc {
 namespace intrinsics {
-
-template <typename Op>
-struct FoldedValueBuilder {
-  // Builder-based
-  template <typename... Args>
-  FoldedValueBuilder(OperationFolder *folder, Args... args) {
-    value = folder ? folder->create<Op>(ScopedContext::getBuilderRef(),
-                                        ScopedContext::getLocation(), args...)
-                   : ScopedContext::getBuilderRef().create<Op>(
-                         ScopedContext::getLocation(), args...);
-  }
-
-  operator Value() { return value; }
-  Value value;
-};
-
-using folded_math_tanh = FoldedValueBuilder<math::TanhOp>;
-using folded_memref_alloc = FoldedValueBuilder<memref::AllocOp>;
-using folded_memref_cast = FoldedValueBuilder<memref::CastOp>;
-using folded_memref_dim = FoldedValueBuilder<memref::DimOp>;
-using folded_memref_load = FoldedValueBuilder<memref::LoadOp>;
-using folded_memref_sub_view = FoldedValueBuilder<memref::SubViewOp>;
-using folded_memref_tensor_load = FoldedValueBuilder<memref::TensorLoadOp>;
-using folded_memref_view = FoldedValueBuilder<memref::ViewOp>;
-using folded_std_muli = FoldedValueBuilder<MulIOp>;
-using folded_std_addi = FoldedValueBuilder<AddIOp>;
-using folded_std_addf = FoldedValueBuilder<AddFOp>;
-using folded_std_constant = FoldedValueBuilder<ConstantOp>;
-using folded_std_constant_float = FoldedValueBuilder<ConstantFloatOp>;
-using folded_std_constant_index = FoldedValueBuilder<ConstantIndexOp>;
-using folded_std_constant_int = FoldedValueBuilder<ConstantIntOp>;
-using folded_std_index_cast = FoldedValueBuilder<IndexCastOp>;
-using folded_std_muli = FoldedValueBuilder<MulIOp>;
-using folded_std_mulf = FoldedValueBuilder<MulFOp>;
-using folded_std_select = FoldedValueBuilder<SelectOp>;
-using folded_std_subi = FoldedValueBuilder<SubIOp>;
-using folded_std_zero_extendi = FoldedValueBuilder<ZeroExtendIOp>;
-using folded_std_sign_extendi = FoldedValueBuilder<SignExtendIOp>;
-using folded_tensor_extract = FoldedValueBuilder<tensor::ExtractOp>;
 } // namespace intrinsics
 } // namespace edsc
 } // namespace mlir
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -220,10 +220,9 @@
 /// smallest constant value for the size of the buffer needed for each
 /// dimension. If that is not possible, contains the dynamic size of the
 /// subview. The call back should return the buffer to use.
-using AllocBufferCallbackFn =
-    std::function<Optional<Value>(OpBuilder &b, memref::SubViewOp subView,
-                                  ArrayRef<Value> boundingSubViewSize,
-                                  DataLayout &layout, OperationFolder *folder)>;
+using AllocBufferCallbackFn = std::function<Optional<Value>(
+    OpBuilder &b, memref::SubViewOp subView,
+    ArrayRef<Value> boundingSubViewSize, DataLayout &layout)>;
 
 /// Callback function type used to deallocate the buffers used to hold the
 /// promoted subview.
@@ -321,8 +320,7 @@
 Optional<PromotionInfo>
 promoteSubviewAsNewBuffer(OpBuilder &b, Location loc, memref::SubViewOp subView,
                           AllocBufferCallbackFn allocationFn,
-                          DataLayout &layout,
-                          OperationFolder *folder = nullptr);
+                          DataLayout &layout);
 
 /// Promotes the `subViews` into a new buffer allocated at the insertion point
 /// `b`. Promotion occurs in 3 steps:
@@ -335,8 +333,7 @@
 /// Returns the modified linalg op (the modification happens in place) as well
 /// as all the copy ops created.
 Optional<LinalgOp> promoteSubViews(OpBuilder &b, LinalgOp op,
-                                   LinalgPromotionOptions options,
-                                   OperationFolder *folder = nullptr);
+                                   LinalgPromotionOptions options);
 
 /// Emit a suitable vector form for a Linalg op with fully static shape.
 LogicalResult vectorizeLinalgOp(OpBuilder &builder, Operation *op,
diff --git a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
--- a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
@@ -24,7 +24,6 @@
 class AffineExpr;
 class AffineForOp;
 class AffineMap;
-class OperationFolder;
 class PatternRewriter;
 
 namespace linalg {
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -25,6 +25,7 @@
 #include "mlir/IR/AffineExpr.h"
 #include "mlir/IR/AffineExprVisitor.h"
 #include "mlir/IR/AffineMap.h"
+#include "mlir/IR/ImplicitLocOpBuilder.h"
 #include "mlir/Support/LLVM.h"
 #include "mlir/Transforms/FoldUtils.h"
 #include "llvm/ADT/MapVector.h"
@@ -38,75 +39,66 @@
 
 using llvm::MapVector;
 
-using folded_affine_min = FoldedValueBuilder<AffineMinOp>;
-using folded_linalg_range = FoldedValueBuilder<linalg::RangeOp>;
-using folded_memref_dim = FoldedValueBuilder<memref::DimOp>;
-using folded_memref_subview = FoldedValueBuilder<memref::SubViewOp>;
-using folded_memref_view = FoldedValueBuilder<memref::ViewOp>;
-
 #define DEBUG_TYPE "linalg-promotion"
 
-/// Alloc a new buffer of `size`. If `dynamicBuffers` is true allocate exactly
-/// the size needed, otherwise try to allocate a static bounding box.
-static Value allocBuffer(const LinalgPromotionOptions &options,
-                         Type elementType, Value size, bool dynamicBuffers,
-                         DataLayout &layout, OperationFolder *folder,
+/// Alloc a new buffer of `size` * `width` i8; where `width` is given by the
+/// data `layout` for `elementType`.
+/// Use AllocOp or AllocaOp depending on `options`.
+/// Take an optional alignment.
+static Value allocBuffer(ImplicitLocOpBuilder &b,
+                         const LinalgPromotionOptions &options,
+                         Type elementType, Value allocSize, DataLayout &layout,
                          Optional<unsigned> alignment = None) {
-  auto *ctx = size.getContext();
   auto width = layout.getTypeSize(elementType);
-  IntegerAttr alignment_attr;
+
+  IntegerAttr alignmentAttr;
   if (alignment.hasValue())
-    alignment_attr =
-        IntegerAttr::get(IntegerType::get(ctx, 64), alignment.getValue());
-  if (!dynamicBuffers)
-    if (auto cst = size.getDefiningOp<ConstantIndexOp>())
-      return options.useAlloca
-                 ? memref_alloca(MemRefType::get(width * cst.getValue(),
-                                                 IntegerType::get(ctx, 8)),
-                                 ValueRange{}, alignment_attr)
-                       .value
-                 : memref_alloc(MemRefType::get(width * cst.getValue(),
-                                                IntegerType::get(ctx, 8)),
-                                ValueRange{}, alignment_attr)
-                       .value;
+    alignmentAttr = b.getI64IntegerAttr(alignment.getValue());
+
+  // Static buffer.
+  if (auto cst = allocSize.getDefiningOp<ConstantIndexOp>()) {
+    auto staticBufferType =
+        MemRefType::get(width * cst.getValue(), b.getIntegerType(8));
+    if (options.useAlloca) {
+      return b.createOrFold<memref::AllocaOp>(staticBufferType, ValueRange{},
+                                              alignmentAttr);
+    }
+    return b.createOrFold<memref::AllocOp>(staticBufferType, ValueRange{},
+                                           alignmentAttr);
+  }
+
+  // Fallback dynamic buffer.
+  auto dynamicBufferType = MemRefType::get(-1, b.getIntegerType(8));
   Value mul =
-      folded_std_muli(folder, folded_std_constant_index(folder, width), size);
-  return options.useAlloca
-             ? memref_alloca(MemRefType::get(-1, IntegerType::get(ctx, 8)), mul,
-                             alignment_attr)
-                   .value
-             : memref_alloc(MemRefType::get(-1, IntegerType::get(ctx, 8)), mul,
-                            alignment_attr)
-                   .value;
+      b.createOrFold<MulIOp>(b.create<ConstantIndexOp>(width), allocSize);
+  if (options.useAlloca)
+    return b.create<memref::AllocaOp>(dynamicBufferType, mul, alignmentAttr);
+  return b.create<memref::AllocOp>(dynamicBufferType, mul, alignmentAttr);
 }
 
 /// Default allocation callback function. This allocates a promoted buffer when
 /// no call back to do so is provided. The default is to allocate a
 /// memref<..xi8> and return a view to get a memref type of shape
 /// boundingSubViewSize.
-static Optional<Value>
-defaultAllocBufferCallBack(const LinalgPromotionOptions &options,
-                           OpBuilder &builder, memref::SubViewOp subView,
-                           ArrayRef<Value> boundingSubViewSize,
-                           bool dynamicBuffers, Optional<unsigned> alignment,
-                           DataLayout &layout, OperationFolder *folder) {
+static Optional<Value> defaultAllocBufferCallBack(
+    const LinalgPromotionOptions &options, OpBuilder &builder,
+    memref::SubViewOp subView, ArrayRef<Value> boundingSubViewSize,
+    bool dynamicBuffers, Optional<unsigned> alignment, DataLayout &layout) {
   ShapedType viewType = subView.getType();
-  int64_t rank = viewType.getRank();
-  (void)rank;
-  assert(rank > 0 && boundingSubViewSize.size() == static_cast<size_t>(rank));
-  auto zero = folded_std_constant_index(folder, 0);
-  auto one = folded_std_constant_index(folder, 1);
+  ImplicitLocOpBuilder b(subView.getLoc(), builder);
+  auto zero = b.createOrFold<ConstantIndexOp>(0);
+  auto one = b.createOrFold<ConstantIndexOp>(1);
 
   Value allocSize = one;
   for (auto size : llvm::enumerate(boundingSubViewSize))
-    allocSize = folded_std_muli(folder, allocSize, size.value());
-  Value buffer = allocBuffer(options, viewType.getElementType(), allocSize,
-                             dynamicBuffers, layout, folder, alignment);
+    allocSize = b.createOrFold<MulIOp>(allocSize, size.value());
+  Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
+                             layout, alignment);
   SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
                                    ShapedType::kDynamicSize);
-  Value view = folded_memref_view(
-      folder, MemRefType::get(dynSizes, viewType.getElementType()), buffer,
-      zero, boundingSubViewSize);
+  Value view = b.createOrFold<memref::ViewOp>(
+      MemRefType::get(dynSizes, viewType.getElementType()), buffer, zero,
+      boundingSubViewSize);
   return view;
 }
@@ -171,16 +163,15 @@
     }
   }
 
-  allocationFn =
-      (options.allocationFn
-           ? *(options.allocationFn)
-           : [&](OpBuilder &builder, memref::SubViewOp subViewOp,
-                 ArrayRef<Value> boundingSubViewSize, DataLayout &layout,
-                 OperationFolder *folder) -> Optional<Value> {
-        return defaultAllocBufferCallBack(options, builder, subViewOp,
-                                          boundingSubViewSize, dynamicBuffers,
-                                          alignment, layout, folder);
-      });
+  allocationFn = (options.allocationFn
+                      ? *(options.allocationFn)
+                      : [&](OpBuilder &builder, memref::SubViewOp subViewOp,
+                            ArrayRef<Value> boundingSubViewSize,
+                            DataLayout &layout) -> Optional<Value> {
+    return defaultAllocBufferCallBack(options, builder, subViewOp,
+                                      boundingSubViewSize, dynamicBuffers,
+                                      alignment, layout);
+  });
 
   deallocationFn =
       (options.deallocationFn
           ? *(options.deallocationFn)
@@ -215,8 +206,7 @@
 // by a partial `copy` op.
 Optional<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
     OpBuilder &b, Location loc, memref::SubViewOp subView,
-    AllocBufferCallbackFn allocationFn, DataLayout &layout,
-    OperationFolder *folder) {
+    AllocBufferCallbackFn allocationFn, DataLayout &layout) {
   ScopedContext scopedContext(b, loc);
   auto viewType = subView.getType();
   auto rank = viewType.getRank();
@@ -233,27 +223,24 @@
         (!sizeAttr) ? rangeValue.size : b.create<ConstantOp>(loc, sizeAttr);
     LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
     fullSizes.push_back(size);
-    partialSizes.push_back(
-        folded_memref_dim(folder, subView, en.index()).value);
+    partialSizes.push_back(memref_dim(subView, en.index()).value);
   }
   SmallVector<int64_t, 4> dynSizes(fullSizes.size(), -1);
   // If a callback is not specified, then use the default implementation for
   // allocating the promoted buffer.
-  Optional<Value> fullLocalView =
-      allocationFn(b, subView, fullSizes, layout, folder);
+  Optional<Value> fullLocalView = allocationFn(b, subView, fullSizes, layout);
   if (!fullLocalView)
     return {};
   SmallVector<OpFoldResult> zeros(fullSizes.size(), b.getIndexAttr(0));
   SmallVector<OpFoldResult> ones(fullSizes.size(), b.getIndexAttr(1));
-  auto partialLocalView =
-      folded_memref_subview(folder, *fullLocalView, zeros, partialSizes, ones);
+  auto partialLocalView = b.createOrFold<memref::SubViewOp>(
+      loc, *fullLocalView, zeros, partialSizes, ones);
   return PromotionInfo{*fullLocalView, partialLocalView};
 }
 
 static Optional<MapVector<unsigned, PromotionInfo>>
 promoteSubViews(OpBuilder &b, Location loc,
-                LinalgOpInstancePromotionOptions options, DataLayout &layout,
-                OperationFolder *folder) {
+                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
   if (options.subViews.empty())
     return {};
 
@@ -264,7 +251,7 @@
     memref::SubViewOp subView =
        cast<memref::SubViewOp>(v.second.getDefiningOp());
     Optional<PromotionInfo> promotionInfo = promoteSubviewAsNewBuffer(
-        b, loc, subView, options.allocationFn, layout, folder);
+        b, loc, subView, options.allocationFn, layout);
     if (!promotionInfo)
       return {};
     promotionInfoMap[v.first] = *promotionInfo;
@@ -274,16 +261,16 @@
       continue;
     Value fillVal;
     if (auto t = subView.getType().getElementType().dyn_cast<FloatType>()) {
-      fillVal = folded_std_constant(folder, FloatAttr::get(t, 0.0));
+      fillVal = std_constant(FloatAttr::get(t, 0.0));
     } else if (auto t =
                    subView.getType().getElementType().dyn_cast<IntegerType>()) {
-      fillVal = folded_std_constant_int(folder, 0, t);
+      fillVal = std_constant_int(0, t);
     } else if (auto t =
                    subView.getType().getElementType().dyn_cast<ComplexType>()) {
       if (auto et = t.getElementType().dyn_cast<FloatType>())
-        fillVal = folded_std_constant(folder, FloatAttr::get(et, 0.0));
+        fillVal = std_constant(FloatAttr::get(et, 0.0));
       else if (auto et = t.getElementType().cast<IntegerType>())
-        fillVal = folded_std_constant_int(folder, 0, et);
+        fillVal = std_constant_int(0, et);
       fillVal = b.create<complex::CreateOp>(loc, t, fillVal, fillVal);
     } else {
       return {};
     }
@@ -306,8 +293,7 @@
 
 static Optional<LinalgOp>
 promoteSubViews(OpBuilder &b, LinalgOp op,
-                LinalgOpInstancePromotionOptions options, DataLayout &layout,
-                OperationFolder *folder) {
+                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
   assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");
 
   if (auto convOp = dyn_cast<linalg::ConvOp>(op.getOperation())) {
@@ -318,8 +304,7 @@
   // 1. Promote the specified views and use them in the new op.
   auto loc = op.getLoc();
-  auto promotedBuffersAndViews =
-      promoteSubViews(b, loc, options, layout, folder);
+  auto promotedBuffersAndViews = promoteSubViews(b, loc, options, layout);
   if (!promotedBuffersAndViews ||
       promotedBuffersAndViews->size() != options.subViews.size())
     return {};
@@ -386,13 +371,12 @@
   return failure();
 }
 
-Optional<LinalgOp> mlir::linalg::promoteSubViews(OpBuilder &b,
-                                                 LinalgOp linalgOp,
-                                                 LinalgPromotionOptions options,
-                                                 OperationFolder *folder) {
+Optional<LinalgOp>
+mlir::linalg::promoteSubViews(OpBuilder &b, LinalgOp linalgOp,
+                              LinalgPromotionOptions options) {
   LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
   auto layout = DataLayout::closest(linalgOp);
-  return ::promoteSubViews(b, linalgOp, linalgOptions, layout, folder);
+  return ::promoteSubViews(b, linalgOp, linalgOptions, layout);
 }
 
 namespace {
@@ -404,8 +388,7 @@
   }
 
   void runOnFunction() override {
-    OperationFolder folder(&getContext());
-    getFunction().walk([this, &folder](LinalgOp op) {
+    getFunction().walk([this](LinalgOp op) {
       auto options = LinalgPromotionOptions()
                          .setDynamicBuffers(dynamicBuffers)
                          .setUseAlloca(useAlloca);
@@ -413,7 +396,7 @@
         return;
       LLVM_DEBUG(llvm::dbgs() << "Promote: " << *(op.getOperation()) << "\n");
      OpBuilder b(op);
-      promoteSubViews(b, op, options, &folder);
+      promoteSubViews(b, op, options);
     });
   }
 };
diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir
--- a/mlir/test/Dialect/Linalg/transform-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir
@@ -48,7 +48,7 @@
 // CHECK: scf.parallel {{.*}} step (%[[c5]])
 // CHECK: scf.for {{.*}} step %[[c6]]
 // CHECK: linalg.matvec
-// CHECK: ins({{.*}}, {{.*}}: memref, memref)
+// CHECK: ins({{.*}}: memref, memref)
 // CHECK: outs({{.*}}: memref)
 
 func @matmul(%A: memref,
@@ -87,7 +87,7 @@
 // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c3]] {
 // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c4]] {
 // CHECK: linalg.matmul
-// CHECK: ins({{.*}}, {{.*}}: memref, memref)
+// CHECK: ins({{.*}}: memref, memref)
 // CHECK: outs({{.*}}: memref)
 
 #matmul_accesses = [
@@ -141,7 +141,7 @@
 // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c6]]
 // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c5]]
 // CHECK: linalg.matvec
-// CHECK: ins({{.*}}, {{.*}}: memref, memref)
+// CHECK: ins({{.*}}: memref, memref)
 // CHECK: outs({{.*}}: memref)
 
 func @matmul_perm(%A: memref,
@@ -174,7 +174,7 @@
 // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c30]] {
 // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c40]] {
 // CHECK: linalg.matmul
-// CHECK: ins({{.*}}, {{.*}}: memref, memref)
+// CHECK: ins({{.*}}: memref, memref)
 // CHECK: outs({{.*}}: memref)
 
 func @promote_subview_matmul(%arg0: memref,
@@ -214,19 +214,19 @@
 // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c2000]] {
 // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c3000]] {
 // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c4000]] {
-// CHECK: %[[s0:.*]] = memref.subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref
-// CHECK: %[[s1:.*]] = memref.subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref
-// CHECK: %[[s2:.*]] = memref.subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref
-// CHECK: %[[a0:.*]] = memref.alloc({{%.*}}) : memref
-// CHECK: %[[v0:.*]] = memref.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref
+// CHECK: %[[s0:.*]] = memref.subview {{.*}}: memref to memref
+// CHECK: %[[s1:.*]] = memref.subview {{.*}}: memref to memref
+// CHECK: %[[s2:.*]] = memref.subview {{.*}}: memref to memref
+// CHECK: %[[a0:.*]] = memref.alloc() : memref<32000000xi8>
+// CHECK: %[[v0:.*]] = memref.view %[[a0]]{{.*}} : memref<32000000xi8> to memref
 // CHECK: %[[l0:.*]] = memref.subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1]
 // CHECK-SAME: memref to memref
-// CHECK: %[[a1:.*]] = memref.alloc({{%.*}}) : memref
-// CHECK: %[[v1:.*]] = memref.view %[[a1]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref
+// CHECK: %[[a1:.*]] = memref.alloc() : memref<48000000xi8>
+// CHECK: %[[v1:.*]] = memref.view %[[a1]]{{.*}} : memref<48000000xi8> to memref
 // CHECK: %[[l1:.*]] = memref.subview %[[v1]][0, 0] [%{{.*}}, %{{.*}}] [1, 1]
 // CHECK-SAME: memref to memref
-// CHECK: %[[a2:.*]] = memref.alloc({{%.*}}) : memref
-// CHECK: %[[v2:.*]] = memref.view %[[a2]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref
+// CHECK: %[[a2:.*]] = memref.alloc() : memref<24000000xi8>
+// CHECK: %[[v2:.*]] = memref.view %[[a2]]{{.*}} : memref<24000000xi8> to memref
 // CHECK: %[[l2:.*]] = memref.subview %[[v2]][0, 0] [%{{.*}}, %{{.*}}] [1, 1]
 // CHECK-SAME: memref to memref
 // CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref, memref
@@ -273,21 +273,17 @@
 // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c2000]] {
 // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c3000]] {
 // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c4000]] {
-// CHECK: %[[s0:.*]] = memref.subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref
-// CHECK: %[[s1:.*]] = memref.subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref
-// CHECK: %[[s2:.*]] = memref.subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref
-// CHECK: %[[a0:.*]] = memref.alloc({{%.*}}) : memref
-// CHECK: %[[v0:.*]] = memref.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref
+// CHECK: %[[s0:.*]] = memref.subview {{.*}}: memref to memref
+// CHECK: %[[s1:.*]] = memref.subview {{.*}}: memref to memref
+// CHECK: %[[s2:.*]] = memref.subview {{.*}}: memref to memref
+// CHECK: %[[a0:.*]] = memref.alloc() : memref<32000000xi8>
+// CHECK: %[[v0:.*]] = memref.view %[[a0]]{{.*}} : memref<32000000xi8> to memref
 // CHECK: %[[l0:.*]] = memref.subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref to memref
-// CHECK-NOT: %{{.*}} = memref.alloc({{%.*}}) : memref
-// CHECK-NOT: %{{.*}} = memref.view %{{.*}}[{{.*}}][{{%.*}}, {{%.*}}] : memref to memref
-// CHECK-NOT: %{{.*}} = memref.subview %{{.*}}[0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref to memref
-// CHECK-NOT: %{{.*}} = memref.alloc({{%.*}}) : memref
-// CHECK-NOT: %{{.*}} = memref.view %{{.*}}[{{.*}}][{{%.*}}, {{%.*}}] : memref to memref
-// CHECK-NOT: %{{.*}} = memref.subview %{{.*}}[0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref to memref
+// CHECK-NOT: memref.alloc
+// CHECK-NOT: memref.view
+// CHECK-NOT: memref.subview
 // CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref, memref
-// CHECK-NOT: linalg.copy(%[[s1]], %{{.*}}) : memref, memref
-// CHECK-NOT: linalg.copy(%[[s2]], %{{.*}}) : memref, memref
+// CHECK-NOT: linalg.copy
 // CHECK: linalg.matmul
 // CHECK-SAME: ins(%[[v0]], %[[s1]] : memref, memref)
 // CHECK-SAME: outs(%[[s2]] : memref)
@@ -306,11 +302,11 @@
 }
 // CHECK-LABEL: func @aligned_promote_fill
 // CHECK: %[[cf:.*]] = constant {{.*}} : f32
-// CHECK: %[[s0:.*]] = memref.subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref
-// CHECK: %[[a0:.*]] = memref.alloc({{%.*}}) {alignment = 32 : i64} : memref
-// CHECK: %[[v0:.*]] = memref.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref
+// CHECK: %[[s0:.*]] = memref.subview {{.*}}: memref to memref
+// CHECK: %[[a0:.*]] = memref.alloc() {alignment = 32 : i64} : memref<32000000xi8>
+// CHECK: %[[v0:.*]] = memref.view %[[a0]]{{.*}} : memref<32000000xi8> to memref
 // CHECK: %[[l0:.*]] = memref.subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref to memref
-// CHECK: linalg.fill(%[[v0]], {{%.*}}) : memref, f32
+// CHECK: linalg.fill(%[[v0]], {{.*}}) : memref, f32
 // CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref, memref
 // CHECK: linalg.fill(%[[v0]], %[[cf]]) : memref, f32
@@ -329,11 +325,11 @@
 }
 // CHECK-LABEL: func @aligned_promote_fill_complex
 // CHECK: %[[cc:.*]] = complex.create {{.*}} : complex
-// CHECK: %[[s0:.*]] = memref.subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref, #map{{.*}}> to memref, #map{{.*}}>
-// CHECK: %[[a0:.*]] = memref.alloc({{%.*}}) {alignment = 32 : i64} : memref
-// CHECK: %[[v0:.*]] = memref.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref>
+// CHECK: %[[s0:.*]] = memref.subview {{.*}}: memref, #map{{.*}}> to memref, #map{{.*}}>
+// CHECK: %[[a0:.*]] = memref.alloc() {alignment = 32 : i64} : memref<64000000xi8>
+// CHECK: %[[v0:.*]] = memref.view %[[a0]]{{.*}} : memref<64000000xi8> to memref>
 // CHECK: %[[l0:.*]] = memref.subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref> to memref, #[[$STRIDED_2D_u_1]]>
-// CHECK: linalg.fill(%[[v0]], {{%.*}}) : memref>, complex
+// CHECK: linalg.fill(%[[v0]], {{.*}}) : memref>, complex
 // CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref, #map{{.*}}>, memref, #map{{.*}}>
 // CHECK: linalg.fill(%[[v0]], %[[cc]]) : memref>, complex
 
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
@@ -271,8 +271,7 @@
 // Allocation call back
 static Optional<Value> allocCallBackFn(OpBuilder &b, memref::SubViewOp subView,
                                        ArrayRef<Value> boundingSubViewSize,
-                                       DataLayout &layout,
-                                       OperationFolder *folder) {
+                                       DataLayout &layout) {
   SmallVector<int64_t, 4> shape(boundingSubViewSize.size(), -1);
   return b
       .create<memref::AllocOp>(
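For downstream users of `promoteSubViews`, the migration is mechanical: drop the trailing `OperationFolder *` parameter from any custom `AllocBufferCallbackFn` and rely on `createOrFold` for build-time folding. Below is a minimal sketch of a migrated callback; it is not part of this patch. The name `myAllocCallBack` and the choice to allocate a dynamically shaped buffer directly (instead of an i8 buffer plus `memref.view`, as the default callback does) are illustrative assumptions.

```cpp
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"

using namespace mlir;

// Hypothetical out-of-tree allocation callback, updated to the new
// folder-free AllocBufferCallbackFn signature.
static Optional<Value> myAllocCallBack(OpBuilder &b, memref::SubViewOp subView,
                                       ArrayRef<Value> boundingSubViewSize,
                                       DataLayout &layout) {
  // No OperationFolder is threaded through anymore; createOrFold folds
  // statically known sizes on the fly, as in the patch above.
  ImplicitLocOpBuilder ib(subView.getLoc(), b);
  SmallVector<int64_t, 4> shape(boundingSubViewSize.size(),
                                ShapedType::kDynamicSize);
  auto type = MemRefType::get(shape, subView.getType().getElementType());
  return ib.createOrFold<memref::AllocOp>(type,
                                          ValueRange(boundingSubViewSize));
}
```

The callback is registered as before, e.g. via `LinalgPromotionOptions::setAllocationDeallocationFns`; only the signature changes, and the matching `DeallocBufferCallbackFn` is unaffected.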