diff --git a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td --- a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td +++ b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td @@ -1959,14 +1959,19 @@ let builders = [ // Build a ReinterpretCastOp with mixed static and dynamic entries. OpBuilderDAG<(ins "MemRefType":$resultType, "Value":$source, - "int64_t":$staticOffset, "ArrayRef":$staticSizes, - "ArrayRef":$staticStrides, "ValueRange":$offset, - "ValueRange":$sizes, "ValueRange":$strides, + "OpFoldResult":$offset, "ArrayRef":$sizes, + "ArrayRef":$strides, CArg<"ArrayRef", "{}">:$attrs)>, - // Build a ReinterpretCastOp with all dynamic entries. + // Build a ReinterpretCastOp with static entries. OpBuilderDAG<(ins "MemRefType":$resultType, "Value":$source, - "Value":$offset, "ValueRange":$sizes, "ValueRange":$strides, + "int64_t":$offset, "ArrayRef":$sizes, + "ArrayRef":$strides, CArg<"ArrayRef", "{}">:$attrs)>, + // Build a ReinterpretCastOp with dynamic entries. + OpBuilderDAG<(ins "MemRefType":$resultType, "Value":$source, + "Value":$offset, "ValueRange":$sizes, + "ValueRange":$strides, + CArg<"ArrayRef", "{}">:$attrs)> ]; let extraClassDeclaration = extraBaseClassDeclaration # [{ @@ -2927,23 +2932,33 @@ let results = (outs AnyMemRef:$result); let builders = [ - // Build a SubViewOp with mixed static and dynamic entries. - OpBuilderDAG<(ins "Value":$source, "ArrayRef":$staticOffsets, - "ArrayRef":$staticSizes, "ArrayRef":$staticStrides, - "ValueRange":$offsets, "ValueRange":$sizes, "ValueRange":$strides, + // Build a SubViewOp with mixed static and dynamic entries and custom + // result type. If the type passed is nullptr, it is inferred. + OpBuilderDAG<(ins "Value":$source, "ArrayRef":$offsets, + "ArrayRef":$sizes, "ArrayRef":$strides, CArg<"ArrayRef", "{}">:$attrs)>, - // Build a SubViewOp with all dynamic entries. - OpBuilderDAG<(ins "Value":$source, "ValueRange":$offsets, - "ValueRange":$sizes, "ValueRange":$strides, + // Build a SubViewOp with mixed static and dynamic entries and inferred + // result type. + OpBuilderDAG<(ins "MemRefType":$resultType, "Value":$source, + "ArrayRef":$offsets, "ArrayRef":$sizes, + "ArrayRef":$strides, CArg<"ArrayRef", "{}">:$attrs)>, - // Build a SubViewOp with mixed static and dynamic entries - // and custom result type. + // Build a SubViewOp with static entries and custom result type. If the + // type passed is nullptr, it is inferred. + OpBuilderDAG<(ins "Value":$source, "ArrayRef":$offsets, + "ArrayRef":$sizes, "ArrayRef":$strides, + CArg<"ArrayRef", "{}">:$attrs)>, + // Build a SubViewOp with static entries and inferred result type. OpBuilderDAG<(ins "MemRefType":$resultType, "Value":$source, - "ArrayRef":$staticOffsets, "ArrayRef":$staticSizes, - "ArrayRef":$staticStrides, "ValueRange":$offsets, + "ArrayRef":$offsets, "ArrayRef":$sizes, + "ArrayRef":$strides, + CArg<"ArrayRef", "{}">:$attrs)>, + // Build a SubViewOp with dynamic entries and custom result type. If the + // type passed is nullptr, it is inferred. + OpBuilderDAG<(ins "Value":$source, "ValueRange":$offsets, "ValueRange":$sizes, "ValueRange":$strides, CArg<"ArrayRef", "{}">:$attrs)>, - // Build a SubViewOp with all dynamic entries and custom result type. + // Build a SubViewOp with dynamic entries and inferred result type. 
OpBuilderDAG<(ins "MemRefType":$resultType, "Value":$source, "ValueRange":$offsets, "ValueRange":$sizes, "ValueRange":$strides, CArg<"ArrayRef", "{}">:$attrs)> @@ -3039,26 +3054,6 @@ let results = (outs AnyRankedTensor:$result); let builders = [ - // Build a SubTensorOp with mixed static and dynamic entries. - OpBuilderDAG<(ins "Value":$source, "ArrayRef":$staticOffsets, - "ArrayRef":$staticSizes, "ArrayRef":$staticStrides, - "ValueRange":$offsets, "ValueRange":$sizes, "ValueRange":$strides, - CArg<"ArrayRef", "{}">:$attrs)>, - // Build a SubTensorOp with all dynamic entries. - OpBuilderDAG<(ins "Value":$source, "ValueRange":$offsets, - "ValueRange":$sizes, "ValueRange":$strides, - CArg<"ArrayRef", "{}">:$attrs)>, - // Build a SubTensorOp with mixed static and dynamic entries - // and custom result type. - OpBuilderDAG<(ins "RankedTensorType":$resultType, "Value":$source, - "ArrayRef":$staticOffsets, "ArrayRef":$staticSizes, - "ArrayRef":$staticStrides, "ValueRange":$offsets, - "ValueRange":$sizes, "ValueRange":$strides, - CArg<"ArrayRef", "{}">:$attrs)>, - // Build a SubTensorOp with all dynamic entries and custom result type. - OpBuilderDAG<(ins "RankedTensorType":$resultType, "Value":$source, - "ValueRange":$offsets, "ValueRange":$sizes, "ValueRange":$strides, - CArg<"ArrayRef", "{}">:$attrs)>, // Build a SubTensorOp with mixed static and dynamic entries and inferred // result type. OpBuilderDAG<(ins "Value":$source, "ArrayRef":$offsets, @@ -3069,6 +3064,15 @@ OpBuilderDAG<(ins "RankedTensorType":$resultType, "Value":$source, "ArrayRef":$offsets, "ArrayRef":$sizes, "ArrayRef":$strides, + CArg<"ArrayRef", "{}">:$attrs)>, + // Build a SubTensorOp with dynamic entries and custom result type. If the + // type passed is nullptr, it is inferred. + OpBuilderDAG<(ins "Value":$source, "ValueRange":$offsets, + "ValueRange":$sizes, "ValueRange":$strides, + CArg<"ArrayRef", "{}">:$attrs)>, + // Build a SubTensorOp with dynamic entries and inferred result type. + OpBuilderDAG<(ins "RankedTensorType":$resultType, "Value":$source, + "ValueRange":$offsets, "ValueRange":$sizes, "ValueRange":$strides, CArg<"ArrayRef", "{}">:$attrs)> ]; @@ -3157,19 +3161,13 @@ let builders = [ // Build a SubTensorInsertOp with mixed static and dynamic entries. - OpBuilderDAG<(ins "Value":$source, "Value":$dest, - "ArrayRef":$staticOffsets, "ArrayRef":$staticSizes, - "ArrayRef":$staticStrides, "ValueRange":$offsets, - "ValueRange":$sizes, "ValueRange":$strides, - CArg<"ArrayRef", "{}">:$attrs)>, - // Build a SubTensorInsertOp with all dynamic entries. - OpBuilderDAG<(ins "Value":$source, "Value":$dest, "ValueRange":$offsets, - "ValueRange":$sizes, "ValueRange":$strides, - CArg<"ArrayRef", "{}">:$attrs)>, - // Build a SubTensorInsertOp with mixed static and dynamic entries. OpBuilderDAG<(ins "Value":$source, "Value":$dest, "ArrayRef":$offsets, "ArrayRef":$sizes, "ArrayRef":$strides, + CArg<"ArrayRef", "{}">:$attrs)>, + // Build a SubTensorInsertOp with dynamic entries. + OpBuilderDAG<(ins "Value":$source, "Value":$dest, + "ValueRange":$offsets, "ValueRange":$sizes, "ValueRange":$strides, CArg<"ArrayRef", "{}">:$attrs)> ]; diff --git a/mlir/include/mlir/IR/OpDefinition.h b/mlir/include/mlir/IR/OpDefinition.h --- a/mlir/include/mlir/IR/OpDefinition.h +++ b/mlir/include/mlir/IR/OpDefinition.h @@ -213,11 +213,25 @@ return lhs.getOperation() != rhs.getOperation(); } +raw_ostream &operator<<(raw_ostream &os, OpFoldResult ofr); + /// This class represents a single result from folding an operation. 
class OpFoldResult : public PointerUnion { using PointerUnion::PointerUnion; + +public: + void dump() { llvm::errs() << *this << "\n"; } }; +/// Allow printing to a stream. +inline raw_ostream &operator<<(raw_ostream &os, OpFoldResult ofr) { + if (Value value = ofr.dyn_cast()) + value.print(os); + else + ofr.dyn_cast().print(os); + return os; +} + /// Allow printing to a stream. inline raw_ostream &operator<<(raw_ostream &os, OpState &op) { op.print(os, OpPrintingFlags().useLocalScope()); diff --git a/mlir/include/mlir/Interfaces/ViewLikeInterface.td b/mlir/include/mlir/Interfaces/ViewLikeInterface.td --- a/mlir/include/mlir/Interfaces/ViewLikeInterface.td +++ b/mlir/include/mlir/Interfaces/ViewLikeInterface.td @@ -108,28 +108,6 @@ return $_op.sizes(); }] >, - InterfaceMethod< - /*desc=*/[{ - Return a vector of all the static or dynamic sizes of the op. - }], - /*retTy=*/"SmallVector", - /*methodName=*/"getMixedSizes", - /*args=*/(ins), - /*methodBody=*/"", - /*defaultImplementation=*/[{ - SmallVector res; - std::array ranks = $_op.getArrayAttrRanks(); - unsigned numDynamic = 0; - unsigned count = ranks[getOffsetOperandGroupPosition()]; - for (unsigned idx = 0; idx < count; ++idx) { - if (isDynamicSize(idx)) - res.push_back($_op.sizes()[numDynamic++]); - else - res.push_back($_op.static_sizes()[idx]); - } - return res; - }] - >, InterfaceMethod< /*desc=*/[{ Return the dynamic stride operands. @@ -178,6 +156,72 @@ return $_op.static_strides(); }] >, + InterfaceMethod< + /*desc=*/[{ + Return a vector of all the static or dynamic sizes of the op. + }], + /*retTy=*/"SmallVector", + /*methodName=*/"getMixedOffsets", + /*args=*/(ins), + /*methodBody=*/"", + /*defaultImplementation=*/[{ + SmallVector res; + std::array ranks = $_op.getArrayAttrRanks(); + unsigned numDynamic = 0; + unsigned count = ranks[getOffsetOperandGroupPosition()]; + for (unsigned idx = 0; idx < count; ++idx) { + if (isDynamicOffset(idx)) + res.push_back($_op.offsets()[numDynamic++]); + else + res.push_back($_op.static_offsets()[idx]); + } + return res; + }] + >, + InterfaceMethod< + /*desc=*/[{ + Return a vector of all the static or dynamic sizes of the op. + }], + /*retTy=*/"SmallVector", + /*methodName=*/"getMixedSizes", + /*args=*/(ins), + /*methodBody=*/"", + /*defaultImplementation=*/[{ + SmallVector res; + std::array ranks = $_op.getArrayAttrRanks(); + unsigned numDynamic = 0; + unsigned count = ranks[getSizeOperandGroupPosition()]; + for (unsigned idx = 0; idx < count; ++idx) { + if (isDynamicSize(idx)) + res.push_back($_op.sizes()[numDynamic++]); + else + res.push_back($_op.static_sizes()[idx]); + } + return res; + }] + >, + InterfaceMethod< + /*desc=*/[{ + Return a vector of all the static or dynamic strides of the op. 
+ }], + /*retTy=*/"SmallVector", + /*methodName=*/"getMixedStrides", + /*args=*/(ins), + /*methodBody=*/"", + /*defaultImplementation=*/[{ + SmallVector res; + std::array ranks = $_op.getArrayAttrRanks(); + unsigned numDynamic = 0; + unsigned count = ranks[getStrideOperandGroupPosition()]; + for (unsigned idx = 0; idx < count; ++idx) { + if (isDynamicStride(idx)) + res.push_back($_op.strides()[numDynamic++]); + else + res.push_back($_op.static_strides()[idx]); + } + return res; + }] + >, InterfaceMethod< /*desc=*/[{ diff --git a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp @@ -241,10 +241,8 @@ Value alloc = rewriter.create(op.getLoc(), subviewMemRefType, op.sizes()); Value subView = rewriter.create( - op.getLoc(), sourceMemref, extractFromI64ArrayAttr(op.static_offsets()), - extractFromI64ArrayAttr(op.static_sizes()), - extractFromI64ArrayAttr(op.static_strides()), op.offsets(), op.sizes(), - op.strides()); + op.getLoc(), sourceMemref, op.getMixedOffsets(), op.getMixedSizes(), + op.getMixedStrides()); rewriter.create(op.getLoc(), subView, alloc); rewriter.replaceOp(op, alloc); return success(); @@ -283,10 +281,8 @@ // Take a subview to copy the small memref. Value subview = rewriter.create( - op.getLoc(), destMemRef, extractFromI64ArrayAttr(op.static_offsets()), - extractFromI64ArrayAttr(op.static_sizes()), - extractFromI64ArrayAttr(op.static_strides()), adaptor.offsets(), - adaptor.sizes(), adaptor.strides()); + op.getLoc(), destMemRef, op.getMixedOffsets(), op.getMixedSizes(), + op.getMixedStrides()); // Copy the small memref. rewriter.create(op.getLoc(), sourceMemRef, subview); rewriter.replaceOp(op, destMemRef); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp @@ -61,9 +61,9 @@ // by `permutationMap`. static void inferShapeComponents(AffineMap permutationMap, ArrayRef loopRanges, - SmallVectorImpl &offsets, - SmallVectorImpl &sizes, - SmallVectorImpl &strides) { + SmallVectorImpl &offsets, + SmallVectorImpl &sizes, + SmallVectorImpl &strides) { assert(permutationMap.isProjectedPermutation() && "expected some subset of a permutation map"); SmallVector shapeRanges(permutationMap.getNumResults()); @@ -101,7 +101,7 @@ AffineMap map = op.getIndexingMap(shapedOperandIdx); LLVM_DEBUG(llvm::dbgs() << "shapedOperandIdx: " << shapedOperandIdx << " with indexingMap: " << map << "\n"); - SmallVector offsets, sizes, strides; + SmallVector offsets, sizes, strides; inferShapeComponents(map, loopRanges, offsets, sizes, strides); Value shape = en.value(); Value sub = shape.getType().isa() diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp @@ -214,7 +214,8 @@ ScopedContext scopedContext(b, loc); auto viewType = subView.getType(); auto rank = viewType.getRank(); - SmallVector fullSizes, partialSizes; + SmallVector fullSizes; + SmallVector partialSizes; fullSizes.reserve(rank); partialSizes.reserve(rank); for (auto en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) { @@ -226,7 +227,7 @@ (!sizeAttr) ? 
rangeValue.size : b.create(loc, sizeAttr); LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n"); fullSizes.push_back(size); - partialSizes.push_back(folded_std_dim(folder, subView, en.index())); + partialSizes.push_back(folded_std_dim(folder, subView, en.index()).value); } SmallVector dynSizes(fullSizes.size(), -1); // If a callback is not specified, then use the default implementation for @@ -234,10 +235,8 @@ Optional fullLocalView = allocationFn(b, subView, fullSizes, folder); if (!fullLocalView) return {}; - auto zero = folded_std_constant_index(folder, 0); - auto one = folded_std_constant_index(folder, 1); - SmallVector zeros(fullSizes.size(), zero); - SmallVector ones(fullSizes.size(), one); + SmallVector zeros(fullSizes.size(), b.getIndexAttr(0)); + SmallVector ones(fullSizes.size(), b.getIndexAttr(1)); auto partialLocalView = folded_std_subview(folder, *fullLocalView, zeros, partialSizes, ones); return PromotionInfo{*fullLocalView, partialLocalView}; diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp @@ -255,15 +255,15 @@ } // Construct a new subview / subtensor for the tile. - SmallVector offsets, sizes, strides; + SmallVector offsets, sizes, strides; offsets.reserve(rank); sizes.reserve(rank); strides.reserve(rank); for (unsigned r = 0; r < rank; ++r) { if (!isTiled(map.getSubMap({r}), tileSizes)) { - offsets.push_back(std_constant_index(0)); - sizes.push_back(std_dim(shapedOp, r)); - strides.push_back(std_constant_index(1)); + offsets.push_back(b.getIndexAttr(0)); + sizes.push_back(std_dim(shapedOp, r).value); + strides.push_back(b.getIndexAttr(1)); continue; } @@ -297,7 +297,7 @@ } sizes.push_back(size); - strides.push_back(std_constant_index(1)); + strides.push_back(b.getIndexAttr(1)); } if (shapedType.isa()) diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp @@ -192,19 +192,17 @@ // This later folds away. SmallVector paddedSubviewResults; paddedSubviewResults.reserve(opToPad->getNumResults()); - Value zero = rewriter.create(loc, 0); - Value one = rewriter.create(loc, 1); llvm::SetVector newUsersOfOpToPad; for (auto it : llvm::zip(opToPad->getResults(), paddedOp->getResults())) { auto rank = std::get<0>(it).getType().cast().getRank(); - SmallVector offsets(rank, zero); - auto sizes = llvm::to_vector<4>( - llvm::map_range(llvm::seq(0, rank), [&](unsigned d) -> Value { + SmallVector offsets(rank, rewriter.getIndexAttr(0)); + auto sizes = llvm::to_vector<4>(llvm::map_range( + llvm::seq(0, rank), [&](unsigned d) -> OpFoldResult { auto dimOp = rewriter.create(loc, std::get<0>(it), d); newUsersOfOpToPad.insert(dimOp); - return dimOp; + return dimOp.getResult(); })); - SmallVector strides(rank, one); + SmallVector strides(rank, rewriter.getIndexAttr(1)); paddedSubviewResults.push_back(rewriter.create( loc, std::get<1>(it), offsets, sizes, strides)); } diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp --- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp +++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp @@ -33,6 +33,32 @@ using namespace mlir; +/// Helper function to dispatch an OpFoldResult into either the `dynamicVec` if +/// it is a Value or into `staticVec` if it is an IntegerAttr. 
+/// In the case of a Value, a copy of the `sentinel` value is also pushed to +/// `staticVec`. This is useful to extract mixed static and dynamic entries that +/// come from an AttrSizedOperandSegments trait. +static void dispatchIndexOpFoldResult(OpFoldResult ofr, + SmallVectorImpl &dynamicVec, + SmallVectorImpl &staticVec, + int64_t sentinel) { + if (auto v = ofr.dyn_cast()) { + dynamicVec.push_back(v); + staticVec.push_back(sentinel); + return; + } + APInt apInt = ofr.dyn_cast().cast().getValue(); + staticVec.push_back(apInt.getSExtValue()); +} + +static void dispatchIndexOpFoldResults(ArrayRef ofrs, + SmallVectorImpl &dynamicVec, + SmallVectorImpl &staticVec, + int64_t sentinel) { + for (auto ofr : ofrs) + dispatchIndexOpFoldResult(ofr, dynamicVec, staticVec, sentinel); +} + //===----------------------------------------------------------------------===// // StandardOpsDialect Interfaces //===----------------------------------------------------------------------===// @@ -2069,32 +2095,57 @@ // MemRefReinterpretCastOp //===----------------------------------------------------------------------===// -void mlir::MemRefReinterpretCastOp::build( - OpBuilder &b, OperationState &result, MemRefType resultType, Value source, - int64_t staticOffset, ArrayRef staticSizes, - ArrayRef staticStrides, ValueRange offset, ValueRange sizes, - ValueRange strides, ArrayRef attrs) { - build(b, result, resultType, source, offset, sizes, strides, - b.getI64ArrayAttr(staticOffset), b.getI64ArrayAttr(staticSizes), - b.getI64ArrayAttr(staticStrides)); - result.addAttributes(attrs); -} - /// Build a MemRefReinterpretCastOp with all dynamic entries: `staticOffsets`, /// `staticSizes` and `staticStrides` are automatically filled with /// source-memref-rank sentinel values that encode dynamic entries. 
+void mlir::MemRefReinterpretCastOp::build(OpBuilder &b, OperationState &result, + MemRefType resultType, Value source, + OpFoldResult offset, + ArrayRef sizes, + ArrayRef strides, + ArrayRef attrs) { + SmallVector staticOffsets, staticSizes, staticStrides; + SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; + dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets, + ShapedType::kDynamicStrideOrOffset); + dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, + ShapedType::kDynamicSize); + dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, + ShapedType::kDynamicStrideOrOffset); + build(b, result, resultType, source, dynamicOffsets, dynamicSizes, + dynamicStrides, b.getI64ArrayAttr(staticOffsets), + b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides)); + result.addAttributes(attrs); +} + +void mlir::MemRefReinterpretCastOp::build(OpBuilder &b, OperationState &result, + MemRefType resultType, Value source, + int64_t offset, + ArrayRef sizes, + ArrayRef strides, + ArrayRef attrs) { + SmallVector sizeValues = + llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult { + return b.getI64IntegerAttr(v); + })); + SmallVector strideValues = llvm::to_vector<4>( + llvm::map_range(strides, [&](int64_t v) -> OpFoldResult { + return b.getI64IntegerAttr(v); + })); + build(b, result, resultType, source, b.getI64IntegerAttr(offset), sizeValues, + strideValues, attrs); +} + void mlir::MemRefReinterpretCastOp::build(OpBuilder &b, OperationState &result, MemRefType resultType, Value source, Value offset, ValueRange sizes, ValueRange strides, ArrayRef attrs) { - unsigned rank = resultType.getRank(); - SmallVector staticSizesVector(rank, ShapedType::kDynamicSize); - SmallVector staticStridesVector( - rank, ShapedType::kDynamicStrideOrOffset); - build(b, result, resultType, source, - /*staticOffset=*/ShapedType::kDynamicStrideOrOffset, staticSizesVector, - staticStridesVector, offset, sizes, strides, attrs); + SmallVector sizeValues = llvm::to_vector<4>( + llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; })); + SmallVector strideValues = llvm::to_vector<4>( + llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; })); + build(b, result, resultType, source, offset, sizeValues, strideValues, attrs); } /// Print a memref_reinterpret_cast op of the form: @@ -2866,69 +2917,110 @@ return parser.addTypeToList(dstType, result.types); } -void mlir::SubViewOp::build(OpBuilder &b, OperationState &result, Value source, - ArrayRef staticOffsets, - ArrayRef staticSizes, - ArrayRef staticStrides, ValueRange offsets, - ValueRange sizes, ValueRange strides, +// Build a SubViewOp with mixed static and dynamic entries and custom result +// type. If the type passed is nullptr, it is inferred. 
+void mlir::SubViewOp::build(OpBuilder &b, OperationState &result, + MemRefType resultType, Value source, + ArrayRef offsets, + ArrayRef sizes, + ArrayRef strides, ArrayRef attrs) { + SmallVector staticOffsets, staticSizes, staticStrides; + SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; + dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, + ShapedType::kDynamicStrideOrOffset); + dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, + ShapedType::kDynamicSize); + dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, + ShapedType::kDynamicStrideOrOffset); auto sourceMemRefType = source.getType().cast(); - auto resultType = inferResultType(sourceMemRefType, staticOffsets, - staticSizes, staticStrides); - build(b, result, resultType, source, offsets, sizes, strides, - b.getI64ArrayAttr(staticOffsets), b.getI64ArrayAttr(staticSizes), - b.getI64ArrayAttr(staticStrides)); + // Structuring implementation this way avoids duplication between builders. + if (!resultType) { + resultType = SubViewOp::inferResultType(sourceMemRefType, staticOffsets, + staticSizes, staticStrides) + .cast(); + } + build(b, result, resultType, source, dynamicOffsets, dynamicSizes, + dynamicStrides, b.getI64ArrayAttr(staticOffsets), + b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides)); result.addAttributes(attrs); } -/// Build a SubViewOp with all dynamic entries: `staticOffsets`, `staticSizes` -/// and `staticStrides` are automatically filled with source-memref-rank -/// sentinel values that encode dynamic entries. +// Build a SubViewOp with mixed static and dynamic entries and inferred result +// type. void mlir::SubViewOp::build(OpBuilder &b, OperationState &result, Value source, - ValueRange offsets, ValueRange sizes, - ValueRange strides, + ArrayRef offsets, + ArrayRef sizes, + ArrayRef strides, ArrayRef attrs) { - auto sourceMemRefType = source.getType().cast(); - unsigned rank = sourceMemRefType.getRank(); - SmallVector staticOffsetsVector; - staticOffsetsVector.assign(rank, ShapedType::kDynamicStrideOrOffset); - SmallVector staticSizesVector; - staticSizesVector.assign(rank, ShapedType::kDynamicSize); - SmallVector staticStridesVector; - staticStridesVector.assign(rank, ShapedType::kDynamicStrideOrOffset); - build(b, result, source, staticOffsetsVector, staticSizesVector, - staticStridesVector, offsets, sizes, strides, attrs); + build(b, result, MemRefType(), source, offsets, sizes, strides, attrs); } -/// Build a SubViewOp as above but with custom result type. +// Build a SubViewOp with static entries and inferred result type. +void mlir::SubViewOp::build(OpBuilder &b, OperationState &result, Value source, + ArrayRef offsets, ArrayRef sizes, + ArrayRef strides, + ArrayRef attrs) { + SmallVector offsetValues = llvm::to_vector<4>( + llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult { + return b.getI64IntegerAttr(v); + })); + SmallVector sizeValues = + llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult { + return b.getI64IntegerAttr(v); + })); + SmallVector strideValues = llvm::to_vector<4>( + llvm::map_range(strides, [&](int64_t v) -> OpFoldResult { + return b.getI64IntegerAttr(v); + })); + build(b, result, source, offsetValues, sizeValues, strideValues, attrs); +} + +// Build a SubViewOp with dynamic entries and custom result type. If the +// type passed is nullptr, it is inferred. 
void mlir::SubViewOp::build(OpBuilder &b, OperationState &result, MemRefType resultType, Value source, - ArrayRef staticOffsets, - ArrayRef staticSizes, - ArrayRef staticStrides, ValueRange offsets, - ValueRange sizes, ValueRange strides, + ArrayRef offsets, ArrayRef sizes, + ArrayRef strides, ArrayRef attrs) { - build(b, result, resultType, source, offsets, sizes, strides, - b.getI64ArrayAttr(staticOffsets), b.getI64ArrayAttr(staticSizes), - b.getI64ArrayAttr(staticStrides)); - result.addAttributes(attrs); + SmallVector offsetValues = llvm::to_vector<4>( + llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult { + return b.getI64IntegerAttr(v); + })); + SmallVector sizeValues = + llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult { + return b.getI64IntegerAttr(v); + })); + SmallVector strideValues = llvm::to_vector<4>( + llvm::map_range(strides, [&](int64_t v) -> OpFoldResult { + return b.getI64IntegerAttr(v); + })); + build(b, result, resultType, source, offsetValues, sizeValues, strideValues, + attrs); } -/// Build a SubViewOp as above but with custom result type. +// Build a SubViewOp with dynamic entries and custom result type. If the type +// passed is nullptr, it is inferred. void mlir::SubViewOp::build(OpBuilder &b, OperationState &result, MemRefType resultType, Value source, ValueRange offsets, ValueRange sizes, ValueRange strides, ArrayRef attrs) { - auto sourceMemRefType = source.getType().cast(); - unsigned rank = sourceMemRefType.getRank(); - SmallVector staticOffsetsVector( - rank, ShapedType::kDynamicStrideOrOffset); - SmallVector staticSizesVector(rank, ShapedType::kDynamicSize); - SmallVector staticStridesVector( - rank, ShapedType::kDynamicStrideOrOffset); - build(b, result, resultType, source, staticOffsetsVector, staticSizesVector, - staticStridesVector, offsets, sizes, strides, attrs); + SmallVector offsetValues = llvm::to_vector<4>( + llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; })); + SmallVector sizeValues = llvm::to_vector<4>( + llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; })); + SmallVector strideValues = llvm::to_vector<4>( + llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; })); + build(b, result, resultType, source, offsetValues, sizeValues, strideValues); +} + +// Build a SubViewOp with dynamic entries and inferred result type. +void mlir::SubViewOp::build(OpBuilder &b, OperationState &result, Value source, + ValueRange offsets, ValueRange sizes, + ValueRange strides, + ArrayRef attrs) { + build(b, result, MemRefType(), source, offsets, sizes, strides, attrs); } /// For ViewLikeOpInterface. @@ -3130,33 +3222,16 @@ namespace { -/// Take a list of `values` with potential new constant to extract and a list -/// of `constantValues` with`values.size()` sentinel that evaluate to true by -/// applying `isDynamic`. /// Detects the `values` produced by a ConstantIndexOp and places the new /// constant in place of the corresponding sentinel value. -void canonicalizeSubViewPart(SmallVectorImpl &values, - SmallVectorImpl &constantValues, +void canonicalizeSubViewPart(SmallVectorImpl &values, llvm::function_ref isDynamic) { - bool hasNewStaticValue = llvm::any_of( - values, [](Value val) { return matchPattern(val, m_ConstantIndex()); }); - if (hasNewStaticValue) { - for (unsigned cstIdx = 0, valIdx = 0, e = constantValues.size(); - cstIdx != e; ++cstIdx) { - // Was already static, skip. - if (!isDynamic(constantValues[cstIdx])) - continue; - // Newly static, move from Value to constant. 
- if (matchPattern(values[valIdx], m_ConstantIndex())) { - constantValues[cstIdx] = - cast(values[valIdx].getDefiningOp()).getValue(); - // Erase for impl. simplicity. Reverse iterator if we really must. - values.erase(std::next(values.begin(), valIdx)); - continue; - } - // Remains dynamic move to next value. - ++valIdx; - } + for (OpFoldResult &ofr : values) { + if (ofr.is()) + continue; + // Newly static, move from Value to constant. + if (auto cstOp = ofr.dyn_cast().getDefiningOp()) + ofr = OpBuilder(cstOp).getIndexAttr(cstOp.getValue()); } } @@ -3187,32 +3262,16 @@ // At least one of offsets/sizes/strides is a new constant. // Form the new list of operands and constant attributes from the existing. - SmallVector newOffsets(op.offsets()); - SmallVector newStaticOffsets = - extractFromI64ArrayAttr(op.static_offsets()); - std::array ranks = op.getArrayAttrRanks(); - (void)ranks; - assert(newStaticOffsets.size() == ranks[0]); - canonicalizeSubViewPart(newOffsets, newStaticOffsets, - ShapedType::isDynamicStrideOrOffset); - - SmallVector newSizes(op.sizes()); - SmallVector newStaticSizes = - extractFromI64ArrayAttr(op.static_sizes()); - assert(newStaticSizes.size() == ranks[1]); - canonicalizeSubViewPart(newSizes, newStaticSizes, ShapedType::isDynamic); - - SmallVector newStrides(op.strides()); - SmallVector newStaticStrides = - extractFromI64ArrayAttr(op.static_strides()); - assert(newStaticStrides.size() == ranks[2]); - canonicalizeSubViewPart(newStrides, newStaticStrides, - ShapedType::isDynamicStrideOrOffset); + SmallVector mixedOffsets(op.getMixedOffsets()); + SmallVector mixedSizes(op.getMixedSizes()); + SmallVector mixedStrides(op.getMixedStrides()); + canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset); + canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic); + canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset); // Create the new op in canonical form. - auto newOp = rewriter.create( - op.getLoc(), op.source(), newStaticOffsets, newStaticSizes, - newStaticStrides, newOffsets, newSizes, newStrides); + auto newOp = rewriter.create(op.getLoc(), op.source(), mixedOffsets, + mixedSizes, mixedStrides); replaceWithNewOp(rewriter, op, newOp); @@ -3439,97 +3498,6 @@ sourceRankedTensorType.getElementType()); } -void mlir::SubTensorOp::build(OpBuilder &b, OperationState &result, - Value source, ArrayRef staticOffsets, - ArrayRef staticSizes, - ArrayRef staticStrides, - ValueRange offsets, ValueRange sizes, - ValueRange strides, - ArrayRef attrs) { - auto sourceRankedTensorType = source.getType().cast(); - auto resultType = inferResultType(sourceRankedTensorType, staticOffsets, - staticSizes, staticStrides); - build(b, result, resultType, source, offsets, sizes, strides, - b.getI64ArrayAttr(staticOffsets), b.getI64ArrayAttr(staticSizes), - b.getI64ArrayAttr(staticStrides)); - result.addAttributes(attrs); -} - -/// Build a SubTensorOp with all dynamic entries: `staticOffsets`, `staticSizes` -/// and `staticStrides` are automatically filled with sentinel values that -/// encode dynamic entries. 
-void mlir::SubTensorOp::build(OpBuilder &b, OperationState &result, - Value source, ValueRange offsets, - ValueRange sizes, ValueRange strides, - ArrayRef attrs) { - auto sourceRankedTensorType = source.getType().cast(); - unsigned rank = sourceRankedTensorType.getRank(); - SmallVector staticOffsetsVector( - rank, ShapedType::kDynamicStrideOrOffset); - SmallVector staticSizesVector(rank, ShapedType::kDynamicSize); - SmallVector staticStridesVector( - rank, ShapedType::kDynamicStrideOrOffset); - build(b, result, source, staticOffsetsVector, staticSizesVector, - staticStridesVector, offsets, sizes, strides, attrs); -} - -/// Build a SubTensorOp as above but with custom result type. -void mlir::SubTensorOp::build(OpBuilder &b, OperationState &result, - RankedTensorType resultType, Value source, - ArrayRef staticOffsets, - ArrayRef staticSizes, - ArrayRef staticStrides, - ValueRange offsets, ValueRange sizes, - ValueRange strides, - ArrayRef attrs) { - build(b, result, resultType, source, offsets, sizes, strides, - b.getI64ArrayAttr(staticOffsets), b.getI64ArrayAttr(staticSizes), - b.getI64ArrayAttr(staticStrides)); - result.addAttributes(attrs); -} - -/// Build a SubTensorOp as above but with custom result type. -void mlir::SubTensorOp::build(OpBuilder &b, OperationState &result, - RankedTensorType resultType, Value source, - ValueRange offsets, ValueRange sizes, - ValueRange strides, - ArrayRef attrs) { - auto sourceRankedTensorType = source.getType().cast(); - unsigned rank = sourceRankedTensorType.getRank(); - SmallVector staticOffsetsVector( - rank, ShapedType::kDynamicStrideOrOffset); - SmallVector staticSizesVector(rank, ShapedType::kDynamicSize); - SmallVector staticStridesVector( - rank, ShapedType::kDynamicStrideOrOffset); - build(b, result, resultType, source, staticOffsetsVector, staticSizesVector, - staticStridesVector, offsets, sizes, strides, attrs); -} - -/// Dispatch `ofr` into either `dynamicVec` if it is a Value or into `staticVec` -/// otherwise. In the dynamic case, `sentinel` is appended to `staticVec` to -/// represent the dynamic value `?`. -static void unpackOpFoldResult(OpFoldResult ofr, - SmallVectorImpl &dynamicVec, - SmallVectorImpl &staticVec, - int64_t sentinel) { - Value v = ofr.dyn_cast(); - if (v) { - dynamicVec.push_back(v); - staticVec.push_back(sentinel); - } else { - APInt apInt = ofr.dyn_cast().cast().getValue(); - staticVec.push_back(apInt.getSExtValue()); - } -} - -static void unpackOpFoldResults(ArrayRef ofrs, - SmallVector &dynamicVec, - SmallVector &staticVec, - int64_t sentinel) { - for (auto ofr : ofrs) - unpackOpFoldResult(ofr, dynamicVec, staticVec, sentinel); -} - // Build a SubTensorOp with mixed static and dynamic entries and custom result // type. If the type passed is nullptr, it is inferred. 
void mlir::SubTensorOp::build(OpBuilder &b, OperationState &result, @@ -3540,12 +3508,12 @@ ArrayRef attrs) { SmallVector staticOffsets, staticSizes, staticStrides; SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; - unpackOpFoldResults(offsets, dynamicOffsets, staticOffsets, - ShapedType::kDynamicStrideOrOffset); - unpackOpFoldResults(sizes, dynamicSizes, staticSizes, - ShapedType::kDynamicSize); - unpackOpFoldResults(strides, dynamicStrides, staticStrides, - ShapedType::kDynamicStrideOrOffset); + dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, + ShapedType::kDynamicStrideOrOffset); + dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, + ShapedType::kDynamicSize); + dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, + ShapedType::kDynamicStrideOrOffset); auto sourceRankedTensorType = source.getType().cast(); // Structuring implementation this way avoids duplication between builders. if (!resultType) { @@ -3554,8 +3522,10 @@ staticSizes, staticStrides) .cast(); } - build(b, result, resultType, source, staticOffsets, staticSizes, - staticStrides, dynamicOffsets, dynamicSizes, dynamicStrides, attrs); + build(b, result, resultType, source, dynamicOffsets, dynamicSizes, + dynamicStrides, b.getI64ArrayAttr(staticOffsets), + b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides)); + result.addAttributes(attrs); } // Build a SubTensorOp with mixed static and dynamic entries and inferred result @@ -3568,6 +3538,30 @@ build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs); } +// Build a SubTensorOp with dynamic entries and custom result type. If the type +// passed is nullptr, it is inferred. +void mlir::SubTensorOp::build(OpBuilder &b, OperationState &result, + RankedTensorType resultType, Value source, + ValueRange offsets, ValueRange sizes, + ValueRange strides, + ArrayRef attrs) { + SmallVector offsetValues = llvm::to_vector<4>( + llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; })); + SmallVector sizeValues = llvm::to_vector<4>( + llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; })); + SmallVector strideValues = llvm::to_vector<4>( + llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; })); + build(b, result, resultType, source, offsetValues, sizeValues, strideValues); +} + +// Build a SubTensorOp with dynamic entries and inferred result type. +void mlir::SubTensorOp::build(OpBuilder &b, OperationState &result, + Value source, ValueRange offsets, + ValueRange sizes, ValueRange strides, + ArrayRef attrs) { + build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs); +} + /// Verifier for SubTensorOp. static LogicalResult verify(SubTensorOp op) { // Verify result type against inferred type. @@ -3633,36 +3627,6 @@ return parser.addTypeToList(dstType, result.types); } -void mlir::SubTensorInsertOp::build( - OpBuilder &b, OperationState &result, Value source, Value dest, - ArrayRef staticOffsets, ArrayRef staticSizes, - ArrayRef staticStrides, ValueRange offsets, ValueRange sizes, - ValueRange strides, ArrayRef attrs) { - build(b, result, dest.getType(), source, dest, offsets, sizes, strides, - b.getI64ArrayAttr(staticOffsets), b.getI64ArrayAttr(staticSizes), - b.getI64ArrayAttr(staticStrides)); - result.addAttributes(attrs); -} - -/// Build a SubViewOp with all dynamic entries: `staticOffsets`, `staticSizes` -/// and `staticStrides` are automatically filled with source-memref-rank -/// sentinel values that encode dynamic entries. 
-void mlir::SubTensorInsertOp::build(OpBuilder &b, OperationState &result, - Value source, Value dest, - ValueRange offsets, ValueRange sizes, - ValueRange strides, - ArrayRef attrs) { - auto destRankedTensorType = dest.getType().cast(); - unsigned rank = destRankedTensorType.getRank(); - SmallVector staticOffsetsVector( - rank, ShapedType::kDynamicStrideOrOffset); - SmallVector staticSizesVector(rank, ShapedType::kDynamicSize); - SmallVector staticStridesVector( - rank, ShapedType::kDynamicStrideOrOffset); - build(b, result, source, dest, staticOffsetsVector, staticSizesVector, - staticStridesVector, offsets, sizes, strides, attrs); -} - // Build a SubTensorInsertOp with mixed static and dynamic entries. void mlir::SubTensorInsertOp::build(OpBuilder &b, OperationState &result, Value source, Value dest, @@ -3672,14 +3636,31 @@ ArrayRef attrs) { SmallVector staticOffsets, staticSizes, staticStrides; SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; - unpackOpFoldResults(offsets, dynamicOffsets, staticOffsets, - ShapedType::kDynamicStrideOrOffset); - unpackOpFoldResults(sizes, dynamicSizes, staticSizes, - ShapedType::kDynamicSize); - unpackOpFoldResults(strides, dynamicStrides, staticStrides, - ShapedType::kDynamicStrideOrOffset); - build(b, result, source, dest, staticOffsets, staticSizes, staticStrides, - dynamicOffsets, dynamicSizes, dynamicStrides, attrs); + dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, + ShapedType::kDynamicStrideOrOffset); + dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, + ShapedType::kDynamicSize); + dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, + ShapedType::kDynamicStrideOrOffset); + build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes, + dynamicStrides, b.getI64ArrayAttr(staticOffsets), + b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides)); + result.addAttributes(attrs); +} + +// Build a SubTensorInsertOp with dynamic entries. +void mlir::SubTensorInsertOp::build(OpBuilder &b, OperationState &result, + Value source, Value dest, + ValueRange offsets, ValueRange sizes, + ValueRange strides, + ArrayRef attrs) { + SmallVector offsetValues = llvm::to_vector<4>( + llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; })); + SmallVector sizeValues = llvm::to_vector<4>( + llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; })); + SmallVector strideValues = llvm::to_vector<4>( + llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; })); + build(b, result, source, dest, offsetValues, sizeValues, strideValues); } /// Verifier for SubViewOp. 
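The following is a minimal illustrative sketch, not part of the patch, of how a caller migrates from the old split static/dynamic builders to the new OpFoldResult-based SubViewOp builder added above. It mirrors the patterns used in Promotion.cpp and Transforms.cpp in this diff (index attributes for static entries, SSA values for dynamic ones, result type inferred). The helper name createFullRankSubView is hypothetical, and the usual MLIR headers and `using namespace mlir;` are assumed.

// Sketch only: build a subview covering the whole `source` memref using the
// new mixed static/dynamic builder. Offsets and strides are static index
// attributes; sizes are dynamic dim values. The result type is inferred.
static Value createFullRankSubView(OpBuilder &b, Location loc, Value source) {
  int64_t rank = source.getType().cast<MemRefType>().getRank();
  // Static entries go in as IntegerAttrs wrapped in OpFoldResult.
  SmallVector<OpFoldResult, 4> offsets(rank, b.getIndexAttr(0));
  SmallVector<OpFoldResult, 4> strides(rank, b.getIndexAttr(1));
  // Dynamic entries stay SSA values; both kinds live in the same vector.
  SmallVector<OpFoldResult, 4> sizes;
  for (int64_t d = 0; d < rank; ++d)
    sizes.push_back(b.create<DimOp>(loc, source, d).getResult());
  return b.create<SubViewOp>(loc, source, offsets, sizes, strides);
}

Internally, the builder splits this mixed list back into `static_*` array attributes and dynamic operands via dispatchIndexOpFoldResults, which is why callers no longer pass sentinel-filled static vectors by hand.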
diff --git a/mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp b/mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp --- a/mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp +++ b/mlir/lib/Dialect/StandardOps/Transforms/ExpandOps.cpp @@ -83,7 +83,7 @@ return failure(); int64_t rank = shapeType.cast().getDimSize(0); - SmallVector sizes, strides; + SmallVector sizes, strides; sizes.resize(rank); strides.resize(rank); @@ -99,12 +99,9 @@ if (i > 0) stride = rewriter.create(loc, stride, size); } - SmallVector staticSizes(rank, ShapedType::kDynamicSize); - SmallVector staticStrides(rank, - ShapedType::kDynamicStrideOrOffset); rewriter.replaceOpWithNewOp( - op, op.getType(), op.source(), /*staticOffset = */ 0, staticSizes, - staticStrides, /*offset=*/llvm::None, sizes, strides); + op, op.getType(), op.source(), /*offset=*/rewriter.getIndexAttr(0), + sizes, strides); return success(); } }; diff --git a/mlir/lib/Dialect/Vector/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/VectorTransforms.cpp --- a/mlir/lib/Dialect/Vector/VectorTransforms.cpp +++ b/mlir/lib/Dialect/Vector/VectorTransforms.cpp @@ -2233,10 +2233,9 @@ // TODO: relax this precondition, will require rank-reducing subviews. assert(memrefRank == alloc.getType().cast().getRank() && "Expected memref rank to match the alloc rank"); - Value one = std_constant_index(1); ValueRange leadingIndices = xferOp.indices().take_front(xferOp.getLeadingShapedRank()); - SmallVector sizes; + SmallVector sizes; sizes.append(leadingIndices.begin(), leadingIndices.end()); xferOp.zipResultAndIndexing([&](int64_t resultIdx, int64_t indicesIdx) { using MapList = ArrayRef>; @@ -2252,8 +2251,12 @@ ValueRange{dimMemRef, index, dimAlloc}); sizes.push_back(affineMin); }); - return std_sub_view(xferOp.source(), xferOp.indices(), sizes, - SmallVector(memrefRank, one)); + + SmallVector indices = llvm::to_vector<4>(llvm::map_range( + xferOp.indices(), [](Value idx) -> OpFoldResult { return idx; })); + return std_sub_view( + xferOp.source(), indices, sizes, + SmallVector(memrefRank, OpBuilder(xferOp).getIndexAttr(1))); } /// Given an `xferOp` for which: diff --git a/mlir/test/Dialect/Linalg/fusion-sequence.mlir b/mlir/test/Dialect/Linalg/fusion-sequence.mlir --- a/mlir/test/Dialect/Linalg/fusion-sequence.mlir +++ b/mlir/test/Dialect/Linalg/fusion-sequence.mlir @@ -238,7 +238,7 @@ // CHECK-SAME: ins(%[[T1]], %[[ARG5]] // CHECK-SAME: ) outs(%[[STARG6]] : tensor) // CHECK: %[[R1:.+]] = subtensor_insert %[[T2]] -// CHECK-SAME: into %[[ARG8]][%[[IV0]], %[[C0]]] +// CHECK-SAME: into %[[ARG8]][%[[IV0]], 0] // CHECK: scf.yield %[[R1]] // CHECK: } // CHECK: return %[[R0]] diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir --- a/mlir/test/Dialect/Linalg/promote.mlir +++ b/mlir/test/Dialect/Linalg/promote.mlir @@ -7,7 +7,6 @@ #map3 = affine_map<(d0) -> (d0 + 3)> // CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)> -// CHECK-DAG: #[[$strided2D_dynamic:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)> func @matmul_f32(%A: memref, %M: index, %N: index, %K: index) { %c4 = constant 4 : index @@ -49,28 +48,28 @@ // ALLOCA: %[[tmpA:.*]] = alloca() : memref<32xi8> // CHECK: %[[fullA:.*]] = std.view %[[tmpA]][{{.*}}][{{.*}}] : memref<32xi8> to memref // DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref -// CHECK: %[[partialA:.*]] = subview %[[fullA]]{{.*}} : memref to memref +// CHECK: %[[partialA:.*]] = subview %[[fullA]]{{.*}} : memref to memref /// // CHECK: %[[tmpB:.*]] = 
alloc() : memref<48xi8> // ALLOCA: %[[tmpB:.*]] = alloca() : memref<48xi8> // CHECK: %[[fullB:.*]] = std.view %[[tmpB]][{{.*}}][{{.*}}] : memref<48xi8> to memref // DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref -// CHECK: %[[partialB:.*]] = subview %[[fullB]]{{.*}} : memref to memref +// CHECK: %[[partialB:.*]] = subview %[[fullB]]{{.*}} : memref to memref /// // CHECK: %[[tmpC:.*]] = alloc() : memref<24xi8> // ALLOCA: %[[tmpC:.*]] = alloca() : memref<24xi8> // CHECK: %[[fullC:.*]] = std.view %[[tmpC]][{{.*}}][{{.*}}] : memref<24xi8> to memref // DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref -// CHECK: %[[partialC:.*]] = subview %[[fullC]]{{.*}} : memref to memref +// CHECK: %[[partialC:.*]] = subview %[[fullC]]{{.*}} : memref to memref -// CHECK: linalg.copy(%[[vA]], %[[partialA]]) : memref, memref -// CHECK: linalg.copy(%[[vB]], %[[partialB]]) : memref, memref -// CHECK: linalg.copy(%[[vC]], %[[partialC]]) : memref, memref +// CHECK: linalg.copy(%[[vA]], %[[partialA]]) : memref, memref +// CHECK: linalg.copy(%[[vB]], %[[partialB]]) : memref, memref +// CHECK: linalg.copy(%[[vC]], %[[partialC]]) : memref, memref // // CHECK: linalg.matmul ins(%[[partialA]], %[[partialB]]{{.*}} outs(%[[partialC]] // // CHECK: linalg.copy(%[[partialC]], %[[vC]]) : -// CHECK: memref, +// CHECK: memref, // CHECK: memref // // CHECK: dealloc %[[tmpA]] : memref<32xi8> @@ -121,26 +120,26 @@ // CHECK: %[[tmpA_f64:.*]] = alloc() : memref<64xi8> // CHECK: %[[fullA_f64:.*]] = std.view %[[tmpA_f64]][{{.*}}][{{.*}}] : memref<64xi8> to memref // DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref -// CHECK: %[[partialA_f64:.*]] = subview %[[fullA_f64]][%{{.*}}, %{{.*}}] : memref to memref +// CHECK: %[[partialA_f64:.*]] = subview %[[fullA_f64]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref to memref /// // CHECK: %[[tmpB_f64:.*]] = alloc() : memref<96xi8> // CHECK: %[[fullB_f64:.*]] = std.view %[[tmpB_f64]][{{.*}}][{{.*}}] : memref<96xi8> to memref // DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref -// CHECK: %[[partialB_f64:.*]] = subview %[[fullB_f64]][%{{.*}}, %{{.*}}] : memref to memref +// CHECK: %[[partialB_f64:.*]] = subview %[[fullB_f64]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref to memref /// // CHECK: %[[tmpC_f64:.*]] = alloc() : memref<48xi8> // CHECK: %[[fullC_f64:.*]] = std.view %[[tmpC_f64]][{{.*}}][{{.*}}] : memref<48xi8> to memref // DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref -// CHECK: %[[partialC_f64:.*]] = subview %[[fullC_f64]][%{{.*}}, %{{.*}}] : memref to memref +// CHECK: %[[partialC_f64:.*]] = subview %[[fullC_f64]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref to memref -// CHECK: linalg.copy(%[[vA_f64]], %[[partialA_f64]]) : memref, memref -// CHECK: linalg.copy(%[[vB_f64]], %[[partialB_f64]]) : memref, memref -// CHECK: linalg.copy(%[[vC_f64]], %[[partialC_f64]]) : memref, memref +// CHECK: linalg.copy(%[[vA_f64]], %[[partialA_f64]]) : memref, memref +// CHECK: linalg.copy(%[[vB_f64]], %[[partialB_f64]]) : memref, memref +// CHECK: linalg.copy(%[[vC_f64]], %[[partialC_f64]]) : memref, memref // // CHECK: linalg.matmul ins(%[[partialA_f64]], %[[partialB_f64]]{{.*}} outs(%[[partialC_f64]] // // CHECK: linalg.copy(%[[partialC_f64]], %[[vC_f64]]) : -// CHECK: memref, +// CHECK: memref, // CHECK: memref // // CHECK: dealloc %[[tmpA_f64]] : memref<64xi8> diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir --- a/mlir/test/Dialect/Linalg/transform-patterns.mlir +++ 
b/mlir/test/Dialect/Linalg/transform-patterns.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s -test-linalg-transform-patterns=test-patterns | FileCheck %s -// CHECK-DAG: #[[$STRIDED_1D:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)> +// CHECK-DAG: #[[$STRIDED_1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)> // Map corresponding to a 2D memory access where the stride along the last dim is known to be 1. // CHECK-DAG: #[[$STRIDED_2D_u_1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)> // Map corresponding to a 2D memory access where the stride along all dims are unknown. @@ -48,7 +48,7 @@ // CHECK: scf.parallel {{.*}} step (%[[c5]]) // CHECK: scf.for {{.*}} step %[[c6]] // CHECK: linalg.matvec -// CHECK: ins({{.*}}, {{.*}}: memref, memref) +// CHECK: ins({{.*}}, {{.*}}: memref, memref) // CHECK: outs({{.*}}: memref) func @matmul(%A: memref, @@ -87,8 +87,8 @@ // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c3]] { // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c4]] { // CHECK: linalg.matmul -// CHECK: ins({{.*}}, {{.*}}: memref, memref) -// CHECK: outs({{.*}}: memref) +// CHECK: ins({{.*}}, {{.*}}: memref, memref) +// CHECK: outs({{.*}}: memref) #matmul_accesses = [ affine_map<(m, n, k) -> (m, k)>, @@ -172,7 +172,7 @@ // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c6]] // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c5]] // CHECK: linalg.matvec -// CHECK: ins({{.*}}, {{.*}}: memref, memref) +// CHECK: ins({{.*}}, {{.*}}: memref, memref) // CHECK: outs({{.*}}: memref) func @matmul_perm(%A: memref, @@ -205,8 +205,8 @@ // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c30]] { // CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c40]] { // CHECK: linalg.matmul -// CHECK: ins({{.*}}, {{.*}}: memref, memref) -// CHECK: outs({{.*}}: memref) +// CHECK: ins({{.*}}, {{.*}}: memref, memref) +// CHECK: outs({{.*}}: memref) func @promote_subview_matmul(%arg0: memref, %arg1: memref, @@ -250,13 +250,16 @@ // CHECK: %[[s2:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[a0:.*]] = alloc({{%.*}}) : memref // CHECK: %[[v0:.*]] = std.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref -// CHECK: %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref +// CHECK: %[[l0:.*]] = subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] +// CHECK-SAME: memref to memref // CHECK: %[[a1:.*]] = alloc({{%.*}}) : memref // CHECK: %[[v1:.*]] = std.view %[[a1]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref -// CHECK: %[[l1:.*]] = subview %[[v1]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref +// CHECK: %[[l1:.*]] = subview %[[v1]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] +// CHECK-SAME: memref to memref // CHECK: %[[a2:.*]] = alloc({{%.*}}) : memref // CHECK: %[[v2:.*]] = std.view %[[a2]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref -// CHECK: %[[l2:.*]] = subview %[[v2]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref +// CHECK: %[[l2:.*]] = subview %[[v2]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] +// CHECK-SAME: memref to memref // CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref, memref // CHECK: linalg.copy(%[[s1]], %[[l1]]) : memref, memref // CHECK: linalg.copy(%[[s2]], %[[l2]]) : memref, memref @@ -306,13 +309,13 @@ // CHECK: %[[s2:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[a0:.*]] = alloc({{%.*}}) : memref // CHECK: %[[v0:.*]] = std.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref -// CHECK: %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] 
[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref +// CHECK: %[[l0:.*]] = subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref to memref // CHECK-NOT: %[[a1:.*]] = alloc({{%.*}}) : memref // CHECK-NOT: %[[v1:.*]] = std.view %[[a1]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref -// CHECK-NOT: %[[l0:.*]] = subview %[[v1]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref +// CHECK-NOT: %[[l0:.*]] = subview %[[v1]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref to memref // CHECK-NOT: %[[a2:.*]] = alloc({{%.*}}) : memref // CHECK-NOT: %[[v2:.*]] = std.view %[[a2]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref -// CHECK-NOT: %[[l0:.*]] = subview %[[v2]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref +// CHECK-NOT: %[[l0:.*]] = subview %[[v2]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref to memref // CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref, memref // CHECK-NOT: linalg.copy(%[[s1]], %[[l1]]) : memref, memref // CHECK-NOT: linalg.copy(%[[s2]], %[[l2]]) : memref, memref^ @@ -337,7 +340,7 @@ // CHECK: %[[s0:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[a0:.*]] = alloc({{%.*}}) {alignment = 32 : i64} : memref // CHECK: %[[v0:.*]] = std.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref -// CHECK: %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref +// CHECK: %[[l0:.*]] = subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1] : memref to memref // CHECK: linalg.fill(%[[v0]], {{%.*}}) : memref, f32 // CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref, memref // CHECK: linalg.fill(%[[v0]], %[[cf]]) : memref, f32 diff --git a/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir b/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir --- a/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir +++ b/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir @@ -8,7 +8,7 @@ // LINALG-DAG: #[[$map_p4:.*]] = affine_map<()[s0] -> (s0 + 4)> // LINALG-DAG: #[[$map_p8:.*]] = affine_map<()[s0] -> (s0 + 8)> // LINALG-DAG: #[[$map_2d_stride_1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)> -// LINALG-DAG: #[[$map_2d_dynamic:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)> +// LINALG-DAG: #[[$map_2d_stride_8x1:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1)> // LINALG-DAG: #[[$bounds_map_4:.*]] = affine_map<(d0, d1, d2) -> (d0 - d1, 4)> // LINALG-DAG: #[[$bounds_map_8:.*]] = affine_map<(d0, d1, d2) -> (d0 - d1, 8)> @@ -58,7 +58,6 @@ // CHECK_SAME: {masked = [false, false]} : memref, vector<4x8xf32> // LINALG-DAG: %[[c0:.*]] = constant 0 : index - // LINALG-DAG: %[[c1:.*]] = constant 1 : index // LINALG-DAG: %[[c4:.*]] = constant 4 : index // LINALG-DAG: %[[c8:.*]] = constant 8 : index // LINALG-DAG: %[[cst:.*]] = constant 0.000000e+00 : f32 @@ -82,9 +81,9 @@ // LINALG: %[[d0:.*]] = dim %[[A]], %[[c0]] : memref // LINALG: %[[sv0:.*]] = affine.min #[[$bounds_map_4]](%[[d0]], %[[i]], %[[c4]]) // LINALG: %[[sv1:.*]] = affine.min #[[$bounds_map_8]](%[[c8]], %[[j]], %[[c8]]) - // LINALG: %[[sv:.*]] = subview %[[A]][%[[i]], %[[j]]] [%[[sv0]], %[[sv1]]] [%[[c1]], %[[c1]]] - // LINALG-SAME: memref to memref - // LINALG: linalg.copy(%[[sv]], %[[alloc]]) : memref, memref<4x8xf32> + // LINALG: %[[sv:.*]] = subview %[[A]][%[[i]], %[[j]]] [%[[sv0]], %[[sv1]]] [1, 1] + // LINALG-SAME: memref to memref + // LINALG: linalg.copy(%[[sv]], %[[alloc]]) : memref, memref<4x8xf32> // LINALG: %[[yielded:.*]] = 
memref_cast %[[alloc]] : // LINALG-SAME: memref<4x8xf32> to memref // LINALG: scf.yield %[[yielded]], %[[c0]], %[[c0]] : @@ -150,7 +149,6 @@ // CHECK-SAME: memref, vector<4x8xf32> // LINALG-DAG: %[[c0:.*]] = constant 0 : index - // LINALG-DAG: %[[c1:.*]] = constant 1 : index // LINALG-DAG: %[[c4:.*]] = constant 4 : index // LINALG-DAG: %[[c7:.*]] = constant 7 : index // LINALG-DAG: %[[c8:.*]] = constant 8 : index @@ -176,9 +174,9 @@ // LINALG: linalg.fill(%[[alloc]], %[[cst]]) : memref<4x8xf32>, f32 // LINALG: %[[sv0:.*]] = affine.min #[[$bounds_map_4]](%[[c7]], %[[i]], %[[c4]]) // LINALG: %[[sv1:.*]] = affine.min #[[$bounds_map_8]](%[[c8]], %[[j]], %[[c8]]) - // LINALG: %[[sv:.*]] = subview %[[A]][%[[i]], %[[j]]] [%[[sv0]], %[[sv1]]] [%[[c1]], %[[c1]]] - // LINALG-SAME: memref<7x8xf32, #[[$map_2d_stride_1]]> to memref - // LINALG: linalg.copy(%[[sv]], %[[alloc]]) : memref, memref<4x8xf32> + // LINALG: %[[sv:.*]] = subview %[[A]][%[[i]], %[[j]]] [%[[sv0]], %[[sv1]]] [1, 1] + // LINALG-SAME: memref<7x8xf32, #[[$map_2d_stride_1]]> to memref + // LINALG: linalg.copy(%[[sv]], %[[alloc]]) : memref, memref<4x8xf32> // LINALG: %[[yielded:.*]] = memref_cast %[[alloc]] : // LINALG-SAME: memref<4x8xf32> to memref // LINALG: scf.yield %[[yielded]], %[[c0]], %[[c0]] :
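As a closing illustration (again not part of the patch), here is a minimal sketch of reading the mixed entries back through the new getMixed* accessors from ViewLikeInterface.td together with the OpFoldResult stream operator and dump() added in OpDefinition.h. The helper name printMixedSizes is hypothetical and the usual MLIR/LLVM support headers are assumed.

// Sketch only: inspect which sizes of a subview are static attributes and
// which are dynamic SSA values, using the new mixed accessors.
static void printMixedSizes(SubViewOp subViewOp) {
  for (OpFoldResult ofr : subViewOp.getMixedSizes()) {
    if (ofr.is<Attribute>())
      llvm::errs() << "static size: " << ofr << "\n";  // prints the IntegerAttr
    else
      llvm::errs() << "dynamic size: " << ofr << "\n"; // prints the SSA value
  }
  // Individual entries can also be inspected with the new OpFoldResult::dump().
}

This is the same access pattern the canonicalization in Ops.cpp relies on: entries that are already Attributes are skipped, and Values defined by ConstantIndexOp are folded into attributes, which is what the updated test CHECK lines (e.g. subview %[[v0]][0, 0] [%{{.*}}, %{{.*}}] [1, 1]) reflect.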