diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -365,7 +365,6 @@
            "dialect";
   let constructor = "mlir::createConvertShapeToStandardPass()";
   let dependentDialects = [
-    "memref::MemRefDialect",
    "StandardOpsDialect",
    "scf::SCFDialect",
    "tensor::TensorDialect"
diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRef.h b/mlir/include/mlir/Dialect/MemRef/IR/MemRef.h
--- a/mlir/include/mlir/Dialect/MemRef/IR/MemRef.h
+++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRef.h
@@ -34,6 +34,10 @@
 /// Given an operation, retrieves the value of each dynamic dimension through
 /// constructing the necessary DimOp operators.
 SmallVector<Value, 4> getDynOperands(Location loc, Value val, OpBuilder &b);
+
+// Helper function that creates a memref::DimOp or tensor::DimOp depending on
+// the type of `source`.
+Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim);
 } // namespace mlir

 //===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
--- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
+++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
@@ -512,7 +512,7 @@
 // DimOp
 //===----------------------------------------------------------------------===//

-def DimOp : MemRef_Op<"dim", [NoSideEffect, MemRefsNormalizable]> {
+def MemRef_DimOp : MemRef_Op<"dim", [NoSideEffect, MemRefsNormalizable]> {
   let summary = "dimension index operation";
   let description = [{
     The `dim` operation takes a memref and a dimension operand of type `index`.
@@ -538,18 +538,17 @@
     ```
   }];

-  let arguments = (ins AnyTypeOf<[AnyTensor, AnyRankedOrUnrankedMemRef],
-                                 "any memref or tensor type">:$memrefOrTensor,
+  let arguments = (ins AnyRankedOrUnrankedMemRef:$source,
                        Index:$index);
   let results = (outs Index:$result);

   let assemblyFormat = [{
-    attr-dict $memrefOrTensor `,` $index `:` type($memrefOrTensor)
+    attr-dict $source `,` $index `:` type($source)
   }];

   let builders = [
-    OpBuilder<(ins "Value":$memrefOrTensor, "int64_t":$index)>,
-    OpBuilder<(ins "Value":$memrefOrTensor, "Value":$index)>
+    OpBuilder<(ins "Value":$source, "int64_t":$index)>,
+    OpBuilder<(ins "Value":$source, "Value":$index)>
   ];

   let extraClassDeclaration = [{
@@ -1288,6 +1287,7 @@

   let assemblyFormat = "$memref attr-dict `:` type($memref)";

+  let hasCanonicalizer = 1;
   let hasFolder = 1;
 }

diff --git a/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td b/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td
--- a/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td
@@ -14,7 +14,7 @@
 def StdBufferize : FunctionPass<"std-bufferize"> {
   let summary = "Bufferize the std dialect";
   let constructor = "mlir::createStdBufferizePass()";
-  let dependentDialects = ["scf::SCFDialect"];
+  let dependentDialects = ["memref::MemRefDialect", "scf::SCFDialect"];
 }

 def StdExpandOps : FunctionPass<"std-expand"> {
diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td
--- a/mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td
+++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td
@@ -14,6 +14,7 @@
 def Tensor_Dialect : Dialect {
   let name = "tensor";
   let cppNamespace = "::mlir::tensor";
+
   let description = [{
     The `tensor` dialect is intended to hold core tensor creation and
     manipulation ops, which are not strongly associated with any particular
@@ -43,6 +44,8 @@
     dialect), and does not live in this dialect.
   }];
+
+  let hasConstantMaterializer = 1;
 }

 #endif // TENSOR_BASE
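The `createOrFoldDimOp` helper declared above is the migration hook for shape-agnostic code. A minimal usage sketch, hedged: the `getAllDimSizes` wrapper below is hypothetical and not part of this patch, only `createOrFoldDimOp` is.

```cpp
#include "mlir/Dialect/MemRef/IR/MemRef.h"

using namespace mlir;

// Hypothetical helper: collect every dimension size of a shaped value
// without caring whether it is a tensor or a memref. createOrFoldDimOp
// (declared in MemRef.h above) emits tensor.dim for tensors and
// memref.dim for memrefs, folding static sizes to constants.
static SmallVector<Value, 4> getAllDimSizes(OpBuilder &b, Location loc,
                                            Value v) {
  auto shapedType = v.getType().cast<ShapedType>();
  SmallVector<Value, 4> sizes;
  for (int64_t i = 0, e = shapedType.getRank(); i != e; ++i)
    sizes.push_back(createOrFoldDimOp(b, loc, v, i));
  return sizes;
}
```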
diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
--- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
+++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
@@ -60,6 +60,58 @@
   let verifier = ?;
 }

+//===----------------------------------------------------------------------===//
+// DimOp
+//===----------------------------------------------------------------------===//
+
+def Tensor_DimOp : Tensor_Op<"dim", [NoSideEffect]> {
+  let summary = "dimension index operation";
+  let description = [{
+    The `dim` operation takes a tensor and a dimension operand of type `index`.
+    It returns the size of the requested dimension of the given tensor.
+    If the dimension index is out of bounds, the behavior is undefined.
+
+    The specified tensor type is that of the first operand.
+
+    Example:
+
+    ```mlir
+    // Always returns 4, can be constant folded:
+    %c0 = constant 0 : index
+    %x = tensor.dim %A, %c0 : tensor<4x?xf32>
+
+    // Returns the dynamic dimension of %A.
+    %c1 = constant 1 : index
+    %y = tensor.dim %A, %c1 : tensor<4x?xf32>
+
+    // Equivalent generic form:
+    %x = "tensor.dim"(%A, %c0) : (tensor<4x?xf32>, index) -> index
+    %y = "tensor.dim"(%A, %c1) : (tensor<4x?xf32>, index) -> index
+    ```
+  }];
+
+  let arguments = (ins AnyTensor:$source,
+                       Index:$index);
+  let results = (outs Index:$result);
+
+  let assemblyFormat = [{
+    attr-dict $source `,` $index `:` type($source)
+  }];
+
+  let builders = [
+    OpBuilder<(ins "Value":$source, "int64_t":$index)>,
+    OpBuilder<(ins "Value":$source, "Value":$index)>
+  ];
+
+  let extraClassDeclaration = [{
+    /// Helper function to get the index as a simple integer if it is constant.
+    Optional<int64_t> getConstantIndex();
+  }];
+
+  let hasCanonicalizer = 1;
+  let hasFolder = 1;
+}
+
 //===----------------------------------------------------------------------===//
 // ExtractOp
 //===----------------------------------------------------------------------===//
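For reference, a sketch of how the generated builders and `getConstantIndex` above are meant to be used from C++; `b`, `loc`, and the tensor-typed `source` value are assumed context:

```cpp
// The int64_t overload materializes a ConstantIndexOp internally.
Value size0 = b.create<tensor::DimOp>(loc, source, 0);

// The Value overload takes an existing index SSA value.
Value c1 = b.create<ConstantIndexOp>(loc, 1);
Value size1 = b.create<tensor::DimOp>(loc, source, c1);

// getConstantIndex() recovers the dimension number when it is constant.
if (auto dimOp = size1.getDefiningOp<tensor::DimOp>())
  if (Optional<int64_t> idx = dimOp.getConstantIndex())
    assert(*idx == 1);
```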
diff --git a/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td b/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
--- a/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
@@ -14,7 +14,7 @@
 def TensorBufferize : FunctionPass<"tensor-bufferize"> {
   let summary = "Bufferize the `tensor` dialect";
   let constructor = "mlir::createTensorBufferizePass()";
-  let dependentDialects = ["scf::SCFDialect"];
+  let dependentDialects = ["scf::SCFDialect", "memref::MemRefDialect"];
 }

 #endif // MLIR_DIALECT_TENSOR_TRANSFORMS_PASSES
diff --git a/mlir/lib/Conversion/ShapeToStandard/CMakeLists.txt b/mlir/lib/Conversion/ShapeToStandard/CMakeLists.txt
--- a/mlir/lib/Conversion/ShapeToStandard/CMakeLists.txt
+++ b/mlir/lib/Conversion/ShapeToStandard/CMakeLists.txt
@@ -18,7 +18,6 @@
   LINK_LIBS PUBLIC
   MLIRIR
-  MLIRMemRef
   MLIRShape
   MLIRTensor
   MLIRPass
diff --git a/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp b/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
--- a/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
+++ b/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
@@ -9,7 +9,6 @@
 #include "mlir/Conversion/ShapeToStandard/ShapeToStandard.h"

 #include "../PassDetail.h"
-#include "mlir/Dialect/MemRef/IR/MemRef.h"
 #include "mlir/Dialect/SCF/SCF.h"
 #include "mlir/Dialect/Shape/IR/Shape.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
@@ -140,7 +139,7 @@
   // dimension in the tensor.
   SmallVector<Value> ranks, rankDiffs;
   llvm::append_range(ranks, llvm::map_range(transformed.shapes(), [&](Value v) {
-                       return lb.create<memref::DimOp>(v, zero);
+                       return lb.create<tensor::DimOp>(v, zero);
                      }));

   // Find the maximum rank
@@ -254,7 +253,7 @@
   // dimension in the tensor.
   SmallVector<Value> ranks, rankDiffs;
   llvm::append_range(ranks, llvm::map_range(transformed.shapes(), [&](Value v) {
-                       return lb.create<memref::DimOp>(v, zero);
+                       return lb.create<tensor::DimOp>(v, zero);
                      }));

   // Find the maximum rank
@@ -346,7 +345,7 @@
   // circumvents the necessity to materialize the shape in memory.
   if (auto shapeOfOp = op.shape().getDefiningOp<ShapeOfOp>()) {
     if (shapeOfOp.arg().getType().isa<ShapedType>()) {
-      rewriter.replaceOpWithNewOp<memref::DimOp>(op, shapeOfOp.arg(),
+      rewriter.replaceOpWithNewOp<tensor::DimOp>(op, shapeOfOp.arg(),
                                                  transformed.dim());
       return success();
     }
@@ -377,7 +376,7 @@
     return failure();

   shape::RankOp::Adaptor transformed(operands);
-  rewriter.replaceOpWithNewOp<memref::DimOp>(op, transformed.shape(), 0);
+  rewriter.replaceOpWithNewOp<tensor::DimOp>(op, transformed.shape(), 0);
   return success();
 }

@@ -407,7 +406,7 @@
   Value one = rewriter.create<ConstantIndexOp>(loc, 1);
   Type indexTy = rewriter.getIndexType();
   Value rank =
-      rewriter.create<memref::DimOp>(loc, indexTy, transformed.shape(), zero);
+      rewriter.create<tensor::DimOp>(loc, indexTy, transformed.shape(), zero);

   auto loop = rewriter.create<scf::ForOp>(
       loc, zero, rank, one, op.initVals(),
@@ -494,11 +493,11 @@
   Value zero = rewriter.create<ConstantIndexOp>(loc, 0);
   Value firstShape = transformed.shapes().front();
   Value firstRank =
-      rewriter.create<memref::DimOp>(loc, indexTy, firstShape, zero);
+      rewriter.create<tensor::DimOp>(loc, indexTy, firstShape, zero);
   Value result = nullptr;
   // Generate a linear sequence of compares, all with firstShape as lhs.
   for (Value shape : transformed.shapes().drop_front(1)) {
-    Value rank = rewriter.create<memref::DimOp>(loc, indexTy, shape, zero);
+    Value rank = rewriter.create<tensor::DimOp>(loc, indexTy, shape, zero);
     Value eqRank = rewriter.create<CmpIOp>(loc, CmpIPredicate::eq, firstRank, rank);
     auto same = rewriter.create<IfOp>(
@@ -563,7 +562,7 @@
   int64_t rank = rankedTensorTy.getRank();
   for (int64_t i = 0; i < rank; i++) {
     if (rankedTensorTy.isDynamicDim(i)) {
-      Value extent = rewriter.create<memref::DimOp>(loc, tensor, i);
+      Value extent = rewriter.create<tensor::DimOp>(loc, tensor, i);
       extentValues.push_back(extent);
     } else {
       Value extent =
@@ -587,7 +586,7 @@
       op, getExtentTensorType(ctx), ValueRange{rank},
       [&](OpBuilder &b, Location loc, ValueRange args) {
         Value dim = args.front();
-        Value extent = b.create<memref::DimOp>(loc, tensor, dim);
+        Value extent = b.create<tensor::DimOp>(loc, tensor, dim);
         b.create<tensor::YieldOp>(loc, extent);
       });

@@ -617,7 +616,7 @@
   SplitAtOp::Adaptor transformed(op);
   ImplicitLocOpBuilder b(op.getLoc(), rewriter);
   Value zero = b.create<ConstantIndexOp>(0);
-  Value rank = b.create<memref::DimOp>(transformed.operand(), zero);
+  Value rank = b.create<tensor::DimOp>(transformed.operand(), zero);

   // index < 0 ? index + rank : index
   Value originalIndex = transformed.index();
@@ -675,8 +674,8 @@
   // Setup target legality.
   MLIRContext &ctx = getContext();
   ConversionTarget target(ctx);
-  target.addLegalDialect<memref::MemRefDialect, StandardOpsDialect,
-                         SCFDialect, tensor::TensorDialect>();
+  target
+      .addLegalDialect<StandardOpsDialect, SCFDialect, tensor::TensorDialect>();
   target.addLegalOp<CstrRequireOp, FuncOp, ModuleOp>();

   // Setup conversion patterns.
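All of the rewrites above follow one idiom: a shape materialized as an extent tensor (`tensor<?xindex>`) has its rank read as the extent tensor's dimension zero, which after this patch is a `tensor.dim` rather than a `memref.dim`. A minimal sketch of the idiom; the free-standing helper name is illustrative:

```cpp
// Hypothetical helper mirroring the lowerings above: the rank of an
// extent tensor is its size along dimension 0. Using tensor.dim keeps
// tensor-only lowerings free of memref ops.
static mlir::Value getRankOfExtentTensor(mlir::OpBuilder &b,
                                         mlir::Location loc,
                                         mlir::Value extentTensor) {
  mlir::Value zero = b.create<mlir::ConstantIndexOp>(loc, 0);
  return b.create<mlir::tensor::DimOp>(loc, extentTensor, zero);
}
```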
diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -2965,7 +2965,7 @@
   LogicalResult
   matchAndRewrite(memref::DimOp dimOp, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
-    Type operandType = dimOp.memrefOrTensor().getType();
+    Type operandType = dimOp.source().getType();
     if (operandType.isa<UnrankedMemRefType>()) {
       rewriter.replaceOp(dimOp, {extractSizeOfUnrankedMemRef(
                                     operandType, dimOp, operands, rewriter)});
@@ -2977,7 +2977,7 @@
                                     operandType, dimOp, operands, rewriter)});
       return success();
     }
-    return failure();
+    llvm_unreachable("expected MemRefType or UnrankedMemRefType");
   }

 private:
@@ -2995,7 +2995,7 @@
     // Extract pointer to the underlying ranked descriptor and bitcast it to a
     // memref descriptor pointer to minimize the number of GEP
     // operations.
-    UnrankedMemRefDescriptor unrankedDesc(transformed.memrefOrTensor());
+    UnrankedMemRefDescriptor unrankedDesc(transformed.source());
     Value underlyingRankedDesc = unrankedDesc.memRefDescPtr(rewriter, loc);
     Value scalarMemRefDescPtr = rewriter.create<LLVM::BitcastOp>(
         loc,
@@ -3033,7 +3033,7 @@
       int64_t i = index.getValue();
       if (memRefType.isDynamicDim(i)) {
         // extract dynamic size from the memref descriptor.
-        MemRefDescriptor descriptor(transformed.memrefOrTensor());
+        MemRefDescriptor descriptor(transformed.source());
         return descriptor.size(rewriter, loc, i);
       }
       // Use constant for static size.
@@ -3042,7 +3042,7 @@
     }
     Value index = dimOp.index();
     int64_t rank = memRefType.getRank();
-    MemRefDescriptor memrefDescriptor(transformed.memrefOrTensor());
+    MemRefDescriptor memrefDescriptor(transformed.source());
     return memrefDescriptor.size(rewriter, loc, index, rank);
   }
 };
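The switch from `return failure()` to `llvm_unreachable` is justified by the ODS change at the top of this patch: `memref.dim` now only admits memref operands, so a verified module cannot reach this lowering with any other source type. A hypothetical restatement of the invariant being relied on:

```cpp
// After the split, these two cases are exhaustive for any memref.dim
// that passed verification, so the lowering no longer needs a failure
// path for tensor-typed sources.
static bool isLegalMemRefDimSource(mlir::Type operandType) {
  return operandType.isa<mlir::MemRefType, mlir::UnrankedMemRefType>();
}
```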
diff --git a/mlir/lib/Conversion/TosaToLinalg/CMakeLists.txt b/mlir/lib/Conversion/TosaToLinalg/CMakeLists.txt
--- a/mlir/lib/Conversion/TosaToLinalg/CMakeLists.txt
+++ b/mlir/lib/Conversion/TosaToLinalg/CMakeLists.txt
@@ -14,7 +14,6 @@
   MLIRLinalg
   MLIRLinalgUtils
   MLIRMath
-  MLIRMemRef
   MLIRPass
   MLIRTensor
   MLIRTosa
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -13,7 +13,6 @@
 #include "mlir/Conversion/TosaToLinalg/TosaToLinalg.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Math/IR/Math.h"
-#include "mlir/Dialect/MemRef/IR/MemRef.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/Dialect/Tosa/IR/TosaOps.h"
@@ -1720,12 +1719,12 @@
   offsets.resize(rank, rewriter.create<ConstantIndexOp>(loc, 0));

   for (int i = 0; i < rank; ++i) {
-    sizes.push_back(rewriter.create<memref::DimOp>(loc, args[0], i));
+    sizes.push_back(rewriter.create<tensor::DimOp>(loc, args[0], i));
   }

   Value resultDimSize = sizes[axis];
   for (auto arg : args.drop_front()) {
-    auto size = rewriter.create<memref::DimOp>(loc, arg, axisValue);
+    auto size = rewriter.create<tensor::DimOp>(loc, arg, axisValue);
     resultDimSize = rewriter.create<AddIOp>(loc, resultDimSize, size);
   }
   sizes[axis] = resultDimSize;
@@ -1739,7 +1738,7 @@
       rewriter.create<linalg::FillOp>(loc, zeroVal, init).getResult(0);

   for (auto arg : args) {
-    sizes[axis] = rewriter.create<memref::DimOp>(loc, arg, axisValue);
+    sizes[axis] = rewriter.create<tensor::DimOp>(loc, arg, axisValue);
     result = rewriter.create<tensor::InsertSliceOp>(loc, arg, result, offsets,
                                                     sizes, strides);
     offsets[axis] = rewriter.create<AddIOp>(loc, offsets[axis], sizes[axis]);
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp
@@ -14,7 +14,6 @@
 #include "mlir/Conversion/TosaToLinalg/TosaToLinalg.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Math/IR/Math.h"
-#include "mlir/Dialect/MemRef/IR/MemRef.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/Dialect/Tosa/IR/TosaOps.h"
@@ -34,15 +33,14 @@
 public:
   void getDependentDialects(DialectRegistry &registry) const override {
     registry.insert<linalg::LinalgDialect, math::MathDialect,
-                    memref::MemRefDialect, StandardOpsDialect,
-                    tensor::TensorDialect>();
+                    StandardOpsDialect, tensor::TensorDialect>();
   }

   void runOnFunction() override {
     RewritePatternSet patterns(&getContext());
     ConversionTarget target(getContext());
-    target.addLegalDialect<linalg::LinalgDialect, memref::MemRefDialect,
-                           StandardOpsDialect, tensor::TensorDialect>();
+    target.addLegalDialect<linalg::LinalgDialect, StandardOpsDialect,
+                           tensor::TensorDialect>();
     target.addIllegalDialect<tosa::TosaDialect>();

     // Not every TOSA op can be legalized to linalg.
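Downstream conversions follow the same mechanical recipe as the TOSA changes above. A hypothetical out-of-tree pattern after migration (names invented for illustration; the in-tree patterns in this patch, e.g. `DimOfShapedTypeOpInterface` below, use the same shape):

```cpp
// Hypothetical downstream pattern, templated so one implementation
// serves both ops after the split; source() replaces the removed
// memrefOrTensor() accessor.
template <typename DimOpTy>
struct MyDimPattern : public mlir::OpRewritePattern<DimOpTy> {
  using mlir::OpRewritePattern<DimOpTy>::OpRewritePattern;

  mlir::LogicalResult
  matchAndRewrite(DimOpTy dimOp,
                  mlir::PatternRewriter &rewriter) const override {
    mlir::Value src = dimOp.source();
    (void)src;
    // ... type-specific rewrite logic on `src` ...
    return mlir::failure();
  }
};

// Registered for both ops:
//   patterns.add<MyDimPattern<memref::DimOp>,
//                MyDimPattern<tensor::DimOp>>(ctx);
```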
diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp --- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp +++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp @@ -166,7 +166,7 @@ Location loc = xferOp.getLoc(); ImplicitLocOpBuilder lb(xferOp.getLoc(), b); if (!xferOp.isDimInBounds(0) && !isBroadcast) { - Value memrefDim = lb.create(xferOp.source(), *dim); + Value memrefDim = createOrFoldDimOp(b, loc, xferOp.source(), *dim); AffineExpr d0, d1; bindDims(xferOp.getContext(), d0, d1); Value base = xferOp.indices()[dim.getValue()]; diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp --- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp +++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp @@ -58,13 +58,14 @@ if (value.isa()) return legalityCheck(mapping.lookup(value), dest); - // If it's a top-level value beacuse it's defined in the region, + // If it's a top-level value because it's defined in the region, // it can only be inlined if the defining op is a constant or a // `dim`, which can appear anywhere and be valid, since the defining // op won't be top-level anymore after inlining. Attribute operandCst; return matchPattern(value.getDefiningOp(), m_Constant(&operandCst)) || - value.getDefiningOp(); + value.getDefiningOp() || + value.getDefiningOp(); } /// Checks if all values known to be legal affine dimensions or symbols in `src` @@ -296,7 +297,9 @@ // The dim op is okay if its operand memref/tensor is defined at the top // level. if (auto dimOp = dyn_cast(op)) - return isTopLevelValue(dimOp.memrefOrTensor()); + return isTopLevelValue(dimOp.source()); + if (auto dimOp = dyn_cast(op)) + return isTopLevelValue(dimOp.source()); return false; } @@ -317,14 +320,15 @@ } /// Returns true if the result of the dim op is a valid symbol for `region`. -static bool isDimOpValidSymbol(memref::DimOp dimOp, Region *region) { - // The dim op is okay if its operand memref is defined at the top level. - if (isTopLevelValue(dimOp.memrefOrTensor())) +template +static bool isDimOpValidSymbol(OpTy dimOp, Region *region) { + // The dim op is okay if its source is defined at the top level. + if (isTopLevelValue(dimOp.source())) return true; // Conservatively handle remaining BlockArguments as non-valid symbols. // E.g. scf.for iterArgs. - if (dimOp.memrefOrTensor().isa()) + if (dimOp.source().template isa()) return false; // The dim op is also okay if its operand memref is a view/subview whose @@ -333,7 +337,7 @@ assert(index.hasValue() && "expect only `dim` operations with a constant index"); int64_t i = index.getValue(); - return TypeSwitch(dimOp.memrefOrTensor().getDefiningOp()) + return TypeSwitch(dimOp.source().getDefiningOp()) .Case( [&](auto op) { return isMemRefSizeValidSymbol(op, i, region); }) .Default([](Operation *) { return false; }); @@ -362,7 +366,7 @@ return false; } -/// A value can be used as a symbol for `region` iff it meets onf of the the +/// A value can be used as a symbol for `region` iff it meets one of the /// following conditions: /// *) It is a constant. /// *) It is the result of an affine apply operation with symbol arguments. @@ -405,6 +409,8 @@ // Dim op results could be valid symbols at any level. if (auto dimOp = dyn_cast(defOp)) return isDimOpValidSymbol(dimOp, region); + if (auto dimOp = dyn_cast(defOp)) + return isDimOpValidSymbol(dimOp, region); // Check for values dominating `region`'s parent op. Operation *regionOp = region ? 
region->getParentOp() : nullptr; diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp --- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp @@ -196,7 +196,7 @@ SmallVector res; for (OpOperand *opOperand : getInputAndOutputOperands()) { for (int64_t i = 0, e = getRank(opOperand); i < e; ++i) - res.push_back(b.createOrFold(loc, opOperand->get(), i)); + res.push_back(createOrFoldDimOp(b, loc, opOperand->get(), i)); } return res; } @@ -305,8 +305,7 @@ SmallVector shapes; for (int64_t dim : llvm::seq(0, getRank(opOperand))) { if (checkDimExpr.visit(shapeExprs[pos])) - shapes.push_back( - b.createOrFold(loc, opOperand->get(), dim)); + shapes.push_back(createOrFoldDimOp(b, loc, opOperand->get(), dim)); else shapes.push_back(allResultDimValues[pos]); pos++; diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp --- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp @@ -923,7 +923,7 @@ assert(rankedTensorType.hasStaticShape()); int rank = rankedTensorType.getRank(); for (int i = 0; i < rank; ++i) { - auto dimOp = builder.createOrFold(loc, source, i); + auto dimOp = builder.createOrFold(loc, source, i); auto resultDimSize = builder.createOrFold( loc, rankedTensorType.getDimSize(i)); auto highValue = builder.createOrFold(loc, resultDimSize, dimOp); @@ -943,7 +943,7 @@ for (auto dim : llvm::seq(0, getSourceType().getRank())) { // Shape along each dimension is source dim + low pad + high pad. SmallVector mapOperands; - mapOperands.push_back(b.createOrFold(loc, source(), dim)); + mapOperands.push_back(b.createOrFold(loc, source(), dim)); AffineExpr expr = b.getAffineDimExpr(0); unsigned numSymbols = 0; auto addOpFoldResult = [&](OpFoldResult valueOrAttr) { @@ -1543,7 +1543,7 @@ AffineExpr expr; SmallVector dynamicDims; for (auto dim : llvm::seq(startPos, endPos + 1)) { - dynamicDims.push_back(builder.createOrFold(loc, src, dim)); + dynamicDims.push_back(builder.createOrFold(loc, src, dim)); AffineExpr currExpr = builder.getAffineSymbolExpr(dim - startPos); expr = (expr ? expr * currExpr : currExpr); } @@ -1612,7 +1612,7 @@ "dimensions"); linearizedStaticDim *= d.value(); } - Value sourceDim = builder.create(loc, src, sourceDimPos); + Value sourceDim = builder.create(loc, src, sourceDimPos); return applyMapToValues( builder, loc, AffineMap::get( diff --git a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp @@ -1136,8 +1136,7 @@ SmallVector dynShape; for (auto dim : enumerate(memRefType.getShape())) if (dim.value() == ShapedType::kDynamicSize) - dynShape.push_back( - b.create(loc, shapedValue, dim.index())); + dynShape.push_back(createOrFoldDimOp(b, loc, shapedValue, dim.index())); Value allocated = b.create(loc, allocMemRefType, dynShape); Value casted = allocated; @@ -1256,14 +1255,14 @@ /// DimOp tensor operand is modified inplace. This allows leaving dead /// tensors behind that will get DCE'd. 
-static LogicalResult bufferize(OpBuilder &b, memref::DimOp dimOp, +static LogicalResult bufferize(OpBuilder &b, tensor::DimOp dimOp, BlockAndValueMapping &bvm, const BufferizationAliasInfo &aliasInfo) { - if (dimOp.memrefOrTensor().getType().isa()) { - Value v = lookup(bvm, dimOp.memrefOrTensor()); + if (dimOp.source().getType().isa()) { + Value v = lookup(bvm, dimOp.source()); if (!v) return failure(); - dimOp.memrefOrTensorMutable().assign(v); + dimOp.sourceMutable().assign(v); } return success(); } @@ -1773,8 +1772,8 @@ .Case( [&](auto) { return success(); }) - .Case(loc, shapeDim.shape, - shapeDim.dimension); + Value dim = createOrFoldDimOp(b, loc, shapeDim.shape, shapeDim.dimension); tileSizes.push_back(zero); sizeBounds.push_back(dim); loopRanges.push_back(Range{zero, dim, one}); diff --git a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp @@ -1344,7 +1344,7 @@ for (auto dim : llvm::enumerate(operandType.getShape())) { if (dim.value() != ShapedType::kDynamicSize) continue; - dynamicDims.push_back(rewriter.createOrFold( + dynamicDims.push_back(rewriter.createOrFold( loc, operandVal, dim.index())); } Value initTensor = rewriter.create( diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp @@ -182,7 +182,7 @@ conds.push_back(leftOutOfBound); else conds.push_back(b.create(loc, conds.back(), leftOutOfBound)); - Value rightBound = b.create(loc, input, idx); + Value rightBound = createOrFoldDimOp(b, loc, input, idx); Value rightOutOfBound = b.create(loc, CmpIPredicate::sge, dim, rightBound); conds.push_back(b.create(loc, conds.back(), rightOutOfBound)); @@ -558,6 +558,7 @@ RewritePatternSet patterns(context); patterns.add>(context); memref::DimOp::getCanonicalizationPatterns(patterns, context); + tensor::DimOp::getCanonicalizationPatterns(patterns, context); AffineApplyOp::getCanonicalizationPatterns(patterns, context); patterns.add(context); // Just apply the patterns greedily. diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp @@ -206,7 +206,7 @@ SmallVector offsets(rank, rewriter.getIndexAttr(0)); auto sizes = llvm::to_vector<4>(llvm::map_range( llvm::seq(0, rank), [&](unsigned d) -> OpFoldResult { - auto dimOp = rewriter.create(loc, std::get<0>(it), d); + auto dimOp = rewriter.create(loc, std::get<0>(it), d); newUsersOfOpToPad.insert(dimOp); return dimOp.getResult(); })); @@ -788,8 +788,8 @@ auto low = asValue(rewriter, loc, padOp.getMixedLowPad()[dim]); auto offset = asValue(rewriter, loc, sliceOp.getMixedOffsets()[dim]); auto length = asValue(rewriter, loc, sliceOp.getMixedSizes()[dim]); - auto srcSize = rewriter.createOrFold( - loc, padOp.source(), dim); + auto srcSize = + rewriter.createOrFold(loc, padOp.source(), dim); // The new amount of low padding is `low - offset`. Except for the case // where none of the low padding is read. 
In that case, the new amount of diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -702,7 +702,7 @@ SmallVector staticSizes; for (unsigned dim = 0; dim < resultType.getRank(); ++dim) { if (resultType.isDynamicDim(dim)) { - auto srcSize = rewriter.createOrFold( + auto srcSize = rewriter.createOrFold( padOp.getLoc(), padOp.source(), dim); // Add low and high padding value. auto plusLow = rewriter.createOrFold( @@ -732,7 +732,7 @@ SmallVector srcSizes; for (unsigned dim = 0; dim < sourceType.getRank(); ++dim) { if (sourceType.isDynamicDim(dim)) { - srcSizes.push_back(rewriter.createOrFold( + srcSizes.push_back(rewriter.createOrFold( padOp.getLoc(), padOp.source(), dim)); } else { srcSizes.push_back(rewriter.getIndexAttr(sourceType.getDimSize(dim))); diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp --- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp +++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp @@ -176,8 +176,8 @@ .getResult(0) .dyn_cast()) boundingConst = cExpr.getValue(); - } else if (auto dimOp = size.getDefiningOp()) { - auto shape = dimOp.memrefOrTensor().getType().dyn_cast(); + } else if (auto dimOp = size.getDefiningOp()) { + auto shape = dimOp.source().getType().dyn_cast(); if (auto constOp = dimOp.index().getDefiningOp()) { if (auto indexAttr = constOp.value().dyn_cast()) { auto dimIndex = indexAttr.getInt(); @@ -576,7 +576,7 @@ LLVM_DEBUG(llvm::dbgs() << "makeTiledShapes: for dim#" << r); if (!isTiled(map.getSubMap({r}), tileSizes)) { offsets.push_back(b.getIndexAttr(0)); - Value dim = b.createOrFold(loc, shapedOp, r); + Value dim = createOrFoldDimOp(b, loc, shapedOp, r); sizes.push_back(dim); strides.push_back(b.getIndexAttr(1)); LLVM_DEBUG(llvm::dbgs() << ": not tiled: use size: " << dim << "\n"); @@ -613,7 +613,7 @@ AffineMap::inferFromExprList( ArrayRef>{{dim0, dim1 - dim2}}) .front(); - Value d = b.create(loc, shapedOp, r); + Value d = createOrFoldDimOp(b, loc, shapedOp, r); SmallVector operands{size, d, offset}; fullyComposeAffineMapAndOperands(&minMap, &operands); size = b.create(loc, b.getIndexType(), minMap, operands); diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefDialect.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefDialect.cpp --- a/mlir/lib/Dialect/MemRef/IR/MemRefDialect.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefDialect.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/Transforms/InliningUtils.h" using namespace mlir; @@ -35,12 +36,23 @@ SmallVector dynOperands; auto shapedType = val.getType().cast(); for (auto dim : llvm::enumerate(shapedType.getShape())) { - if (dim.value() == MemRefType::kDynamicSize) - dynOperands.push_back(b.create(loc, val, dim.index())); + if (dim.value() == ShapedType::kDynamicSize) + dynOperands.push_back(createOrFoldDimOp(b, loc, val, dim.index())); } return dynOperands; } +// Helper function that creates a memref::DimOp or tensor::DimOp depending on +// the type of `source`. 
+Value mlir::createOrFoldDimOp(OpBuilder &b, Location loc, Value source,
+                              int64_t dim) {
+  if (source.getType().isa<UnrankedMemRefType, MemRefType>())
+    return b.createOrFold<memref::DimOp>(loc, source, dim);
+  if (source.getType().isa<UnrankedTensorType, RankedTensorType>())
+    return b.createOrFold<tensor::DimOp>(loc, source, dim);
+  llvm_unreachable("Expected MemRefType or TensorType");
+}
+
 void mlir::memref::MemRefDialect::initialize() {
   addOperations<
 #define GET_OP_LIST
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ ... @@
-void DimOp::build(OpBuilder &builder, OperationState &result, Value memref,
+void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
                   int64_t index) {
   auto loc = result.location;
   Value indexValue = builder.create<ConstantIndexOp>(loc, index);
-  build(builder, result, memref, indexValue);
+  build(builder, result, source, indexValue);
 }

-void DimOp::build(OpBuilder &builder, OperationState &result, Value memref,
+void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
                   Value index) {
   auto indexTy = builder.getIndexType();
-  build(builder, result, indexTy, memref, index);
+  build(builder, result, indexTy, source, index);
 }

 Optional<int64_t> DimOp::getConstantIndex() {
@@ -626,14 +626,11 @@
     return success();

   // Check that constant index is not knowingly out of range.
-  auto type = op.memrefOrTensor().getType();
+  auto type = op.source().getType();
   if (auto memrefType = type.dyn_cast<MemRefType>()) {
     if (index.getValue() >= memrefType.getRank())
       return op.emitOpError("index is out of range");
-  } else if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
-    if (index.getValue() >= tensorType.getRank())
-      return op.emitOpError("index is out of range");
-  } else if (type.isa<UnrankedTensorType>() || type.isa<UnrankedMemRefType>()) {
+  } else if (type.isa<UnrankedMemRefType>()) {
     // Assume index to be in range.
   } else {
     llvm_unreachable("expected operand with memref type");
@@ -642,63 +639,27 @@
 }

 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
-  auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
-  // All forms of folding require a known index.
+  auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
   if (!index)
     return {};

-  auto argTy = memrefOrTensor().getType();
-  // Fold if the shape extent along the given index is known.
-  if (auto shapedTy = argTy.dyn_cast<ShapedType>()) {
-    // Folding for unranked types (UnrankedMemRefType) is not supported.
-    if (!shapedTy.hasRank())
-      return {};
-    if (!shapedTy.isDynamicDim(index.getInt())) {
-      Builder builder(getContext());
-      return builder.getIndexAttr(shapedTy.getShape()[index.getInt()]);
-    }
-  }
-
-  Operation *definingOp = memrefOrTensor().getDefiningOp();
-
-  // dim(memref.tensor_load(memref)) -> dim(memref)
-  if (auto tensorLoadOp = dyn_cast_or_null<TensorLoadOp>(definingOp)) {
-    setOperand(0, tensorLoadOp.memref());
-    return getResult();
-  }
+  // Folding for unranked types (UnrankedMemRefType) is not supported.
+  auto memrefType = source().getType().dyn_cast<MemRefType>();
+  if (!memrefType)
+    return {};

-  // Fold dim to the operand of tensor.generate.
-  if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
-    auto resultType =
-        fromElements.getResult().getType().cast<RankedTensorType>();
-    // The case where the type encodes the size of the dimension is handled
-    // above.
-    assert(resultType.getShape()[index.getInt()] ==
-           RankedTensorType::kDynamicSize);
-
-    // Find the operand of the fromElements that corresponds to this index.
-    auto dynExtents = fromElements.dynamicExtents().begin();
-    for (auto dim : resultType.getShape().take_front(index.getInt()))
-      if (dim == RankedTensorType::kDynamicSize)
-        dynExtents++;
-
-    return Value{*dynExtents};
+  // Fold if the shape extent along the given index is known.
+  if (!memrefType.isDynamicDim(index.getInt())) {
+    Builder builder(getContext());
+    return builder.getIndexAttr(memrefType.getShape()[index.getInt()]);
   }

   // The size at the given index is now known to be a dynamic size.
unsigned unsignedIndex = index.getValue().getZExtValue(); - if (auto sliceOp = dyn_cast_or_null(definingOp)) { - assert(sliceOp.isDynamicSize(unsignedIndex) && - "Expected dynamic slice size"); - return sliceOp.getDynamicSize(unsignedIndex); - } - // Fold dim to the size argument for an `AllocOp`, `ViewOp`, or `SubViewOp`. - auto memrefType = argTy.dyn_cast(); - if (!memrefType) - return {}; + Operation *definingOp = source().getDefiningOp(); if (auto alloc = dyn_cast_or_null(definingOp)) return *(alloc.getDynamicSizes().begin() + @@ -734,7 +695,7 @@ LogicalResult matchAndRewrite(DimOp dim, PatternRewriter &rewriter) const override { - auto reshape = dim.memrefOrTensor().getDefiningOp(); + auto reshape = dim.source().getDefiningOp(); if (!reshape) return failure(); @@ -751,18 +712,17 @@ } }; -/// Fold dim of a dim of a cast into the dim of the source of the tensor cast. -template +/// Fold dim of a cast into the dim of the source of the memref cast. struct DimOfCastOp : public OpRewritePattern { using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite(DimOp dimOp, PatternRewriter &rewriter) const override { - auto castOp = dimOp.memrefOrTensor().getDefiningOp(); + auto castOp = dimOp.source().getDefiningOp(); if (!castOp) return failure(); Value newSource = castOp.getOperand(); - rewriter.replaceOpWithNewOp(dimOp, newSource, dimOp.index()); + rewriter.replaceOpWithNewOp(dimOp, newSource, dimOp.index()); return success(); } }; @@ -770,8 +730,7 @@ void DimOp::getCanonicalizationPatterns(RewritePatternSet &results, MLIRContext *context) { - results.add, - DimOfCastOp>(context); + results.add(context); } // --------------------------------------------------------------------------- @@ -1954,6 +1913,28 @@ return {}; } +namespace { +struct DimOfTensorLoadFolder : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult matchAndRewrite(tensor::DimOp dimOp, + PatternRewriter &rewriter) const override { + auto tensorLoadOp = dimOp.source().getDefiningOp(); + if (!tensorLoadOp) + return failure(); + + rewriter.replaceOpWithNewOp(dimOp, tensorLoadOp.memref(), + dimOp.index()); + return success(); + } +}; +} // namespace + +void TensorLoadOp::getCanonicalizationPatterns(RewritePatternSet &results, + MLIRContext *context) { + results.add(context); +} + //===----------------------------------------------------------------------===// // TransposeOp //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp b/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp --- a/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp @@ -69,12 +69,13 @@ namespace { /// Fold dim of an operation that implements the InferShapedTypeOpInterface -struct DimOfShapedTypeOpInterface : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; +template +struct DimOfShapedTypeOpInterface : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; - LogicalResult matchAndRewrite(memref::DimOp dimOp, + LogicalResult matchAndRewrite(OpTy dimOp, PatternRewriter &rewriter) const override { - OpResult dimValue = dimOp.memrefOrTensor().dyn_cast(); + OpResult dimValue = dimOp.source().template dyn_cast(); if (!dimValue) return failure(); auto shapedTypeOp = @@ -111,7 +112,10 @@ void memref::populateResolveShapedTypeResultDimsPatterns( RewritePatternSet &patterns) { - 
patterns.add(patterns.getContext()); + // TODO: Move tensor::DimOp pattern to the Tensor dialect. + patterns.add, + DimOfShapedTypeOpInterface>( + patterns.getContext()); } void ResolveShapedTypeResultDimsPass::runOnOperation() { diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp @@ -99,11 +99,11 @@ /// Sparse conversion rule for dimension accesses. class SparseTensorToDimSizeConverter - : public OpConversionPattern { + : public OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; LogicalResult - matchAndRewrite(memref::DimOp op, ArrayRef operands, + matchAndRewrite(tensor::DimOp op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { if (!operands[0].getType().isa()) return failure(); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp @@ -377,7 +377,7 @@ // Find lower and upper bound in current dimension. Value up; if (shape[d] == MemRefType::kDynamicSize) { - up = rewriter.create(loc, t->get(), d); + up = createOrFoldDimOp(rewriter, loc, t->get(), d); args.push_back(up); } else { up = rewriter.create(loc, shape[d]); diff --git a/mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp b/mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp --- a/mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp +++ b/mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp @@ -16,20 +16,21 @@ #include "mlir/Dialect/SCF/SCF.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/Dialect/StandardOps/Transforms/Passes.h" +#include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/IR/BlockAndValueMapping.h" #include "mlir/Transforms/DialectConversion.h" using namespace mlir; namespace { -class BufferizeDimOp : public OpConversionPattern { +class BufferizeDimOp : public OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; LogicalResult - matchAndRewrite(memref::DimOp op, ArrayRef operands, + matchAndRewrite(tensor::DimOp op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { - memref::DimOp::Adaptor adaptor(operands); - rewriter.replaceOpWithNewOp(op, adaptor.memrefOrTensor(), + tensor::DimOp::Adaptor adaptor(operands); + rewriter.replaceOpWithNewOp(op, adaptor.source(), adaptor.index()); return success(); } @@ -94,8 +95,6 @@ return typeConverter.isLegal(op.getType()) || !op.condition().getType().isa(); }); - target.addDynamicallyLegalOp( - [&](memref::DimOp op) { return typeConverter.isLegal(op); }); if (failed( applyPartialConversion(getFunction(), target, std::move(patterns)))) signalPassFailure(); diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp --- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp @@ -19,6 +19,14 @@ using namespace mlir; using namespace mlir::tensor; +/// Materialize a single constant operation from a given attribute value with +/// the desired resultant type. 
+Operation *TensorDialect::materializeConstant(OpBuilder &builder,
+                                              Attribute value, Type type,
+                                              Location loc) {
+  return builder.create<ConstantOp>(loc, type, value);
+}
+
 //===----------------------------------------------------------------------===//
 // CastOp
 //===----------------------------------------------------------------------===//
@@ -184,6 +192,123 @@
   results.add<ChainedTensorCast>(context);
 }

+//===----------------------------------------------------------------------===//
+// DimOp
+//===----------------------------------------------------------------------===//
+
+void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
+                  int64_t index) {
+  auto loc = result.location;
+  Value indexValue = builder.create<ConstantIndexOp>(loc, index);
+  build(builder, result, source, indexValue);
+}
+
+void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
+                  Value index) {
+  auto indexTy = builder.getIndexType();
+  build(builder, result, indexTy, source, index);
+}
+
+Optional<int64_t> DimOp::getConstantIndex() {
+  if (auto constantOp = index().getDefiningOp<ConstantOp>())
+    return constantOp.getValue().cast<IntegerAttr>().getInt();
+  return {};
+}
+
+static LogicalResult verify(DimOp op) {
+  // Assume unknown index to be in range.
+  Optional<int64_t> index = op.getConstantIndex();
+  if (!index.hasValue())
+    return success();
+
+  // Check that constant index is not knowingly out of range.
+  auto type = op.source().getType();
+  if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
+    if (index.getValue() >= tensorType.getRank())
+      return op.emitOpError("index is out of range");
+  } else if (type.isa<UnrankedTensorType>()) {
+    // Assume index to be in range.
+  } else {
+    llvm_unreachable("expected operand with tensor type");
+  }
+  return success();
+}
+
+OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
+  // All forms of folding require a known index.
+  auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
+  if (!index)
+    return {};
+
+  // Folding for unranked types (UnrankedTensorType) is not supported.
+  auto tensorType = source().getType().dyn_cast<RankedTensorType>();
+  if (!tensorType)
+    return {};
+
+  // Fold if the shape extent along the given index is known.
+  if (!tensorType.isDynamicDim(index.getInt())) {
+    Builder builder(getContext());
+    return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
+  }
+
+  Operation *definingOp = source().getDefiningOp();
+
+  // Fold dim to the operand of tensor.generate.
+  if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
+    auto resultType =
+        fromElements.getResult().getType().cast<RankedTensorType>();
+    // The case where the type encodes the size of the dimension is handled
+    // above.
+    assert(resultType.getShape()[index.getInt()] ==
+           RankedTensorType::kDynamicSize);
+
+    // Find the operand of the fromElements that corresponds to this index.
+    auto dynExtents = fromElements.dynamicExtents().begin();
+    for (auto dim : resultType.getShape().take_front(index.getInt()))
+      if (dim == RankedTensorType::kDynamicSize)
+        dynExtents++;
+
+    return Value{*dynExtents};
+  }
+
+  // The size at the given index is now known to be a dynamic size.
+  unsigned unsignedIndex = index.getValue().getZExtValue();
+
+  if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
+    assert(sliceOp.isDynamicSize(unsignedIndex) &&
+           "Expected dynamic slice size");
+    return sliceOp.getDynamicSize(unsignedIndex);
+  }
+
+  // dim(cast) -> dim
+  if (succeeded(foldTensorCast(*this)))
+    return getResult();
+
+  return {};
+}
+
+namespace {
+/// Fold dim of a cast into the dim of the source of the tensor cast.
+struct DimOfCastOp : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + LogicalResult matchAndRewrite(DimOp dimOp, + PatternRewriter &rewriter) const override { + auto castOp = dimOp.source().getDefiningOp(); + if (!castOp) + return failure(); + Value newSource = castOp.getOperand(); + rewriter.replaceOpWithNewOp(dimOp, newSource, dimOp.index()); + return success(); + } +}; +} // end anonymous namespace. + +void DimOp::getCanonicalizationPatterns(RewritePatternSet &results, + MLIRContext *context) { + results.add(context); +} + //===----------------------------------------------------------------------===// // ExtractOp //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp --- a/mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp @@ -74,6 +74,33 @@ }; } // namespace +namespace { +class BufferizeConstantTensor : public OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + LogicalResult + matchAndRewrite(ConstantOp op, ArrayRef operands, + ConversionPatternRewriter &rewriter) const override { + if (!op.getType().isa()) + return failure(); + auto elemType = op.getType().cast().getElementType(); + auto elements = op.value().cast(); + int numberOfElements = elements.getNumElements(); + auto resultType = MemRefType::get({numberOfElements}, elemType); + Value result = rewriter.create(op.getLoc(), resultType); + for (auto element : llvm::enumerate(elements.getAttributeValues())) { + Value index = + rewriter.create(op.getLoc(), element.index()); + Value val = + rewriter.create(op.getLoc(), element.value(), elemType); + rewriter.create(op.getLoc(), val, result, index); + } + rewriter.replaceOp(op, {result}); + return success(); + } +}; +} // namespace + namespace { class BufferizeGenerateOp : public OpConversionPattern { public: @@ -154,10 +181,16 @@ populateBufferizeMaterializationLegality(target); populateTensorBufferizePatterns(typeConverter, patterns); + // Cannot add BufferizeConstantTensor in populateTensorBufferizePatterns + // because some users of populateTensorBufferizePatterns define their own + // ConstantOp lowering. + patterns.add(typeConverter, patterns.getContext()); + target.addIllegalOp(); target.addLegalDialect(); - target.addLegalDialect(); + target.addDynamicallyLegalDialect( + [&](Operation *op) { return typeConverter.isLegal(op); }); target.addLegalDialect(); if (failed( diff --git a/mlir/lib/Dialect/Tensor/Transforms/PassDetail.h b/mlir/lib/Dialect/Tensor/Transforms/PassDetail.h --- a/mlir/lib/Dialect/Tensor/Transforms/PassDetail.h +++ b/mlir/lib/Dialect/Tensor/Transforms/PassDetail.h @@ -13,6 +13,10 @@ namespace mlir { +namespace memref { +class MemRefDialect; +} // end namespace memref + namespace scf { class SCFDialect; } // end namespace scf diff --git a/mlir/lib/Dialect/Vector/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/VectorTransforms.cpp --- a/mlir/lib/Dialect/Vector/VectorTransforms.cpp +++ b/mlir/lib/Dialect/Vector/VectorTransforms.cpp @@ -2300,7 +2300,7 @@ Value sum = makeComposedAffineApply(b, loc, d0 + vs, xferOp.indices()[indicesIdx]); Value cond = createFoldedSLE( - b, sum, lb.create(xferOp.source(), indicesIdx)); + b, sum, createOrFoldDimOp(b, loc, xferOp.source(), indicesIdx)); if (!cond) return; // Conjunction over all dims for which we are in-bounds. 
@@ -2385,7 +2385,8 @@ auto isaWrite = isa(xferOp); xferOp.zipResultAndIndexing([&](int64_t resultIdx, int64_t indicesIdx) { using MapList = ArrayRef>; - Value dimMemRef = lb.create(xferOp.source(), indicesIdx); + Value dimMemRef = + createOrFoldDimOp(b, xferOp.getLoc(), xferOp.source(), indicesIdx); Value dimAlloc = lb.create(alloc, resultIdx); Value index = xferOp.indices()[indicesIdx]; AffineExpr i, j, k; @@ -3857,7 +3858,7 @@ unsigned vecWidth = vtp.getNumElements(); unsigned lastIndex = llvm::size(xferOp.indices()) - 1; Value off = xferOp.indices()[lastIndex]; - Value dim = rewriter.create(loc, xferOp.source(), lastIndex); + Value dim = createOrFoldDimOp(rewriter, loc, xferOp.source(), lastIndex); Value mask = buildVectorComparison( rewriter, xferOp, enableIndexOptimizations, vecWidth, dim, &off); diff --git a/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir b/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir --- a/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir +++ b/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir @@ -31,7 +31,7 @@ // CHECK-SAME: (%[[SHAPE:.*]]: tensor) -> index func @rank(%shape : tensor) -> index { // CHECK: %[[C0:.*]] = constant 0 : index - // CHECK: %[[RESULT:.*]] = memref.dim %[[SHAPE]], %[[C0]] + // CHECK: %[[RESULT:.*]] = tensor.dim %[[SHAPE]], %[[C0]] // CHECK: return %[[RESULT]] : index %rank = shape.rank %shape : tensor -> index return %rank : index @@ -60,12 +60,12 @@ // ----- -// Express `get_extent` as `memref.dim` when it relies directly on the outcome of a +// Express `get_extent` as `tensor.dim` when it relies directly on the outcome of a // `shape_of` operation. // CHECK-LABEL: @get_extent_shape_of // CHECK-SAME: (%[[ARG:.*]]: tensor<2x3xf32>, %[[IDX:.*]]: index) -> index func @get_extent_shape_of(%arg : tensor<2x3xf32>, %idx : index) -> index { - // CHECK: %[[RESULT:.*]] = memref.dim %[[ARG]], %[[IDX]] : tensor<2x3xf32> + // CHECK: %[[RESULT:.*]] = tensor.dim %[[ARG]], %[[IDX]] : tensor<2x3xf32> // CHECK: return %[[RESULT]] : index %shape = shape.shape_of %arg : tensor<2x3xf32> -> tensor %result = shape.get_extent %shape, %idx : tensor, index -> index @@ -178,7 +178,7 @@ // CHECK-NEXT: %[[INIT:.*]] = constant 1 : index // CHECK-NEXT: %[[C0:.*]] = constant 0 : index // CHECK-NEXT: %[[C1:.*]] = constant 1 : index -// CHECK-NEXT: %[[RANK:.*]] = memref.dim %[[SHAPE]], %[[C0]] : tensor +// CHECK-NEXT: %[[RANK:.*]] = tensor.dim %[[SHAPE]], %[[C0]] : tensor // CHECK-NEXT: %[[RESULT:.*]] = scf.for %[[I:.*]] = %[[C0]] to %[[RANK]] step %[[C1]] iter_args(%[[ACC:.*]] = %[[INIT]]) -> (index) // CHECK-NEXT: %[[EXTENT:.*]] = tensor.extract %[[SHAPE]][%[[I]]] // CHECK-NEXT: %[[NEW_ACC:.*]] = muli %[[ACC]], %[[EXTENT]] : index @@ -206,7 +206,7 @@ // CHECK: %[[RANK:.*]] = rank %[[ARG]] : tensor<*xf32> // CHECK: %[[SHAPE:.*]] = tensor.generate %[[RANK]] { // CHECK: ^bb0(%[[I:.*]]: index): - // CHECK: %[[EXTENT:.*]] = memref.dim %[[ARG]], %[[I]] : tensor<*xf32> + // CHECK: %[[EXTENT:.*]] = tensor.dim %[[ARG]], %[[I]] : tensor<*xf32> // CHECK: yield %[[EXTENT]] : index // CHECK: } : tensor %shape = shape.shape_of %arg : tensor<*xf32> -> tensor @@ -258,7 +258,7 @@ // CHECK-DAG: %[[C1:.*]] = constant 1 : index // CHECK-DAG: %[[C5:.*]] = constant 5 : index // CHECK-DAG: %[[C2:.*]] = constant 2 : index - // CHECK-DAG: %[[DYN_DIM:.*]] = memref.dim %[[ARG]], %[[C2]] : tensor<1x5x?xf32> + // CHECK-DAG: %[[DYN_DIM:.*]] = tensor.dim %[[ARG]], %[[C2]] : tensor<1x5x?xf32> // CHECK-DAG: %[[SHAPE_UNCASTED:.*]] = tensor.from_elements 
%[[C1]], %[[C5]], %[[DYN_DIM]] : tensor<3xindex> %shape = shape.shape_of %arg : tensor<1x5x?xf32> -> tensor return @@ -270,8 +270,8 @@ // CHECK-SAME: (%[[A:.*]]: tensor, %[[B:.*]]: tensor) -> i1 func @shape_eq(%a : tensor, %b : tensor) -> i1 { // CHECK: %[[C0:.*]] = constant 0 : index - // CHECK: %[[RANK_A:.*]] = memref.dim %[[A]], %[[C0]] : tensor - // CHECK: %[[RANK_B:.*]] = memref.dim %[[B]], %[[C0]] : tensor + // CHECK: %[[RANK_A:.*]] = tensor.dim %[[A]], %[[C0]] : tensor + // CHECK: %[[RANK_B:.*]] = tensor.dim %[[B]], %[[C0]] : tensor // CHECK: %[[RANK_EQ:.*]] = cmpi eq, %[[RANK_A]], %[[RANK_B]] // CHECK: %[[SHAPE_EQ:.*]] = scf.if %[[RANK_EQ]] -> (i1) { // CHECK: %[[C1:.*]] = constant 1 : index @@ -299,8 +299,8 @@ // CHECK-SAME: (%[[A:.*]]: tensor, %[[B:.*]]: tensor, %[[C:.*]]: tensor) -> i1 func @shape_eq(%a : tensor, %b : tensor, %c : tensor) -> i1 { // CHECK: %[[C0:.*]] = constant 0 : index - // CHECK: %[[RANK_A:.*]] = memref.dim %[[A]], %[[C0]] : tensor - // CHECK: %[[RANK_B:.*]] = memref.dim %[[B]], %[[C0]] : tensor + // CHECK: %[[RANK_A:.*]] = tensor.dim %[[A]], %[[C0]] : tensor + // CHECK: %[[RANK_B:.*]] = tensor.dim %[[B]], %[[C0]] : tensor // CHECK: %[[RANK_EQ:.*]] = cmpi eq, %[[RANK_A]], %[[RANK_B]] // CHECK: %[[SHAPE_EQ:.*]] = scf.if %[[RANK_EQ]] -> (i1) { // CHECK: %[[C1:.*]] = constant 1 : index @@ -317,7 +317,7 @@ // CHECK: %[[SHAPE_EQ_INNER:.*]] = constant false // CHECK: scf.yield %[[SHAPE_EQ_INNER]] : i1 // CHECK: } - // CHECK: %[[RANK_C:.*]] = memref.dim %[[C]], %[[C0]] : tensor + // CHECK: %[[RANK_C:.*]] = tensor.dim %[[C]], %[[C0]] : tensor // CHECK: %[[RANK_EQ:.*]] = cmpi eq, %[[RANK_A]], %[[RANK_C]] // CHECK: %[[SHAPE_EQ2:.*]] = scf.if %[[RANK_EQ]] -> (i1) { // CHECK: %[[C1:.*]] = constant 1 : index @@ -362,9 +362,9 @@ // CHECK-SAME: %[[ARG2:.*]]: tensor<2xindex>) // CHECK: %[[C0:.*]] = constant 0 : index // CHECK: %[[C1:.*]] = constant 1 : index -// CHECK: %[[RANK0:.*]] = memref.dim %[[ARG0]], %[[C0]] : tensor<2xindex> -// CHECK: %[[RANK1:.*]] = memref.dim %[[ARG1]], %[[C0]] : tensor<3xindex> -// CHECK: %[[RANK2:.*]] = memref.dim %[[ARG2]], %[[C0]] : tensor<2xindex> +// CHECK: %[[RANK0:.*]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<2xindex> +// CHECK: %[[RANK1:.*]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<3xindex> +// CHECK: %[[RANK2:.*]] = tensor.dim %[[ARG2]], %[[C0]] : tensor<2xindex> // CHECK: %[[CMP0:.*]] = cmpi ugt, %[[RANK1]], %[[RANK0]] : index // CHECK: %[[LARGER_DIM:.*]] = select %[[CMP0]], %[[RANK1]], %[[RANK0]] : index // CHECK: %[[CMP1:.*]] = cmpi ugt, %[[RANK2]], %[[LARGER_DIM]] : index @@ -452,9 +452,9 @@ // CHECK-SAME: %[[ARG2:.*]]: tensor<2xindex>) // CHECK: %[[C0:.*]] = constant 0 : index // CHECK: %[[C1:.*]] = constant 1 : index -// CHECK: %[[RANK0:.*]] = memref.dim %[[ARG0]], %[[C0]] : tensor<2xindex> -// CHECK: %[[RANK1:.*]] = memref.dim %[[ARG1]], %[[C0]] : tensor<3xindex> -// CHECK: %[[RANK2:.*]] = memref.dim %[[ARG2]], %[[C0]] : tensor<2xindex> +// CHECK: %[[RANK0:.*]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<2xindex> +// CHECK: %[[RANK1:.*]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<3xindex> +// CHECK: %[[RANK2:.*]] = tensor.dim %[[ARG2]], %[[C0]] : tensor<2xindex> // CHECK: %[[CMP0:.*]] = cmpi ugt, %[[RANK1]], %[[RANK0]] : index // CHECK: %[[LARGER_DIM:.*]] = select %[[CMP0]], %[[RANK1]], %[[RANK0]] : index // CHECK: %[[CMP1:.*]] = cmpi ugt, %[[RANK2]], %[[LARGER_DIM]] : index @@ -544,9 +544,9 @@ // CHECK-SAME: %[[ARG1:.*]]: tensor<3xindex>, // CHECK-SAME: %[[ARG2:.*]]: tensor<2xindex>) { // CHECK: %[[C0:.*]] = constant 0 : index -// 
CHECK: %[[RANK0:.*]] = memref.dim %[[ARG0]], %[[C0]] : tensor<2xindex> -// CHECK: %[[RANK1:.*]] = memref.dim %[[ARG1]], %[[C0]] : tensor<3xindex> -// CHECK: %[[RANK2:.*]] = memref.dim %[[ARG2]], %[[C0]] : tensor<2xindex> +// CHECK: %[[RANK0:.*]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<2xindex> +// CHECK: %[[RANK1:.*]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<3xindex> +// CHECK: %[[RANK2:.*]] = tensor.dim %[[ARG2]], %[[C0]] : tensor<2xindex> // CHECK: %[[CMP0:.*]] = cmpi ugt, %[[RANK1]], %[[RANK0]] : index // CHECK: %[[LARGER_DIM:.*]] = select %[[CMP0]], %[[RANK1]], %[[RANK0]] : index // CHECK: %[[CMP1:.*]] = cmpi ugt, %[[RANK2]], %[[LARGER_DIM]] : index @@ -611,7 +611,7 @@ // CHECK-SAME: %[[SHAPE:.*]]: tensor, %[[INDEX:.*]]: index func @split_at(%shape: tensor, %index: index) -> (tensor, tensor) { // CHECK-NEXT: %[[C0:.*]] = constant 0 : index - // CHECK-NEXT: %[[RANK:.*]] = memref.dim %[[SHAPE]], %[[C0]] : tensor + // CHECK-NEXT: %[[RANK:.*]] = tensor.dim %[[SHAPE]], %[[C0]] : tensor // CHECK-NEXT: %[[POSINDEX:.*]] = addi %[[INDEX]], %[[RANK]] : index // CHECK-NEXT: %[[ISNEG:.*]] = cmpi slt, %[[INDEX]], %[[C0]] : index // CHECK-NEXT: %[[SELECT:.*]] = select %[[ISNEG]], %[[POSINDEX]], %[[INDEX]] : index diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir @@ -670,18 +670,18 @@ // CHECK: [[STRIDE:%.+]] = constant 1 // CHECK: [[OFFSET:%.+]] = constant 0 : index // CHECK: [[IDX0:%.+]] = constant 0 : index - // CHECK: [[ARG0_DIM0:%.+]] = memref.dim %arg0, [[IDX0]] + // CHECK: [[ARG0_DIM0:%.+]] = tensor.dim %arg0, [[IDX0]] // CHECK: [[IDX1:%.+]] = constant 1 : index - // CHECK: [[ARG0_DIM1:%.+]] = memref.dim %arg0, [[IDX1]] - // CHECK: [[ARG1_AXIS:%.+]] = memref.dim %arg1, [[AXIS]] + // CHECK: [[ARG0_DIM1:%.+]] = tensor.dim %arg0, [[IDX1]] + // CHECK: [[ARG1_AXIS:%.+]] = tensor.dim %arg1, [[AXIS]] // CHECK: [[RESULT_AXIS:%.+]] = addi [[ARG0_DIM0]], [[ARG1_AXIS]] // CHECK: [[INIT:%.+]] = linalg.init_tensor [11, 1] // CHECK: [[CST:%.+]] = constant 0.0 // CHECK: [[FILL:%.+]] = linalg.fill([[CST]], [[INIT]]) - // CHECK: [[ARG0_DIM0:%.+]] = memref.dim %arg0, [[AXIS]] + // CHECK: [[ARG0_DIM0:%.+]] = tensor.dim %arg0, [[AXIS]] // CHECK: [[INSERT0:%.+]] = tensor.insert_slice %arg0 into [[FILL]]{{\[}}[[OFFSET]], [[OFFSET]]] {{\[}}[[ARG0_DIM0]], [[ARG0_DIM1]]] {{\[}}[[STRIDE]], [[STRIDE]]] // CHECK: [[NEW_OFFSET:%.+]] = addi [[OFFSET]], [[ARG0_DIM0]] - // CHECK: [[ARG1_DIM0:%.+]] = memref.dim %arg1, [[AXIS]] + // CHECK: [[ARG1_DIM0:%.+]] = tensor.dim %arg1, [[AXIS]] // CHECK: [[INSERT1:%.+]] = tensor.insert_slice %arg1 into [[INSERT0]]{{\[}}[[NEW_OFFSET]], [[OFFSET]]] {{\[}}[[ARG1_DIM0]], [[ARG0_DIM1]]] {{\[}}[[STRIDE]], [[STRIDE]]] %0 = "tosa.concat"(%arg0, %arg1) { axis = 0 : i64} : (tensor<5x1xf32>, tensor<6x1xf32>) -> (tensor<11x1xf32>) @@ -689,18 +689,18 @@ // CHECK: [[STRIDE:%.+]] = constant 1 // CHECK: [[OFFSET:%.+]] = constant 0 : index // CHECK: [[IDX0:%.+]] = constant 0 : index - // CHECK: [[ARG0_DIM0:%.+]] = memref.dim %arg0, [[IDX0]] + // CHECK: [[ARG0_DIM0:%.+]] = tensor.dim %arg0, [[IDX0]] // CHECK: [[IDX1:%.+]] = constant 1 : index - // CHECK: [[ARG0_DIM1:%.+]] = memref.dim %arg0, [[IDX1]] - // CHECK: [[ARG1_AXIS:%.+]] = memref.dim %arg0, [[AXIS]] + // CHECK: [[ARG0_DIM1:%.+]] = tensor.dim %arg0, [[IDX1]] + // CHECK: [[ARG1_AXIS:%.+]] = tensor.dim %arg0, [[AXIS]] // CHECK: [[RESULT_AXIS:%.+]] = addi 
[[ARG0_DIM1]], [[ARG1_AXIS]] // CHECK: [[INIT:%.+]] = linalg.init_tensor [5, 2] // CHECK: [[CST:%.+]] = constant 0.0 // CHECK: [[FILL:%.+]] = linalg.fill([[CST]], [[INIT]]) - // CHECK: [[ARG0_DIM1:%.+]] = memref.dim %arg0, [[AXIS]] + // CHECK: [[ARG0_DIM1:%.+]] = tensor.dim %arg0, [[AXIS]] // CHECK: [[INSERT0:%.+]] = tensor.insert_slice %arg0 into [[FILL]]{{\[}}[[OFFSET]], [[OFFSET]]] {{\[}}[[ARG0_DIM0]], [[ARG0_DIM1]]] {{\[}}[[STRIDE]], [[STRIDE]]] // CHECK: [[NEW_OFFSET:%.+]] = addi [[OFFSET]], [[ARG0_DIM1]] - // CHECK: [[ARG1_DIM1:%.+]] = memref.dim %arg0, [[AXIS]] + // CHECK: [[ARG1_DIM1:%.+]] = tensor.dim %arg0, [[AXIS]] // CHECK: [[INSERT1:%.+]] = tensor.insert_slice %arg0 into [[INSERT0]]{{\[}}[[OFFSET]], [[NEW_OFFSET]]] {{\[}}[[ARG0_DIM0]], [[ARG1_DIM1]]] {{\[}}[[STRIDE]], [[STRIDE]]] %1 = "tosa.concat"(%arg0, %arg0) { axis = 1 : i64} : (tensor<5x1xf32>, tensor<5x1xf32>) -> (tensor<5x2xf32>) return @@ -878,20 +878,13 @@ func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) { %0 = constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32> - // CHECK: [[INDEX0:%.+]] = constant 0 : index + // TODO: Output contains multiple "constant 1 : index". // CHECK: [[INDEX1:%.+]] = constant 1 : index - // CHECK: [[ROW0:%.+]] = constant 0 : index - // CHECK: [[LOW0:%.+]] = tensor.extract %cst{{\[}}[[ROW0]], [[INDEX0]]] - // CHECK: [[HIGH0:%.+]] = tensor.extract %cst{{\[}}[[ROW0]], [[INDEX1]]] - // CHECK: [[LOW0_IDX:%.+]] = index_cast %0 - // CHECK: [[HIGH0_IDX:%.+]] = index_cast %1 - // CHECK: [[ROW1:%.+]] = constant 1 : index - // CHECK: [[LOW1:%.+]] = tensor.extract %cst{{\[}}%c1_1, %c0] - // CHECK: [[HIGH1:%.+]] = tensor.extract %cst{{\[}}%c1_1, %c1] - // CHECK: [[LOW1_IDX:%.+]] = index_cast [[LOW1]] - // CHECK: [[HIGH1_IDX:%.+]] = index_cast [[HIGH1]] + // CHECK: [[INDEX2:%.+]] = constant 2 : index + // CHECK: [[INDEX3:%.+]] = constant 3 : index + // CHECK: [[INDEX4:%.+]] = constant 4 : index // CHECK: [[CST:%.+]] = constant 0.000000e+00 : f32 - // CHECK: %8 = linalg.pad_tensor %arg0 low{{\[}}[[LOW0_IDX]], [[LOW1_IDX]]] high{{\[}}[[HIGH0_IDX]], [[HIGH1_IDX]]] { + // CHECK: linalg.pad_tensor %arg0 low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] { // CHECK: ^bb0(%arg1: index, %arg2: index): // no predecessors // CHECK: linalg.yield [[CST]] // CHECK: } : tensor<1x2xf32> to tensor<4x9xf32> diff --git a/mlir/test/Dialect/Linalg/bufferize.mlir b/mlir/test/Dialect/Linalg/bufferize.mlir --- a/mlir/test/Dialect/Linalg/bufferize.mlir +++ b/mlir/test/Dialect/Linalg/bufferize.mlir @@ -101,8 +101,8 @@ // CHECK-DAG: %[[C0:.*]] = constant 0 : index // CHECK-DAG: %[[C1:.*]] = constant 1 : index // CHECK: %[[MEMREF_ARG:.*]] = memref.buffer_cast %[[ARG]] : memref -// CHECK: %[[DIM0:.*]] = memref.dim %[[ARG]], %[[C0]] : tensor -// CHECK: %[[DIM1:.*]] = memref.dim %[[ARG]], %[[C1]] : tensor +// CHECK: %[[DIM0:.*]] = tensor.dim %[[ARG]], %[[C0]] : tensor +// CHECK: %[[DIM1:.*]] = tensor.dim %[[ARG]], %[[C1]] : tensor // CHECK: %[[RESULT0:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) : memref // CHECK: %[[RESULT1:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) : memref // CHECK: linalg.generic @@ -214,8 +214,8 @@ // CHECK-DAG: %[[M:.*]] = memref.buffer_cast %[[T]] : memref // CHECK-DAG: %[[SM0:.*]] = memref.buffer_cast %[[ST0]] : memref<2x3xf32> - // CHECK-NEXT: %[[DIM0:.*]] = memref.dim %[[T]], %[[C0]] : tensor - // CHECK-NEXT: %[[DIM1:.*]] = memref.dim %[[T]], %[[C1]] : tensor + // CHECK-NEXT: %[[DIM0:.*]] = tensor.dim %[[T]], %[[C0]] : tensor + // CHECK-NEXT: %[[DIM1:.*]] = tensor.dim %[[T]], %[[C1]] : 
diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -543,7 +543,7 @@
// CHECK-SAME: %[[ARG0:.+]]: index
// CHECK: %[[C2:.+]] = constant 2
// CHECK: %[[INIT1:.+]] = linalg.init_tensor [6, 5, %[[ARG0]]]
-// CHECK: %[[D0:.+]] = memref.dim %[[INIT1]], %[[C2]]
+// CHECK: %[[D0:.+]] = tensor.dim %[[INIT1]], %[[C2]]
// CHECK: %[[T0:.+]] = affine.apply #[[MAP]]()[%[[D0]]]
// CHECK: %[[INIT2:.+]] = linalg.init_tensor [2, 3, 5, 4, %[[T0]], 7]
// CHECK: return %[[INIT2]]
@@ -561,7 +561,7 @@
// CHECK-SAME: %[[ARG0:.+]]: index
// CHECK: %[[C4:.+]] = constant 4
// CHECK: %[[INIT1:.+]] = linalg.init_tensor [2, 3, 5, 4, %[[ARG0]], 7]
-// CHECK: %[[D0:.+]] = memref.dim %[[INIT1]], %[[C4]]
+// CHECK: %[[D0:.+]] = tensor.dim %[[INIT1]], %[[C4]]
// CHECK: %[[T0:.+]] = affine.apply #[[MAP]]()[%[[D0]]]
// CHECK: %[[INIT2:.+]] = linalg.init_tensor [6, 5, %[[T0]]]
// CHECK: return %[[INIT2]]
@@ -574,9 +574,9 @@
%c0 = constant 0 : index
%c1 = constant 1 : index
%c2 = constant 2 : index
- %0 = memref.dim %arg0, %c0 : tensor
- %1 = memref.dim %arg0, %c1 : tensor
- %2 = memref.dim %arg0, %c2 : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
+ %1 = tensor.dim %arg0, %c1 : tensor
+ %2 = tensor.dim %arg0, %c2 : tensor
%3 = linalg.init_tensor [%0, %1, %2] : tensor
%4, %5 = linalg.generic {
indexing_maps = [#map, #map, #map, #map],
@@ -600,8 +600,8 @@
%c0 = constant 0 : index
%c1 = constant 1 : index
%cst = constant 1.000000e+00 : f32
- %0 = memref.dim %arg0, %c0 : tensor
- %1 = memref.dim %arg0, %c1 : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
+ %1 = tensor.dim %arg0, %c1 : tensor
%2 = linalg.init_tensor [%0, %1] : tensor
br ^bb1(%cst : f32)
@@ -626,8 +626,8 @@
%c0 = constant 0 : index
%c1 = constant 1 : index
%cst = constant 1.000000e+00 : f32
- %0 = memref.dim %arg0, %c0 : tensor
- %1 = memref.dim %arg0, %c1 : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
+ %1 = tensor.dim %arg0, %c1 : tensor
%2 = linalg.init_tensor [%0, %1] : tensor
br ^bb1(%cst : f32)
@@ -721,8 +721,8 @@
%c42 = constant 42 : index
%0 = linalg.init_tensor [%c21, %c42] : tensor
%1 = linalg.fill(%arg1, %0) : f32, tensor -> tensor
- %2 = memref.dim %arg0, %c0 : tensor
- %3 = memref.dim %arg0, %c1 : tensor
+ %2 = tensor.dim %arg0, %c0 : tensor
+ %3 = tensor.dim %arg0, %c1 : tensor
%4 = tensor.insert_slice %arg0 into %1[%arg2, %arg3] [%2, %3] [1, 1] : tensor into tensor
return %4 : tensor
}
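Aside: for the two reshape-of-init_tensor tests only the expected output changed, so the input IR is not visible in the hunk. A sketch of what is being canonicalized, inferred from the CHECK lines above (the `floordiv 28` likely comes from the static factors 4 * 7 in the expanded group):

```mlir
// Input (sketch): expand_shape of a partially static init_tensor.
%0 = linalg.init_tensor [6, 5, %sz] : tensor<6x5x?xf32>
%1 = linalg.tensor_expand_shape %0 [[0, 1], [2], [3, 4, 5]]
    : tensor<6x5x?xf32> into tensor<2x3x5x4x?x7xf32>
// After canonicalization the reshape disappears; the remaining dynamic
// extent is recomputed from the source via tensor.dim + affine.apply:
//   %d  = tensor.dim %0, %c2 : tensor<6x5x?xf32>
//   %t  = affine.apply affine_map<()[s0] -> (s0 floordiv 28)>()[%d]
//   %1' = linalg.init_tensor [2, 3, 5, 4, %t, 7] : tensor<2x3x5x4x?x7xf32>
```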
diff --git a/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir b/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
--- a/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
+++ b/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
@@ -93,11 +93,11 @@
// CHECK-SAME: %[[ARG1:[0-9a-zA-Z]*]]: tensor<4x?x?x8x2x?xf32>
func @cmpf(%arg0: tensor<4x?x?x8x2x?xf32>, %arg1: tensor<4x?x?x8x2x?xf32>) -> tensor<4x?x?x8x2x?xi1> {
// CHECK: %[[C1:.*]] = constant 1 : index
- // CHECK: %[[D1:.*]] = memref.dim %[[ARG0]], %[[C1]] : tensor<4x?x?x8x2x?xf32>
+ // CHECK: %[[D1:.*]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<4x?x?x8x2x?xf32>
// CHECK: %[[C2:.*]] = constant 2 : index
- // CHECK: %[[D2:.*]] = memref.dim %[[ARG0]], %[[C2]] : tensor<4x?x?x8x2x?xf32>
+ // CHECK: %[[D2:.*]] = tensor.dim %[[ARG0]], %[[C2]] : tensor<4x?x?x8x2x?xf32>
// CHECK: %[[C5:.*]] = constant 5 : index
- // CHECK: %[[D5:.*]] = memref.dim %[[ARG0]], %[[C5]] : tensor<4x?x?x8x2x?xf32>
+ // CHECK: %[[D5:.*]] = tensor.dim %[[ARG0]], %[[C5]] : tensor<4x?x?x8x2x?xf32>
// CHECK: %[[INIT:.*]] = linalg.init_tensor [4, %[[D1]], %[[D2]], 8, 2, %[[D5]]] : tensor<4x?x?x8x2x?xi1>
// CHECK: linalg.generic
// CHECK-SAME: ins(%[[ARG0]], %[[ARG1]]
diff --git a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
--- a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
+++ b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
@@ -329,7 +329,7 @@
func @unit_dim_for_reduction(%arg0: tensor<1x?x1x?xf32>) -> tensor<1x?xf32> {
%cst = constant 1.000000e+00 : f32
%c3 = constant 3 : index
- %0 = memref.dim %arg0, %c3 : tensor<1x?x1x?xf32>
+ %0 = tensor.dim %arg0, %c3 : tensor<1x?x1x?xf32>
%1 = linalg.init_tensor [1, %0] : tensor<1x?xf32>
%2 = linalg.fill(%cst, %1) : f32, tensor<1x?xf32> -> tensor<1x?xf32>
%3 = linalg.generic {
@@ -398,7 +398,7 @@
func @unit_dim_for_reduction_inner(%arg0: tensor) -> tensor {
%cst = constant 1.000000e+00 : f32
%c2 = constant 2 : index
- %0 = memref.dim %arg0, %c2 : tensor
+ %0 = tensor.dim %arg0, %c2 : tensor
%1 = linalg.init_tensor [%0, 1] : tensor
%2 = linalg.fill(%cst, %1) : f32, tensor -> tensor
%3 = linalg.generic {
diff --git a/mlir/test/Dialect/Linalg/fusion-elementwise-options.mlir b/mlir/test/Dialect/Linalg/fusion-elementwise-options.mlir
--- a/mlir/test/Dialect/Linalg/fusion-elementwise-options.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-elementwise-options.mlir
@@ -15,8 +15,8 @@
-> tensor {
%c0 = constant 0 : index
%c1 = constant 1 : index
- %d0 = memref.dim %arg0, %c0 : tensor
- %d1 = memref.dim %arg0, %c1 : tensor
+ %d0 = tensor.dim %arg0, %c0 : tensor
+ %d1 = tensor.dim %arg0, %c1 : tensor
%init = linalg.init_tensor [%d0, %d1] : tensor
%0 = linalg.generic #binary2Dpointwise
ins(%arg0, %arg1 : tensor, tensor)
diff --git a/mlir/test/Dialect/Linalg/fusion-sequence.mlir b/mlir/test/Dialect/Linalg/fusion-sequence.mlir
--- a/mlir/test/Dialect/Linalg/fusion-sequence.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-sequence.mlir
@@ -150,8 +150,8 @@
%c1 = constant 1 : index
%0 = linalg.matmul ins(%arg0, %arg1 : tensor, tensor)
outs(%arg2 : tensor) -> tensor
- %1 = memref.dim %0, %c0 : tensor
- %2 = memref.dim %0, %c1 : tensor
+ %1 = tensor.dim %0, %c0 : tensor
+ %2 = tensor.dim %0, %c1 : tensor
%3 = linalg.init_tensor [%1, %2] : tensor
%4 = linalg.generic
{indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
@@ -223,26 +223,26 @@
// CHECK-SAME: %[[ARG6:[a-zA-Z0-9_]+]]: tensor) -> tensor {
// CHECK-DAG: %[[C0:.+]] = constant 0 : index
// CHECK-DAG: %[[C1:.+]] = constant 1 : index
-// CHECK: %[[M:.+]] = memref.dim %[[ARG0]], %c0 : tensor
+// CHECK: %[[M:.+]] = tensor.dim %[[ARG0]], %c0 : tensor
// CHECK: %[[R0:.+]] = scf.for %[[IV0:[a-zA-Z0-9_]+]] =
// CHECK-SAME: iter_args(%[[ARG8:.+]] = %[[ARG6]]) -> (tensor) {
-// CHECK: %[[M_1:.+]] = memref.dim %[[ARG8]], %[[C0]]
+// CHECK: %[[M_1:.+]] = tensor.dim %[[ARG8]], %[[C0]]
// CHECK: %[[TILE_M_1:.+]] = affine.min #[[MAP0]](%[[M_1]], %[[IV0]])
-// CHECK: %[[N3:.+]] = memref.dim %[[ARG8]], %[[C1]]
+// CHECK: %[[N3:.+]] = tensor.dim %[[ARG8]], %[[C1]]
// CHECK: %[[STARG6:.+]] = tensor.extract_slice %[[ARG8]][%[[IV0]], 0]
// CHECK-SAME: [%[[TILE_M_1]], %[[N3]]]
-// CHECK: %[[M_2:.+]] = memref.dim %[[ARG4]], %[[C0]]
+// CHECK: %[[M_2:.+]] = tensor.dim %[[ARG4]], %[[C0]]
// CHECK: %[[TILE_M_2:.+]] = affine.min #[[MAP1]](%[[IV0]])[%[[M_2]], %[[M]]]
-// CHECK: %[[N2:.+]] = memref.dim %[[ARG4]], %[[C1]]
+// CHECK: %[[N2:.+]] = tensor.dim %[[ARG4]], %[[C1]]
// CHECK: %[[STARG4:.+]] = tensor.extract_slice %[[ARG4]][%[[IV0]], 0]
// CHECK-SAME: [%[[TILE_M_2]], %[[N2]]]
// CHECK: %[[TILE_M_3:.+]] = affine.min #[[MAP1]](%[[IV0]])[%[[M]], %[[M]]]
-// CHECK: %[[N0:.+]] = memref.dim %[[ARG0]], %[[C1]]
+// CHECK: %[[N0:.+]] = tensor.dim %[[ARG0]], %[[C1]]
// CHECK: %[[STARG0:.+]] = tensor.extract_slice %[[ARG0]][%[[IV0]], 0]
// CHECK-SAME: [%[[TILE_M_3]], %[[N0]]]
-// CHECK: %[[M_3:.+]] = memref.dim %[[ARG2]], %[[C0]]
+// CHECK: %[[M_3:.+]] = tensor.dim %[[ARG2]], %[[C0]]
// CHECK: %[[TILE_M_4:.+]] = affine.min #[[MAP1]](%[[IV0]])[%[[M_3]], %[[M]]]
-// CHECK: %[[N1:.+]] = memref.dim %[[ARG2]], %[[C1]]
+// CHECK: %[[N1:.+]] = tensor.dim %[[ARG2]], %[[C1]]
// CHECK: %[[STARG2:.+]] = tensor.extract_slice %[[ARG2]][%[[IV0]], 0]
// CHECK-SAME: [%[[TILE_M_4]], %[[N1]]]
// CHECK: %[[T0:.+]] = linalg.matmul
diff --git a/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir b/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir
--- a/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir
@@ -31,30 +31,30 @@
// CHECK-DAG: %[[C32:.+]] = constant 32 : index
// CHECK-DAG: %[[C64:.+]] = constant 64 : index
// CHECK-DAG: %[[C16:.+]] = constant 16 : index
-// CHECK-DAG: %[[M:.+]] = memref.dim %[[ARG0]], %[[C0]]
+// CHECK-DAG: %[[M:.+]] = tensor.dim %[[ARG0]], %[[C0]]
// CHECK: %[[RESULT:.+]] = scf.for %[[IV0:[a-zA-Z0-9]+]] =
// CHECK-SAME: %[[C0]] to %[[M]] step %[[C32]]
// CHECK-SAME: iter_args(%[[ARG6:.+]] = %[[ARG4]]) -> (tensor) {
-// CHECK: %[[M_2:.+]] = memref.dim %[[ARG6]], %[[C0]]
+// CHECK: %[[M_2:.+]] = tensor.dim %[[ARG6]], %[[C0]]
// CHECK: %[[TILE_M_2:.+]] = affine.min #[[MAP1]](%[[M_2]], %[[IV0]])
-// CHECK: %[[N3:.+]] = memref.dim %[[ARG6]], %[[C1]]
+// CHECK: %[[N3:.+]] = tensor.dim %[[ARG6]], %[[C1]]
// CHECK: %[[ST_ARG6:.+]] = tensor.extract_slice %[[ARG6]][%[[IV0]], 0]
// CHECK-SAME: [%[[TILE_M_2]], %[[N3]]]
// CHECK: %[[TILE_M_3:.+]] = affine.min #[[MAP5]](%[[IV0]])[%[[M]], %[[M]]]
-// CHECK: %[[N1:.+]] = memref.dim %[[ARG0]], %[[C1]]
+// CHECK: %[[N1:.+]] = tensor.dim %[[ARG0]], %[[C1]]
// CHECK: %[[ST_ARG0:.+]] = tensor.extract_slice %[[ARG0]][%[[IV0]], 0]
// CHECK-SAME: [%[[TILE_M_3]], %[[N1]]]
-// CHECK: %[[M_3:.+]] = memref.dim %[[ARG2]], %[[C0]]
+// CHECK: %[[M_3:.+]] = tensor.dim %[[ARG2]], %[[C0]]
// CHECK: %[[TILE_M_4:.+]] = affine.min #[[MAP5]](%[[IV0]])[%[[M_3]], %[[M]]]
-// CHECK: %[[N2_2:.+]] = memref.dim %[[ARG2]], %[[C1]]
+// CHECK: %[[N2_2:.+]] = tensor.dim %[[ARG2]], %[[C1]]
// CHECK: %[[ST_ARG2:.+]] = tensor.extract_slice %[[ARG2]][%[[IV0]], 0]
// CHECK-SAME: [%[[TILE_M_4]], %[[N2_2]]]
// CHECK: %[[LHS:.+]] = linalg.matmul
// CHECK-SAME: __internal_linalg_transform__ = "after_lhs_fusion_producer"
// CHECK-SAME: ins(%[[ST_ARG0]], %[[ARG1]] : tensor, tensor)
// CHECK-SAME: outs(%[[ST_ARG2]] : tensor)
-// CHECK: %[[N2:.+]] = memref.dim %[[ARG1]], %[[C1]]
-// CHECK: %[[N3_2:.+]] = memref.dim %[[ARG3]], %[[C1]]
+// CHECK: %[[N2:.+]] = tensor.dim %[[ARG1]], %[[C1]]
+// CHECK: %[[N3_2:.+]] = tensor.dim %[[ARG3]], %[[C1]]
// CHECK: %[[YIELD0:.+]] = scf.for %[[IV1:[a-zA-Z0-9]+]] =
// CHECK-SAME: %[[C0]] to %[[N3_2]] step %[[C64]]
// CHECK-SAME: iter_args(%[[ARG8:.+]] = %[[ST_ARG6]]) -> (tensor) {
@@ -64,13 +64,13 @@
// CHECK: %[[TILE_N2:.+]] = affine.min #[[MAP2]](%[[IV2]])[%[[N2]]]
// CHECK: %[[ST_LHS:.+]] = tensor.extract_slice %[[LHS]][0, %[[IV2]]]
// CHECK-SAME: [%[[TILE_M_3]], %[[TILE_N2]]]
-// CHECK: %[[N2_3:.+]] = memref.dim %[[ARG3]], %[[C0]]
+// CHECK: %[[N2_3:.+]] = tensor.dim %[[ARG3]], %[[C0]]
// CHECK: %[[TILE_N2_2:.+]] = affine.min #[[MAP2]](%[[IV2]])[%[[N2_3]]]
// CHECK: %[[TILE_N3:.+]] = affine.min #[[MAP3]](%[[IV1]])[%[[N3_2]]]
// CHECK: %[[ST_ARG3:.+]] = tensor.extract_slice %[[ARG3]][%[[IV2]], %[[IV1]]]
// CHECK-SAME: [%[[TILE_N2_2]], %[[TILE_N3]]]
-// CHECK: %[[M_4:.+]] = memref.dim %[[ARG10]], %[[C0]]
-// CHECK: %[[N3_3:.+]] = memref.dim %[[ARG10]], %[[C1]]
+// CHECK: %[[M_4:.+]] = tensor.dim %[[ARG10]], %[[C0]]
+// CHECK: %[[N3_3:.+]] = tensor.dim %[[ARG10]], %[[C1]]
// CHECK: %[[TILE_N3_2:.+]] = affine.min #[[MAP4]](%[[N3_3]], %[[IV1]])
// CHECK: %[[ST_ARG4:.+]] = tensor.extract_slice %[[ARG10]][0, %[[IV1]]]
// CHECK-SAME: [%[[M_4]], %[[TILE_N3_2]]]
@@ -104,7 +104,7 @@
// TLOOP-DAG: %[[C0:.*]] = constant 0 : index
// TLOOP-DAG: %[[C1:.*]] = constant 1 : index
-// TLOOP: %[[DIM_A0:.*]] = memref.dim %[[A]], %[[C0]] : [[TY:.*]]
+// TLOOP: %[[DIM_A0:.*]] = tensor.dim %[[A]], %[[C0]] : [[TY:.*]]
// TLOOP: %[[ABC:.*]] = linalg.tiled_loop (%[[IV0:.*]]) = (%[[C0]])
// TLOOP-SAME: to (%[[DIM_A0]]) step (%[[C32]])
@@ -121,8 +121,8 @@
// TLOOP: %[[AB_SUB:.*]] = linalg.matmul
// TLOOP-SAME: ins(%[[A_SUB]], %[[B_]] : {{.*}}) outs(%[[AB_INIT_SUB]]
-// TLOOP: %[[DIM_B_1:.*]] = memref.dim %[[B_]], %[[C1]] : [[TY]]
-// TLOOP: %[[DIM_C_1:.*]] = memref.dim %[[C_]], %[[C1]] : [[TY]]
+// TLOOP: %[[DIM_B_1:.*]] = tensor.dim %[[B_]], %[[C1]] : [[TY]]
+// TLOOP: %[[DIM_C_1:.*]] = tensor.dim %[[C_]], %[[C1]] : [[TY]]
// TLOOP: %[[ABC_SUB_:.*]] = linalg.tiled_loop (%[[IV1:.*]], %[[IV2:.*]]) =
// TLOOP-SAME: (%[[C0]], %[[C0]]) to (%[[DIM_C_1]], %[[DIM_B_1]])
@@ -156,12 +156,12 @@
%arg2: tensor) -> tensor{
%c0 = constant 0 : index
%c1 = constant 1 : index
- %0 = memref.dim %arg2, %c0 : tensor
- %1 = memref.dim %arg2, %c1 : tensor
+ %0 = tensor.dim %arg2, %c0 : tensor
+ %1 = tensor.dim %arg2, %c1 : tensor
%2 = linalg.matmul ins(%arg0, %arg1 : tensor, tensor)
outs(%arg2 : tensor) -> tensor
- %3 = memref.dim %2, %c0 : tensor
- %4 = memref.dim %2, %c1 : tensor
+ %3 = tensor.dim %2, %c0 : tensor
+ %4 = tensor.dim %2, %c1 : tensor
%5 = linalg.init_tensor [%3, %4] : tensor
%6 = linalg.generic
{indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
@@ -213,8 +213,8 @@
// TLOOP-DAG: %[[C0:.*]] = constant 0 : index
// TLOOP-DAG: %[[C1:.*]] = constant 1 : index
-// TLOOP: %[[DIM_A_0:.*]] = memref.dim %[[A]], %[[C0]] : [[TY:.*]]
-// TLOOP: %[[DIM_B_1:.*]] = memref.dim %[[B]], %[[C1]] : [[TY]]
+// TLOOP: %[[DIM_A_0:.*]] = tensor.dim %[[A]], %[[C0]] : [[TY:.*]]
+// TLOOP: %[[DIM_B_1:.*]] = tensor.dim %[[B]], %[[C1]] : [[TY]]
// TLOOP: %[[INIT:.*]] = linalg.init_tensor [%[[DIM_A_0]], %[[DIM_B_1]]]
@@ -289,8 +289,8 @@
// TLOOP-DAG: %[[C0:.*]] = constant 0 : index
// TLOOP-DAG: %[[C1:.*]] = constant 1 : index
-// TLOOP: %[[DIM_A_0:.*]] = memref.dim %[[A]], %[[C0]] : [[TY:.*]]
-// TLOOP: %[[DIM_B_1:.*]] = memref.dim %[[B]], %[[C1]] : [[TY]]
+// TLOOP: %[[DIM_A_0:.*]] = tensor.dim %[[A]], %[[C0]] : [[TY:.*]]
+// TLOOP: %[[DIM_B_1:.*]] = tensor.dim %[[B]], %[[C1]] : [[TY]]
// TLOOP: %[[AB:.*]] = linalg.tiled_loop (%[[I:.*]], %[[J:.*]]) =
// TLOOP-SAME: (%[[C0]], %[[C0]]) to (%[[DIM_A_0]], %[[DIM_B_1]])
@@ -300,7 +300,7 @@
// TLOOP-SAME: %[[C0_F32_:.*]] = %[[C0_F32]]
// TLOOP-SAME: outs (%[[OUT_:.*]] = %[[OUT]]: [[TY]]) {
-// TLOOP: %[[DIM_A__1:.*]] = memref.dim %[[A_]], %[[C1]] : [[TY]]
+// TLOOP: %[[DIM_A__1:.*]] = tensor.dim %[[A_]], %[[C1]] : [[TY]]
// TLOOP: %[[A_SUB:.*]] = tensor.extract_slice %[[A_]][%[[I]], 0]
// TLOOP: %[[B_SUB:.*]] = tensor.extract_slice %[[B_]][0, %[[J]]]
// TLOOP: %[[OUT_SUB:.*]] = tensor.extract_slice %[[OUT_]][%[[I]], %[[J]]]
@@ -360,8 +360,8 @@
// TLOOP-DAG: %[[C0:.*]] = constant 0 : index
// TLOOP-DAG: %[[C1:.*]] = constant 1 : index
-// TLOOP: %[[DIM_A_0:.*]] = memref.dim %[[A]], %[[C0]] : [[TY:.*]]
-// TLOOP: %[[DIM_B_1:.*]] = memref.dim %[[B]], %[[C1]] : [[TY]]
+// TLOOP: %[[DIM_A_0:.*]] = tensor.dim %[[A]], %[[C0]] : [[TY:.*]]
+// TLOOP: %[[DIM_B_1:.*]] = tensor.dim %[[B]], %[[C1]] : [[TY]]
// TLOOP: %[[AB:.*]] = linalg.tiled_loop (%[[I:.*]], %[[J:.*]]) =
// TLOOP-SAME: (%[[C0]], %[[C0]]) to (%[[DIM_A_0]], %[[DIM_B_1]])
@@ -371,7 +371,7 @@
// TLOOP-SAME: %[[C0_F32_:.*]] = %[[C0_F32]]
// TLOOP-SAME: outs (%[[OUT_:.*]] = %[[OUT]]: [[TY]]) {
-// TLOOP: %[[DIM_A__1:.*]] = memref.dim %[[A_]], %[[C1]] : [[TY]]
+// TLOOP: %[[DIM_A__1:.*]] = tensor.dim %[[A_]], %[[C1]] : [[TY]]
// TLOOP: %[[A_SUB:.*]] = tensor.extract_slice %[[A_]][%[[I]], 0]
// TLOOP: %[[B_SUB:.*]] = tensor.extract_slice %[[B_]][0, %[[J]]]
// TLOOP: %[[OUT_SUB:.*]] = tensor.extract_slice %[[OUT_]][%[[I]], %[[J]]]
diff --git a/mlir/test/Dialect/Linalg/fusion-tensor.mlir b/mlir/test/Dialect/Linalg/fusion-tensor.mlir
--- a/mlir/test/Dialect/Linalg/fusion-tensor.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-tensor.mlir
@@ -8,8 +8,8 @@
{
%c0 = constant 0 : index
%c1 = constant 1 : index
- %0 = memref.dim %arg0, %c0 : tensor
- %1 = memref.dim %arg0, %c1 : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
+ %1 = tensor.dim %arg0, %c1 : tensor
%2 = linalg.init_tensor [%0, %1] : tensor
%3 = linalg.generic {indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel", "parallel"]}
ins(%arg0, %arg1 : tensor, tensor)
@@ -50,8 +50,8 @@
{
%c0 = constant 0 : index
%c1 = constant 1 : index
- %0 = memref.dim %arg0, %c0 : tensor
- %1 = memref.dim %arg0, %c1 : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
+ %1 = tensor.dim %arg0, %c1 : tensor
%2 = linalg.init_tensor [%0, %1] : tensor
%3 = linalg.generic {indexing_maps = [#map0, #map1, #map0], iterator_types = ["parallel", "parallel"]}
ins(%arg0, %arg1 : tensor, f32)
@@ -92,8 +92,8 @@
{
%c0 = constant 0 : index
%c1 = constant 1 : index
- %0 = memref.dim %arg0, %c0 : tensor
- %1 = memref.dim %arg0, %c1 : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
+ %1 = tensor.dim %arg0, %c1 : tensor
%2 = linalg.init_tensor [%0, %1] : tensor
%3 = linalg.generic {indexing_maps = [#map0, #map1, #map0], iterator_types = ["parallel", "parallel"]}
ins(%arg0, %arg1 : tensor, tensor)
@@ -126,8 +126,8 @@
{
%c0 = constant 0 : index
%c1 = constant 1 : index
- %0 = memref.dim %arg0, %c0 : tensor
- %1 = memref.dim %arg0, %c1 : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
+ %1 = tensor.dim %arg0, %c1 : tensor
%2 = linalg.init_tensor [%0, %1] : tensor
%3 = linalg.generic {indexing_maps = [#map0, #map1, #map0], iterator_types = ["parallel", "parallel"]}
ins(%arg0, %arg1 : tensor, tensor)
@@ -161,7 +161,7 @@
{
%c0 = constant 0 : index
%c1 = constant 1 : index
- %0 = memref.dim %arg0, %c0 : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
%1 = linalg.init_tensor [%0] : tensor
%2 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]}
ins(%arg0, %arg1 : tensor, tensor)
@@ -172,7 +172,7 @@
} -> tensor
// CHECK: linalg.generic {
// CHECK-SAME: indexing_maps = {{\[}}[[$MAP1]], [[$MAP1]], [[$MAP0]], [[$MAP0]]
- %3 = memref.dim %arg2, %c1 : tensor
+ %3 = tensor.dim %arg2, %c1 : tensor
%4 = linalg.init_tensor [%0, %3] : tensor
%5 = linalg.generic {indexing_maps = [#map1, #map0, #map0], iterator_types = ["parallel", "parallel"]}
ins(%2, %arg2 : tensor, tensor)
@@ -224,8 +224,8 @@
%c1 = constant 1 : index
%c2 = constant 2 : index
%cst = constant dense<42.0> : tensor<5xf32>
- %0 = memref.dim %arg0, %c1 : tensor<5x?x?xf32>
- %1 = memref.dim %arg0, %c2 : tensor<5x?x?xf32>
+ %0 = tensor.dim %arg0, %c1 : tensor<5x?x?xf32>
+ %1 = tensor.dim %arg0, %c2 : tensor<5x?x?xf32>
%2 = linalg.init_tensor [5, %0, %1] : tensor<5x?x?xf32>
%3 = linalg.generic {
indexing_maps = [#map0, #map1, #map1],
@@ -256,8 +256,8 @@
%c1 = constant 1 : index
%c2 = constant 2 : index
%cst = constant dense<42.0> : tensor
- %0 = memref.dim %arg0, %c1 : tensor<5x?x?xf32>
- %1 = memref.dim %arg0, %c2 : tensor<5x?x?xf32>
+ %0 = tensor.dim %arg0, %c1 : tensor<5x?x?xf32>
+ %1 = tensor.dim %arg0, %c2 : tensor<5x?x?xf32>
%2 = linalg.init_tensor [5, %0, %1] : tensor<5x?x?xf32>
%3 = linalg.generic {
indexing_maps = [#map0, #map1, #map1],
@@ -284,8 +284,8 @@
%arg1: tensor) -> tensor {
%c0 = constant 0 : index
%c1 = constant 1 : index
- %0 = memref.dim %arg0, %c0 : tensor
- %1 = memref.dim %arg0, %c1 : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
+ %1 = tensor.dim %arg0, %c1 : tensor
%2 = linalg.init_tensor [%0, %1] : tensor
%3 = linalg.generic {
indexing_maps = [#map0, #map0, #map0],
@@ -335,8 +335,8 @@
func @indexed_producer_consumer_fusion(%arg0: tensor) -> tensor {
%c0 = constant 0 : index
%c1 = constant 1 : index
- %0 = memref.dim %arg0, %c0 : tensor
- %1 = memref.dim %arg0, %c1 : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
+ %1 = tensor.dim %arg0, %c1 : tensor
%2 = linalg.init_tensor [%0, %1] : tensor
%3 = linalg.generic {
indexing_maps = [#map0, #map0],
@@ -389,8 +389,8 @@
-> tensor {
%c0 = constant 0 : index
%c1 = constant 1 : index
- %0 = memref.dim %arg0, %c0 : tensor
- %1 = memref.dim %arg0, %c1 : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
+ %1 = tensor.dim %arg0, %c1 : tensor
%2 = linalg.init_tensor [%0, %1] : tensor
%3 = linalg.generic {
indexing_maps = [#map0, #map0],
@@ -452,7 +452,7 @@
%arg1 : tensor) -> tensor {
%c0 = constant 0 : index
%c1 = constant 1 : index
- %d0 = memref.dim %arg0, %c0 : tensor
+ %d0 = tensor.dim %arg0, %c0 : tensor
%0 = linalg.init_tensor [%d0] : tensor
%1 = linalg.generic
{indexing_maps = [#map1, #map1],
@@ -464,8 +464,8 @@
%4 = addi %arg2, %3 : i32
linalg.yield %4 : i32
} -> tensor
- %2 = memref.dim %arg1, %c0 : tensor
- %3 = memref.dim %arg1, %c1 : tensor
+ %2 = tensor.dim %arg1, %c0 : tensor
+ %3 = tensor.dim %arg1, %c1 : tensor
%4 = linalg.init_tensor [%2, %3] : tensor
%5 = linalg.generic
{indexing_maps = [#map2, #map3, #map2],
@@ -629,7 +629,7 @@
^bb0(%a: f32):  // no predecessors
linalg.yield %cp5 : f32
} -> tensor
- %d0 = memref.dim %0, %c0 : tensor
+ %d0 = tensor.dim %0, %c0 : tensor
%init1 = linalg.init_tensor [%d0, 1] : tensor
%2 = linalg.generic {indexing_maps = [
affine_map<(d0, d1) -> (d0, d1)>,
@@ -730,13 +730,13 @@
// CHECK-SAME: %[[ARG0:.+]]: tensor)
// CHECK-DAG: %[[C0:.+]] = constant 0 : index
// CHECK-DAG: %[[C1:.+]] = constant 1 : index
-// CHECK-DAG: %[[D0:.+]] = memref.dim %[[ARG0]], %[[C0]]
-// CHECK-DAG: %[[D1:.+]] = memref.dim %[[ARG0]], %[[C1]]
+// CHECK-DAG: %[[D0:.+]] = tensor.dim %[[ARG0]], %[[C0]]
+// CHECK-DAG: %[[D1:.+]] = tensor.dim %[[ARG0]], %[[C1]]
// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor [%[[D0]], %[[D1]]]
// CHECK: %[[GENERIC1:.+]] = linalg.generic
// CHECK-SAME: outs(%[[INIT]] : tensor)
-// CHECK-DAG: %[[D0:.+]] = memref.dim %[[GENERIC1]], %[[C0]]
-// CHECK-DAG: %[[D1:.+]] = memref.dim %[[GENERIC1]], %[[C1]]
+// CHECK-DAG: %[[D0:.+]] = tensor.dim %[[GENERIC1]], %[[C0]]
+// CHECK-DAG: %[[D1:.+]] = tensor.dim %[[GENERIC1]], %[[C1]]
// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor [%[[D0]], %[[D1]]]
// CHECK: %[[RESULT:.+]] = linalg.generic
// CHECK-SAME: outs(%[[INIT]] : tensor)
diff --git a/mlir/test/Dialect/Linalg/hoist-padding.mlir b/mlir/test/Dialect/Linalg/hoist-padding.mlir
--- a/mlir/test/Dialect/Linalg/hoist-padding.mlir
+++ b/mlir/test/Dialect/Linalg/hoist-padding.mlir
@@ -38,12 +38,12 @@
%c0 = constant 0 : index
%c1 = constant 1 : index
- // CHECK-DAG: %[[dM:.*]] = memref.dim %[[TA]], %[[C0]] : tensor
- // CHECK-DAG: %[[dK:.*]] = memref.dim %[[TA]], %[[C1]] : tensor
- // CHECK-DAG: %[[dN:.*]] = memref.dim %[[TB]], %[[C1]] : tensor
- %0 = memref.dim %arg0, %c0 : tensor
- %1 = memref.dim %arg0, %c1 : tensor
- %2 = memref.dim %arg1, %c1 : tensor
+ // CHECK-DAG: %[[dM:.*]] = tensor.dim %[[TA]], %[[C0]] : tensor
+ // CHECK-DAG: %[[dK:.*]] = tensor.dim %[[TA]], %[[C1]] : tensor
+ // CHECK-DAG: %[[dN:.*]] = tensor.dim %[[TB]], %[[C1]] : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
+ %1 = tensor.dim %arg0, %c1 : tensor
+ %2 = tensor.dim %arg1, %c1 : tensor
// CHECK: scf.for %[[I:[0-9a-z]+]] =
// First padded tensor is MxKx2x4 under loop M so Kx2x4
@@ -94,19 +94,19 @@
%3 = scf.for %arg3 = %c0 to %0 step %c2 iter_args(%arg4 = %arg2) -> (tensor) {
%4 = scf.for %arg5 = %c0 to %2 step %c3 iter_args(%arg6 = %arg4) -> (tensor) {
%5 = scf.for %arg7 = %c0 to %1 step %c4 iter_args(%arg8 = %arg6) -> (tensor) {
- %6 = memref.dim %arg0, %c0 : tensor
+ %6 = tensor.dim %arg0, %c0 : tensor
%7 = affine.min #map0(%arg3)[%6]
- %8 = memref.dim %arg0, %c1 : tensor
+ %8 = tensor.dim %arg0, %c1 : tensor
%9 = affine.min #map1(%arg7)[%8]
%10 = tensor.extract_slice %arg0[%arg3, %arg7] [%7, %9] [1, 1] : tensor to tensor
- %11 = memref.dim %arg1, %c0 : tensor
+ %11 = tensor.dim %arg1, %c0 : tensor
%12 = affine.min #map1(%arg7)[%11]
- %13 = memref.dim %arg1, %c1 : tensor
+ %13 = tensor.dim %arg1, %c1 : tensor
%14 = affine.min #map2(%arg5)[%13]
%15 = tensor.extract_slice %arg1[%arg7, %arg5] [%12, %14] [1, 1] : tensor to tensor
- %16 = memref.dim %arg8, %c0 : tensor
+ %16 = tensor.dim %arg8, %c0 : tensor
%17 = affine.min #map3(%16, %arg3)
- %18 = memref.dim %arg8, %c1 : tensor
+ %18 = tensor.dim %arg8, %c1 : tensor
%19 = affine.min #map4(%18, %arg5)
%20 = tensor.extract_slice %arg8[%arg3, %arg5] [%17, %19] [1, 1] : tensor to tensor
%21 = subi %c2, %7 : index
@@ -159,9 +159,9 @@
%cst = constant 0.000000e+00 : f32
%c2 = constant 2 : index
%c0 = constant 0 : index
- %1 = memref.dim %arg0, %c0 : tensor
- %2 = memref.dim %arg0, %c0 : tensor
- %3 = memref.dim %arg1, %c0 : tensor
+ %1 = tensor.dim %arg0, %c0 : tensor
+ %2 = tensor.dim %arg0, %c0 : tensor
+ %3 = tensor.dim %arg1, %c0 : tensor
// CHECK: scf.for %[[I:[0-9a-z]+]] =
//
diff --git a/mlir/test/Dialect/Linalg/reshape_fusion.mlir b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
--- a/mlir/test/Dialect/Linalg/reshape_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
@@ -478,7 +478,7 @@
%c1 = constant 1 : index
%0 = linalg.tensor_collapse_shape %arg0 [[0, 1, 2], [3, 4], [5]]
: tensor<1x?x1x2x1x4xf32> into tensor
- %1 = memref.dim %arg0, %c1 : tensor<1x?x1x2x1x4xf32>
+ %1 = tensor.dim %arg0, %c1 : tensor<1x?x1x2x1x4xf32>
%2 = linalg.init_tensor [%1, 2, 4] : tensor
%3 = linalg.generic
{indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
diff --git a/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir b/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
--- a/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
+++ b/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
@@ -5,8 +5,8 @@
%c2 = constant 2 : index
%c6 = constant 6 : index
%0 = linalg.init_tensor [4, 5, %c6] : tensor<4x5x?xf32>
- %1 = memref.dim %0, %c2 : tensor<4x5x?xf32>
- %2 = memref.dim %0, %c0 : tensor<4x5x?xf32>
+ %1 = tensor.dim %0, %c2 : tensor<4x5x?xf32>
+ %2 = tensor.dim %0, %c0 : tensor<4x5x?xf32>
return %1, %2 : index, index
}
// CHECK: func @init_tensor_static_dim
@@ -19,7 +19,7 @@
func @init_tensor_dynamic_dim(%arg0 : index) -> (index) {
%c2 = constant 2 : index
%0 = linalg.init_tensor [4, 5, %arg0] : tensor<4x5x?xf32>
- %1 = memref.dim %0, %c2 : tensor<4x5x?xf32>
+ %1 = tensor.dim %0, %c2 : tensor<4x5x?xf32>
return %1 : index
}
// CHECK: func @init_tensor_dynamic_dim
@@ -32,8 +32,8 @@
%c0 = constant 0 : index
%c1 = constant 1 : index
%0 = linalg.init_tensor [%arg0, %arg1] : tensor
- %1 = memref.dim %0, %c0 : tensor
- %2 = memref.dim %0, %c1 : tensor
+ %1 = tensor.dim %0, %c0 : tensor
+ %2 = tensor.dim %0, %c1 : tensor
return %1, %2 : index, index
}
// CHECK: func @init_tensor_dynamic_dim2
@@ -60,8 +60,8 @@
%2 = addf %1, %arg5 : f32
linalg.yield %2 : f32
} -> tensor
- %3 = memref.dim %0, %c0 : tensor
- %4 = memref.dim %0, %c1 : tensor
+ %3 = tensor.dim %0, %c0 : tensor
+ %4 = tensor.dim %0, %c1 : tensor
return %3, %4 : index, index
}
// CHECK: #[[MAP0:.+]] = affine_map<()[s0, s1] -> (s0 + s1)>
@@ -72,11 +72,11 @@
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: tensor
// CHECK-DAG: %[[C0:.+]] = constant 0 : index
// CHECK-DAG: %[[C1:.+]] = constant 1 : index
-// CHECK-DAG: %[[T0:.+]] = memref.dim %[[ARG0]], %[[C0]]
-// CHECK-DAG: %[[T1:.+]] = memref.dim %[[ARG1]], %[[C1]]
+// CHECK-DAG: %[[T0:.+]] = tensor.dim %[[ARG0]], %[[C0]]
+// CHECK-DAG: %[[T1:.+]] = tensor.dim %[[ARG1]], %[[C1]]
// CHECK: %[[T2:.+]] = affine.apply #[[MAP0]]()[%[[T0]], %[[T1]]]
-// CHECK-DAG: %[[T3:.+]] = memref.dim %[[ARG0]], %[[C0]]
-// CHECK-DAG: %[[T4:.+]] = memref.dim %[[ARG1]], %[[C1]]
+// CHECK-DAG: %[[T3:.+]] = tensor.dim %[[ARG0]], %[[C0]]
+// CHECK-DAG: %[[T4:.+]] = tensor.dim %[[ARG1]], %[[C1]]
// CHECK: %[[T5:.+]] = affine.apply #[[MAP1]]()[%[[T3]], %[[T4]]]
// CHECK: return %[[T2]], %[[T5]]
@@ -86,7 +86,7 @@
(%arg0 : tensor, %arg1 : index) -> (index) {
%c0 = constant 0 : index
%c1 = constant 1 : index
- %d0 = memref.dim %arg0, %c0 : tensor
+ %d0 = tensor.dim %arg0, %c0 : tensor
%0 = linalg.init_tensor [%d0, %arg1] : tensor
%1 = linalg.generic
{indexing_maps = [affine_map<(d0, d1) -> (d0)>,
@@ -96,7 +96,7 @@
^bb0(%arg2: f32, %arg3: f32) :
linalg.yield %arg2 : f32
} -> tensor
- %2 = memref.dim %1, %c1 : tensor
+ %2 = tensor.dim %1, %c1 : tensor
return %2 : index
}
// CHECK: func @remove_dim_result_uses_outs
@@ -112,8 +112,8 @@
%c1 = constant 1 : index
%0 = linalg.matmul ins(%arg0, %arg1 : tensor, tensor)
outs(%arg2 : tensor) -> tensor
- %1 = memref.dim %0, %c0 : tensor
- %2 = memref.dim %0, %c1 : tensor
+ %1 = tensor.dim %0, %c0 : tensor
+ %2 = tensor.dim %0, %c1 : tensor
%3 = linalg.generic
{indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d0)>,
affine_map<(d0, d1, d2) -> (d0, d2)>,
@@ -126,8 +126,8 @@
%5 = addf %4, %arg5 : f32
linalg.yield %5 : f32
} -> tensor
- %6 = memref.dim %3, %c0 : tensor
- %7 = memref.dim %3, %c1 : tensor
+ %6 = tensor.dim %3, %c0 : tensor
+ %7 = tensor.dim %3, %c1 : tensor
return %1, %2, %6, %7 : index, index, index, index
}
// CHECK-LABEL: func @remove_dim_result_uses_sequence
@@ -136,10 +136,10 @@
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: tensor
// CHECK-DAG: %[[C0:.+]] = constant 0 : index
// CHECK-DAG: %[[C1:.+]] = constant 1 : index
-// CHECK-DAG: %[[T0:.+]] = memref.dim %[[ARG0]], %[[C0]]
-// CHECK-DAG: %[[T1:.+]] = memref.dim %[[ARG1]], %[[C1]]
-// CHECK-DAG: %[[T2:.+]] = memref.dim %[[ARG0]], %[[C1]]
-// CHECK-DAG: %[[T3:.+]] = memref.dim %[[ARG1]], %[[C1]]
+// CHECK-DAG: %[[T0:.+]] = tensor.dim %[[ARG0]], %[[C0]]
+// CHECK-DAG: %[[T1:.+]] = tensor.dim %[[ARG1]], %[[C1]]
+// CHECK-DAG: %[[T2:.+]] = tensor.dim %[[ARG0]], %[[C1]]
+// CHECK-DAG: %[[T3:.+]] = tensor.dim %[[ARG1]], %[[C1]]
// CHECK: return %[[T0]], %[[T1]], %[[T2]], %[[T3]]
// -----
@@ -148,7 +148,7 @@
(%arg0 : tensor, %arg1 : index) -> (index, index) {
%c0 = constant 0 : index
%c1 = constant 1 : index
- %d0 = memref.dim %arg0, %c0 : tensor
+ %d0 = tensor.dim %arg0, %c0 : tensor
%0 = linalg.init_tensor [%d0, %arg1] : tensor
%1 = linalg.generic
{indexing_maps = [affine_map<(d0, d1) -> (d0)>,
@@ -158,15 +158,15 @@
^bb0(%arg2: f32, %arg3 : f32):
linalg.yield %arg2 : f32
} -> tensor
- %2 = memref.dim %1, %c0 : tensor
- %3 = memref.dim %1, %c1 : tensor
+ %2 = tensor.dim %1, %c0 : tensor
+ %3 = tensor.dim %1, %c1 : tensor
return %2, %3 : index, index
}
// CHECK: func @keep_result_dim_uses_sequence2
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: index
// CHECK-DAG: %[[C0:.+]] = constant 0 : index
-// CHECK-DAG: %[[T0:.+]] = memref.dim %[[ARG0]], %[[C0]]
+// CHECK-DAG: %[[T0:.+]] = tensor.dim %[[ARG0]], %[[C0]]
// CHECK: return %[[T0]], %[[ARG1]]
// -----
@@ -185,16 +185,16 @@
} -> (tensor, tensor)
%c0 = constant 0 : index
- %num_elem_0 = memref.dim %0, %c0 : tensor
+ %num_elem_0 = tensor.dim %0, %c0 : tensor
- %num_elem_1 = memref.dim %1, %c0 : tensor
+ %num_elem_1 = tensor.dim %1, %c0 : tensor
return %num_elem_0, %num_elem_1 : index, index
}
// CHECK: func @init_tensor_dim_of_linalg_result(
// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor
// CHECK-SAME: %[[ARG_1:[a-zA-Z0-9_]+]]: tensor)
-// CHECK: %[[R0:.+]] = memref.dim %[[ARG_0]]
-// CHECK: %[[R1:.+]] = memref.dim %[[ARG_0]]
+// CHECK: %[[R0:.+]] = tensor.dim %[[ARG_0]]
+// CHECK: %[[R1:.+]] = tensor.dim %[[ARG_0]]
// CHECK: return %[[R0]], %[[R1]]
// -----
@@ -206,9 +206,9 @@
%c4 = constant 4 : index
%0 = linalg.tensor_expand_shape %arg0 [[0, 1], [2], [3, 4, 5]]
: tensor<6x5x?xf32> into tensor<2x3x5x4x?x7xf32>
- %1 = memref.dim %0, %c1 : tensor<2x3x5x4x?x7xf32>
- %2 = memref.dim %0, %c3 : tensor<2x3x5x4x?x7xf32>
- %3 = memref.dim %0, %c4 : tensor<2x3x5x4x?x7xf32>
+ %1 = tensor.dim %0, %c1 : tensor<2x3x5x4x?x7xf32>
+ %2 = tensor.dim %0, %c3 : tensor<2x3x5x4x?x7xf32>
+ %3 = tensor.dim %0, %c4 : tensor<2x3x5x4x?x7xf32>
return %1, %2, %3 : index, index, index
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0] -> (s0 floordiv 28)>
@@ -217,7 +217,7 @@
// CHECK-DAG: %[[C2:.+]] = constant 2 : index
// CHECK-DAG: %[[C3:.+]] = constant 3 : index
// CHECK-DAG: %[[C4:.+]] = constant 4 : index
-// CHECK: %[[D0:.+]] = memref.dim %[[ARG0]], %[[C2]]
+// CHECK: %[[D0:.+]] = tensor.dim %[[ARG0]], %[[C2]]
// CHECK: %[[D1:.+]] = affine.apply #[[MAP]]()[%[[D0]]]
// CHECK: return %[[C3]], %[[C4]], %[[D1]]
@@ -229,8 +229,8 @@
%c2 = constant 2 : index
%0 = linalg.tensor_collapse_shape %arg0 [[0, 1], [2], [3, 4, 5]]
: tensor<2x3x5x4x?x7xf32> into tensor<6x5x?xf32>
- %1 = memref.dim %0, %c1 : tensor<6x5x?xf32>
- %2 = memref.dim %0, %c2 : tensor<6x5x?xf32>
+ %1 = tensor.dim %0, %c1 : tensor<6x5x?xf32>
+ %2 = tensor.dim %0, %c2 : tensor<6x5x?xf32>
return %1, %2 : index, index
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0] -> (s0 * 28)>
@@ -238,7 +238,7 @@
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<2x3x5x4x?x7xf32>
// CHECK-DAG: %[[C4:.+]] = constant 4 : index
// CHECK-DAG: %[[C5:.+]] = constant 5 : index
-// CHECK: %[[D0:.+]] = memref.dim %[[ARG0]], %[[C4]]
+// CHECK: %[[D0:.+]] = tensor.dim %[[ARG0]], %[[C4]]
// CHECK: %[[D1:.+]] = affine.apply #[[MAP]]()[%[[D0]]]
// CHECK: return %[[C5]], %[[D1]]
@@ -257,9 +257,9 @@
^bb0(%arg4: index, %arg5: index, %arg6: index):
linalg.yield %arg3 : f32
} : tensor<2x?x?xf32> to tensor
- %1 = memref.dim %0, %c0 : tensor
- %2 = memref.dim %0, %c1 : tensor
- %3 = memref.dim %0, %c2 : tensor
+ %1 = tensor.dim %0, %c0 : tensor
+ %2 = tensor.dim %0, %c1 : tensor
+ %3 = tensor.dim %0, %c2 : tensor
return %1, %2, %3 : index, index, index
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0, s1] -> (s1 + s0 + 5)>
@@ -271,8 +271,8 @@
// CHECK-DAG: %[[C1:.+]] = constant 1 : index
// CHECK-DAG: %[[C2:.+]] = constant 2 : index
// CHECK-DAG: %[[C12:.+]] = constant 12 : index
-// CHECK: %[[IN_DIM1:.+]] = memref.dim %[[ARG0]], %[[C1]]
+// CHECK: %[[IN_DIM1:.+]] = tensor.dim %[[ARG0]], %[[C1]]
// CHECK: %[[OUT_DIM1:.+]] = affine.apply #[[MAP0]]()[%[[ARG1]], %[[IN_DIM1]]]
-// CHECK: %[[IN_DIM2:.+]] = memref.dim %[[ARG0]], %[[C2]]
+// CHECK: %[[IN_DIM2:.+]] = tensor.dim %[[ARG0]], %[[C2]]
// CHECK: %[[OUT_DIM2:.+]] = affine.apply #[[MAP1]]()[%[[ARG2]], %[[IN_DIM2]]]
// CHECK: return %[[C12]], %[[OUT_DIM1]], %[[OUT_DIM2]]
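Aside: the pass exercised in this file rewrites a `tensor.dim` of an op result into a `tensor.dim` of a matching operand, using the shape interfaces. A minimal sketch of the matmul case, not part of the patch (names and `?x?xf32` types are assumptions):

```mlir
func @dim_of_matmul(%a: tensor<?x?xf32>, %b: tensor<?x?xf32>,
                    %c: tensor<?x?xf32>) -> index {
  %c0 = constant 0 : index
  %0 = linalg.matmul ins(%a, %b : tensor<?x?xf32>, tensor<?x?xf32>)
                     outs(%c : tensor<?x?xf32>) -> tensor<?x?xf32>
  // The row count of the result equals the row count of the LHS, so this
  // resolves to: %d = tensor.dim %a, %c0 : tensor<?x?xf32>
  // (and the matmul itself can then become dead if otherwise unused).
  %d = tensor.dim %0, %c0 : tensor<?x?xf32>
  return %d : index
}
```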
diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -781,9 +781,9 @@
%c2 = constant 2 : index
%c4 = constant 4 : index
%c8 = constant 8 : index
- %X = memref.dim %input_3d, %c0 : tensor<16x24x32xf32>
- %Y = memref.dim %input_3d, %c1 : tensor<16x24x32xf32>
- %Z = memref.dim %input_3d, %c2 : tensor<16x24x32xf32>
+ %X = tensor.dim %input_3d, %c0 : tensor<16x24x32xf32>
+ %Y = tensor.dim %input_3d, %c1 : tensor<16x24x32xf32>
+ %Z = tensor.dim %input_3d, %c2 : tensor<16x24x32xf32>
%result = linalg.tiled_loop (%i, %j, %k) = (%c0, %c0, %c0)
to (%X, %Y, %Z) step (%c2, %c4, %c8)
ins(%i3d_ = %input_3d: tensor<16x24x32xf32>,
diff --git a/mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir b/mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir
--- a/mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir
+++ b/mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir
@@ -133,7 +133,7 @@
// CHECK-SAME: %[[ARG0:.*]]: tensor
// CHECK-NOT: linalg.pad_tensor
// CHECK: %[[C0:.*]] = constant 0 : index
-// CHECK: memref.dim %[[ARG0]], %[[C0]]
+// CHECK: tensor.dim %[[ARG0]], %[[C0]]
// CHECK: %[[RESULT:.*]] = scf.if %{{.*}} -> (tensor<3x4xf32>) {
// CHECK: %[[GEN:.*]] = tensor.generate
// CHECK: scf.yield %[[GEN]]
diff --git a/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir b/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir
--- a/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir
@@ -10,9 +10,9 @@
%c0 = constant 0 : index
%c3 = constant 3 : index
%c1 = constant 1 : index
- %0 = memref.dim %t0, %c0 : tensor
- %1 = memref.dim %t0, %c1 : tensor
- %2 = memref.dim %arg1, %c1 : tensor
+ %0 = tensor.dim %t0, %c0 : tensor
+ %1 = tensor.dim %t0, %c1 : tensor
+ %2 = tensor.dim %arg1, %c1 : tensor
%3 = scf.for %arg3 = %c0 to %0 step %c2 iter_args(%arg4 = %arg2) -> (tensor) {
%4 = scf.for %arg5 = %c0 to %2 step %c3 iter_args(%arg6 = %arg4) -> (tensor) {
%5 = scf.for %arg7 = %c0 to %1 step %c4 iter_args(%arg8 = %arg6) -> (tensor) {
@@ -40,12 +40,12 @@
// CHECK-DAG: %[[C0:.*]] = constant 0 : index
// CHECK-DAG: %[[C1:.*]] = constant 1 : index
-// CHECK-DAG: %[[dA0:.*]] = memref.dim %[[A]], %[[C0]] : tensor
-// CHECK-DAG: %[[dA1:.*]] = memref.dim %[[A]], %[[C1]] : tensor
-// CHECK-DAG: %[[dB0:.*]] = memref.dim %[[B]], %[[C0]] : tensor
-// CHECK-DAG: %[[dB1:.*]] = memref.dim %[[B]], %[[C1]] : tensor
-// CHECK-DAG: %[[dC0:.*]] = memref.dim %[[C]], %[[C0]] : tensor
-// CHECK-DAG: %[[dC1:.*]] = memref.dim %[[C]], %[[C1]] : tensor
+// CHECK-DAG: %[[dA0:.*]] = tensor.dim %[[A]], %[[C0]] : tensor
+// CHECK-DAG: %[[dA1:.*]] = tensor.dim %[[A]], %[[C1]] : tensor
+// CHECK-DAG: %[[dB0:.*]] = tensor.dim %[[B]], %[[C0]] : tensor
+// CHECK-DAG: %[[dB1:.*]] = tensor.dim %[[B]], %[[C1]] : tensor
+// CHECK-DAG: %[[dC0:.*]] = tensor.dim %[[C]], %[[C0]] : tensor
+// CHECK-DAG: %[[dC1:.*]] = tensor.dim %[[C]], %[[C1]] : tensor
// CHECK: scf.for %[[I:[0-9a-z]*]]
// CHECK: %[[sizeA0:.*]] = affine.min #[[BOUND2_MAP]](%[[I]])[%[[dA0]]]
// CHECK: %[[stA:.*]] = tensor.extract_slice %[[A]][%[[I]], 0] [%[[sizeA0]], %[[dA1]]] [1, 1] : tensor to tensor
@@ -153,10 +153,10 @@
%c8 = constant 8 : index
%c16 = constant 16 : index
- %n = memref.dim %elementwise, %c0 : tensor
- %oh = memref.dim %elementwise, %c1 : tensor
- %ow = memref.dim %elementwise, %c2 : tensor
- %oc = memref.dim %elementwise, %c3 : tensor
+ %n = tensor.dim %elementwise, %c0 : tensor
+ %oh = tensor.dim %elementwise, %c1 : tensor
+ %ow = tensor.dim %elementwise, %c2 : tensor
+ %oc = tensor.dim %elementwise, %c3 : tensor
%init = linalg.init_tensor [%n, %oh, %ow, %oc] : tensor
%fill = linalg.fill(%cst, %init) : f32, tensor -> tensor
@@ -222,26 +222,26 @@
// CHECK-DAG: %[[C2:.+]] = constant 2 : index
// CHECK-DAG: %[[C3:.+]] = constant 3 : index
-// CHECK-DAG: %[[ELEM_N:.+]] = memref.dim %[[ELEM]], %[[C0]] : tensor
-// CHECK-DAG: %[[ELEM_OH:.+]] = memref.dim %[[ELEM]], %[[C1]] : tensor
-// CHECK-DAG: %[[ELEM_OW:.+]] = memref.dim %[[ELEM]], %[[C2]] : tensor
-// CHECK-DAG: %[[ELEM_OC:.+]] = memref.dim %[[ELEM]], %[[C3]] : tensor
+// CHECK-DAG: %[[ELEM_N:.+]] = tensor.dim %[[ELEM]], %[[C0]] : tensor
+// CHECK-DAG: %[[ELEM_OH:.+]] = tensor.dim %[[ELEM]], %[[C1]] : tensor
+// CHECK-DAG: %[[ELEM_OW:.+]] = tensor.dim %[[ELEM]], %[[C2]] : tensor
+// CHECK-DAG: %[[ELEM_OC:.+]] = tensor.dim %[[ELEM]], %[[C3]] : tensor
// CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[ELEM_N]], %[[ELEM_OH]], %[[ELEM_OW]], %[[ELEM_OC]]] : tensor
// CHECK: %[[FILL:.+]] = linalg.fill(%cst, %[[INIT]]) : f32, tensor -> tensor
-// CHECK-DAG: %[[FILTER_H:.+]] = memref.dim %[[FILTER]], %[[C0]] : tensor
-// CHECK-DAG: %[[FILTER_W:.+]] = memref.dim %[[FILTER]], %[[C1]] : tensor
-// CHECK-DAG: %[[INPUT_N:.+]] = memref.dim %[[INPUT]], %[[C0]] : tensor
-// CHECK-DAG: %[[INPUT_H:.+]] = memref.dim %[[INPUT]], %[[C1]] : tensor
-// CHECK-DAG: %[[INPUT_W:.+]] = memref.dim %[[INPUT]], %[[C2]] : tensor
-// CHECK-DAG: %[[INPUT_C:.+]] = memref.dim %[[INPUT]], %[[C3]] : tensor
-// CHECK-DAG: %[[FILTER_IC:.+]] = memref.dim %[[FILTER]], %[[C2]] : tensor
-// CHECK-DAG: %[[FILTER_OC:.+]] = memref.dim %[[FILTER]], %[[C3]] : tensor
-// CHECK-DAG: %[[FILL_N:.+]] = memref.dim %[[FILL]], %[[C0]] : tensor
-// CHECK-DAG: %[[FILL_H:.+]] = memref.dim %[[FILL]], %[[C1]] : tensor
-// CHECK-DAG: %[[FILL_W:.+]] = memref.dim %[[FILL]], %[[C2]] : tensor
-// CHECK-DAG: %[[FILL_C:.+]] = memref.dim %[[FILL]], %[[C3]] : tensor
+// CHECK-DAG: %[[FILTER_H:.+]] = tensor.dim %[[FILTER]], %[[C0]] : tensor
+// CHECK-DAG: %[[FILTER_W:.+]] = tensor.dim %[[FILTER]], %[[C1]] : tensor
+// CHECK-DAG: %[[INPUT_N:.+]] = tensor.dim %[[INPUT]], %[[C0]] : tensor
+// CHECK-DAG: %[[INPUT_H:.+]] = tensor.dim %[[INPUT]], %[[C1]] : tensor
+// CHECK-DAG: %[[INPUT_W:.+]] = tensor.dim %[[INPUT]], %[[C2]] : tensor
+// CHECK-DAG: %[[INPUT_C:.+]] = tensor.dim %[[INPUT]], %[[C3]] : tensor
+// CHECK-DAG: %[[FILTER_IC:.+]] = tensor.dim %[[FILTER]], %[[C2]] : tensor
+// CHECK-DAG: %[[FILTER_OC:.+]] = tensor.dim %[[FILTER]], %[[C3]] : tensor
+// CHECK-DAG: %[[FILL_N:.+]] = tensor.dim %[[FILL]], %[[C0]] : tensor
+// CHECK-DAG: %[[FILL_H:.+]] = tensor.dim %[[FILL]], %[[C1]] : tensor
+// CHECK-DAG: %[[FILL_W:.+]] = tensor.dim %[[FILL]], %[[C2]] : tensor
+// CHECK-DAG: %[[FILL_C:.+]] = tensor.dim %[[FILL]], %[[C3]] : tensor
// CHECK: scf.for %[[IV0:.+]] = %{{.+}} to %[[ELEM_N]] step %{{.+}} iter_args(%{{.+}} = %[[FILL]])
// CHECK-NEXT: %[[SIZE_ELEM_N:.+]] = affine.min #[[BOUND8_MAP]](%[[IV0]])[%[[ELEM_N]]]
@@ -311,8 +311,8 @@
%c32 = constant 32 : index
%zero = constant 0.0 : f32
- %d0 = memref.dim %large_input, %c0 : tensor<64x128xf32>
- %d1 = memref.dim %large_input, %c1 : tensor<64x128xf32>
+ %d0 = tensor.dim %large_input, %c0 : tensor<64x128xf32>
+ %d1 = tensor.dim %large_input, %c1 : tensor<64x128xf32>
%pad = linalg.pad_tensor %small_input low[4, 60] high[2, 67] {
^bb0(%arg0: index, %arg1: index):
diff --git a/mlir/test/Dialect/Linalg/tile-tensors.mlir b/mlir/test/Dialect/Linalg/tile-tensors.mlir
--- a/mlir/test/Dialect/Linalg/tile-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-tensors.mlir
@@ -38,9 +38,9 @@
// TLOOP-DAG: %[[C3:.*]] = constant 3 : index
// TLOOP-DAG: %[[C4:.*]] = constant 4 : index
-// TLOOP: %[[ARG_0_X:.*]] = memref.dim %[[ARG_0]], %[[C0]] : [[TY]]
-// TLOOP: %[[ARG_0_Y:.*]] = memref.dim %[[ARG_0]], %[[C1]] : [[TY]]
-// TLOOP: %[[ARG_1_Y:.*]] = memref.dim %[[ARG_1]], %[[C1]] : [[TY]]
+// TLOOP: %[[ARG_0_X:.*]] = tensor.dim %[[ARG_0]], %[[C0]] : [[TY]]
+// TLOOP: %[[ARG_0_Y:.*]] = tensor.dim %[[ARG_0]], %[[C1]] : [[TY]]
+// TLOOP: %[[ARG_1_Y:.*]] = tensor.dim %[[ARG_1]], %[[C1]] : [[TY]]
// TLOOP: %{{.*}} = linalg.tiled_loop (%[[I:.*]], %[[J:.*]], %[[K:.*]]) =
// TLOOP-SAME: (%[[C0]], %[[C0]], %[[C0]])
@@ -68,9 +68,9 @@
%c0 = constant 0 : index
%c1 = constant 1 : index
%c2 = constant 2 : index
- %0 = memref.dim %arg0, %c0 : tensor
- %1 = memref.dim %arg0, %c1 : tensor
- %2 = memref.dim %arg0, %c2 : tensor
+ %0 = tensor.dim %arg0, %c0 : tensor
+ %1 = tensor.dim %arg0, %c1 : tensor
+ %2 = tensor.dim %arg0, %c2 : tensor
%3 = linalg.init_tensor [%0, %1, %2] : tensor
%4 = linalg.generic
{indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
@@ -119,9 +119,9 @@
// TLOOP-DAG: %[[C4:.*]] = constant 4 : index
// TLOOP: %[[INIT:.*]] = linalg.init_tensor
-// TLOOP: %[[ARG_0_X:.*]] = memref.dim %[[ARG_0]], %[[C0]] : [[TY]]
-// TLOOP: %[[ARG_0_Y:.*]] = memref.dim %[[ARG_0]], %[[C1]] : [[TY]]
-// TLOOP: %[[ARG_0_Z:.*]] = memref.dim %[[ARG_0]], %[[C2]] : [[TY]]
+// TLOOP: %[[ARG_0_X:.*]] = tensor.dim %[[ARG_0]], %[[C0]] : [[TY]]
+// TLOOP: %[[ARG_0_Y:.*]] = tensor.dim %[[ARG_0]], %[[C1]] : [[TY]]
+// TLOOP: %[[ARG_0_Z:.*]] = tensor.dim %[[ARG_0]], %[[C2]] : [[TY]]
// TLOOP: %{{.*}} = linalg.tiled_loop (%{{.*}}, %{{.*}}, %{{.*}}) =
// TLOOP-SAME: (%[[C0]], %[[C0]], %[[C0]])
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -580,12 +580,12 @@
// CHECK: %[[V0:.*]] = addi %[[LOW]], %[[C2]] : index
// CHECK: %[[V1:.*]] = addi %[[V0]], %[[C3]] : index
// CHECK: %[[V2:.*]] = addi %[[HIGH]], %[[C5]] : index
-// CHECK: %[[DIM3:.*]] = memref.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32>
+// CHECK: %[[DIM3:.*]] = tensor.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32>
// CHECK: %[[V4:.*]] = addi %[[DIM3]], %[[C3]] : index
// CHECK: %[[V5:.*]] = addi %[[V4]], %[[C2]] : index
// CHECK: %[[INIT:.*]] = linalg.init_tensor [6, %[[V1]], %[[V2]], %[[V5]]] : tensor<6x?x?x?xf32>
// CHECK: %[[FILL:.*]] = linalg.fill(%{{.*}}, %[[INIT]]) : f32, tensor<6x?x?x?xf32> -> tensor<6x?x?x?xf32>
-// CHECK: %[[SRCDIM:.*]] = memref.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32>
+// CHECK: %[[SRCDIM:.*]] = tensor.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32>
// CHECK: %[[RESULT:.*]] = tensor.insert_slice %[[SRC]] into %[[FILL]][2, %[[LOW]], 3, 3] [1, 2, 2, %[[SRCDIM]]] [1, 1, 1, 1] : tensor<1x2x2x?xf32> into tensor<6x?x?x?xf32>
// CHECK: return %[[RESULT]]
func @pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index,
diff --git a/mlir/test/Dialect/MemRef/canonicalize.mlir b/mlir/test/Dialect/MemRef/canonicalize.mlir
--- a/mlir/test/Dialect/MemRef/canonicalize.mlir
+++ b/mlir/test/Dialect/MemRef/canonicalize.mlir
@@ -236,7 +236,7 @@
// -----
-// Test case: Basic folding of memref.dim(memref.tensor_load(m)) -> memref.dim(m).
+// Test case: Basic folding of tensor.dim(memref.tensor_load(m)) -> memref.dim(m).
// CHECK-LABEL: func @dim_of_tensor_load(
// CHECK-SAME: %[[MEMREF:[0-9a-z]*]]: memref
// CHECK: %[[C0:.*]] = constant 0
@@ -245,24 +245,7 @@
func @dim_of_tensor_load(%arg0: memref) -> index {
%c0 = constant 0 : index
%0 = memref.tensor_load %arg0 : memref
- %1 = memref.dim %0, %c0 : tensor
- return %1 : index
-}
-
-// -----
-
-// Test case: Folding of memref.dim(tensor.generate %idx) -> %idx
-// CHECK-LABEL: func @dim_of_tensor.generate(
-// CHECK-SAME: %[[IDX0:[0-9a-z]+]]: index, %[[IDX1:[0-9a-z]+]]: index
-// CHECK-NOT: memref.dim
-// CHECK: return %[[IDX1]] : index
-func @dim_of_tensor.generate(%arg0: index, %arg1: index) -> index {
- %c3 = constant 3 : index
- %0 = tensor.generate %arg0, %arg1 {
- ^bb0(%arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: index):
- tensor.yield %c3 : index
- } : tensor<2x?x4x?x5xindex>
- %1 = memref.dim %0, %c3 : tensor<2x?x4x?x5xindex>
+ %1 = tensor.dim %0, %c0 : tensor
return %1 : index
}
@@ -338,24 +321,6 @@
// -----
-// Test case: Folding memref.dim(tensor.cast %0, %idx) -> memref.dim %0, %idx
-// CHECK-LABEL: func @fold_dim_of_tensor.cast
-// CHECK-SAME: %[[ARG0:.[a-z0-9A-Z_]+]]: tensor<4x?xf32>
-// CHECK-DAG: %[[C1:.+]] = constant 1 : index
-// CHECK-DAG: %[[C4:.+]] = constant 4 : index
-// CHECK: %[[T0:.+]] = memref.dim %[[ARG0]], %[[C1]]
-// CHECK-NEXT: return %[[C4]], %[[T0]]
-func @fold_dim_of_tensor.cast(%arg0 : tensor<4x?xf32>) -> (index, index) {
- %c0 = constant 0 : index
- %c1 = constant 1 : index
- %0 = tensor.cast %arg0 : tensor<4x?xf32> to tensor
- %1 = memref.dim %0, %c0 : tensor
- %2 = memref.dim %0, %c1 : tensor
- return %1, %2: index, index
-}
-
-// -----
-
// CHECK-LABEL: func @tensor_cast_to_memref
// CHECK-SAME: %[[ARG0:.+]]: tensor<4x6x16x32xi8>
// CHECK: %[[M:.+]] = memref.buffer_cast %[[ARG0]] : memref<4x6x16x32xi8>
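Aside: the one fold left in this file now crosses dialects, rewriting `tensor.dim` of a `memref.tensor_load` into `memref.dim` of the underlying buffer; the generate and cast folds move wholesale to the tensor dialect (see the Tensor/canonicalize.mlir hunk later in this patch). A sketch mirroring the remaining test, not part of the patch (the `?xf32` types are assumptions):

```mlir
func @dim_of_tensor_load(%m: memref<?xf32>) -> index {
  %c0 = constant 0 : index
  %t = memref.tensor_load %m : memref<?xf32>
  // Folds to: %d = memref.dim %m, %c0 : memref<?xf32>
  %d = tensor.dim %t, %c0 : tensor<?xf32>
  return %d : index
}
```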
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -36,7 +36,7 @@
// CHECK: return %[[D]] : index
func @sparse_dim(%arg0: tensor) -> index {
%c = constant 0 : index
- %0 = memref.dim %arg0, %c : tensor
+ %0 = tensor.dim %arg0, %c : tensor
return %0 : index
}
diff --git a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
@@ -1096,7 +1096,7 @@
// CHECK: %[[VAL_13:.*]] = sparse_tensor.pointers %[[VAL_3]], %[[VAL_5]] : tensor> to memref
// CHECK: %[[VAL_14:.*]] = sparse_tensor.indices %[[VAL_3]], %[[VAL_5]] : tensor> to memref
// CHECK: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_3]] : tensor> to memref
-// CHECK: %[[VAL_16:.*]] = memref.dim %[[VAL_4]], %[[VAL_5]] : tensor
+// CHECK: %[[VAL_16:.*]] = tensor.dim %[[VAL_4]], %[[VAL_5]] : tensor
// CHECK: %[[VAL_17:.*]] = memref.buffer_cast %[[VAL_4]] : memref
// CHECK: %[[VAL_18:.*]] = memref.alloc(%[[VAL_16]]) : memref
// CHECK: linalg.copy(%[[VAL_17]], %[[VAL_18]]) : memref, memref
diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
@@ -977,8 +977,8 @@
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor> to memref
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor> to memref
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref
-// CHECK: %[[VAL_8:.*]] = memref.dim %[[VAL_1]], %[[VAL_3]] : tensor
-// CHECK: %[[VAL_9:.*]] = memref.dim %[[VAL_1]], %[[VAL_4]] : tensor
+// CHECK: %[[VAL_8:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor
+// CHECK: %[[VAL_9:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor
// CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref
// CHECK: %[[VAL_11:.*]] = memref.alloc(%[[VAL_8]], %[[VAL_9]]) : memref
// CHECK: linalg.copy(%[[VAL_10]], %[[VAL_11]]) : memref, memref
@@ -1032,10 +1032,10 @@
// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor> to memref
// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref
-// CHECK: %[[VAL_12:.*]] = memref.dim %[[VAL_2]], %[[VAL_4]] : tensor
+// CHECK: %[[VAL_12:.*]] = tensor.dim %[[VAL_2]], %[[VAL_4]] : tensor
// CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref
-// CHECK: %[[VAL_14:.*]] = memref.dim %[[VAL_3]], %[[VAL_4]] : tensor
-// CHECK: %[[VAL_15:.*]] = memref.dim %[[VAL_3]], %[[VAL_5]] : tensor
+// CHECK: %[[VAL_14:.*]] = tensor.dim %[[VAL_3]], %[[VAL_4]] : tensor
+// CHECK: %[[VAL_15:.*]] = tensor.dim %[[VAL_3]], %[[VAL_5]] : tensor
// CHECK: %[[VAL_16:.*]] = memref.buffer_cast %[[VAL_3]] : memref
// CHECK: %[[VAL_17:.*]] = memref.alloc(%[[VAL_14]], %[[VAL_15]]) : memref
// CHECK: linalg.copy(%[[VAL_16]], %[[VAL_17]]) : memref, memref
@@ -1115,7 +1115,7 @@
// CHECK: %[[VAL_19:.*]] = sparse_tensor.values %[[VAL_2]] : tensor> to memref
// CHECK: %[[VAL_20:.*]] = memref.buffer_cast %[[VAL_3]] : memref
// CHECK: %[[VAL_21:.*]] = memref.buffer_cast %[[VAL_4]] : memref
-// CHECK: %[[VAL_22:.*]] = memref.dim %[[VAL_5]], %[[VAL_6]] : tensor
+// CHECK: %[[VAL_22:.*]] = tensor.dim %[[VAL_5]], %[[VAL_6]] : tensor
// CHECK: %[[VAL_23:.*]] = memref.buffer_cast %[[VAL_5]] : memref
// CHECK: %[[VAL_24:.*]] = memref.alloc(%[[VAL_22]]) : memref
// CHECK: linalg.copy(%[[VAL_23]], %[[VAL_24]]) : memref, memref
diff --git a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir
@@ -1135,11 +1135,11 @@
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor> to memref
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor> to memref
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref
-// CHECK: %[[VAL_10:.*]] = memref.dim %[[VAL_2]], %[[VAL_5]] : tensor
+// CHECK: %[[VAL_10:.*]] = tensor.dim %[[VAL_2]], %[[VAL_5]] : tensor
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref
// CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_3]] : memref
-// CHECK: %[[VAL_13:.*]] = memref.dim %[[VAL_0]], %[[VAL_5]] : tensor
-// CHECK: %[[VAL_14:.*]] = memref.dim %[[VAL_0]], %[[VAL_6]] : tensor
+// CHECK: %[[VAL_13:.*]] = tensor.dim %[[VAL_0]], %[[VAL_5]] : tensor
+// CHECK: %[[VAL_14:.*]] = tensor.dim %[[VAL_0]], %[[VAL_6]] : tensor
// CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_0]] : memref
// CHECK: %[[VAL_16:.*]] = memref.alloc(%[[VAL_13]], %[[VAL_14]]) : memref
// CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref, memref
@@ -1256,10 +1256,10 @@
// CHECK: %[[VAL_3:.*]] = constant 2 : index
// CHECK: %[[VAL_4:.*]] = constant 0 : index
// CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = memref.dim %[[VAL_0]], %[[VAL_5]] : tensor
-// CHECK: %[[VAL_7:.*]] = memref.dim %[[VAL_0]], %[[VAL_3]] : tensor
+// CHECK: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_5]] : tensor
+// CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor
// CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_0]] : memref
-// CHECK: %[[VAL_9:.*]] = memref.dim %[[VAL_1]], %[[VAL_4]] : tensor>
+// CHECK: %[[VAL_9:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref
// CHECK: %[[VAL_12:.*]] = memref.alloc() : memref
diff --git a/mlir/test/Dialect/Standard/bufferize.mlir b/mlir/test/Dialect/Standard/bufferize.mlir
--- a/mlir/test/Dialect/Standard/bufferize.mlir
+++ b/mlir/test/Dialect/Standard/bufferize.mlir
@@ -7,7 +7,7 @@
// CHECK: %[[EXTENT:.*]] = memref.dim %[[MEMREF]], %[[INDEX]] : memref
// CHECK: return %[[EXTENT]] : index
func @dim(%arg0: tensor, %arg1: index) -> index {
- %0 = memref.dim %arg0, %arg1 : tensor
+ %0 = tensor.dim %arg0, %arg1 : tensor
return %0 : index
}
diff --git a/mlir/test/Dialect/Tensor/bufferize.mlir b/mlir/test/Dialect/Tensor/bufferize.mlir
--- a/mlir/test/Dialect/Tensor/bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/bufferize.mlir
@@ -67,7 +67,7 @@
// CHECK: %[[C0:.*]] = constant 0 : index
// CHECK: %[[C1:.*]] = constant 1 : index
// CHECK: scf.parallel (%[[I:.*]]) = (%[[C0]]) to (%[[DYNAMIC_EXTENT]]) step (%[[C1]]) {
-// CHECK: %[[ELEM:.*]] = memref.dim %[[ARG]], %[[I]] : tensor<*xf32>
+// CHECK: %[[ELEM:.*]] = tensor.dim %[[ARG]], %[[I]] : tensor<*xf32>
// CHECK: store %[[ELEM]], %[[MEMREF]][%[[I]]] : memref
// CHECK: scf.yield
// CHECK: }
@@ -77,7 +77,7 @@
func @tensor.generate(%arg: tensor<*xf32>, %dynamic_extent: index) -> tensor {
%result = tensor.generate %dynamic_extent {
^bb0(%i : index):
- %elem = memref.dim %arg, %i : tensor<*xf32>
+ %elem = tensor.dim %arg, %i : tensor<*xf32>
tensor.yield %elem : index
} : tensor
return %result : tensor
diff --git a/mlir/test/Dialect/Tensor/canonicalize.mlir b/mlir/test/Dialect/Tensor/canonicalize.mlir
--- a/mlir/test/Dialect/Tensor/canonicalize.mlir
+++ b/mlir/test/Dialect/Tensor/canonicalize.mlir
@@ -184,10 +184,10 @@
// CHECK-SAME: %[[IDX:.*]]: index, %[[TENSOR:.*]]: tensor<*xf32>
func @extract_from_tensor.generate(%idx: index, %tensor: tensor<*xf32>) -> index {
%size = rank %tensor : tensor<*xf32>
- // CHECK-NEXT: %[[RES:.*]] = memref.dim %[[TENSOR]], %[[IDX]]
+ // CHECK-NEXT: %[[RES:.*]] = tensor.dim %[[TENSOR]], %[[IDX]]
%0 = tensor.generate %size {
^bb0(%arg0: index):
- %1 = memref.dim %tensor, %arg0 : tensor<*xf32>
+ %1 = tensor.dim %tensor, %arg0 : tensor<*xf32>
tensor.yield %1 : index
} : tensor
%1 = tensor.extract %0[%idx] : tensor
@@ -201,13 +201,13 @@
// CHECK-SAME: %[[IDX0:.*]]: index, %[[IDX1:.*]]: index, %[[TENSOR:.*]]: tensor<*xf32>
func @extract_from_tensor.generate_2d(%idx0: index, %idx1: index, %tensor: tensor<*xf32>) -> index {
%size = rank %tensor : tensor<*xf32>
- // CHECK-NEXT: %[[DIM0:.*]] = memref.dim %[[TENSOR]], %[[IDX0]]
- // CHECK-NEXT: %[[DIM1:.*]] = memref.dim %[[TENSOR]], %[[IDX1]]
+ // CHECK-NEXT: %[[DIM0:.*]] = tensor.dim %[[TENSOR]], %[[IDX0]]
+ // CHECK-NEXT: %[[DIM1:.*]] = tensor.dim %[[TENSOR]], %[[IDX1]]
// CHECK-NEXT: %[[RES:.*]] = addi %[[DIM0]], %[[DIM1]]
%0 = tensor.generate %size, %size {
^bb0(%arg0: index, %arg1: index):
- %1 = memref.dim %tensor, %arg0 : tensor<*xf32>
- %2 = memref.dim %tensor, %arg1 : tensor<*xf32>
+ %1 = tensor.dim %tensor, %arg0 : tensor<*xf32>
+ %2 = tensor.dim %tensor, %arg1 : tensor<*xf32>
%3 = addi %1, %2 : index
tensor.yield %3 : index
} : tensor
@@ -225,7 +225,7 @@
// CHECK: %[[DTENSOR:.*]] = tensor.generate
%0 = tensor.generate %size {
^bb0(%arg0: index):
- %1 = memref.dim %tensor, %arg0 : tensor<*xf32>
+ %1 = tensor.dim %tensor, %arg0 : tensor<*xf32>
memref.store %1, %mem[%arg0] : memref
tensor.yield %1 : index
} : tensor
@@ -443,7 +443,7 @@
%c1 = constant 1 : index
%c2 = constant 2 : index
%c8 = constant 8 : index
- %0 = memref.dim %arg0, %c1 : tensor<2x?xi32>
+ %0 = tensor.dim %arg0, %c1 : tensor<2x?xi32>
%1 = tensor.extract %arg1[] : tensor
%2 = tensor.generate %arg2, %c8 {
^bb0(%arg4: index, %arg5: index):
@@ -482,3 +482,38 @@
// CHECK: %[[GENERATE:.+]] = tensor.generate
// CHECK: %[[RESULT:.+]] = tensor.insert_slice %[[ARG0]] into %[[GENERATE]]
// CHECK: return %[[RESULT]]
+
+// -----
+
+// Test case: Folding of tensor.dim(tensor.generate %idx) -> %idx
+// CHECK-LABEL: func @dim_of_tensor.generate(
+// CHECK-SAME: %[[IDX0:[0-9a-z]+]]: index, %[[IDX1:[0-9a-z]+]]: index
+// CHECK-NOT: tensor.dim
+// CHECK: return %[[IDX1]] : index
+func @dim_of_tensor.generate(%arg0: index, %arg1: index) -> index {
+ %c3 = constant 3 : index
+ %0 = tensor.generate %arg0, %arg1 {
+ ^bb0(%arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: index):
+ tensor.yield %c3 : index
+ } : tensor<2x?x4x?x5xindex>
+ %1 = tensor.dim %0, %c3 : tensor<2x?x4x?x5xindex>
+ return %1 : index
+}
+
+// -----
+
+// Test case: Folding tensor.dim(tensor.cast %0, %idx) -> tensor.dim %0, %idx
+// CHECK-LABEL: func @fold_dim_of_tensor.cast
+// CHECK-SAME: %[[ARG0:.[a-z0-9A-Z_]+]]: tensor<4x?xf32>
+// CHECK-DAG: %[[C1:.+]] = constant 1 : index
+// CHECK-DAG: %[[C4:.+]] = constant 4 : index
+// CHECK: %[[T0:.+]] = tensor.dim %[[ARG0]], %[[C1]]
+// CHECK-NEXT: return %[[C4]], %[[T0]]
+func @fold_dim_of_tensor.cast(%arg0 : tensor<4x?xf32>) -> (index, index) {
+ %c0 = constant 0 : index
+ %c1 = constant 1 : index
+ %0 = tensor.cast %arg0 : tensor<4x?xf32> to tensor
+ %1 = tensor.dim %0, %c0 : tensor
+ %2 = tensor.dim %0, %c1 : tensor
+ return %1, %2: index, index
+}
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -35,9 +35,9 @@
%t = "getTensor"() : () -> tensor<4x4x?xf32>
// CHECK: %[[C2:.*]] = constant 2 : index
- // CHECK-NEXT: %{{.*}} = memref.dim %[[T]], %[[C2]] : tensor<4x4x?xf32>
+ // CHECK-NEXT: %{{.*}} = tensor.dim %[[T]], %[[C2]] : tensor<4x4x?xf32>
%c2 = constant 2 : index
- %t2 = "memref.dim"(%t, %c2) : (tensor<4x4x?xf32>, index) -> index
+ %t2 = "tensor.dim"(%t, %c2) : (tensor<4x4x?xf32>, index) -> index
// CHECK: %{{.*}} = addf %[[ARG]], %[[ARG]] : f32
%x = "std.addf"(%a, %a) : (f32,f32) -> (f32)
@@ -50,9 +50,9 @@
func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
^bb42(%t: tensor<4x4x?xf32>, %f: f32, %i: i32, %idx : index, %j: i64, %half: f16):
// CHECK: %[[C2:.*]] = constant 2 : index
- // CHECK: %[[A2:.*]] = memref.dim %arg0, %[[C2]] : tensor<4x4x?xf32>
+ // CHECK: %[[A2:.*]] = tensor.dim %arg0, %[[C2]] : tensor<4x4x?xf32>
%c2 = constant 2 : index
- %a2 = memref.dim %t, %c2 : tensor<4x4x?xf32>
+ %a2 = tensor.dim %t, %c2 : tensor<4x4x?xf32>
// CHECK: %[[F2:.*]] = addf %arg1, %arg1 : f32
%f2 = "std.addf"(%f, %f) : (f32,f32) -> f32
@@ -757,9 +757,9 @@
// CHECK-SAME: %[[ARG:.*]]: tensor<4x4x?xf32>
func @test_dimop(%arg0: tensor<4x4x?xf32>) {
// CHECK: %[[C2:.*]] = constant 2 : index
- // CHECK: %{{.*}} = memref.dim %[[ARG]], %[[C2]] : tensor<4x4x?xf32>
+ // CHECK: %{{.*}} = tensor.dim %[[ARG]], %[[C2]] : tensor<4x4x?xf32>
%c2 = constant 2 : index
- %0 = memref.dim %arg0, %c2 : tensor<4x4x?xf32>
+ %0 = tensor.dim %arg0, %c2 : tensor<4x4x?xf32>
// use dim as an index to ensure type correctness
%1 = affine.apply affine_map<(d0) -> (d0)>(%0)
return
diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir
--- a/mlir/test/IR/invalid-ops.mlir
+++ b/mlir/test/IR/invalid-ops.mlir
@@ -2,7 +2,7 @@
func @dim(%arg : tensor<1x?xf32>) {
%c2 = constant 2 : index
- memref.dim %arg, %c2 : tensor<1x?xf32> // expected-error {{'memref.dim' op index is out of range}}
+ tensor.dim %arg, %c2 : tensor<1x?xf32> // expected-error {{'tensor.dim' op index is out of range}}
return
}
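Aside: together these IR tests pin down the new contract: `tensor.dim` accepts only tensor operands and `memref.dim` only memref operands; mixing them is a verifier error. A small sketch of the intended usage, not part of the patch (hypothetical function):

```mlir
func @dims(%t: tensor<4x?xf32>, %m: memref<4x?xf32>) -> (index, index) {
  %c1 = constant 1 : index
  %dt = tensor.dim %t, %c1 : tensor<4x?xf32>   // tensor operand only
  %dm = memref.dim %m, %c1 : memref<4x?xf32>   // memref operand only
  return %dt, %dm : index, index
}
```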
+// CHECK-DAG: %[[D0:.+]] = tensor.dim %[[ARG_1]], %[[C0]]
 // CHECK-DAG: %[[S0:.+]] = tensor.from_elements %[[D0]], %[[C5]]
 // CHECK-DAG: %[[D0_OUT:.+]] = tensor.extract %[[S0]][%[[C0]]]
-// CHECK-DAG: %[[D1:.+]] = memref.dim %[[ARG_0]], %[[C2]]
+// CHECK-DAG: %[[D1:.+]] = tensor.dim %[[ARG_0]], %[[C2]]
 // CHECK-DAG: %[[S1:.+]] = tensor.from_elements %[[C2]], %[[C3]], %[[D1]]
 // CHECK-DAG: %[[D1_OUT:.+]] = tensor.extract %[[S1]][%[[C2]]]
 // CHECK: return %[[D0_OUT]], %[[C5]], %[[C2]], %[[C3]], %[[D1_OUT]]
@@ -38,11 +38,11 @@
   %c2 = constant 2 : index
   %0:2 = "test.op_with_result_shape_per_dim_interface"(%arg0, %arg1)
       : (tensor<2x3x?xf32>, tensor<?x5xf32>) -> (tensor<?x5xf32>, tensor<2x3x?xf32>)
-  %1 = memref.dim %0#0, %c0 : tensor<?x5xf32>
-  %2 = memref.dim %0#0, %c1 : tensor<?x5xf32>
-  %3 = memref.dim %0#1, %c0 : tensor<2x3x?xf32>
-  %4 = memref.dim %0#1, %c1 : tensor<2x3x?xf32>
-  %5 = memref.dim %0#1, %c2 : tensor<2x3x?xf32>
+  %1 = tensor.dim %0#0, %c0 : tensor<?x5xf32>
+  %2 = tensor.dim %0#0, %c1 : tensor<?x5xf32>
+  %3 = tensor.dim %0#1, %c0 : tensor<2x3x?xf32>
+  %4 = tensor.dim %0#1, %c1 : tensor<2x3x?xf32>
+  %5 = tensor.dim %0#1, %c2 : tensor<2x3x?xf32>
   return %1, %2, %3, %4, %5 : index, index, index, index, index
 }
 // CHECK-LABEL: func @result_shape_per_dim(
@@ -52,8 +52,8 @@
 // CHECK-DAG: %[[C2:.+]] = constant 2 : index
 // CHECK-DAG: %[[C3:.+]] = constant 3 : index
 // CHECK-DAG: %[[C5:.+]] = constant 5 : index
-// CHECK-DAG: %[[D0:.+]] = memref.dim %[[ARG_1]], %[[C0]]
-// CHECK-DAG: %[[D1:.+]] = memref.dim %[[ARG_0]], %[[C2]]
+// CHECK-DAG: %[[D0:.+]] = tensor.dim %[[ARG_1]], %[[C0]]
+// CHECK-DAG: %[[D1:.+]] = tensor.dim %[[ARG_0]], %[[C2]]
 // CHECK: return %[[D0]], %[[C5]], %[[C2]], %[[C3]], %[[D1]]

 // -----

@@ -65,11 +65,11 @@
   %c2 = constant 2 : index
   %0:2 = "test.op_with_result_shape_and_per_dim_interface"(%arg0, %arg1)
       : (tensor<2x3x?xf32>, tensor<?x5xf32>) -> (tensor<?x5xf32>, tensor<2x3x?xf32>)
-  %1 = memref.dim %0#0, %c0 : tensor<?x5xf32>
-  %2 = memref.dim %0#0, %c1 : tensor<?x5xf32>
-  %3 = memref.dim %0#1, %c0 : tensor<2x3x?xf32>
-  %4 = memref.dim %0#1, %c1 : tensor<2x3x?xf32>
-  %5 = memref.dim %0#1, %c2 : tensor<2x3x?xf32>
+  %1 = tensor.dim %0#0, %c0 : tensor<?x5xf32>
+  %2 = tensor.dim %0#0, %c1 : tensor<?x5xf32>
+  %3 = tensor.dim %0#1, %c0 : tensor<2x3x?xf32>
+  %4 = tensor.dim %0#1, %c1 : tensor<2x3x?xf32>
+  %5 = tensor.dim %0#1, %c2 : tensor<2x3x?xf32>
   return %1, %2, %3, %4, %5 : index, index, index, index, index
 }
 // CHECK-LABEL: func @result_shape_and_per_dim(
@@ -79,10 +79,10 @@
 // CHECK-DAG: %[[C2:.+]] = constant 2 : index
 // CHECK-DAG: %[[C3:.+]] = constant 3 : index
 // CHECK-DAG: %[[C5:.+]] = constant 5 : index
-// CHECK-DAG: %[[D0:.+]] = memref.dim %[[ARG_1]], %[[C0]]
+// CHECK-DAG: %[[D0:.+]] = tensor.dim %[[ARG_1]], %[[C0]]
 // CHECK-DAG: %[[S0:.+]] = tensor.from_elements %[[D0]], %[[C5]]
 // CHECK-DAG: %[[D0_OUT:.+]] = tensor.extract %[[S0]][%[[C0]]]
-// CHECK-DAG: %[[D1:.+]] = memref.dim %[[ARG_0]], %[[C2]]
+// CHECK-DAG: %[[D1:.+]] = tensor.dim %[[ARG_0]], %[[C2]]
 // CHECK-DAG: %[[S1:.+]] = tensor.from_elements %[[C2]], %[[C3]], %[[D1]]
 // CHECK-DAG: %[[D1_OUT:.+]] = tensor.extract %[[S1]][%[[C2]]]
 // CHECK: return %[[D0_OUT]], %[[C5]], %[[C2]], %[[C3]], %[[D1_OUT]]
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -29,7 +29,7 @@

   // CHECK: %c4 = constant 4 : index
   %c1 = constant 1 : index
-  %0 = memref.dim %arg0, %c1 : tensor<8x4xf32>
+  %0 = tensor.dim %arg0, %c1 : tensor<8x4xf32>

   // CHECK-NEXT: return %c4
   return %0 : index
@@ -53,7 +53,7 @@
 // CHECK-LABEL: func @trivial_dce
 func @trivial_dce(%arg0: tensor<8x4xf32>) {
   %c1 = constant 1 : index
-  %0 = memref.dim %arg0, %c1 : tensor<8x4xf32>
+  %0 = tensor.dim %arg0, %c1 : tensor<8x4xf32>
   // CHECK-NEXT: return
   return
 }
diff --git a/mlir/test/Transforms/constant-fold.mlir b/mlir/test/Transforms/constant-fold.mlir
--- a/mlir/test/Transforms/constant-fold.mlir
+++ b/mlir/test/Transforms/constant-fold.mlir
@@ -548,7 +548,7 @@

   // CHECK:[[C4:%.+]] = constant 4 : index
   %c1 = constant 1 : index
-  %0 = memref.dim %x, %c1 : tensor<8x4xf32>
+  %0 = tensor.dim %x, %c1 : tensor<8x4xf32>

   // CHECK-NEXT: return [[C4]]
   return %0 : index
diff --git a/mlir/test/Transforms/pipeline-data-transfer.mlir b/mlir/test/Transforms/pipeline-data-transfer.mlir
--- a/mlir/test/Transforms/pipeline-data-transfer.mlir
+++ b/mlir/test/Transforms/pipeline-data-transfer.mlir
@@ -320,18 +320,14 @@

 // -----

 // CHECK-LABEL: func @dynamic_shape_dma_buffer
-func @dynamic_shape_dma_buffer(%arg0: memref<512 x 32 x f32>) {
-  %c32 = constant 32 : index
+func @dynamic_shape_dma_buffer(%arg0: memref<512 x 32 x f32>, %Av: memref<? x ? x f32, 2>) {
   %num_elt = constant 512 : index
   %zero = constant 0 : index
-
-  %Av = memref.alloc(%c32, %c32) : memref<? x ? x f32, 2>
   %tag = memref.alloc() : memref<1 x i32>

 // Double buffering for dynamic shaped buffer.
-// CHECK: memref.alloc(%{{.*}}, %{{.*}}) : memref<? x ? x f32, 2>
-// CHECK-NEXT: %[[C0:.*]] = constant 0 : index
-// CHECK-NEXT: memref.dim %{{.*}}, %[[C0]] : memref<? x ? x f32, 2>
+// Note: Cannot capture C0 because there are multiple C0 constants in the IR.
+// CHECK: memref.dim %{{.*}}, %{{.*}} : memref<? x ? x f32, 2>
 // CHECK-NEXT: %[[C1:.*]] = constant 1 : index
 // CHECK-NEXT: memref.dim %{{.*}}, %[[C1]] : memref<? x ? x f32, 2>
 // CHECK-NEXT: memref.alloc(%{{.*}}, %{{.*}}) : memref<2x?x?xf32, 2>
@@ -342,7 +338,6 @@
       memref<? x ? x f32, 2>, memref<1 x i32>
     affine.dma_wait %tag[%zero], %num_elt : memref<1 x i32>
   }
-  memref.dealloc %Av : memref<? x ? x f32, 2>
   return
 // CHECK-NEXT: affine.for %{{.*}} = 1 to 16 {
 // CHECK: affine.dma_start %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}}[%{{.*}} mod 2, 0, 0], %{{.*}}[%{{.*}} mod 2, 0], %{{.*}}
diff --git a/mlir/test/lib/Dialect/Test/TestDialect.cpp b/mlir/test/lib/Dialect/Test/TestDialect.cpp
--- a/mlir/test/lib/Dialect/Test/TestDialect.cpp
+++ b/mlir/test/lib/Dialect/Test/TestDialect.cpp
@@ -11,7 +11,6 @@
 #include "TestInterfaces.h"
 #include "TestTypes.h"
 #include "mlir/Dialect/DLTI/DLTI.h"
-#include "mlir/Dialect/MemRef/IR/MemRef.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/IR/BuiltinOps.h"
@@ -799,7 +798,7 @@
     OpBuilder &builder, ValueRange operands,
     llvm::SmallVectorImpl<Value> &shapes) {
   shapes = SmallVector<Value, 1>{
-      builder.createOrFold<memref::DimOp>(getLoc(), operands.front(), 0)};
+      builder.createOrFold<tensor::DimOp>(getLoc(), operands.front(), 0)};
   return success();
 }
@@ -813,7 +812,7 @@
         llvm::seq<int64_t>(
            0, operand.getType().cast<RankedTensorType>().getRank()),
        [&](int64_t dim) -> Value {
-         return builder.createOrFold<memref::DimOp>(loc, operand, dim);
+         return builder.createOrFold<tensor::DimOp>(loc, operand, dim);
        }));
    shapes.push_back(builder.create<tensor::FromElementsOp>(
        getLoc(), builder.getIndexType(), currShape));
@@ -832,7 +831,7 @@
        llvm::seq<int64_t>(
            0, operand.getType().cast<RankedTensorType>().getRank()),
        [&](int64_t dim) -> Value {
-         return builder.createOrFold<memref::DimOp>(loc, operand, dim);
+         return builder.createOrFold<tensor::DimOp>(loc, operand, dim);
        }));
    shapes.emplace_back(std::move(currShape));
  }
@@ -849,7 +848,7 @@
        llvm::seq<int64_t>(
            0, operand.getType().cast<RankedTensorType>().getRank()),
        [&](int64_t dim) -> Value {
-         return builder.createOrFold<memref::DimOp>(loc, operand, dim);
+         return builder.createOrFold<tensor::DimOp>(loc, operand, dim);
        }));
    shapes.push_back(builder.create<tensor::FromElementsOp>(
        getLoc(), builder.getIndexType(), currShape));
@@ -868,7 +867,7 @@
        llvm::seq<int64_t>(
            0, operand.getType().cast<RankedTensorType>().getRank()),
        [&](int64_t dim) -> Value {
-         return builder.createOrFold<memref::DimOp>(loc, operand, dim);
+         return builder.createOrFold<tensor::DimOp>(loc, operand, dim);
        }));
    shapes.emplace_back(std::move(currShape));
  }
diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -7,9 +7,9 @@
 //===----------------------------------------------------------------------===//

 #include "TestDialect.h"
-#include "mlir/Dialect/MemRef/IR/MemRef.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/Dialect/StandardOps/Transforms/FuncConversions.h"
+#include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/IR/Matchers.h"
 #include "mlir/Pass/Pass.h"
 #include "mlir/Transforms/DialectConversion.h"
@@ -159,7 +159,7 @@
 struct TestReturnTypeDriver
     : public PassWrapper<TestReturnTypeDriver, FunctionPass> {
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<memref::MemRefDialect>();
+    registry.insert<tensor::TensorDialect>();
   }
   StringRef getArgument() const final { return "test-return-type"; }
   StringRef getDescription() const final { return "Run return type functions"; }