diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.td
@@ -315,55 +315,54 @@
    // Builders for a contracting reshape whose result type is computed from
    // `src` and `reassociation`.
    OpBuilder<(ins "Value":$src,
-      "ArrayRef<ReassociationExprs>":$reassociation,
+      "ArrayRef<ReassociationIndices>":$reassociation,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
    OpBuilder<(ins "Value":$src,
-      "ArrayRef<ReassociationIndices>":$reassociation,
+      "ArrayRef<ReassociationExprs>":$reassociation,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs),
    [{
      auto reassociationMaps =
-          convertReassociationIndicesToMaps($_builder, reassociation);
+          convertReassociationMapsToIndices($_builder, reassociation);
      build($_builder, $_state, src, reassociationMaps, attrs);
    }]>,
    // Builders for a reshape whose result type is passed explicitly. This may
    // be either a contracting or expanding reshape.
    OpBuilder<(ins "Type":$resultType, "Value":$src,
-      "ArrayRef<ReassociationExprs>":$reassociation,
+      "ArrayRef<ReassociationIndices>":$reassociation,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
    OpBuilder<(ins "Type":$resultType, "Value":$src,
-      "ArrayRef<ReassociationIndices>":$reassociation,
+      "ArrayRef<ReassociationExprs>":$reassociation,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs),
    [{
      auto reassociationMaps =
-          convertReassociationIndicesToMaps($_builder, reassociation);
+          convertReassociationMapsToIndices($_builder, reassociation);
      build($_builder, $_state, resultType, src, reassociationMaps, attrs);
    }]>
  ];

  code commonExtraClassDeclaration = [{
    static StringRef getReassociationAttrName() { return "reassociation"; }
-    SmallVector<AffineMap, 4> getReassociationMaps() {
-      return llvm::to_vector<4>(llvm::map_range(reassociation(), [
-      ](Attribute a) { return a.cast<AffineMapAttr>().getValue(); }));
-    }
-    SmallVector<ReassociationExprs, 4> getReassociationExprs() {
-      return
-          llvm::to_vector<4>(llvm::map_range(reassociation(),
-                                             [](Attribute a) {
-                                               return llvm::to_vector<2>(
-                                                   a.cast<AffineMapAttr>().getValue().getResults());
-                                             }));
-    }
-  }];
-  let assemblyFormat = [{
-    $src $reassociation attr-dict `:` type($src) `into` type(results)
+    SmallVector<AffineMap, 4> getReassociationMaps();
+    SmallVector<ReassociationExprs, 4> getReassociationExprs();
+    SmallVector<ReassociationIndices, 4> getReassociationIndices() {
+      SmallVector<ReassociationIndices, 4> reassociationIndices;
+      for (auto attr : reassociation())
+        reassociationIndices.push_back(llvm::to_vector<2>(
+            llvm::map_range(attr.cast<ArrayAttr>(), [&](Attribute indexAttr) {
+              return indexAttr.cast<IntegerAttr>().getInt();
+            })));
+      return reassociationIndices;
+    };
  }];
}

+def IndexListArrayAttr :
+    TypedArrayAttrBase<I64ArrayAttr, "Array of 64-bit integer array attributes">;
+
def Linalg_ReshapeOp : Linalg_ReshapeLikeOp<"reshape",
    [DeclareOpInterfaceMethods<ViewLikeOpInterface>]>,
-    Arguments<(ins AnyStridedMemRef:$src, AffineMapArrayAttr:$reassociation)>,
+    Arguments<(ins AnyStridedMemRef:$src, IndexListArrayAttr:$reassociation)>,
    Results<(outs AnyStridedMemRef:$result)> {
  let summary = "linalg.reshape produces a new view into the operand view";
  let description = [{
@@ -373,9 +372,7 @@
    and copies.

    A reassociation is defined as a continuous grouping of dimensions and is
-    represented with an affine map array attribute. In the future,
-    non-continuous groupings may be allowed (i.e. permutations, reindexings
-    etc).
+    represented with an array of I64ArrayAttr attributes.
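+    For example, the reassociation `[[0, 1], [2]]` groups the first two
+    dimensions of the higher-rank type into a single dimension and carries
+    the third dimension over unchanged.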
    For now, it is assumed that either:
      1. a reassociation produces and consumes contiguous MemRefType or,
@@ -401,13 +398,13 @@
    ```mlir
    // Dimension collapse (i, j) -> i' and k -> k'
-    %1 = linalg.reshape %0 [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
+    %1 = linalg.reshape %0 [[0, 1], [2]] :
      memref<?x?x?xf32, stride_spec> into memref<?x?xf32, stride_spec_2>
    ```

    ```mlir
    // Dimension expansion i -> (i', j') and (k) -> (k')
-    %1 = linalg.reshape %0 [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
+    %1 = linalg.reshape %0 [[0, 1], [2]] :
      memref<?x?xf32, stride_spec> into memref<?x?x?xf32, stride_spec_2>
    ```
  }];
@@ -417,6 +414,8 @@
  }];
  let hasFolder = 1;
  let hasCanonicalizer = 1;
+  let printer = [{ return ::print(p, *this); }];
+  let parser = [{ return ::parseReshapeLikeOp(parser, result); }];
}

def Linalg_TensorReshapeOp : Linalg_ReshapeLikeOp<
@@ -424,7 +423,7 @@
    "tensor_reshape",
    [DeclareOpInterfaceMethods<InferShapedTypeOpInterface>]>,
    Arguments<(ins AnyTensor:$src,
-                   AffineMapArrayAttr:$reassociation)>,
+                   IndexListArrayAttr:$reassociation)>,
    Results<(outs AnyTensor:$result)> {
  let summary = "linalg.tensor_reshape produces a new reshaped tensor.";
  let description = [{
@@ -432,9 +431,7 @@
    reassociation of the original `src`.

    A reassociation is defined as a continuous grouping of dimensions and is
-    represented with an affine map array attribute. In the future,
-    non-continuous groupings may be allowed (i.e. permutations, reindexings
-    etc).
+    represented with an array of I64ArrayAttr attributes.

    A reshape may either collapse or expand dimensions, depending on the
    relationship between source and target tensor ranks. The verification rule
@@ -453,14 +450,14 @@
    ```mlir
    // Dimension collapse (i, j) -> i' and k -> k'
-    %b = linalg.tensor_reshape %a [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
-      tensor<?x?x?xf32> into tensor<?x?xf32>
+    %b = linalg.tensor_reshape %a [[0, 1], [2]]
+      : tensor<?x?x?xf32> into tensor<?x?xf32>
    ```

    ```mlir
    // Dimension expansion i -> (i', j') and (k) -> (k')
-    %b = linalg.tensor_reshape %a [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
-      tensor<?x?xf32> into tensor<?x?x?xf32>
+    %b = linalg.tensor_reshape %a [[0, 1], [2]]
+      : tensor<?x?xf32> into tensor<?x?x?xf32>
    ```
  }];
  let extraClassDeclaration = commonExtraClassDeclaration # [{
@@ -473,6 +470,8 @@
  }];
  let hasFolder = 1;
  let hasCanonicalizer = 1;
+  let printer = [{ return ::print(p, *this); }];
+  let parser = [{ return ::parseReshapeLikeOp(parser, result); }];
}

def Linalg_YieldOp : Linalg_Op<"yield", [NoSideEffect, ReturnLike, Terminator]>,
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1160,6 +1160,89 @@
  return reassociationMap;
}

+template <typename ReshapeLikeOp>
+static void print(OpAsmPrinter &p, ReshapeLikeOp op) {
+  p << op.getOperationName() << ' ' << op.src() << " [";
+
+  llvm::interleaveComma(op.reassociation(), p, [&](const Attribute &attr) {
+    p << '[';
+    auto arrayAttr = attr.template cast<ArrayAttr>();
+    llvm::interleaveComma(arrayAttr, p, [&](const Attribute &attr) {
+      p << attr.cast<IntegerAttr>().getInt();
+    });
+    p << ']';
+  });
+
+  p << "] ";
+  p.printOptionalAttrDict(op->getAttrs(),
+                          /*elidedAttrs=*/{op.getReassociationAttrName()});
+  p << ": " << op.src().getType() << " into " << op.getType();
+}
+
+static void print(OpAsmPrinter &p, linalg::ReshapeOp op) {
+  print<linalg::ReshapeOp>(p, op);
+}
+
+static void print(OpAsmPrinter &p, linalg::TensorReshapeOp op) {
+  print<linalg::TensorReshapeOp>(p, op);
+}
+
+static ParseResult parseReshapeLikeOp(OpAsmParser &parser,
+                                      OperationState &result) {
+  // Parse the operand.
+  OpAsmParser::OperandType src;
+  if (parser.parseOperand(src))
+    return failure();
+
+  // Parse reassociation indices.
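+  // The expected form is a bracketed list of integer lists, e.g.
+  // `[[0, 1], [2]]`, with one inner list per group of dimensions.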
+  Builder &b = parser.getBuilder();
+  SmallVector<Attribute, 4> reassociation;
+  if (parser.parseLSquare())
+    return failure();
+
+  while (true) {
+    if (succeeded(parser.parseOptionalRSquare()))
+      break;
+    if (parser.parseLSquare())
+      return failure();
+    SmallVector<int64_t, 2> indices;
+    while (true) {
+      int64_t index;
+      if (parser.parseInteger(index))
+        return failure();
+      indices.push_back(index);
+
+      if (succeeded(parser.parseOptionalComma()))
+        continue;
+      if (failed(parser.parseRSquare()))
+        return failure();
+      break;
+    }
+    reassociation.push_back(b.getI64ArrayAttr(indices));
+    if (succeeded(parser.parseOptionalComma()))
+      continue;
+    if (failed(parser.parseRSquare()))
+      return failure();
+    break;
+  }
+
+  result.addAttribute(ReshapeOp::getReassociationAttrName(),
+                      b.getArrayAttr(reassociation));
+
+  // Parse optional attributes.
+  if (parser.parseOptionalAttrDict(result.attributes))
+    return failure();
+
+  // Parse types.
+  Type srcType;
+  Type resultType;
+  if (parser.parseColon() || parser.parseType(srcType) ||
+      parser.resolveOperand(src, srcType, result.operands) ||
+      parser.parseKeyword("into") || parser.parseType(resultType))
+    return failure();
+  result.addTypes(resultType);
+  return success();
+}
+
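+// For example, the printer/parser pair above round-trips reshapes such as:
+//   %1 = linalg.reshape %0 [[0, 1], [2]] : memref<?x?x?xf32> into memref<?x?xf32>
+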
/// Collapse reassociation maps that are used in pair of reshape ops where one
/// is a producer and other is the consumer. Only valid to use this method when
/// both the producer and consumer are collapsing dimensions or both are
@@ -1195,18 +1278,16 @@
    return llvm::None;

  unsigned currDim = 0;
-  ReassociationIndices reassociations;
  SmallVector<ReassociationIndices> reassociationMaps;
  for (AffineMap rhs : mapsConsumer) {
+    ReassociationIndices reassociations;
    for (AffineExpr rhsExpr : rhs.getResults()) {
      AffineDimExpr dimExpr = rhsExpr.cast<AffineDimExpr>();
      for (int i = 0, e = mapsProducer[dimExpr.getPosition()].getNumResults();
-           i < e; ++i) {
+           i < e; ++i)
        reassociations.push_back(currDim++);
-      }
    }
-    reassociationMaps.emplace_back(ReassociationIndices{});
-    std::swap(reassociationMaps.back(), reassociations);
+    reassociationMaps.push_back(std::move(reassociations));
  }
  return reassociationMaps;
}
@@ -1401,14 +1482,6 @@
      MemRefType::Builder(type).setShape(newSizes).setAffineMaps({layout}));
}

-/// Helper functions assert Attribute of the proper type in attr and returns the
-/// corresponding vector.
-/// TODO: this should be evolved into a generic
-/// `getRangeOfType<T>(ArrayAttr attrs)` that does not copy.
-static SmallVector<AffineMap, 8> getAffineMaps(ArrayAttr attrs) {
-  return llvm::to_vector<8>(llvm::map_range(
-      attrs, [](Attribute a) { return a.cast<AffineMapAttr>().getValue(); }));
-}

template <typename AffineExprTy>
unsigned getMaxPosOfType(ArrayRef<ReassociationExprs> exprArrays) {
@@ -1438,8 +1511,21 @@
  return maps;
}

+static SmallVector<ReassociationIndices, 2> convertReassociationMapsToIndices(
+    OpBuilder &b, ArrayRef<ReassociationExprs> reassociationExprs) {
+  SmallVector<ReassociationIndices, 2> reassociationIndices;
+  for (const auto &exprs : reassociationExprs) {
+    ReassociationIndices indices;
+    indices.reserve(exprs.size());
+    for (const auto &expr : exprs)
+      indices.push_back(expr.cast<AffineDimExpr>().getPosition());
+    reassociationIndices.push_back(indices);
+  }
+  return reassociationIndices;
+}
+
static SmallVector<SmallVector<AffineExpr, 2>, 2>
-convertReassociationIndicesToMaps(
+convertReassociationIndicesToExprs(
    OpBuilder &b, ArrayRef<ReassociationIndices> reassociationIndices) {
  SmallVector<SmallVector<AffineExpr, 2>, 2> reassociationMaps;
  for (const auto &indices : reassociationIndices) {
@@ -1452,6 +1538,20 @@
  return reassociationMaps;
}

+SmallVector<AffineMap, 4> ReshapeOp::getReassociationMaps() {
+  return getSymbolLessAffineMaps(getReassociationExprs());
+}
+SmallVector<ReassociationExprs, 4> ReshapeOp::getReassociationExprs() {
+  OpBuilder b(this->getContext());
+  return convertReassociationIndicesToExprs(b, getReassociationIndices());
+}
+SmallVector<AffineMap, 4> TensorReshapeOp::getReassociationMaps() {
+  return getSymbolLessAffineMaps(getReassociationExprs());
+}
+SmallVector<ReassociationExprs, 4> TensorReshapeOp::getReassociationExprs() {
+  OpBuilder b(this->getContext());
+  return convertReassociationIndicesToExprs(b, getReassociationIndices());
+}
/// For reshape op compute the shape at dimension `dimIndex` of the output in
/// terms of shape of the `src`, when the reshape op is a collapsing
/// operation. It is the product of the shape of the collapsed dimensions of the
@@ -1571,26 +1671,37 @@
      builder, loc, src, dstStaticShape, reassocation);
}

-void mlir::linalg::ReshapeOp::build(OpBuilder &b, OperationState &result,
-                                    Value src,
-                                    ArrayRef<ReassociationExprs> reassociation,
-                                    ArrayRef<NamedAttribute> attrs) {
-  auto maps = getSymbolLessAffineMaps(reassociation);
+static ArrayAttr
+getReassociationIndicesAttribute(OpBuilder &b,
+                                 ArrayRef<ReassociationIndices> reassociation) {
+  SmallVector<Attribute, 4> reassociationAttr =
+      llvm::to_vector<4>(llvm::map_range(
+          reassociation, [&](ReassociationIndices indices) -> Attribute {
+            return b.getI64ArrayAttr(indices).cast<Attribute>();
+          }));
+  return b.getArrayAttr(reassociationAttr);
+}
+
+void mlir::linalg::ReshapeOp::build(
+    OpBuilder &b, OperationState &result, Value src,
+    ArrayRef<ReassociationIndices> reassociation,
+    ArrayRef<NamedAttribute> attrs) {
  auto memRefType = src.getType().cast<MemRefType>();
-  auto resultType = computeReshapeCollapsedType(memRefType, maps);
+  auto resultType = computeReshapeCollapsedType(
+      memRefType, getSymbolLessAffineMaps(
+                      convertReassociationIndicesToExprs(b, reassociation)));
  build(b, result, resultType, src, attrs);
  result.addAttribute(ReshapeOp::getReassociationAttrName(),
-                      b.getAffineMapArrayAttr(maps));
+                      getReassociationIndicesAttribute(b, reassociation));
}

-void mlir::linalg::ReshapeOp::build(OpBuilder &b, OperationState &result,
-                                    Type resultType, Value src,
-                                    ArrayRef<ReassociationExprs> reassociation,
-                                    ArrayRef<NamedAttribute> attrs) {
-  auto maps = getSymbolLessAffineMaps(reassociation);
+void mlir::linalg::ReshapeOp::build(
+    OpBuilder &b, OperationState &result, Type resultType, Value src,
+    ArrayRef<ReassociationIndices> reassociation,
+    ArrayRef<NamedAttribute> attrs) {
  build(b, result, resultType, src, attrs);
  result.addAttribute(ReshapeOp::getReassociationAttrName(),
-                      b.getAffineMapArrayAttr(maps));
+                      getReassociationIndicesAttribute(b, reassociation));
}

Value mlir::linalg::ReshapeOp::getViewSource() { return src(); }
@@ -1670,16 +1781,15 @@
  // sizes 1.
  if (llvm::any_of(expandedType.getShape(),
                   [](int64_t dim) -> bool { return dim != 1; }))
-      return op.emitOpError(
-          "invalid to reshape tensor/memref with non-unit extent dimensions to "
-          "zero-rank tensor/memref");
+      return op.emitOpError("invalid to reshape tensor/memref with non-unit "
+                            "extent dimensions to zero-rank tensor/memref");
    return success();
  }
  if (collapsedRank != op.reassociation().size())
    return op.emitOpError("expected rank of the collapsed type(")
           << collapsedRank << ") to be the number of reassociation maps("
           << op.reassociation().size() << ")";
-  auto maps = getAffineMaps(op.reassociation());
+  auto maps = op.getReassociationMaps();
  for (auto it : llvm::enumerate(maps))
    if (it.value().getNumDims() != expandedRank)
      return op.emitOpError("expected reassociation map #")
@@ -1696,7 +1806,7 @@
  MemRefType expandedType, collapsedType;
  if (failed(verifyReshapeLikeTypes(op, expandedType, collapsedType)))
    return failure();
-  auto maps = getAffineMaps(op.reassociation());
+  auto maps = op.getReassociationMaps();
  MemRefType expectedType = computeReshapeCollapsedType(expandedType, maps);
  if (collapsedType != expectedType)
    return op.emitOpError("expected collapsed type to be ")
@@ -1743,31 +1853,32 @@
void mlir::linalg::TensorReshapeOp::build(
    OpBuilder &b, OperationState &result, Value src,
-    ArrayRef<ReassociationExprs> reassociation,
+    ArrayRef<ReassociationIndices> reassociation,
    ArrayRef<NamedAttribute> attrs) {
-  auto maps = getSymbolLessAffineMaps(reassociation);
  auto resultType = computeTensorReshapeCollapsedType(
-      src.getType().cast<RankedTensorType>(), maps);
+      src.getType().cast<RankedTensorType>(),
+      getSymbolLessAffineMaps(
+          convertReassociationIndicesToExprs(b, reassociation)));
  build(b, result, resultType, src, attrs);
-  result.addAttribute(TensorReshapeOp::getReassociationAttrName(),
-                      b.getAffineMapArrayAttr(maps));
+  result.addAttribute(TensorReshapeOp::getReassociationAttrName(),
+                      getReassociationIndicesAttribute(b, reassociation));
}

void mlir::linalg::TensorReshapeOp::build(
    OpBuilder &b, OperationState &result, Type resultType, Value src,
-    ArrayRef<ReassociationExprs> reassociation,
+    ArrayRef<ReassociationIndices> reassociation,
    ArrayRef<NamedAttribute> attrs) {
-  auto maps = getSymbolLessAffineMaps(reassociation);
  build(b, result, resultType, src, attrs);
-  result.addAttribute(TensorReshapeOp::getReassociationAttrName(),
-                      b.getAffineMapArrayAttr(maps));
+  result.addAttribute(TensorReshapeOp::getReassociationAttrName(),
+                      getReassociationIndicesAttribute(b, reassociation));
}

static LogicalResult verify(TensorReshapeOp op) {
  RankedTensorType expandedType, collapsedType;
  if (failed(verifyReshapeLikeTypes(op, expandedType, collapsedType)))
    return failure();
+
-  auto maps = getAffineMaps(op.reassociation());
+  auto maps = op.getReassociationMaps();
  RankedTensorType expectedType =
      computeTensorReshapeCollapsedType(expandedType, maps);
  if (collapsedType != expectedType)
    return op.emitOpError("expected collapsed type to be ")
@@ -2397,8 +2508,8 @@
  if (oType.getRank() != iType.getRank() || oType.getRank() != fType.getRank())
    return op.emitOpError("expects memref ranks to match");
  if (auto strides = op.strides()) {
-    if (failed(
-            verifyStrideOrDilation(op, strides->getValue(), /*isStride=*/true)))
+    if (failed(verifyStrideOrDilation(op, strides->getValue(),
+                                      /*isStride=*/true)))
      return failure();
  }
  if (auto dilations = op.dilations()) {
@@ -2422,8 +2533,8 @@
    return op.emitOpError("expects memref ranks to match");

  if (auto strides = op.strides()) {
-    if (failed(
-            verifyStrideOrDilation(op, strides->getValue(), /*isStride=*/true)))
+    if (failed(verifyStrideOrDilation(op, strides->getValue(),
+                                      /*isStride=*/true)))
      return failure();
  }
  if (auto dilations = op.dilations()) {
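As a quick, illustrative sketch of the new builder API (not part of the patch; it assumes an `OpBuilder b`, a `Location loc`, and a 3-D memref-typed `Value src` are already in scope), a collapsing reshape can now be built directly from indices:

```cpp
// Hypothetical usage sketch for the ArrayRef<ReassociationIndices> builder
// added above; the result type is inferred from `src` and the reassociation.
SmallVector<ReassociationIndices, 2> reassociation;
reassociation.push_back(ReassociationIndices{0, 1}); // collapse dims 0 and 1
reassociation.push_back(ReassociationIndices{2});    // keep dim 2
auto collapsed = b.create<linalg::ReshapeOp>(loc, src, reassociation);
```

The affine-map form remains available through the `ArrayRef<ReassociationExprs>` overloads, which convert to indices via `convertReassociationMapsToIndices` before delegating to this builder.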
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -327,7 +327,7 @@
      reassociations.push_back(getAffineDimExpr(dim, context));
    }
    reassociationMaps.push_back(AffineMapAttr::get(AffineMap::get(
-        origRank, /*numSymbols = */ 0, reassociations, context)));
+        origRank, /*symbolCount = */ 0, reassociations, context)));
    reassociations.clear();
    ++dim;
  }
@@ -341,6 +341,15 @@

namespace {

+SmallVector<ReassociationExprs, 2>
+convertAffineMapArrayToExprs(ArrayAttr affineMapArrayAttr) {
+  SmallVector<ReassociationExprs, 2> reassociationExprs;
+  for (auto attr : affineMapArrayAttr)
+    reassociationExprs.push_back(
+        llvm::to_vector<4>(attr.cast<AffineMapAttr>().getValue().getResults()));
+  return reassociationExprs;
+}
+
/// Pattern to replace tensors operands/results that are unit extents.
template <typename GenericOpTy>
struct ReplaceUnitExtentTensors : public OpRewritePattern<GenericOpTy> {
@@ -387,7 +396,7 @@
      else
        res.push_back(rewriter.create<linalg::TensorReshapeOp>(
            loc, newInputOutputTypes[flattenedIdx], operand.value(),
-            reassociationMaps[flattenedIdx]));
+            convertAffineMapArrayToExprs(reassociationMaps[flattenedIdx])));
      ++flattenedIdx;
    }
    return res;
  }
@@ -419,7 +428,8 @@
                                .template cast<RankedTensorType>();
      if (origResultType != result.value().getType())
        resultReplacements.push_back(rewriter.create<linalg::TensorReshapeOp>(
-            loc, origResultType, result.value(), reassociationMaps[index]));
+            loc, origResultType, result.value(),
+            convertAffineMapArrayToExprs(reassociationMaps[index])));
      else
        resultReplacements.push_back(result.value());
    }
diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
@@ -73,15 +73,15 @@

// -----

-// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
-// CHECK: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0)>
+// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
+// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1)>
+// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0)>

// CHECK-LABEL: @test_multibroadcast
func @test_multibroadcast(%arg0: tensor<1x3xf32>, %arg1: tensor<2x1xf32>) -> tensor<2x3xf32> {
  // CHECK: [[INIT:%.+]] = linalg.init_tensor [2, 3] : tensor<2x3xf32>
-  // CHECK: [[RESHAPE1:%.+]] = linalg.tensor_reshape %arg0 [#map0]
-  // CHECK: [[RESHAPE2:%.+]] = linalg.tensor_reshape %arg1 [#map0]
+  // CHECK: [[RESHAPE1:%.+]] = linalg.tensor_reshape %arg0 {{\[}}[0, 1]]
+  // CHECK: [[RESHAPE2:%.+]] = linalg.tensor_reshape %arg1 {{\[}}[0, 1]]
  // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP0]]], iterator_types = ["parallel", "parallel"]} ins([[RESHAPE1]], [[RESHAPE2]] : tensor<3xf32>, tensor<2xf32>) outs([[INIT]] : tensor<2x3xf32>) {
  // CHECK: ^bb0(%arg2: f32, %arg3: f32, %arg4: f32):
  // CHECK: [[ELEMENT:%.+]] = addf %arg2, %arg3 : f32
@@ -418,10 +418,9 @@

// -----

-// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-LABEL: @test_reshape_downrank
func @test_reshape_downrank(%arg0: tensor<2x3xf32>) -> tensor<6xf32> {
-  // CHECK: [[RESHAPE:%.+]] = linalg.tensor_reshape %arg0 [#[[$MAP0]]]
+  // CHECK: [[RESHAPE:%.+]] = linalg.tensor_reshape %arg0 {{\[}}[0, 1]]
  %0 = "tosa.reshape"(%arg0) {new_shape = [6]} : (tensor<2x3xf32>) -> tensor<6xf32>
  // CHECK: return [[RESHAPE]]
  return %0 : tensor<6xf32>
}
@@ -429,10 +428,9 @@
//
----- -// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)> // CHECK-LABEL: @test_reshape_uprank func @test_reshape_uprank(%arg0: tensor<6xf32>) -> tensor<2x3xf32> { - // CHECK: [[RESHAPE:%.+]] = linalg.tensor_reshape %arg0 [#[[$MAP0]]] + // CHECK: [[RESHAPE:%.+]] = linalg.tensor_reshape %arg0 {{\[}}[0, 1]] %0 = "tosa.reshape"(%arg0) {new_shape = [2, 3]} : (tensor<6xf32>) -> tensor<2x3xf32> // CHECK: return [[RESHAPE]] return %0 : tensor<2x3xf32> @@ -440,25 +438,21 @@ // ----- -// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)> // CHECK-LABEL: @test_reshape_samerank func @test_reshape_samerank(%arg0: tensor<3x2xf32>) -> tensor<2x3xf32> { - // CHECK: [[RESHAPE1:%.+]] = linalg.tensor_reshape %arg0 [#[[$MAP0]]] - // CHECK: [[RESHAPE2:%.+]] = linalg.tensor_reshape [[RESHAPE1]] [#[[$MAP0]]] + // CHECK-SAME: (%[[ARG0:.*]]: tensor<3x2xf32>) + // CHECK-NEXT: %[[RESHAPE1:.*]] = linalg.tensor_reshape %[[ARG0]] {{\[}}[0, 1]] + // CHECK-NEXT: %[[RESHAPE2:.*]] = linalg.tensor_reshape %[[RESHAPE1]] {{\[}}[0, 1]] %0 = "tosa.reshape"(%arg0) {new_shape = [2, 3]} : (tensor<3x2xf32>) -> tensor<2x3xf32> - // CHECK: return [[RESHAPE2]] + // CHECK-NEXT: return %[[RESHAPE2]] return %0 : tensor<2x3xf32> } // ----- -// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)> -// CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3)> -// CHECK: #[[$MAP2:.*]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d4, d5)> - // CHECK-LABEL: @test_reshape_downrank_6D func @test_reshape_downrank_6D(%arg0: tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> { - // CHECK: linalg.tensor_reshape %arg0 [#[[$MAP0]], #[[$MAP1]], #[[$MAP2]]] + // CHECK: linalg.tensor_reshape %arg0 {{\[}}[0, 1, 2], [3], [4, 5]] %0 = "tosa.reshape"(%arg0) {new_shape = [2, 3]} : (tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> return %0 : tensor<6x5x77xf32> } @@ -496,9 +490,9 @@ // ----- -// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)> -// CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1)> -// CHECK: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0)> +// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)> +// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1)> +// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0)> // CHECK-LABEL: @reduce_float // CHECK-SAME: [[ARG0:%.+]]: tensor<5x4xf32> @@ -510,7 +504,7 @@ // CHECK: ^bb0(%arg1: f32, %arg2: f32) // CHECK: [[RES:%.+]] = addf %arg1, %arg2 : f32 // CHECK: linalg.yield [[RES]] : f32 - // CHECK: linalg.tensor_reshape [[GENERIC]] [#map0] : tensor<4xf32> into tensor<1x4xf32> + // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1]] : tensor<4xf32> into tensor<1x4xf32> %0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor<5x4xf32>) -> tensor<1x4xf32> // CHECK: [[INIT:%.+]] = linalg.init_tensor [5] @@ -520,7 +514,7 @@ // CHECK: ^bb0(%arg1: f32, %arg2: f32) // CHECK: [[RES:%.+]] = addf %arg1, %arg2 : f32 // CHECK: linalg.yield [[RES]] : f32 - // CHECK: linalg.tensor_reshape [[GENERIC]] [#map0] : tensor<5xf32> into tensor<5x1xf32> + // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1]] : tensor<5xf32> into tensor<5x1xf32> %1 = "tosa.reduce_sum"(%arg0) {axis = 1 : i64} : (tensor<5x4xf32>) -> tensor<5x1xf32> // CHECK: constant 1.0 @@ -561,7 +555,7 @@ // CHECK: ^bb0(%arg1: i32, %arg2: i32) // CHECK: [[RES:%.+]] = addi %arg1, %arg2 : i32 // CHECK: linalg.yield [[RES]] : i32 - // CHECK: linalg.tensor_reshape [[GENERIC]] [#map0] : tensor<4xi32> into tensor<1x4xi32> + // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1]] : tensor<4xi32> into 
tensor<1x4xi32> %0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor<5x4xi32>) -> tensor<1x4xi32> // CHECK: [[INIT:%.+]] = linalg.init_tensor [5] @@ -571,7 +565,7 @@ // CHECK: ^bb0(%arg1: i32, %arg2: i32) // CHECK: [[RES:%.+]] = addi %arg1, %arg2 : i32 // CHECK: linalg.yield [[RES]] : i32 - // CHECK: linalg.tensor_reshape [[GENERIC]] [#map0] : tensor<5xi32> into tensor<5x1xi32> + // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1]] : tensor<5xi32> into tensor<5x1xi32> %1 = "tosa.reduce_sum"(%arg0) {axis = 1 : i64} : (tensor<5x4xi32>) -> tensor<5x1xi32> // CHECK: constant 1 @@ -611,7 +605,7 @@ // CHECK: ^bb0(%arg1: i1, %arg2: i1) // CHECK: [[RES:%.+]] = and %arg1, %arg2 : i1 // CHECK: linalg.yield [[RES]] : i1 - // CHECK: linalg.tensor_reshape [[GENERIC]] [#map0] : tensor<4xi1> into tensor<1x4xi1> + // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1]] : tensor<4xi1> into tensor<1x4xi1> %0 = "tosa.reduce_all"(%arg0) {axis = 0 : i64} : (tensor<5x4xi1>) -> tensor<1x4xi1> // CHECK: constant false @@ -775,31 +769,27 @@ // ----- -// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, d3)> -// CHECK: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)> -// CHECK: #[[$MAP2:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> -// CHECK: #[[$MAP3:.*]] = affine_map<(d0, d1, d2, d3) -> (d3)> -// CHECK: #[[$MAP4:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1)> -// CHECK: #[[$MAP5:.*]] = affine_map<(d0, d1, d2, d3) -> (d2, d3)> +// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, d3)> +// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)> // CHECK-LABEL: @tile func @tile(%arg0 : tensor<2x3xi8>) -> () { // CHECK: [[INIT:%.+]] = linalg.init_tensor [2, 2, 1, 3] // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<2x3xi8>) outs([[INIT]] : tensor<2x2x1x3xi8>) // CHECK: linalg.yield %arg1 : i8 - // CHECK: linalg.tensor_reshape [[GENERIC]] [#[[$MAP2]], #[[$MAP3]]] + // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1, 2], [3]] %0 = "tosa.tile"(%arg0) {multiples = [2, 1]} : (tensor<2x3xi8>) -> (tensor<4x3xi8>) // CHECK: [[INIT:%.+]] = linalg.init_tensor [1, 2, 2, 3] // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<2x3xi8>) outs([[INIT]] : tensor<1x2x2x3xi8>) // CHECK: linalg.yield %arg1 : i8 - // CHECK: linalg.tensor_reshape [[GENERIC]] [#[[$MAP4]], #[[$MAP5]]] + // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1], [2, 3]] %1 = "tosa.tile"(%arg0) {multiples = [1, 2]} : (tensor<2x3xi8>) -> (tensor<2x6xi8>) // CHECK: [[INIT:%.+]] = linalg.init_tensor [5, 2, 7, 3] // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<2x3xi8>) outs([[INIT]] : tensor<5x2x7x3xi8>) // CHECK: linalg.yield %arg1 : i8 - // CHECK: linalg.tensor_reshape [[GENERIC]] [#[[$MAP4]], #[[$MAP5]]] + // CHECK: linalg.tensor_reshape [[GENERIC]] {{\[}}[0, 1], [2, 3]] %2 = "tosa.tile"(%arg0) {multiples = [5, 7]} : (tensor<2x3xi8>) -> (tensor<10x21xi8>) return @@ -1110,7 +1100,7 @@ // CHECK-DAG: %[[VAL7:.+]] = mulf %[[VAL5]], %[[STRIDEX]] // CHECK-DAG: %[[VAL8:.+]] = addf %[[VAL6]], %[[OFFSETY]] // CHECK-DAG: %[[VAL9:.+]] = addf %[[VAL7]], %[[OFFSETX]] - + // Find the remainder and integer component of the target index. 
// CHECK-DAG: %[[VAL10:.+]] = floorf %[[VAL8]]
@@ -1167,8 +1157,8 @@
  // CHECK: %[[VAL10:.+]] = floorf %[[VAL8:.+]]
  // CHECK: %[[VAL11:.+]] = floorf %[[VAL9:.+]]
-  // CHECK: %[[DY:.+]] = subf %[[VAL8:.+]], %[[VAL10]]
-  // CHECK: %[[DX:.+]] = subf %[[VAL9:.+]], %[[VAL11]]
+  // CHECK: %[[DY:.+]] = subf %[[VAL8:.+]], %[[VAL10]]
+  // CHECK: %[[DX:.+]] = subf %[[VAL9:.+]], %[[VAL11]]
  // CHECK: %[[Y0:.+]] = fptosi %[[VAL10]]
  // CHECK: %[[X0:.+]] = fptosi %[[VAL11]]
@@ -1212,7 +1202,7 @@
  // CHECK: %[[LOHI:.+]] = tensor.extract %arg0[%arg1, %[[YLOI]], %[[XHII]], %arg4]
  // CHECK: %[[HILO:.+]] = tensor.extract %arg0[%arg1, %[[YHII]], %[[XLOI]], %arg4]
  // CHECK: %[[HIHI:.+]] = tensor.extract %arg0[%arg1, %[[YHII]], %[[XHII]], %arg4]
-
+
  // Compute the bilinear interpolation.

  // CHECK: %[[ONE:.+]] = constant 1.000000e+00
@@ -1252,7 +1242,7 @@
  // CHECK-DAG: %[[VAL5:.+]] = muli %[[X]], %[[STRIDEX]]
  // CHECK-DAG: %[[VAL6:.+]] = addi %[[VAL4]], %[[OFFSETY]]
  // CHECK-DAG: %[[VAL7:.+]] = addi %[[VAL5]], %[[OFFSETX]]
-
+
  // Find the remainder and integer component of the target index.
@@ -1358,7 +1348,7 @@
  // CHECK: %[[XLOHI:.+]] = sexti %[[LOHI]]
  // CHECK: %[[XHILO:.+]] = sexti %[[HILO]]
  // CHECK: %[[XHIHI:.+]] = sexti %[[HIHI]]
-
+
  // Compute the bilinear interpolation.

  // CHECK: %[[SCALE:.+]] = constant 256
diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -45,29 +45,22 @@
func @collapsing_tensor_reshapes(%arg0 : tensor<?x?x?x?x?xf32>) -> tensor<?x?xf32>
{
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>] :
-       tensor<?x?x?x?x?xf32> into tensor<?x?x?xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<?x?x?xf32> into tensor<?x?xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2], [3, 4]]
+      : tensor<?x?x?x?x?xf32> into tensor<?x?x?xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1], [2]]
+      : tensor<?x?x?xf32> into tensor<?x?xf32>
  return %1 : tensor<?x?xf32>
}
-// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
-// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
// CHECK-LABEL: collapsing_tensor_reshapes
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1, 2], [3, 4]]
//   CHECK-NOT: linalg.tensor_reshape

// -----

func @collapsing_tensor_reshapes_to_zero_dim(%arg0 : tensor<1x1x1xf32>)
    -> tensor<f32> {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1, d2) -> (d0, d1, d2)>] :
-      tensor<1x1x1xf32> into tensor<1xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2]]
+      : tensor<1x1x1xf32> into tensor<1xf32>
  %1 = linalg.tensor_reshape %0 [] : tensor<1xf32> into tensor<f32>
  return %1 : tensor<f32>
}
@@ -79,8 +72,8 @@
func @collapsing_memref_reshapes_to_zero_dim(%arg0 : memref<1x1x1xf32>)
    -> memref<f32> {
-  %0 = linalg.reshape %arg0 [affine_map<(d0, d1, d2) -> (d0, d1, d2)>] :
-      memref<1x1x1xf32> into memref<1xf32>
+  %0 = linalg.reshape %arg0 [[0, 1, 2]]
+      : memref<1x1x1xf32> into memref<1xf32>
  %1 = linalg.reshape %0 [] : memref<1xf32> into memref<f32>
  return %1 : memref<f32>
}
@@ -92,63 +85,42 @@
func @expanding_tensor_reshapes(%arg0 : tensor<?x?xf32>) -> tensor<?x?x?x?x?xf32>
{
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<?x?xf32> into tensor<?x?x?xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d2)>,
-
          affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>] :
-       tensor<?x?x?xf32> into tensor<?x?x?x?x?xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]]
+      : tensor<?x?xf32> into tensor<?x?x?xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1], [2], [3, 4]]
+      : tensor<?x?x?xf32> into tensor<?x?x?x?x?xf32>
  return %1 : tensor<?x?x?x?x?xf32>
}
-// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
-// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
// CHECK-LABEL: expanding_tensor_reshapes
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1, 2], [3, 4]]
//   CHECK-NOT: linalg.tensor_reshape

// -----

func @collapsing_memref_reshapes(%arg0 : memref<?x?x?x?x?xf32>) -> memref<?x?xf32>
{
-  %0 = linalg.reshape %arg0
-         [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>] :
-       memref<?x?x?x?x?xf32> into memref<?x?x?xf32>
-  %1 = linalg.reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<?x?x?xf32> into memref<?x?xf32>
+  %0 = linalg.reshape %arg0 [[0, 1], [2], [3, 4]]
+      : memref<?x?x?x?x?xf32> into memref<?x?x?xf32>
+  %1 = linalg.reshape %0 [[0, 1], [2]]
+      : memref<?x?x?xf32> into memref<?x?xf32>
  return %1 : memref<?x?xf32>
}
-// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
-// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
// CHECK-LABEL: collapsing_memref_reshapes
-//       CHECK: linalg.reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
+//       CHECK: linalg.reshape %{{.*}} {{\[}}[0, 1, 2], [3, 4]]
//   CHECK-NOT: linalg.reshape

// -----

func @expanding_memref_reshapes(%arg0 : memref<?x?xf32>) -> memref<?x?x?x?x?xf32>
{
-  %0 = linalg.reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<?x?xf32> into memref<?x?x?xf32>
-  %1 = linalg.reshape %0
-         [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>] :
-       memref<?x?x?xf32> into memref<?x?x?x?x?xf32>
+  %0 = linalg.reshape %arg0 [[0, 1], [2]]
+      : memref<?x?xf32> into memref<?x?x?xf32>
+  %1 = linalg.reshape %0 [[0, 1], [2], [3, 4]]
+      : memref<?x?x?xf32> into memref<?x?x?x?x?xf32>
  return %1 : memref<?x?x?x?x?xf32>
}
-// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>
-// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
// CHECK-LABEL: expanding_memref_reshapes
-//       CHECK: linalg.reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
+//       CHECK: linalg.reshape %{{.*}} {{\[}}[0, 1, 2], [3, 4]]
//   CHECK-NOT: linalg.reshape

// -----
@@ -156,8 +128,8 @@
func @expanding_tensor_reshapes_to_zero_dim(%arg0 : tensor<f32>)
    -> tensor<1x1x1xf32> {
  %0 = linalg.tensor_reshape %arg0 [] : tensor<f32> into tensor<1xf32>
-  %1 = linalg.tensor_reshape %0 [affine_map<(d0, d1, d2) -> (d0, d1, d2)>] :
-      tensor<1xf32> into tensor<1x1x1xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2]]
+      : tensor<1xf32> into tensor<1x1x1xf32>
  return %1 : tensor<1x1x1xf32>
}
// CHECK-LABEL: expanding_tensor_reshapes_to_zero
@@ -169,9 +141,8 @@
func @expanding_memref_reshapes_to_zero_dim(%arg0 : memref<f32>)
    -> memref<1x1x1xf32> {
  %0 = linalg.reshape %arg0 [] : memref<f32> into memref<1xf32>
-  %1 = linalg.reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1, d2)>] :
-       memref<1xf32> into memref<1x1x1xf32>
+  %1 = linalg.reshape %0 [[0, 1, 2]]
+      : memref<1xf32> into memref<1x1x1xf32>
  return %1 : memref<1x1x1xf32>
}
// CHECK-LABEL: expanding_memref_reshapes_to_zero
@@ -182,14 +153,10 @@
func @fold_tensor_reshape(%arg0 : tensor<12x4xf32>) -> tensor<12x4xf32>
{
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<12x4xf32> into tensor<3x4x4xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<3x4x4xf32> into tensor<12x4xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]]
+      : tensor<12x4xf32> into tensor<3x4x4xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1], [2]]
+      : tensor<3x4x4xf32> into tensor<12x4xf32>
  return %1 : tensor<12x4xf32>
}
// CHECK-LABEL: @fold_tensor_reshape
@@ -199,14 +166,10 @@
func @fold_tensor_reshape_dynamic(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32>
{
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<?x?xf32> into tensor<?x4x?xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       tensor<?x4x?xf32> into tensor<?x?xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]]
+      : tensor<?x?xf32> into tensor<?x4x?xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1], [2]]
+      : tensor<?x4x?xf32> into tensor<?x?xf32>
  return %1 : tensor<?x?xf32>
}
// CHECK-LABEL: @fold_tensor_reshape_dynamic
@@ -216,14 +179,10 @@
func @fold_memref_reshape(%arg0 : memref<12x4xf32>) -> memref<12x4xf32>
{
-  %0 = linalg.reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<12x4xf32> into memref<3x4x4xf32>
-  %1 = linalg.reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<3x4x4xf32> into memref<12x4xf32>
+  %0 = linalg.reshape %arg0 [[0, 1], [2]]
+      : memref<12x4xf32> into memref<3x4x4xf32>
+  %1 = linalg.reshape %0 [[0, 1], [2]]
+      : memref<3x4x4xf32> into memref<12x4xf32>
  return %1 : memref<12x4xf32>
}
// CHECK-LABEL: @fold_memref_reshape
@@ -233,14 +192,10 @@
func @fold_memref_reshape_dynamic(%arg0 : memref<?x?xf32>) -> memref<?x?xf32>
{
-  %0 = linalg.reshape %arg0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<?x?xf32> into memref<?x4x?xf32>
-  %1 = linalg.reshape %0
-         [affine_map<(d0, d1, d2) -> (d0, d1)>,
-          affine_map<(d0, d1, d2) -> (d2)>] :
-       memref<?x4x?xf32> into memref<?x?xf32>
+  %0 = linalg.reshape %arg0 [[0, 1], [2]]
+      : memref<?x?xf32> into memref<?x4x?xf32>
+  %1 = linalg.reshape %0 [[0, 1], [2]]
+      : memref<?x4x?xf32> into memref<?x?xf32>
  return %1 : memref<?x?xf32>
}
// CHECK-LABEL: @fold_memref_reshape_dynamic
@@ -250,223 +205,154 @@
func @reshape_collapse(%arg0 : tensor<2x3x4x5x6x7x8xf32>)
    -> tensor<24x5x42x8xf32> {
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3, d4, d5, d6)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2, 3, 4, 5, 6]]
      : tensor<2x3x4x5x6x7x8xf32> into tensor<40320xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>]
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2, 3]]
      : tensor<40320xf32> into tensor<24x5x42x8xf32>
  return %1 : tensor<24x5x42x8xf32>
}
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d3)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d4, d5)>
-// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d6)>
//      CHECK: func @reshape_collapse
// CHECK-SAME:   %[[ARG0:.+]]: tensor<2x3x4x5x6x7x8xf32>
//      CHECK:   %[[RESULT:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME:     [#[[MAP0]], #[[MAP1]], #[[MAP2]], #[[MAP3]]]
+// CHECK-SAME:     [0, 1, 2], [3], [4, 5], [6]
//      CHECK:   return %[[RESULT]]

// -----

func @reshape_expand(%arg0 : tensor<24x5x42x8xf32>)
    -> tensor<2x3x4x5x6x7x8xf32> {
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2, 3]]
      : tensor<24x5x42x8xf32> into tensor<40320xf32>
-  %1 = linalg.tensor_reshape %0
-
[affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3, d4, d5, d6)>] + %1 = linalg.tensor_reshape %0 [[0, 1, 2, 3, 4, 5, 6]] : tensor<40320xf32> into tensor<2x3x4x5x6x7x8xf32> return %1 : tensor<2x3x4x5x6x7x8xf32> } -// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2)> -// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d3)> -// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d4, d5)> -// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d6)> // CHECK: func @reshape_expand // CHECK-SAME: %[[ARG0:.+]]: tensor<24x5x42x8xf32> // CHECK: %[[RESULT:.+]] = linalg.tensor_reshape %[[ARG0]] -// CHECK-SAME: [#[[MAP0]], #[[MAP1]], #[[MAP2]], #[[MAP3]]] +// CHECK-SAME: [0, 1, 2], [3], [4, 5], [6] // CHECK: return %[[RESULT]] // ----- func @expand_reshape_1D(%arg0 : tensor<2048xf32>) -> tensor<4x512xf32> { - %0 = linalg.tensor_reshape %arg0 - [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>] + %0 = linalg.tensor_reshape %arg0 [[0, 1, 2, 3]] : tensor<2048xf32> into tensor<1x4x1x512xf32> - %1 = linalg.tensor_reshape %0 - [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>, - affine_map<(d0, d1, d2, d3) -> (d3)>] + %1 = linalg.tensor_reshape %0 [[0, 1, 2], [3]] : tensor<1x4x1x512xf32> into tensor<4x512xf32> return %1 : tensor<4x512xf32> } -// CHECK: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)> // CHECK: func @expand_reshape_1D -// CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]]] +// CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1]] // CHECK-SAME: tensor<2048xf32> into tensor<4x512xf32> // ----- func @fold_reshape_1D(%arg0 : tensor<4x512xf32>) -> tensor<2048xf32> { - %0 = linalg.tensor_reshape %arg0 - [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>, - affine_map<(d0, d1, d2, d3) -> (d3)>] + %0 = linalg.tensor_reshape %arg0 [[0, 1, 2], [3]] : tensor<4x512xf32> into tensor<1x4x1x512xf32> - %1 = linalg.tensor_reshape %0 - [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>] + %1 = linalg.tensor_reshape %0 [[0, 1, 2, 3]] : tensor<1x4x1x512xf32> into tensor<2048xf32> return %1 : tensor<2048xf32> } -// CHECK: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)> // CHECK: func @fold_reshape_1D -// CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]]] +// CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1]] // CHECK-SAME: tensor<4x512xf32> into tensor<2048xf32> // ----- func @fold_reshape_unit_dims(%arg0 : tensor<2048x1x1xf32>) -> tensor<4x512x1x1xf32> { - %0 = linalg.tensor_reshape %arg0 - [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3)>, - affine_map<(d0, d1, d2, d3, d4, d5) -> (d4)>, - affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>] + %0 = linalg.tensor_reshape %arg0 [[0, 1, 2, 3], [4], [5]] : tensor<2048x1x1xf32> into tensor<1x4x1x512x1x1xf32> - %1 = linalg.tensor_reshape %0 - [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>, - affine_map<(d0, d1, d2, d3, d4, d5) -> (d3)>, - affine_map<(d0, d1, d2, d3, d4, d5) -> (d4)>, - affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>] + %1 = linalg.tensor_reshape %0 [[0, 1, 2], [3], [4], [5]] : tensor<1x4x1x512x1x1xf32> into tensor<4x512x1x1xf32> return %1 : tensor<4x512x1x1xf32> } -// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1)> -// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d2)> -// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)> // CHECK: func @fold_reshape_unit_dims -// CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]], #[[MAP1]], #[[MAP2]]] +// CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1], [2], [3]] // CHECK-SAME: 
tensor<2048x1x1xf32> into tensor<4x512x1x1xf32>

// -----

func @expand_reshape_unit_dims(%arg0 : tensor<2048x1x2048xf32>)
    -> tensor<4x512x1x512x4xf32> {
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d2, d3, d4)>,
-          affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d5)>,
-          affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d6, d7, d8)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2, 3, 4], [5], [6, 7, 8]]
      : tensor<2048x1x2048xf32> into tensor<1x4x1x512x1x1x512x1x4xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d2)>,
-          affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d3, d4)>,
-          affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d5)>,
-          affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d6, d7)>,
-          affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d8)>]
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2], [3, 4], [5], [6, 7], [8]]
      : tensor<1x4x1x512x1x1x512x1x4xf32> into tensor<4x512x1x512x4xf32>
  return %1 : tensor<4x512x1x512x4xf32>
}
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d2)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
//      CHECK: func @expand_reshape_unit_dims
//      CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1], [2], [3, 4]]
// CHECK-SAME:   tensor<2048x1x2048xf32> into tensor<4x512x1x512x4xf32>

// -----

func @fold_reshape_trailing_unit_dims(%arg0: tensor<2xf32>) -> tensor<2x1xf32>
{
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1, d2) -> (d0, d1, d2)>] : tensor<2xf32> into tensor<2x1x1xf32>
-  %1 = linalg.tensor_reshape %0
-  [affine_map<(d0, d1, d2) -> (d0)>,
-   affine_map<(d0, d1, d2) -> (d1, d2)>
-  ] : tensor<2x1x1xf32> into tensor<2x1xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2]]
+      : tensor<2xf32> into tensor<2x1x1xf32>
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2]]
+      : tensor<2x1x1xf32> into tensor<2x1xf32>
  return %1 : tensor<2x1xf32>
}
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)>
//      CHECK: func @fold_reshape_trailing_unit_dims
//      CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1]]
// CHECK-SAME:   tensor<2xf32> into tensor<2x1xf32>

// -----

func @collapse_reshape_unit_dims_dynamic(%arg0 : tensor<?x1x?x1x1x?x?x1x1xf32>)
    -> tensor<?x?x?x?xf32> {
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0)>,
-          affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d1, d2)>,
-          affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d3)>,
-          affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d4)>,
-          affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d5)>,
-          affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d6, d7, d8)>]
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2], [3], [4], [5], [6, 7, 8]]
      : tensor<?x1x?x1x1x?x?x1x1xf32> into tensor<?x?x1x1x?x?xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d1)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>]
+  %1 = linalg.tensor_reshape %0 [[0], [1], [2, 3, 4], [5]]
      : tensor<?x?x1x1x?x?xf32> into tensor<?x?x?x?xf32>
  return %1 : tensor<?x?x?x?xf32>
}
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d1, d2)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d3, d4, d5)>
-//
 CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d6, d7, d8)>
//      CHECK: func @collapse_reshape_unit_dims_dynamic
-//      CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]], #[[MAP1]], #[[MAP2]], #[[MAP3]]]
+//      CHECK: linalg.tensor_reshape
+// CHECK-SAME:   [0], [1, 2], [3, 4, 5], [6, 7, 8]
// CHECK-SAME:   tensor<?x1x?x1x1x?x?x1x1xf32> into tensor<?x?x?x?xf32>

// -----

func @fold_reshape_trailing_unit_dims(%arg0: tensor<2xf32>) -> tensor<2x1xf32>
{
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1, d2) -> (d0, d1, d2)>] : tensor<2xf32> into tensor<2x1x1xf32>
-  %1 = linalg.tensor_reshape %0
-  [affine_map<(d0, d1, d2) -> (d0)>,
-   affine_map<(d0, d1, d2) -> (d1, d2)>
-  ] : tensor<2x1x1xf32> into tensor<2x1xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2]]
+      : tensor<2xf32> into tensor<2x1x1xf32>
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2]]
+      : tensor<2x1x1xf32> into tensor<2x1xf32>
  return %1 : tensor<2x1xf32>
}
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)>
//      CHECK: func @fold_reshape_trailing_unit_dims
//      CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1]]
// CHECK-SAME:   tensor<2xf32> into tensor<2x1xf32>

// -----

func @fold_reshape_trailing_unit_dims_dynamic(%arg0: tensor<1x1x?x1x1x1xf32>)
    -> tensor<?xf32> {
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d3)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d4)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2], [3], [4], [5]]
      : tensor<1x1x?x1x1x1xf32> into tensor<?x1x1x1xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>]
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2, 3]]
      : tensor<?x1x1x1xf32> into tensor<?xf32>
  return %1 : tensor<?xf32>
}
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>
//      CHECK: func @fold_reshape_trailing_unit_dims_dynamic
//      CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1, 2, 3, 4, 5]]
// CHECK-SAME:   tensor<1x1x?x1x1x1xf32> into tensor<?xf32>

// -----

func @no_fold_reshapes(%arg0 : tensor<?x?x?xf32>) -> tensor<?x?xf32>
{
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2, d3) -> (d0)>,
-          affine_map<(d0, d1, d2, d3) -> (d1)>,
-          affine_map<(d0, d1, d2, d3) -> (d2, d3)>]
+  %0 = linalg.tensor_reshape %arg0 [[0], [1], [2, 3]]
      : tensor<?x?x?xf32> into tensor<?x?x?x?xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2, d3) -> (d0)>,
-          affine_map<(d0, d1, d2, d3) -> (d1, d2, d3)>]
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]]
      : tensor<?x?x?x?xf32> into tensor<?x?xf32>
  return %1 : tensor<?x?xf32>
}
@@ -478,15 +364,9 @@
func @no_fold_reshape_incompatible(%arg0 : tensor<4x6x8xf32>)
    -> tensor<2x6x16xf32> {
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d2, d3)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d4)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2, 3], [4]]
      : tensor<4x6x8xf32> into tensor<2x2x3x2x8xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2, d3, d4) -> (d0)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d1, d2)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>]
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2], [3, 4]]
      : tensor<2x2x3x2x8xf32> into tensor<2x6x16xf32>
  return %1 : tensor<2x6x16xf32>
}
@@ -497,19 +377,18 @@
// -----

func @no_fold_reshape_empty_expr(%arg0: tensor<3x2x2xf32>) -> tensor<12x1xf32> {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1, d2, d3) -> (d0)>, affine_map<(d0, d1, d2, d3) -> (d1)>, affine_map<(d0,
d1, d2, d3) -> (d2, d3)>] : tensor<3x2x2xf32> into tensor<3x2x2x1xf32> - %1 = linalg.tensor_reshape %0 [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>, affine_map<(d0, d1, d2, d3) -> (d3)>] : tensor<3x2x2x1xf32> into tensor<12x1xf32> + %0 = linalg.tensor_reshape %arg0 [[0], [1], [2, 3]] + : tensor<3x2x2xf32> into tensor<3x2x2x1xf32> + %1 = linalg.tensor_reshape %0 [[0, 1, 2], [3]] + : tensor<3x2x2x1xf32> into tensor<12x1xf32> return %1 : tensor<12x1xf32> } -// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)> -// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1)> -// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d2, d3)> -// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> -// CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)> // CHECK: func @no_fold_reshape_empty_expr // CHECK-SAME: %[[ARG0:.+]]: tensor<3x2x2xf32> -// CHECK: %[[RARG0:.+]] = linalg.tensor_reshape %[[ARG0:.+]] [#[[MAP0]], #[[MAP1]], #[[MAP2]] -// CHECK: %[[RES:.+]] = linalg.tensor_reshape %[[RARG0:.+]] [#[[MAP3]], #[[MAP4]]] +// CHECK: %[[RARG0:.+]] = linalg.tensor_reshape %[[ARG0]] +// CHECK-SAME: [0], [1], [2, 3] +// CHECK: %[[RES:.+]] = linalg.tensor_reshape %[[RARG0]] +// CHECK-SAME: [0, 1, 2], [3] // CHECK: return %[[RES:.+]] : tensor<12x1xf32> // ----- @@ -546,10 +425,8 @@ func @reshape_splat_constant_int32() -> tensor<2x4x2xi32> { %c0 = constant dense<42> : tensor<2x8xi32> - %0 = linalg.tensor_reshape %c0 - [affine_map<(d0, d1, d2) -> (d0)>, - affine_map<(d0, d1, d2) -> (d1, d2)>] - : tensor<2x8xi32> into tensor<2x4x2xi32> + %0 = linalg.tensor_reshape %c0 [[0], [1, 2]] + : tensor<2x8xi32> into tensor<2x4x2xi32> return %0 : tensor<2x4x2xi32> } // CHECK-LABEL: @reshape_splat_constant_int32 @@ -560,10 +437,8 @@ func @reshape_splat_constant_int16() -> tensor<2x4x2xi16> { %c0 = constant dense<42> : tensor<2x8xi16> - %0 = linalg.tensor_reshape %c0 - [affine_map<(d0, d1, d2) -> (d0)>, - affine_map<(d0, d1, d2) -> (d1, d2)>] - : tensor<2x8xi16> into tensor<2x4x2xi16> + %0 = linalg.tensor_reshape %c0 [[0], [1, 2]] + : tensor<2x8xi16> into tensor<2x4x2xi16> return %0 : tensor<2x4x2xi16> } // CHECK-LABEL: @reshape_splat_constant_int16 @@ -574,10 +449,8 @@ func @reshape_splat_constant_float32() -> tensor<2x4x2xf32> { %c0 = constant dense<42.0> : tensor<2x8xf32> - %0 = linalg.tensor_reshape %c0 - [affine_map<(d0, d1, d2) -> (d0)>, - affine_map<(d0, d1, d2) -> (d1, d2)>] - : tensor<2x8xf32> into tensor<2x4x2xf32> + %0 = linalg.tensor_reshape %c0 [[0], [1, 2]] + : tensor<2x8xf32> into tensor<2x4x2xf32> return %0 : tensor<2x4x2xf32> } // CHECK-LABEL: @reshape_splat_constant_float32 @@ -588,10 +461,8 @@ func @reshape_splat_constant_float64() -> tensor<2x4x2xf64> { %c0 = constant dense<42.0> : tensor<2x8xf64> - %0 = linalg.tensor_reshape %c0 - [affine_map<(d0, d1, d2) -> (d0)>, - affine_map<(d0, d1, d2) -> (d1, d2)>] - : tensor<2x8xf64> into tensor<2x4x2xf64> + %0 = linalg.tensor_reshape %c0 [[0], [1, 2]] + : tensor<2x8xf64> into tensor<2x4x2xf64> return %0 : tensor<2x4x2xf64> } // CHECK-LABEL: @reshape_splat_constant_float64 @@ -851,11 +722,8 @@ func @init_tensor_reshape_expansion(%arg0 : index) -> tensor<2x3x5x4x?x7xf32> { %0 = linalg.init_tensor [6, 5, %arg0] : tensor<6x5x?xf32> - %1 = linalg.tensor_reshape %0 - [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>, - affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>, - affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>] : - tensor<6x5x?xf32> into tensor<2x3x5x4x?x7xf32> + %1 = linalg.tensor_reshape %0 [[0, 1], [2], [3, 
4, 5]]
+      : tensor<6x5x?xf32> into tensor<2x3x5x4x?x7xf32>
  return %1 : tensor<2x3x5x4x?x7xf32>
}
//      CHECK: #[[MAP:.+]] = affine_map<()[s0] -> (s0 floordiv 28)>
@@ -869,11 +737,8 @@
func @init_tensor_reshape_collapse(%arg0 : index) -> tensor<6x5x?xf32> {
  %0 = linalg.init_tensor [2, 3, 5, 4, %arg0, 7] : tensor<2x3x5x4x?x7xf32>
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>] :
-       tensor<2x3x5x4x?x7xf32> into tensor<6x5x?xf32>
+  %1 = linalg.tensor_reshape %0 [[0, 1], [2], [3, 4, 5]]
+      : tensor<2x3x5x4x?x7xf32> into tensor<6x5x?xf32>
  return %1 : tensor<6x5x?xf32>
}
//      CHECK: #[[MAP:.+]] = affine_map<()[s0] -> (s0 * 28)>
@@ -1022,11 +887,8 @@
  %c1 = constant 1 : index
  %c3 = constant 3 : index
  %c4 = constant 4 : index
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>] :
-       tensor<6x5x?xf32> into tensor<2x3x5x4x?x7xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2], [3, 4, 5]]
+      : tensor<6x5x?xf32> into tensor<2x3x5x4x?x7xf32>
  %1 = memref.dim %0, %c1 : tensor<2x3x5x4x?x7xf32>
  %2 = memref.dim %0, %c3 : tensor<2x3x5x4x?x7xf32>
  %3 = memref.dim %0, %c4 : tensor<2x3x5x4x?x7xf32>
@@ -1048,11 +910,8 @@
{
  %c1 = constant 1 : index
  %c2 = constant 2 : index
-  %0 = linalg.tensor_reshape %arg0
-         [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>] :
-       tensor<2x3x5x4x?x7xf32> into tensor<6x5x?xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2], [3, 4, 5]]
+      : tensor<2x3x5x4x?x7xf32> into tensor<6x5x?xf32>
  %1 = memref.dim %0, %c1 : tensor<6x5x?xf32>
  %2 = memref.dim %0, %c2 : tensor<6x5x?xf32>
  return %1, %2 : index, index
@@ -1109,9 +968,8 @@
  %init = linalg.init_tensor [1, 2, 3, 4] : tensor<1x2x3x4xf32>
  // CHECK: %[[FILL:.+]] = linalg.fill(%[[INIT]], %cst) : tensor<6x4xf32>, f32 -> tensor<6x4xf32>
  %fill = linalg.fill(%init, %zero) : tensor<1x2x3x4xf32>, f32 -> tensor<1x2x3x4xf32>
-  %reshape = linalg.tensor_reshape %fill [
-      affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>,
-      affine_map<(d0, d1, d2, d3) -> (d3)>] : tensor<1x2x3x4xf32> into tensor<6x4xf32>
+  %reshape = linalg.tensor_reshape %fill [[0, 1, 2], [3]]
+      : tensor<1x2x3x4xf32> into tensor<6x4xf32>
  // CHECK: return %[[FILL]] : tensor<6x4xf32>
  return %reshape : tensor<6x4xf32>
}
@@ -1125,9 +983,7 @@
  // CHECK: %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]]
  %0 = linalg.fill(%arg0, %zero) : tensor<?x?x?x?x?xf32>, f32 -> tensor<?x?x?x?x?xf32>
  // CHECK: %[[RESULT:.+]] = linalg.fill(%[[RESHAPE]], %{{.+}})
-  %1 = linalg.tensor_reshape %0
-         [affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2)>,
-          affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>]
+  %1 = linalg.tensor_reshape %0 [[0, 1, 2], [3, 4]]
      : tensor<?x?x?x?x?xf32> into tensor<?x?xf32>
  // CHECK: return %[[RESULT]]
  return %1 : tensor<?x?xf32>
diff --git a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
--- a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
+++ b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
@@ -20,19 +20,14 @@
  } -> tensor<?x1x?x1x?xf32>
  return %0 : tensor<?x1x?x1x?xf32>
}
-// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d2)>
// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
// CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-DAG:
 #[[$MAP4:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
-// CHECK-DAG: #[[$MAP5:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3)>
-// CHECK-DAG: #[[$MAP6:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d4)>
// CHECK-LABEL: func @drop_one_trip_loops
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1], [2]]
//       CHECK: linalg.generic
//  CHECK-SAME:   indexing_maps = [#[[$MAP2]], #[[$MAP3]]]
//  CHECK-SAME:   iterator_types = ["parallel", "parallel", "parallel"]
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[$MAP4]], #[[$MAP5]], #[[$MAP6]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1], [2, 3], [4]]

// -----
@@ -146,7 +141,7 @@
  } -> tensor<1x1xf32>
  return %0 : tensor<1x1xf32>
}
-// CHECK-DAG: #[[$MAP0:.*]] = affine_map<() -> ()>
+// CHECK: #[[$MAP0:.*]] = affine_map<() -> ()>
// CHECK-LABEL: func @drop_all_loops
//       CHECK: linalg.tensor_reshape %{{.*}} []
//       CHECK: linalg.generic
@@ -235,9 +230,10 @@
  } -> tensor<5xf32>
  return %0 : tensor<5xf32>
}
-// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
+// CHECK: #[[$MAP1:.*]] = affine_map<(d0) -> (d0)>
+
// CHECK-LABEL: func @leading_dim_1_canonicalization
-//       CHECK: linalg.tensor_reshape %{{.*}} [#[[$MAP0]]]
+//       CHECK: linalg.tensor_reshape %{{.*}} {{\[}}[0, 1]]
//       CHECK: linalg.generic
//  CHECK-SAME:   indexing_maps = [#[[$MAP1]], #[[$MAP1]]]
//  CHECK-SAME:   iterator_types = ["parallel"]
@@ -258,10 +254,8 @@
func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>, %shape : tensor<5x5xf32>) -> tensor<5x5xf32>
{
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1) -> (d0, d1)>] :
-       tensor<5xf32> into tensor<1x5xf32>
-  %1 = linalg.tensor_reshape %arg1 [affine_map<(d0, d1) -> (d0, d1)>] :
-       tensor<5xf32> into tensor<5x1xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1]] : tensor<5xf32> into tensor<1x5xf32>
+  %1 = linalg.tensor_reshape %arg1 [[0, 1]] : tensor<5xf32> into tensor<5x1xf32>
  %2 = linalg.generic #trait
    ins(%0, %1 : tensor<1x5xf32>, tensor<5x1xf32>)
    outs(%shape : tensor<5x5xf32>) {
@@ -319,8 +313,6 @@

#map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2)>
-#map3 = affine_map<(d0, d1, d2) -> (d0, d1)>
-#map4 = affine_map<(d0, d1, d2) -> (d2)>

func @fold_unit_dim_tensor_reshape_op(%arg0 : tensor<5xf32>) -> tensor<2x5xf32>
{
  %1 = linalg.init_tensor [1, 2, 5] : tensor<1x2x5xf32>
  %2 = linalg.generic
  ^bb0(%arg1: f32, %arg2: f32):  // no predecessors
    linalg.yield %arg1 : f32
  } -> tensor<1x2x5xf32>
-  %3 = linalg.tensor_reshape %2 [#map3, #map4]
+  %3 = linalg.tensor_reshape %2 [[0, 1], [2]]
    : tensor<1x2x5xf32> into tensor<2x5xf32>
  return %3 : tensor<2x5xf32>
}
@@ -356,14 +348,13 @@
}

-//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)>
//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0) -> (d0)>
//  CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0) -> ()>
//      CHECK: func @fold_unit_dim_for_init_tensor
-//      CHECK: %[[INPUT_RESHAPE:.+]] = linalg.tensor_reshape %{{.+}} [#[[MAP0]]] : tensor<1x1000xf32> into tensor<1000xf32>
+//      CHECK: %[[INPUT_RESHAPE:.+]] = linalg.tensor_reshape %{{.+}} {{\[}}[0, 1]] : tensor<1x1000xf32> into tensor<1000xf32>
//      CHECK: %[[INIT:.+]] = linalg.init_tensor [] : tensor<f32>
//      CHECK: %[[FILL:.+]] = linalg.fill(%[[INIT]], %cst) : tensor<f32>, f32 -> tensor<f32>
//      CHECK: %[[GENERIC:.+]] = linalg.generic
@@ -389,20 +380,17 @@
      tensor<1x?x?x?x?x1x1xf32> to tensor<1x?x?x1x?x1x1xf32>
  return %0, %1 : tensor<1x?x?x1x?x1x1xf32>, tensor<1x?x?x1x?x1x1xf32>
}
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0,
d1)> -// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d2)> -// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d3, d4, d5, d6)> // CHECK: func @fold_subtensor // CHECK-SAME: %[[ARG0:.+]]: tensor<1x?x?x1x?x1x1xf32> // CHECK-SAME: %[[ARG1:.+]]: tensor<1x?x?x?x?x1x1xf32> // CHECK: %[[SUBTENSOR1:.+]] = subtensor %[[ARG0]] // CHECK-SAME: to tensor // CHECK: %[[RESULT1:.+]] = linalg.tensor_reshape %[[SUBTENSOR1]] -// CHECK-SAME: [#[[MAP0]], #[[MAP1]], #[[MAP2]]] +// CHECK-SAME: [0, 1], [2], [3, 4, 5, 6] // CHECK: %[[SUBTENSOR2:.+]] = subtensor %[[ARG1]] // CHECK-SAME: to tensor // CHECK: %[[RESULT2:.+]] = linalg.tensor_reshape %[[SUBTENSOR2]] -// CHECK-SAME: [#[[MAP0]], #[[MAP1]], #[[MAP2]]] +// CHECK-SAME: [0, 1], [2], [3, 4, 5, 6] // CHECK: return %[[RESULT1]], %[[RESULT2]] // ----- @@ -425,13 +413,11 @@ } -> tensor<1x?xf32> return %3 : tensor<1x?xf32> } -// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> -// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)> // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0, d1)> // CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1) -> (d0)> // CHECK: func @unit_dim_for_reduction // CHECK-SAME: %[[ARG0:.+]]: tensor<1x?x1x?xf32> -// CHECK-DAG: %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]] [#[[MAP0]], #[[MAP1]]] +// CHECK-DAG: %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]] {{\[}}[0, 1, 2], [3]] // CHECK: %[[INIT:.+]] = linalg.init_tensor [%{{.+}}] : tensor // CHECK: %[[FILL:.+]] = linalg.fill(%[[INIT]], %{{.+}}) // CHECK: %[[RESULT:.+]] = linalg.generic @@ -439,7 +425,7 @@ // CHECK-SAME: iterator_types = ["parallel", "reduction"] // CHECK-SAME: ins(%[[RESHAPE]] : tensor) // CHECK-SAME: outs(%[[FILL]] : tensor) -// CHECK: %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] [#[[MAP2]]] +// CHECK: %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] {{\[}}[0, 1]] // CHECK: return %[[RESULT_RESHAPE]] // ----- @@ -461,13 +447,11 @@ } -> tensor<1x1xf32> return %3 : tensor<1x1xf32> } -// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)> -// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)> // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0, d1)> // CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1) -> (d0)> // CHECK: func @unit_dim_for_reduction_keep_one // CHECK-SAME: %[[ARG0:.+]]: tensor<1x?x1x1xf32> -// CHECK-DAG: %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]] [#[[MAP0]], #[[MAP1]]] +// CHECK-DAG: %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]] {{\[}}[0, 1, 2], [3]] // CHECK: %[[INIT:.+]] = linalg.init_tensor [1] : tensor<1xf32> // CHECK: %[[FILL:.+]] = linalg.fill(%[[INIT]], %{{.+}}) // CHECK: %[[RESULT:.+]] = linalg.generic @@ -475,7 +459,7 @@ // CHECK-SAME: iterator_types = ["parallel", "reduction"] // CHECK-SAME: ins(%[[RESHAPE]] : tensor) // CHECK-SAME: outs(%[[FILL]] : tensor<1xf32>) -// CHECK: %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] [#[[MAP2]]] +// CHECK: %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] {{\[}}[0, 1]] // CHECK: return %[[RESULT_RESHAPE]] // ----- @@ -498,13 +482,11 @@ } -> tensor return %3 : tensor } -// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1)> -// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d2, d3)> // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0, d1)> // CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1) -> (d0)> // CHECK: func @unit_dim_for_reduction_inner // CHECK-SAME: %[[ARG0:.+]]: tensor -// CHECK-DAG: %[[RESHAPE:.+]] = 
linalg.tensor_reshape %[[ARG0]] [#[[MAP0]], #[[MAP1]]] +// CHECK-DAG: %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG0]] {{\[}}[0, 1], [2, 3]] // CHECK: %[[INIT:.+]] = linalg.init_tensor [%{{.+}}] : tensor // CHECK: %[[FILL:.+]] = linalg.fill(%[[INIT]], %{{.+}}) // CHECK: %[[RESULT:.+]] = linalg.generic @@ -512,5 +494,5 @@ // CHECK-SAME: iterator_types = ["parallel", "reduction"] // CHECK-SAME: ins(%[[RESHAPE]] : tensor) // CHECK-SAME: outs(%[[FILL]] : tensor) -// CHECK: %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] [#[[MAP2]]] +// CHECK: %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] {{\[}}[0, 1]] // CHECK: return %[[RESULT_RESHAPE]] diff --git a/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir b/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir --- a/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir +++ b/mlir/test/Dialect/Linalg/fusion-push-reshape.mlir @@ -1,21 +1,18 @@ // RUN: mlir-opt %s -test-linalg-push-reshape -split-input-file | FileCheck %s -// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)> -// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d2)> // CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0, d1)> // CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0, d1) -> (d1)> // CHECK-LABEL: func @reshape // CHECK-SAME: (%[[A:.*]]: tensor, %[[B:.*]]: tensor<16xf32>, %[[INIT:.*]]: tensor) -// CHECK: %[[RI:.*]] = linalg.tensor_reshape %[[INIT]] [#[[$MAP0]], #[[$MAP1]]] : tensor into tensor +// CHECK: %[[RI:.*]] = linalg.tensor_reshape %[[INIT]] {{\[}}[0, 1], [2]] : tensor into tensor // CHECK: %[[R:.*]] = linalg.generic {indexing_maps = [#[[$MAP2]], #[[$MAP3]], #[[$MAP2]]], // CHECK-SAME: iterator_types = ["parallel", "parallel"]} // CHECK-SAME: ins(%[[A]], %[[B]] : tensor, tensor<16xf32>) outs(%[[RI]] : tensor) -// CHECK: %[[RR:.*]] = linalg.tensor_reshape %[[R]] [#[[$MAP0]], #[[$MAP1]]] : tensor into tensor +// CHECK: %[[RR:.*]] = linalg.tensor_reshape %[[R]] {{\[}}[0, 1], [2]] : tensor into tensor // CHECK: return %[[RR]] : tensor func @reshape(%A: tensor, %B: tensor<16xf32>, %init: tensor) -> tensor { - %0 = linalg.tensor_reshape %A [ - affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>] + %0 = linalg.tensor_reshape %A [[0, 1], [2]] : tensor into tensor %2 = linalg.generic {indexing_maps = [ affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d2)>, @@ -32,27 +29,23 @@ // ----- -// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)> -// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d2)> // CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0, d1)> // CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0, d1) -> (d1)> // CHECK-LABEL: func @reshape_multiple // CHECK-SAME: (%[[A:.*]]: tensor<12544x16xf32>, %[[B:.*]]: tensor<12544x16xf32>, %[[C:.*]]: tensor<16xf32>) // CHECK: %[[I:.*]] = linalg.init_tensor [112, 112, 16] : tensor<112x112x16xf32> -// CHECK: %[[RI:.*]] = linalg.tensor_reshape %[[I]] [#[[$MAP0]], #[[$MAP1]]] : tensor<112x112x16xf32> into tensor<12544x16xf32> +// CHECK: %[[RI:.*]] = linalg.tensor_reshape %[[I]] {{\[}}[0, 1], [2]] : tensor<112x112x16xf32> into tensor<12544x16xf32> // CHECK: %[[R:.*]] = linalg.generic {indexing_maps = [#[[$MAP2]], #[[$MAP2]], #[[$MAP3]], #[[$MAP2]]], // CHECK-SAME: iterator_types = ["parallel", "parallel"]} // CHECK-SAME: ins(%[[A]], %[[B]], %[[C]] : tensor<12544x16xf32>, tensor<12544x16xf32>, tensor<16xf32>) outs(%[[RI]] : tensor<12544x16xf32>) -// CHECK: %[[RR:.*]] = linalg.tensor_reshape %[[R]] [#[[$MAP0]], #[[$MAP1]]] : tensor<12544x16xf32> into 
tensor<112x112x16xf32> +// CHECK: %[[RR:.*]] = linalg.tensor_reshape %[[R]] {{\[}}[0, 1], [2]] : tensor<12544x16xf32> into tensor<112x112x16xf32> // CHECK: return %[[RR]] : tensor<112x112x16xf32> func @reshape_multiple(%A: tensor<12544x16xf32>, %B: tensor<12544x16xf32>, %C: tensor<16xf32>) -> tensor<112x112x16xf32> { - %0 = linalg.tensor_reshape %A [ - affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>] + %0 = linalg.tensor_reshape %A [[0, 1], [2]] : tensor<12544x16xf32> into tensor<112x112x16xf32> - %1 = linalg.tensor_reshape %B [ - affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>] + %1 = linalg.tensor_reshape %B [[0, 1], [2]] : tensor<12544x16xf32> into tensor<112x112x16xf32> %2 = linalg.init_tensor [112, 112, 16] : tensor<112x112x16xf32> %3 = linalg.generic {indexing_maps = [ @@ -80,8 +73,7 @@ // CHECK: linalg.generic // CHECK: } -> tensor<112x112x16xf32> func @reshape_negative(%A: tensor<12544x16xf32>, %B: tensor<112xf32>) -> tensor<112x112x16xf32> { - %20 = linalg.tensor_reshape %A [ - affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>] + %20 = linalg.tensor_reshape %A [[0, 1], [2]] : tensor<12544x16xf32> into tensor<112x112x16xf32> %21 = linalg.init_tensor [112, 112, 16] : tensor<112x112x16xf32> %22 = linalg.generic {indexing_maps = [ diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir --- a/mlir/test/Dialect/Linalg/invalid.mlir +++ b/mlir/test/Dialect/Linalg/invalid.mlir @@ -348,29 +348,21 @@ func @reshape(%arg0: memref) { // expected-error @+1 {{expected non-zero memref ranks}} - %0 = linalg.reshape %arg0 [affine_map<()->(0)>] : memref into memref + %0 = linalg.reshape %arg0 [[0]] : memref into memref } // ----- func @reshape(%arg0: memref) { // expected-error @+1 {{expected to collapse or expand dims}} - %0 = linalg.reshape %arg0 [affine_map<(i)->(i)>] : memref into memref + %0 = linalg.reshape %arg0 [[0]] : memref into memref } // ----- func @reshape(%arg0: memref) { // expected-error @+1 {{expected rank of the collapsed type(2) to be the number of reassociation maps(1)}} - %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>] : - memref into memref -} - -// ----- - -func @reshape(%arg0: memref) { - // expected-error @+1 {{expected reassociation map #0 of same rank as expanded memref(3), but got 1}} - %0 = linalg.reshape %arg0 [affine_map<(i) -> (i)>, affine_map<(i, j, k) -> (k)>] : + %0 = linalg.reshape %arg0 [[0, 1]] : memref into memref } @@ -378,7 +370,7 @@ func @reshape(%arg0: memref) { // expected-error @+1 {{expected reassociation map #1 to be valid and contiguous}} - %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>, affine_map<(i, j, k) -> (k, j)>] : + %0 = linalg.reshape %arg0 [[0, 1], [1, 2]] : memref into memref } @@ -386,7 +378,7 @@ func @reshape(%arg0: memref) { // expected-error @+1 {{expected collapsed type to be 'memref', but got 'memref (d0 * s0 + d1)>>'}} - %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>, affine_map<(i, j, k) -> (k)>] : + %0 = linalg.reshape %arg0 [[0, 1], [2]] : memref into memref (d0 * s0 + d1)>> } @@ -463,11 +455,8 @@ (%arg0: tensor) -> tensor { // expected-error @+1 {{invalid to have a single dimension (2) expanded into multiple dynamic dims (2,4)}} - %0 = linalg.tensor_reshape %arg0 - [affine_map<(d0, d1, d2, d3, d4) -> (d0)>, - affine_map<(d0, d1, d2, d3, d4) -> (d1)>, - affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] : - tensor into tensor + %0 = linalg.tensor_reshape %arg0 [[0], [1], [2, 3, 4]] + : tensor into 
tensor return %0 : tensor } @@ -477,11 +466,8 @@ (%arg0: memref) -> memref { // expected-error @+1 {{invalid to have a single dimension (2) expanded into multiple dynamic dims (2,4)}} - %0 = linalg.reshape %arg0 - [affine_map<(d0, d1, d2, d3, d4) -> (d0)>, - affine_map<(d0, d1, d2, d3, d4) -> (d1)>, - affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] : - memref into memref + %0 = linalg.reshape %arg0 [[0], [1], [2, 3, 4]] + : memref into memref return %0 : memref } @@ -491,11 +477,8 @@ (%arg0: tensor<2x3x20xf32>) -> tensor<2x3x2x4x5xf32> { // expected-error @+1 {{expected dimension 2 of collapsed type to be static value of 40}} - %0 = linalg.tensor_reshape %arg0 - [affine_map<(d0, d1, d2, d3, d4) -> (d0)>, - affine_map<(d0, d1, d2, d3, d4) -> (d1)>, - affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] : - tensor<2x3x20xf32> into tensor<2x3x2x4x5xf32> + %0 = linalg.tensor_reshape %arg0 [[0], [1], [2, 3, 4]] + : tensor<2x3x20xf32> into tensor<2x3x2x4x5xf32> return %0 : tensor<2x3x2x4x5xf32> } @@ -505,11 +488,8 @@ (%arg0: tensor<2x3x2x4x5xf32>) -> tensor<2x3x20xf32> { // expected-error @+1 {{expected dimension 2 of collapsed type to be static value of 40}} - %0 = linalg.tensor_reshape %arg0 - [affine_map<(d0, d1, d2, d3, d4) -> (d0)>, - affine_map<(d0, d1, d2, d3, d4) -> (d1)>, - affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] : - tensor<2x3x2x4x5xf32> into tensor<2x3x20xf32> + %0 = linalg.tensor_reshape %arg0 [[0], [1], [2, 3, 4]] + : tensor<2x3x2x4x5xf32> into tensor<2x3x20xf32> return %0 : tensor<2x3x20xf32> } @@ -519,11 +499,8 @@ (%arg0: memref<2x3x20xf32>) -> memref<2x3x2x4x5xf32> { // expected-error @+1 {{expected dimension 2 of collapsed type to be static value of 40}} - %0 = linalg.reshape %arg0 - [affine_map<(d0, d1, d2, d3, d4) -> (d0)>, - affine_map<(d0, d1, d2, d3, d4) -> (d1)>, - affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] : - memref<2x3x20xf32> into memref<2x3x2x4x5xf32> + %0 = linalg.reshape %arg0 [[0], [1], [2, 3, 4]] + : memref<2x3x20xf32> into memref<2x3x2x4x5xf32> return %0 : memref<2x3x2x4x5xf32> } @@ -533,11 +510,8 @@ (%arg0: memref<2x3x2x4x5xf32>) -> memref<2x3x20xf32> { // expected-error @+1 {{expected dimension 2 of collapsed type to be static value of 40}} - %0 = linalg.reshape %arg0 - [affine_map<(d0, d1, d2, d3, d4) -> (d0)>, - affine_map<(d0, d1, d2, d3, d4) -> (d1)>, - affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] : - memref<2x3x2x4x5xf32> into memref<2x3x20xf32> + %0 = linalg.reshape %arg0 [[0], [1], [2, 3, 4]] + : memref<2x3x2x4x5xf32> into memref<2x3x20xf32> return %0 : memref<2x3x20xf32> } @@ -546,10 +520,8 @@ func @illegal_collapsing_reshape_mixed_tensor(%arg0 : tensor) -> tensor { // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 5}} - %0 = linalg.tensor_reshape %arg0 - [affine_map<(d0, d1, d2) -> (d0, d1)>, - affine_map<(d0, d1, d2) -> (d2)>] : - tensor into tensor + %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]] + : tensor into tensor return %0 : tensor } @@ -558,10 +530,8 @@ func @illegal_collapsing_reshape_mixed_tensor_2(%arg0 : tensor) -> tensor { // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 20}} - %0 = linalg.tensor_reshape %arg0 - [affine_map<(d0, d1, d2) -> (d0)>, - affine_map<(d0, d1, d2) -> (d1, d2)>] : - tensor into tensor + %0 = linalg.tensor_reshape %arg0 [[0], [1, 2]] + : tensor into tensor return %0 : tensor } @@ -570,10 +540,8 @@ func @illegal_expanding_reshape_mixed_tensor(%arg0 : tensor) -> tensor { // expected-error @+1 {{expected dimension 1 of collapsed 
type to be static value of 5}} - %0 = linalg.tensor_reshape %arg0 - [affine_map<(d0, d1, d2) -> (d0, d1)>, - affine_map<(d0, d1, d2) -> (d2)>] : - tensor into tensor + %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]] + : tensor into tensor return %0 : tensor } @@ -582,10 +550,8 @@ func @illegal_expanding_reshape_mixed_tensor_2(%arg0 : tensor) -> tensor { // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 20}} - %0 = linalg.tensor_reshape %arg0 - [affine_map<(d0, d1, d2) -> (d0)>, - affine_map<(d0, d1, d2) -> (d1, d2)>] : - tensor into tensor + %0 = linalg.tensor_reshape %arg0 [[0], [1, 2]] + : tensor into tensor return %0 : tensor } @@ -594,10 +560,8 @@ func @illegal_collapsing_reshape_mixed_memref(%arg0 : memref) -> memref { // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 5}} - %0 = linalg.reshape %arg0 - [affine_map<(d0, d1, d2) -> (d0, d1)>, - affine_map<(d0, d1, d2) -> (d2)>] : - memref into memref + %0 = linalg.reshape %arg0 [[0, 1], [2]] + : memref into memref return %0 : memref } @@ -606,10 +570,8 @@ func @illegal_collapsing_reshape_mixed_memref_2(%arg0 : memref) -> memref { // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 20}} - %0 = linalg.reshape %arg0 - [affine_map<(d0, d1, d2) -> (d0)>, - affine_map<(d0, d1, d2) -> (d1, d2)>] : - memref into memref + %0 = linalg.reshape %arg0 [[0], [1, 2]] + : memref into memref return %0 : memref } @@ -618,10 +580,8 @@ func @illegal_expanding_reshape_mixed_memref(%arg0 : memref) -> memref { // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 5}} - %0 = linalg.reshape %arg0 - [affine_map<(d0, d1, d2) -> (d0, d1)>, - affine_map<(d0, d1, d2) -> (d2)>] : - memref into memref + %0 = linalg.reshape %arg0 [[0, 1], [2]] + : memref into memref return %0 : memref } @@ -630,10 +590,8 @@ func @illegal_expanding_reshape_mixed_memref_2(%arg0 : memref) -> memref { // expected-error @+1 {{expected dimension 1 of collapsed type to be static value of 20}} - %0 = linalg.reshape %arg0 - [affine_map<(d0, d1, d2) -> (d0)>, - affine_map<(d0, d1, d2) -> (d1, d2)>] : - memref into memref + %0 = linalg.reshape %arg0 [[0], [1, 2]] + : memref into memref return %0 : memref } diff --git a/mlir/test/Dialect/Linalg/llvm.mlir b/mlir/test/Dialect/Linalg/llvm.mlir --- a/mlir/test/Dialect/Linalg/llvm.mlir +++ b/mlir/test/Dialect/Linalg/llvm.mlir @@ -16,10 +16,8 @@ func @reshape_static_expand(%arg0: memref<3x4x5xf32>) -> memref<1x3x4x1x5xf32> { // Reshapes that expand a contiguous tensor with some 1's. 
-  %0 = linalg.reshape %arg0 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                             affine_map<(i, j, k, l, m) -> (k)>,
-                             affine_map<(i, j, k, l, m) -> (l, m)>] :
-    memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
+  %0 = linalg.reshape %arg0 [[0, 1], [2], [3, 4]]
+      : memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
   return %0 : memref<1x3x4x1x5xf32>
 }
 // CHECK-LABEL: func @reshape_static_expand
@@ -52,9 +50,7 @@
 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 4] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 func @reshape_static_collapse(%arg0: memref<1x3x4x1x5xf32>) -> memref<3x4x5xf32> {
-  %0 = linalg.reshape %arg0 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                             affine_map<(i, j, k, l, m) -> (k)>,
-                             affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %0 = linalg.reshape %arg0 [[0, 1], [2], [3, 4]] :
     memref<1x3x4x1x5xf32> into memref<3x4x5xf32>
   return %0 : memref<3x4x5xf32>
 }
diff --git a/mlir/test/Dialect/Linalg/reshape_fusion.mlir b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
--- a/mlir/test/Dialect/Linalg/reshape_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
@@ -7,9 +7,7 @@
                                          %arg1 : tensor) ->
                                              tensor
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(i, j, k, l) -> (i)>,
-                                    affine_map<(i, j, k, l) -> (j, k)>,
-                                    affine_map<(i, j, k, l) -> (l)>] :
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2], [3]] :
     tensor into tensor
   %1 = linalg.generic {
      indexing_maps = [#map0, #map1, #map1],
@@ -23,29 +21,24 @@
   return %1 : tensor
 }
 
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
-// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d1)>
-// CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d2, d3)>
 // CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3) -> (d3, d0, d1, d2)>
 // CHECK-DAG: #[[MAP6:.+]] = affine_map<(d0, d1, d2, d3) -> (d2, d3, d0, d1)>
 // CHECK: func @generic_op_reshape_producer_fusion
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+// CHECK-SAME: [0], [1, 2], [3]
 // CHECK: %[[T1:.+]] = linalg.tensor_reshape %[[ARG1]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP3]], #[[MAP4]]]
+// CHECK-SAME: [0], [1], [2, 3]
 // CHECK: %[[T2:.+]] = linalg.tensor_reshape %[[T0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP3]], #[[MAP4]]]
+// CHECK-SAME: [0], [1], [2, 3]
 // CHECK: %[[T3:.+]] = linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP5]], #[[MAP6]], #[[MAP6]]]
 // CHECK-SAME: ["parallel", "parallel", "parallel", "parallel"]
 // CHECK-SAME: ins(%[[ARG0]], %[[T1]] : tensor, tensor)
 // CHECK-SAME: outs(%[[T2]] : tensor)
 // CHECK: %[[T4:.+]] = linalg.tensor_reshape %[[T3]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP3]], #[[MAP4]]]
+// CHECK-SAME: [0], [1], [2, 3]
 // CHECK-SAME: tensor into tensor
 // CHECK: return %[[T4]]
 
@@ -65,26 +58,23 @@
       %1 = mulf %arg3, %arg4 : f32
       linalg.yield %1 : f32
   } -> tensor
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
     tensor into tensor
   return %1 : tensor
 }
 
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2, d3)>
 // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 // CHECK: func @generic_op_reshape_consumer_fusion
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME: [0], [1, 2, 3]
 // CHECK-SAME: tensor into tensor
 // CHECK: %[[T1:.+]] = linalg.tensor_reshape %[[ARG1]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME: [0], [1, 2, 3]
 // CHECK-SAME: tensor into tensor
 // CHECK: %[[T2:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME: [0], [1, 2, 3]
 // CHECK: %[[T3:.+]] = linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP2]], #[[MAP2]], #[[MAP2]]]
 // CHECK-SAME: ["parallel", "parallel", "parallel", "parallel"]
@@ -109,21 +99,10 @@
       %1 = addf %arg0, %arg1 : f32
       linalg.yield %1 : f32
   } -> tensor
-  %d = linalg.tensor_reshape %c
-         [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>]
+  %d = linalg.tensor_reshape %c [[0, 1], [2], [3, 4, 5]]
     : tensor into tensor
   return %d : tensor
 }
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>
-// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-// CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
-// CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>
-// CHECK-DAG: #[[MAP6:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>
-// CHECK-DAG: #[[MAP7:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>
 // CHECK-DAG: #[[MAP8:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4, d0, d1, d5)>
 // CHECK-DAG: #[[MAP9:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4, d5)>
 // CHECK-DAG: #[[MAP10:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d5, d2, d3, d4)>
@@ -131,13 +110,13 @@
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+// CHECK-SAME: [0, 1, 2], [3, 4], [5]
 // CHECK-SAME: tensor<?x?x?xf32> into tensor<3x4x?x?x2x?xf32>
 // CHECK: %[[T1:.+]] = linalg.tensor_reshape %[[ARG1]]
-// CHECK-SAME: [#[[MAP3]], #[[MAP4]]]
+// CHECK-SAME: [0, 1, 2], [3]
 // CHECK-SAME: tensor<?x?xf32> into tensor<3x4x?x?xf32>
 // CHECK: %[[T2:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP5]], #[[MAP6]], #[[MAP7]]]
+// CHECK-SAME: [0, 1], [2], [3, 4, 5]
 // CHECK: %[[T3:.+]] = linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP8]], #[[MAP9]], #[[MAP10]]]
 // CHECK-SAME: ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]
@@ -164,18 +143,16 @@
       %2 = mulf %arg1, %arg2 : f32
       linalg.yield %2 : f32
   } -> tensor<264x4xf32>
-  %2 = linalg.tensor_reshape %1 [#map1, #map2] :
+  %2 = linalg.tensor_reshape %1 [[0, 1], [2]] :
     tensor<264x4xf32> into tensor<8x33x4xf32>
   return %2 : tensor<8x33x4xf32>
 }
 
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2) -> (d2)>
 // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 // CHECK: func @generic_op_reshape_consumer_static
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<264x4xf32>
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME: [0, 1], [2]
 // CHECK-SAME: tensor<264x4xf32> into tensor<8x33x4xf32>
 // CHECK: %[[T1:.+]] = linalg.init_tensor [8, 33, 4]
 // CHECK: %[[T2:.+]] = linalg.generic
@@ -193,9 +170,7 @@
                                          %arg1 : tensor) ->
                                              tensor
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(i, j, k, l) -> (i)>,
-                                    affine_map<(i, j, k, l) -> (j, k)>,
-                                    affine_map<(i, j, k, l) -> (l)>] :
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2], [3]] :
     tensor into tensor
   %1 = linalg.indexed_generic {
     indexing_maps = [#map0, #map1, #map1],
@@ -243,9 +218,7 @@
                                          %arg1 : tensor) ->
                                              tensor
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(i, j, k, l) -> (i)>,
-                                    affine_map<(i, j, k, l) -> (j, k)>,
-                                    affine_map<(i, j, k, l) -> (l)>] :
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2], [3]]:
     tensor into tensor
   %1 = linalg.generic {
     indexing_maps = [#map0, #map1, #map1],
@@ -309,8 +282,7 @@
       %5 = addi %3, %4 : i32
       linalg.yield %5 : i32
   } -> tensor
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
     tensor into tensor
   return %1 : tensor
 }
@@ -354,8 +326,7 @@
       %5 = addi %3, %4 : i32
       linalg.yield %5 : i32
   } -> tensor
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
     tensor into tensor
   return %1 : tensor
 }
@@ -402,20 +373,10 @@
       %7 = addi %5, %6 : i32
       linalg.yield %7 : i32
   } -> tensor<6x4x210xi32>
-  %d = linalg.tensor_reshape %c
-         [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>]
+  %d = linalg.tensor_reshape %c [[0, 1], [2], [3, 4, 5]]
     : tensor<6x4x210xi32> into tensor<2x3x4x5x6x7xi32>
   return %d : tensor<2x3x4x5x6x7xi32>
 }
-
-
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>
-// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-// CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 // CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4, d0, d1, d5)>
 // CHECK-DAG: #[[MAP6:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4, d5)>
 // CHECK-DAG: #[[MAP7:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d5, d2, d3, d4)>
@@ -425,9 +386,9 @@
 // CHECK-SAME: %[[ARG0:.+]]: tensor<210x6x4xi32>
 // CHECK-SAME: %[[ARG1:.+]]: tensor<210x4xi32>
 // CHECK-DAG: %[[T1:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+// CHECK-SAME: [0, 1, 2], [3, 4], [5]
 // CHECK-DAG: %[[T2:.+]] = linalg.tensor_reshape %[[ARG1]]
-// CHECK-SAME: [#[[MAP3]], #[[MAP4]]]
+// CHECK-SAME: [0, 1, 2], [3]
 // CHECK-DAG: %[[T0:.+]] = linalg.init_tensor [2, 3, 4, 5, 6, 7]
 // CHECK: %[[T4:.+]] = linalg.indexed_generic
 // CHECK-SAME: indexing_maps = [#[[MAP5]], #[[MAP6]], #[[MAP7]]]
@@ -475,20 +436,12 @@
       %7 = addi %5, %6 : i32
       linalg.yield %7 : i32
   } -> tensor<6x4x210xi32>
-  %d = linalg.tensor_reshape %c
-         [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d2)>,
-          affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>]
+  %d = linalg.tensor_reshape %c [[0, 1], [2], [3, 4, 5]]
    : tensor<6x4x210xi32> into tensor<2x3x4x5x6x7xi32>
   return %d : tensor<2x3x4x5x6x7xi32>
 }
 
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>
-// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-// CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 // CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4, d0, d1, d5)>
 // CHECK-DAG: #[[MAP6:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d2, d3, d4, d5)>
 // CHECK-DAG: #[[MAP7:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d5, d2, d3, d4)>
@@ -498,9 +451,9 @@
 // CHECK-SAME: %[[ARG0:.+]]: tensor<210x6x4xi32>
 // CHECK-SAME: %[[ARG1:.+]]: tensor<210x4xi32>
 // CHECK-DAG: %[[T1:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+// CHECK-SAME: [0, 1, 2], [3, 4], [5]
 // CHECK-DAG: %[[T2:.+]] = linalg.tensor_reshape %[[ARG1]]
-// CHECK-SAME: [#[[MAP3]], #[[MAP4]]]
+// CHECK-SAME: [0, 1, 2], [3]
 // CHECK-DAG: %[[T0:.+]] = linalg.init_tensor [2, 3, 4, 5, 6, 7]
 // CHECK: %[[T4:.+]] = linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP5]], #[[MAP6]], #[[MAP7]]]
@@ -530,8 +483,7 @@
 func @reshape_as_producer_projected_permutation(
     %arg0 : tensor<33x8x?xi32>, %shape : tensor<264x?x4xi32>) -> tensor<264x?x4xi32>
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1, d2) -> (d0, d1)>,
-                                    affine_map<(d0, d1, d2) -> (d2)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]]
     : tensor<33x8x?xi32> into tensor<264x?xi32>
   %1 = linalg.indexed_generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1)>,
@@ -554,9 +506,6 @@
 // CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
 // CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0 + d1 * 8)>
-// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
-// CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d2)>
-// CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 // CHECK: @reshape_as_producer_projected_permutation
 // CHECK-SAME: %[[ARG0:.+]]: tensor<33x8x?xi32>
 // CHECK: %[[RES:.+]] = linalg.indexed_generic
@@ -578,7 +527,7 @@
 // CHECK: %[[T6:.+]] = addi %[[T4]], %[[T5]] : i32
 // CHECK: linalg.yield %[[T6]] : i32
 // CHECK: %[[RES2:.+]] = linalg.tensor_reshape %[[RES]]
-// CHECK-SAME: [#[[MAP3]], #[[MAP4]], #[[MAP5]]]
+// CHECK-SAME: [0, 1], [2], [3]
 // CHECK-SAME: : tensor<33x8x?x4xi32> into tensor<264x?x4xi32>
 // CHECK: return %[[RES2]] : tensor<264x?x4xi32>
 
@@ -587,8 +536,7 @@
 func @reshape_as_producer_projected_permutation(
     %arg0 : tensor<33x8x?xi32>, %shape : tensor<264x?x4xi32>) -> tensor<264x?x4xi32>
 {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1, d2) -> (d0, d1)>,
-                                    affine_map<(d0, d1, d2) -> (d2)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1], [2]]
     : tensor<33x8x?xi32> into tensor<264x?xi32>
   %1 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1)>,
@@ -614,9 +562,6 @@
 // CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
 // CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0 + d1 * 8)>
-// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
-// CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d2)>
-// CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 // CHECK: @reshape_as_producer_projected_permutation
 // CHECK-SAME: %[[ARG0:.+]]: tensor<33x8x?xi32>
 // CHECK: %[[RES:.+]] = linalg.generic
@@ -638,7 +583,7 @@
 // CHECK: %[[T6:.+]] = addi %[[T4]], %[[T5]] : i32
 // CHECK: linalg.yield %[[T6]] : i32
 // CHECK: %[[RES2:.+]] = linalg.tensor_reshape %[[RES]]
-// CHECK-SAME: [#[[MAP3]], #[[MAP4]], #[[MAP5]]]
+// CHECK-SAME: [0, 1], [2], [3]
 // CHECK-SAME: : tensor<33x8x?x4xi32> into tensor<264x?x4xi32>
 // CHECK: return %[[RES2]] : tensor<264x?x4xi32>
 
@@ -659,29 +604,24 @@
       %1 = mulf %arg3, %arg4 : f32
       linalg.yield %1 : f32
   } -> tensor
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
     tensor into tensor
   return %1 : tensor
 }
 
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2, d3)>
 // CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 // CHECK-DAG: #[[MAP5:.+]] = affine_map<(d0, d1, d2, d3) -> (d3, d0, d1, d2)>
 // CHECK: func @generic_op_reshape_consumer_fusion_projected
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME: [0, 1, 2], [3]
 // CHECK-SAME: tensor into tensor
 // CHECK: %[[T1:.+]] = linalg.tensor_reshape %[[ARG1]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME: [0, 1, 2], [3]
 // CHECK-SAME: tensor into tensor
 // CHECK: %[[T2:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP2]], #[[MAP3]]]
+// CHECK-SAME: [0], [1, 2, 3]
 // CHECK: %[[T3:.+]] = linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP4]], #[[MAP4]], #[[MAP5]]]
 // CHECK-SAME: ["parallel", "parallel", "parallel", "parallel"]
@@ -692,8 +632,8 @@
 // -----
 
 func @unit_dim_reshape_expansion(%arg0 : tensor<1x5xf32>) -> tensor<5x5xf32> {
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1) -> (d0, d1)>] : tensor<1x5xf32> into tensor<5xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0, 1]]
+      : tensor<1x5xf32> into tensor<5xf32>
   %1 = linalg.init_tensor [5, 5] : tensor<5x5xf32>
   %2 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0)>,
@@ -722,8 +662,7 @@
     ^bb0(%arg2: f32, %arg3: f32):  // no predecessors
       linalg.yield %arg2 : f32
   } -> tensor<5x5xf32>
-  %2 = linalg.tensor_reshape %1
-    [affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>]
+  %2 = linalg.tensor_reshape %1 [[0, 1], [2]]
     : tensor<5x5xf32> into tensor<5x1x5xf32>
   return %2 : tensor<5x1x5xf32>
 }
@@ -738,10 +677,7 @@
   (%arg0 : tensor<1x?x1x2x1x4xf32>, %arg1 : tensor<?x2x4xf32>) -> tensor<?x2x4xf32>
 {
   %c1 = constant 1 : index
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4)>,
-     affine_map<(d0, d1, d2, d3, d4, d5) -> (d5)>]
+  %0 = linalg.tensor_reshape %arg0 [[0, 1, 2], [3, 4], [5]]
     : tensor<1x?x1x2x1x4xf32> into tensor<?x2x4xf32>
   %1 = memref.dim %arg0, %c1 : tensor<1x?x1x2x1x4xf32>
   %2 = linalg.init_tensor [%1, 2, 4] : tensor<?x2x4xf32>
diff --git a/mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir b/mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir
--- a/mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir
@@ -2,12 +2,8 @@
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xf32>,
-                                         %arg1 : tensor<?x?x4x?xf32>) ->
-                                             tensor<?x?x4x?xf32>
-{
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(i, j, k, l) -> (i)>,
-                                    affine_map<(i, j, k, l) -> (j, k)>,
-                                    affine_map<(i, j, k, l) -> (l)>] :
+                                         %arg1 : tensor<?x?x4x?xf32>) -> tensor<?x?x4x?xf32> {
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2], [3]] :
     tensor<?x?x?xf32> into tensor<?x?x4x?xf32>
   %1 = linalg.generic {
     indexing_maps = [#map0, #map0, #map0],
@@ -20,16 +16,12 @@
   } -> tensor<?x?x4x?xf32>
   return %1 : tensor<?x?x4x?xf32>
 }
-
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 // CHECK-DAG: #[[MAP3:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
 // CHECK-DAG: #[[MAP4:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 // CHECK: func @generic_op_reshape_producer_fusion
 // CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?xf32>
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+// CHECK-SAME: [0], [1, 2], [3]
 // CHECK: linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP3]], #[[MAP4]], #[[MAP4]]]
 // CHECK-SAME: ins(%[[ARG0]], %{{.+}} : tensor<?x?x?xf32>, tensor<?x?x4x?xf32>)
 
 // -----
 
@@ -39,9 +31,7 @@
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xf32>,
-                                         %arg1 : tensor<?x?x4x5xf32>) ->
-                                             tensor<?x?xf32>
-{
+                                         %arg1 : tensor<?x?x4x5xf32>) -> tensor<?x?xf32> {
   %0 = linalg.generic {
     indexing_maps = [#map0, #map0, #map0],
     iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
@@ -51,20 +41,17 @@
       %1 = mulf %arg3, %arg4 : f32
       linalg.yield %1 : f32
   } -> tensor<?x?x4x5xf32>
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
    tensor<?x?x4x5xf32> into tensor<?x?xf32>
   return %1 : tensor<?x?xf32>
 }
-
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2, d3)>
 // CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 // CHECK-DAG: #[[MAP3:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>
 // CHECK: func @generic_op_reshape_consumer_fusion
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x4x5xf32>
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME: [0], [1, 2, 3]
 // CHECK: linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP2]], #[[MAP2]], #[[MAP3]]]
 // CHECK-SAME: outs(%[[T0]] : tensor<?x?xf32>)
 
 // -----
 
@@ -74,9 +61,7 @@
 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 func @indexed_generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xi32>) -> tensor<?x?x4x?xi32> {
-  %0 = linalg.tensor_reshape %arg0 [affine_map<(i, j, k, l) -> (i)>,
-                                    affine_map<(i, j, k, l) -> (j, k)>,
-                                    affine_map<(i, j, k, l) -> (l)>] :
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2], [3]] :
    tensor<?x?x?xi32> into tensor<?x?x4x?xi32>
   %1 = linalg.indexed_generic {
     indexing_maps = [#map0, #map0],
@@ -90,16 +75,12 @@
   } -> tensor<?x?x4x?xi32>
   return %1 : tensor<?x?x4x?xi32>
 }
-
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
 // CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
 // CHECK-DAG: #[[MAP4:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 // CHECK: func @indexed_generic_op_reshape_producer_fusion
 // CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?xi32>
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+// CHECK-SAME: [0], [1, 2], [3]
 // CHECK: linalg.indexed_generic
 // CHECK-SAME: indexing_maps = [#[[MAP3]], #[[MAP4]]]
 // CHECK-SAME: ins(%[[ARG0]] : tensor<?x?x?xi32>)
@@ -119,20 +100,16 @@
       %3 = addi %arg6, %2 : i32
       linalg.yield %3 : i32
   } -> tensor<?x?x4x5xi32>
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
    tensor<?x?x4x5xi32> into tensor<?x?xi32>
   return %1 : tensor<?x?xi32>
 }
-
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d2, d3)>
 // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 // CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>
 // CHECK: func @indexed_generic_op_reshape_consumer_fusion
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x4x5xi32>
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME: [0], [1, 2, 3]
 // CHECK: linalg.indexed_generic
 // CHECK-SAME: indexing_maps = [#[[MAP2]], #[[MAP3]]]
 // CHECK-SAME: outs(%[[T0]] : tensor<?x?xi32>)
 
 // -----
 
@@ -140,12 +117,11 @@
 
-#map0 = affine_map<(d0, d1, d2) -> (d0)>
-#map1 = affine_map<(d0, d1, d2) -> (d1, d2)>
 #map2 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
 #map3 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 func @generic_op_021_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<3x7x5xf32> {
-  %0 = linalg.tensor_reshape %arg0 [#map0, #map1] : tensor<3x35xf32> into tensor<3x5x7xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2]]
+      : tensor<3x35xf32> into tensor<3x5x7xf32>
   %1 = linalg.init_tensor [3, 7, 5] : tensor<3x7x5xf32>
   %2 = linalg.generic {indexing_maps = [#map2, #map3],
@@ -166,12 +142,11 @@
 
 // -----
 
-#map0 = affine_map<(d0, d1, d2) -> (d0)>
-#map1 = affine_map<(d0, d1, d2) -> (d1, d2)>
 #map2 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
 #map3 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
 func @generic_op_120_permutation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<5x7x3xf32> {
-  %0 = linalg.tensor_reshape %arg0 [#map0, #map1] : tensor<3x35xf32> into tensor<3x5x7xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2]]
+      : tensor<3x35xf32> into tensor<3x5x7xf32>
   %1 = linalg.init_tensor [5, 7, 3] : tensor<5x7x3xf32>
   %2 = linalg.generic {indexing_maps = [#map2, #map3],
@@ -197,7 +172,8 @@
 #map2 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
 #map3 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 func @generic_op_102_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf32>) -> tensor<5x3x7xf32> {
-  %0 = linalg.tensor_reshape %arg0 [#map0, #map1] : tensor<3x35xf32> into tensor<3x5x7xf32>
+  %0 = linalg.tensor_reshape %arg0 [[0], [1, 2]]
+      : tensor<3x35xf32> into tensor<3x5x7xf32>
   %1 = linalg.init_tensor [5, 3, 7] : tensor<5x3x7xf32>
   %2 = linalg.generic {indexing_maps = [#map2, #map3],
@@ -232,19 +208,17 @@
     ^bb0(%arg2: f32, %arg3 : f32):  // no predecessors
       linalg.yield %arg2 : f32
   } -> tensor<5x3x7xf32>
-  %2 = linalg.tensor_reshape %1 [#map2, #map3] : tensor<5x3x7xf32> into tensor<5x21xf32>
+  %2 = linalg.tensor_reshape %1 [[0], [1, 2]]
+      : tensor<5x3x7xf32> into tensor<5x21xf32>
   return %2 : tensor<5x21xf32>
 }
-
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2) -> (d0)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2) -> (d1, d2)>
 // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
 // CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2) -> (d1, d0 * 7 + d2)>
 // CHECK: func @generic_op_102_permultation_reshape_consumer_fusion
 // CHECK-SAME: %[[ARG0:.+]]: tensor<3x5x7xf32>
 // CHECK: %[[T0:.+]] = linalg.init_tensor [5, 3, 7]
 // CHECK: %[[T1:.+]] = linalg.tensor_reshape %[[T0]]
-// CHECK-SAME: [#[[MAP0]], #[[MAP1]]]
+// CHECK-SAME: [0], [1, 2]
 // CHECK: linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP2]], #[[MAP3]]]
 // CHECK-SAME: ins(%[[ARG0]] : tensor<3x5x7xf32>)
@@ -266,8 +240,7 @@
       %1 = mulf %arg3, %arg4 : f32
       linalg.yield %1 : f32
   } -> tensor
-  %1 = linalg.tensor_reshape %0 [affine_map<(i, j, k, l) -> (i)>,
-                                 affine_map<(i, j, k, l) -> (j, k, l)>] :
+  %1 = linalg.tensor_reshape %0 [[0], [1, 2, 3]] :
    tensor into tensor
   return %1 : tensor
 }
diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -10,17 +10,6 @@
 // CHECK-DAG: #[[$id_1d:.*]] = affine_map<(d0, d1, d2) -> (d1)>
 // CHECK-DAG: #[[$permute_0:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
 // CHECK-DAG: #[[$permute_1:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
-// CHECK-DAG: #[[$reshape5D01:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
-// CHECK-DAG: #[[$reshape5D0:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0)>
-// CHECK-DAG: #[[$reshape5D1:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d1)>
-// CHECK-DAG: #[[$reshape5D2:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2)>
-// CHECK-DAG: #[[$reshape5D345:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>
-// CHECK-DAG: #[[$reshape5D34:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
-// CHECK-DAG: #[[$reshapeD012:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-DAG: #[[$reshapeD01:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK-DAG: #[[$reshapeD0:.*]] = affine_map<(d0, d1, d2) -> (d0)>
-// CHECK-DAG: #[[$reshapeD12:.*]] = affine_map<(d0, d1, d2) -> (d1, d2)>
-// CHECK-DAG: #[[$reshapeD2:.*]] = affine_map<(d0, d1, d2) -> (d2)>
 // CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 // CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
 // CHECK-DAG: #[[$strided2DOFF0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
@@ -571,68 +560,53 @@
 
 // -----
 
-func @reshape_static(%arg0: memref<3x4x5xf32>, %arg1: tensor<3x4x5xf32>, %arg2: tensor<3x?x5xf32>) {
+func @reshape_static(%arg0: memref<3x4x5xf32>, %arg1: tensor<3x4x5xf32>,
+                     %arg2: tensor<3x?x5xf32>) {
   // Reshapes that collapse and expand back a contiguous buffer.
-  %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>,
-                             affine_map<(i, j, k) -> (k)>] :
+  %0 = linalg.reshape %arg0 [[0, 1], [2]] :
     memref<3x4x5xf32> into memref<12x5xf32>
-  %r0 = linalg.reshape %0 [affine_map<(i, j, k) -> (i, j)>,
-                           affine_map<(i, j, k) -> (k)>] :
+  %r0 = linalg.reshape %0 [[0, 1], [2]] :
    memref<12x5xf32> into memref<3x4x5xf32>
-  %1 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i)>,
-                             affine_map<(i, j, k) -> (j, k)>] :
+  %1 = linalg.reshape %arg0 [[0], [1, 2]] :
    memref<3x4x5xf32> into memref<3x20xf32>
-  %r1 = linalg.reshape %1 [affine_map<(i, j, k) -> (i)>,
-                           affine_map<(i, j, k) -> (j, k)>] :
+  %r1 = linalg.reshape %1 [[0], [1, 2]] :
    memref<3x20xf32> into memref<3x4x5xf32>
-  %2 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j, k)>] :
+  %2 = linalg.reshape %arg0 [[0, 1, 2]] :
    memref<3x4x5xf32> into memref<60xf32>
-  %r2 = linalg.reshape %2 [affine_map<(i, j, k) -> (i, j, k)>] :
+  %r2 = linalg.reshape %2 [[0, 1, 2]] :
    memref<60xf32> into memref<3x4x5xf32>
   // Reshapes that expand and collapse back a contiguous buffer with some 1's.
-  %3 = linalg.reshape %arg0 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                             affine_map<(i, j, k, l, m) -> (k)>,
-                             affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %3 = linalg.reshape %arg0 [[0, 1], [2], [3, 4]] :
    memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
-  %r3 = linalg.reshape %3 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                           affine_map<(i, j, k, l, m) -> (k)>,
-                           affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %r3 = linalg.reshape %3 [[0, 1], [2], [3, 4]] :
    memref<1x3x4x1x5xf32> into memref<3x4x5xf32>
   // Reshapes on tensors.
-  %t0 = linalg.tensor_reshape %arg1 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                                     affine_map<(i, j, k, l, m) -> (k)>,
-                                     affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %t0 = linalg.tensor_reshape %arg1 [[0, 1], [2], [3, 4]] :
    tensor<3x4x5xf32> into tensor<1x3x4x1x5xf32>
-  %rt0 = linalg.tensor_reshape %t0 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                                    affine_map<(i, j, k, l, m) -> (k)>,
-                                    affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %rt0 = linalg.tensor_reshape %t0 [[0, 1], [2], [3, 4]] :
    tensor<1x3x4x1x5xf32> into tensor<3x4x5xf32>
-  %t1 = linalg.tensor_reshape %arg2 [affine_map<(i, j, k, l, m) -> (i, j)>,
-                                     affine_map<(i, j, k, l, m) -> (k)>,
-                                     affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %t1 = linalg.tensor_reshape %arg2 [[0, 1], [2], [3, 4]] :
    tensor<3x?x5xf32> into tensor<1x3x?x1x5xf32>
-  %rt1 = linalg.tensor_reshape %t1 [affine_map<(i, j, k, l, m) -> (i)>,
-                                    affine_map<(i, j, k, l, m) -> (j, k)>,
-                                    affine_map<(i, j, k, l, m) -> (l, m)>] :
+  %rt1 = linalg.tensor_reshape %t1 [[0], [1, 2], [3, 4]] :
    tensor<1x3x?x1x5xf32> into tensor<1x?x5xf32>
   return
 }
 // CHECK-LABEL: func @reshape_static
-// CHECK: linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 // CHECK-SAME: memref<3x4x5xf32> into memref<12x5xf32>
-// CHECK: linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 // CHECK-SAME: memref<12x5xf32> into memref<3x4x5xf32>
-// CHECK: linalg.reshape {{.*}} [#[[$reshapeD0]], #[[$reshapeD12]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0], [1, 2]]
 // CHECK-SAME: memref<3x4x5xf32> into memref<3x20xf32>
-// CHECK: linalg.reshape {{.*}} [#[[$reshapeD0]], #[[$reshapeD12]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0], [1, 2]]
 // CHECK-SAME: memref<3x20xf32> into memref<3x4x5xf32>
-// CHECK: linalg.reshape {{.*}} [#[[$reshapeD012]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1, 2]]
 // CHECK-SAME: memref<3x4x5xf32> into memref<60xf32>
-// CHECK: linalg.reshape {{.*}} [#[[$reshapeD012]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1, 2]]
 // CHECK-SAME: memref<60xf32> into memref<3x4x5xf32>
-// CHECK: linalg.reshape {{.*}} [#[[$reshape5D01]], #[[$reshape5D2]], #[[$reshape5D34]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2], [3, 4]]
 // CHECK-SAME: memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
-// CHECK: linalg.reshape {{.*}} [#[[$reshape5D01]], #[[$reshape5D2]], #[[$reshape5D34]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2], [3, 4]]
 // CHECK-SAME: memref<1x3x4x1x5xf32> into memref<3x4x5xf32>
 //
 // CHECK: linalg.tensor_reshape {{.*}}: tensor<3x4x5xf32> into tensor<1x3x4x1x5xf32>
@@ -645,43 +619,36 @@
 
 func @reshape_dynamic(%arg0: memref<?x?x?xf32>,
                       %arg1: memref,
                       %arg2: memref) {
-  %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>,
-                             affine_map<(i, j, k) -> (k)>] :
+  %0 = linalg.reshape %arg0 [[0, 1], [2]] :
    memref<?x?x?xf32> into memref<?x?xf32>
-  %r0 = linalg.reshape %0 [affine_map<(i, j, k) -> (i, j)>,
-                           affine_map<(i, j, k) -> (k)>] :
+  %r0 = linalg.reshape %0 [[0, 1], [2]] :
    memref<?x?xf32> into memref<?x?x?xf32>
-  %1 = linalg.reshape %arg1 [affine_map<(i, j, k) -> (i, j)>,
-                             affine_map<(i, j, k) -> (k)>] :
+  %1 = linalg.reshape %arg1 [[0, 1], [2]] :
    memref into memref
-  %r1 = linalg.reshape %1 [affine_map<(i, j, k) -> (i, j)>,
-                           affine_map<(i, j, k) -> (k)>] :
+  %r1 = linalg.reshape %1 [[0, 1], [2]] :
    memref into memref
-  %2 = linalg.reshape %arg2 [affine_map<(i, j, k) -> (i, j)>,
-                             affine_map<(i, j, k) -> (k)>] :
+  %2 = linalg.reshape %arg2 [[0, 1], [2]] :
    memref into memref
-  %r2 = linalg.reshape %2 [affine_map<(i, j, k) -> (i, j)>,
-                           affine_map<(i, j, k) -> (k)>] :
+  %r2 = linalg.reshape %2 [[0, 1], [2]] :
    memref into memref
   return
 }
-
 // CHECK-LABEL: func @reshape
-// CHECK: linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 // CHECK-SAME: memref<?x?x?xf32> into memref<?x?xf32>
-// CHECK: linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 // CHECK-SAME: memref<?x?xf32> into memref<?x?x?xf32>
-// CHECK: linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 // CHECK-SAME: memref into memref
-// CHECK: linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 // CHECK-SAME: memref into memref
-// CHECK: linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 // CHECK-SAME: memref into memref
-// CHECK: linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2]]
 // CHECK-SAME: memref into memref
 
 func @named_ops(%a3: memref, %b3: memref, %c3: memref,
@@ -749,30 +716,26 @@
 
 func @legal_collapsing_reshape_dynamic_tensor
   (%arg0: tensor<?x?x?x4x?xf32>) -> tensor<?x?x?xf32>
 {
-  %0 = linalg.tensor_reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4) -> (d0)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d1)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] :
+  %0 = linalg.tensor_reshape %arg0 [[0], [1], [2, 3, 4]] :
    tensor<?x?x?x4x?xf32> into tensor<?x?x?xf32>
   return %0 : tensor<?x?x?xf32>
 }
-// CHECK:      func @legal_collapsing_reshape_dynamic_tensor
-// CHECK:        linalg.tensor_reshape %{{.+}} [#[[$reshape5D0]], #[[$reshape5D1]], #[[$reshape5D345]]]
+// CHECK: func @legal_collapsing_reshape_dynamic_tensor
+// CHECK: linalg.tensor_reshape
+// CHECK-SAME: [0], [1], [2, 3, 4]
 
 // -----
 
 func @legal_collapsing_reshape_dynamic_memref
  (%arg0: memref<?x?x?x4x?xf32>) -> memref<?x?x?xf32>
 {
-  %0 = linalg.reshape %arg0
-    [affine_map<(d0, d1, d2, d3, d4) -> (d0)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d1)>,
-     affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>] :
+  %0 = linalg.reshape %arg0 [[0], [1], [2, 3, 4]] :
    memref<?x?x?x4x?xf32> into memref<?x?x?xf32>
   return %0 : memref<?x?x?xf32>
 }
-// CHECK:      func @legal_collapsing_reshape_dynamic_memref
-// CHECK:        linalg.reshape %{{.+}} [#[[$reshape5D0]], #[[$reshape5D1]], #[[$reshape5D345]]]
+// CHECK: func @legal_collapsing_reshape_dynamic_memref
+// CHECK: linalg.reshape
+// CHECK-SAME: [0], [1], [2, 3, 4]
 
 // -----
 
diff --git a/mlir/test/EDSC/builder-api-test.cpp b/mlir/test/EDSC/builder-api-test.cpp
--- a/mlir/test/EDSC/builder-api-test.cpp
+++ b/mlir/test/EDSC/builder-api-test.cpp
@@ -1046,8 +1046,8 @@
 
   // clang-format off
   // CHECK-LABEL: func @linalg_metadata_ops
-// CHECK: linalg.reshape {{.*}} [affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>] : memref<4x8x16xf32> into memref<32x16xf32>
-// CHECK: linalg.reshape {{.*}} [affine_map<(d0, d1, d2) -> (d0, d1)>, affine_map<(d0, d1, d2) -> (d2)>] : memref<32x16xf32> into memref<4x8x16xf32>
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2]] : memref<4x8x16xf32> into memref<32x16xf32>
+// CHECK: linalg.reshape {{.*}} {{\[}}[0, 1], [2]] : memref<32x16xf32> into memref<4x8x16xf32>
   // clang-format on
 TEST_FUNC(linalg_metadata_ops) {
   using linalg::ReassociationExprs;