diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td @@ -1273,6 +1273,11 @@ /// Returns true when two result types are compatible for this op; /// Method used by InferTypeOpInterface. static bool isCompatibleReturnTypes(TypeRange l, TypeRange r); + + /// Return the AND result between two integer operands + static inline APInt calcOneElement(APInt leftOperand, APInt rightOperand) { + return leftOperand & rightOperand; + } }]; } @@ -1301,6 +1306,11 @@ /// Returns true when two result types are compatible for this op; /// Method used by InferTypeOpInterface. static bool isCompatibleReturnTypes(TypeRange l, TypeRange r); + + /// Return the OR result between two integer operands + static inline APInt calcOneElement(APInt leftOperand, APInt rightOperand) { + return leftOperand | rightOperand; + } }]; } @@ -1329,6 +1339,12 @@ /// Returns true when two result types are compatible for this op; /// Method used by InferTypeOpInterface. static bool isCompatibleReturnTypes(TypeRange l, TypeRange r); + + /// Return the max of the two integer operands + static inline APInt calcOneElement(APInt leftOperand, APInt rightOperand) { + const llvm::APInt subtractRes = leftOperand - rightOperand; + return (!subtractRes.isNegative()) ? leftOperand : rightOperand; + } }]; } @@ -1357,6 +1373,12 @@ /// Returns true when two result types are compatible for this op; /// Method used by InferTypeOpInterface. static bool isCompatibleReturnTypes(TypeRange l, TypeRange r); + + /// Return the min of the two integer operands + static inline APInt calcOneElement(APInt leftOperand, APInt rightOperand) { + const llvm::APInt subtractRes = leftOperand - rightOperand; + return (!subtractRes.isNegative()) ? rightOperand : leftOperand; + } }]; } @@ -1385,6 +1407,11 @@ /// Returns true when two result types are compatible for this op; /// Method used by InferTypeOpInterface. static bool isCompatibleReturnTypes(TypeRange l, TypeRange r); + + /// Return the prod of the two integer operands + static inline APInt calcOneElement(APInt leftOperand, APInt rightOperand) { + return leftOperand * rightOperand; + } }]; } @@ -1406,13 +1433,17 @@ let results = (outs Tosa_Tensor:$output ); - let hasFolder = 1; let extraClassDeclaration = [{ /// Returns true when two result types are compatible for this op; /// Method used by InferTypeOpInterface. 
    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
+
+    /// Return the sum of the two integer operands
+    static inline APInt calcOneElement(APInt leftOperand, APInt rightOperand) {
+      return leftOperand + rightOperand;
+    }
   }];
 }
diff --git a/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h b/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h
--- a/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/Tosa/Transforms/Passes.h
@@ -34,6 +34,8 @@
                                                RewritePatternSet &patterns);
 void populateTosaFoldConstantTransposePatterns(MLIRContext *ctx,
                                                RewritePatternSet &patterns);
+void populateTosaConstantReduction(MLIRContext *ctx,
+                                   RewritePatternSet &patterns);
 
 std::unique_ptr<Pass> createTosaLayerwiseConstantFoldPass();
 std::unique_ptr<Pass> createTosaInferShapesPass();
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include <functional>
+#include <numeric>
 
 #include "mlir/Dialect/Tosa/IR/TosaOps.h"
 #include "mlir/Dialect/Tosa/Transforms/Passes.h"
@@ -289,8 +290,130 @@
   }
 };
 
+/// Compute the multi-dimensional position of the element located at the given
+/// linear index in a tensor of the given shape.
+llvm::SmallVector<int64_t>
+getPositionFromIndex(int64_t index, llvm::ArrayRef<int64_t> tensorShape) {
+  int64_t remaining = index;
+  llvm::SmallVector<int64_t> position(tensorShape.size(), 0);
+  for (int64_t i = tensorShape.size() - 1; i >= 0; --i) {
+    position[i] = remaining % tensorShape[i];
+    remaining /= tensorShape[i];
+  }
+  return position;
+}
+
+/// Compute the linear index of the element located at the given
+/// multi-dimensional position in a tensor of the given shape.
+int64_t getIndexFromPosition(llvm::ArrayRef<int64_t> position,
+                             llvm::ArrayRef<int64_t> tensorShape) {
+  int64_t index = 0;
+  int64_t multiplierTmp = 1;
+  for (int64_t i = position.size() - 1; i >= 0; --i) {
+    index += position[i] * multiplierTmp;
+    multiplierTmp *= tensorShape[i];
+  }
+  return index;
+}
+
+template <typename OperationType>
+llvm::APInt calculateReducedValue(const mlir::ElementsAttr &oldTensorAttr,
+                                  llvm::ArrayRef<int64_t> oldShape,
+                                  int64_t reductionAxis,
+                                  int64_t reductionIndex) {
+
+  llvm::SmallVector<int64_t> newShape(oldShape);
+  newShape[reductionAxis] = 1;
+  /// Calculate the position corresponding to the reduction index
+  llvm::SmallVector<int64_t> position =
+      getPositionFromIndex(reductionIndex, newShape);
+  auto oldTensor = oldTensorAttr.getValues<llvm::APInt>();
+  /// Start from the first position along the reduction axis
+  position[reductionAxis] = 0;
+  int64_t indexAtOldTensor = getIndexFromPosition(position, oldShape);
+  llvm::APInt reducedValue = oldTensor[indexAtOldTensor];
+
+  for (int64_t reductionAxisVal = 1; reductionAxisVal < oldShape[reductionAxis];
+       ++reductionAxisVal) {
+
+    int64_t stride = std::accumulate(oldShape.begin() + reductionAxis + 1,
+                                     oldShape.end(), 1, std::multiplies<int>());
+    int64_t index = indexAtOldTensor + stride * reductionAxisVal;
+    reducedValue =
+        OperationType::calcOneElement(reducedValue, oldTensor[index]);
+  }
+  return reducedValue;
+}
+
+template <typename OperationType>
+struct ReduceConstantOptimization : public OpRewritePattern<OperationType> {
+
+  using OpRewritePattern<OperationType>::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(OperationType op,
+                                PatternRewriter &rewriter) const override {
+    Value inputOp = op.getInput();
+    auto constOp = inputOp.getDefiningOp<tosa::ConstOp>();
+
+    if (!constOp)
+      return rewriter.notifyMatchFailure(
+          op, "reduce input must be const operation");
+
+    if (!inputOp.hasOneUse())
+      return rewriter.notifyMatchFailure(
+          op, "input operation has more than one user");
+
+    auto resultType = cast<ShapedType>(op.getOutput().getType());
+
+    if (!resultType.hasStaticShape())
+      return rewriter.notifyMatchFailure(op, "result type shape is not static");
+
+    auto reductionAxis = op.getAxis();
+    const auto denseElementsAttr = constOp.getValue();
+    const auto shapedOldElementsValues =
+        denseElementsAttr.getType().cast<ShapedType>();
+
+    if (!llvm::isa<IntegerType>(shapedOldElementsValues.getElementType()))
+      return rewriter.notifyMatchFailure(
+          op, "reduce input currently supported with integer type");
+
+    auto oldShape = shapedOldElementsValues.getShape();
+    auto newShape = resultType.getShape();
+
+    auto newNumOfElements = std::accumulate(newShape.begin(), newShape.end(), 1,
+                                            std::multiplies<int>());
+    llvm::SmallVector<APInt> newReducedTensor(newNumOfElements);
+
+    for (int64_t reductionIndex = 0; reductionIndex < newNumOfElements;
+         ++reductionIndex) {
+
+      /// Reduce all the elements along this reduction axis
+      newReducedTensor[reductionIndex] = calculateReducedValue<OperationType>(
+          denseElementsAttr, oldShape, reductionAxis, reductionIndex);
+    }
+
+    auto rankedTensorType = cast<RankedTensorType>(resultType);
+    auto denseAttr =
+        mlir::DenseElementsAttr::get(rankedTensorType, newReducedTensor);
+    rewriter.replaceOpWithNewOp<tosa::ConstOp>(op, rankedTensorType, denseAttr);
+    return success();
+  }
+};
+
 } // namespace
 
+void mlir::tosa::populateTosaConstantReduction(MLIRContext *ctx,
+                                               RewritePatternSet &patterns) {
+  patterns.add<ReduceConstantOptimization<ReduceAllOp>>(ctx);
+  patterns.add<ReduceConstantOptimization<ReduceAnyOp>>(ctx);
+  patterns.add<ReduceConstantOptimization<ReduceMaxOp>>(ctx);
+  patterns.add<ReduceConstantOptimization<ReduceMinOp>>(ctx);
+  patterns.add<ReduceConstantOptimization<ReduceProdOp>>(ctx);
+  patterns.add<ReduceConstantOptimization<ReduceSumOp>>(ctx);
+}
+
 void mlir::tosa::populateTosaFoldConstantTransposePatterns(
     MLIRContext *ctx, RewritePatternSet &patterns) {
   patterns.add<TosaFoldConstantTranspose>(ctx);
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaLayerwiseConstantFoldPass.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaLayerwiseConstantFoldPass.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaLayerwiseConstantFoldPass.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaLayerwiseConstantFoldPass.cpp
@@ -52,6 +52,7 @@
   mlir::tosa::populateTosaFoldConstantReciprocalPatterns(ctx, patterns);
   mlir::tosa::populateTosaFoldConstantTransposePatterns(ctx, patterns);
+  mlir::tosa::populateTosaConstantReduction(ctx, patterns);
   populateTosaOpsCanonicalizationPatterns(ctx, patterns);
 
   if (applyPatternsAndFoldGreedily(func, std::move(patterns)).failed())
diff --git a/mlir/test/Dialect/Tosa/constant-op-fold.mlir b/mlir/test/Dialect/Tosa/constant-op-fold.mlir
--- a/mlir/test/Dialect/Tosa/constant-op-fold.mlir
+++ b/mlir/test/Dialect/Tosa/constant-op-fold.mlir
@@ -573,3 +573,481 @@
   // CHECK: return %[[NOFOLD]], %arg0
   return %nofold, %fold : tensor<10x1xi32>, tensor<10x1xi32>
 }
+
+// -----
+
+  func.func @reduce_sum_constant() -> tensor<1x3xi32> {
+  // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<1x3xi32> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}5, 7, 9]]> : tensor<1x3xi32>}> : () -> tensor<1x3xi32>
+  // CHECK: return %[[VAL_0]] : tensor<1x3xi32>
+
+  %const = "tosa.const"() {value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>} : () -> tensor<2x3xi32>
+  %0 = tosa.reduce_sum %const {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32>
+  return %0 : tensor<1x3xi32>
+  }
+
+// -----
+
+  func.func @reduce_sum_constant() -> tensor<2x1xi32> {
+  // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<2x1xi32> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}6], [15]]> : tensor<2x1xi32>}> : () -> tensor<2x1xi32>
+  // CHECK: return %[[VAL_0]] : tensor<2x1xi32>
+  // CHECK: }
+ %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32> + %0 = tosa.reduce_sum %const {axis = 1 : i32} : (tensor<2x3xi32>) -> tensor<2x1xi32> + return %0 : tensor<2x1xi32> + } + + +// ----- + +func.func @reduce_sum_constant() -> tensor<3x1xi32> { + // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<3x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}6], [15], [24]]> : tensor<3x1xi32>}> : () -> tensor<3x1xi32> + // CHECK: return %[[VAL_0]] : tensor<3x1xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[1, 2, 3], [4, 5, 6], [7, 8, 9]]> : tensor<3x3xi32>}> : () -> tensor<3x3xi32> + %0 = tosa.reduce_sum %const {axis = 1 : i32} : (tensor<3x3xi32>) -> tensor<3x1xi32> + return %0 : tensor<3x1xi32> +} + +// ----- + +func.func @reduce_sum_constant() -> tensor<2x1x4xi32> { + // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<2x1x4xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[15, 18, 21, 24]], {{\[\[}}51, 54, 57, 60]]]> : tensor<2x1x4xi32>}> : () -> tensor<2x1x4xi32> + // CHECK: return %[[VAL_0]] : tensor<2x1x4xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]> : tensor<2x3x4xi32>}> : () -> tensor<2x3x4xi32> + %0 = tosa.reduce_sum %const {axis = 1 : i32} : (tensor<2x3x4xi32>) -> tensor<2x1x4xi32> + return %0 : tensor<2x1x4xi32> +} + +// ----- + +func.func @reduce_sum_constant() -> tensor<1x3x3xi32> { + // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<1x3x3xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[30, 33, 36], [39, 42, 45], [48, 51, 54]]]> : tensor<1x3x3xi32>}> : () -> tensor<1x3x3xi32> + // CHECK: return %[[VAL_0]] : tensor<1x3x3xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]> : tensor<3x3x3xi32>}> : () -> tensor<3x3x3xi32> + %0 = tosa.reduce_sum %const {axis = 0 : i32} : (tensor<3x3x3xi32>) -> tensor<1x3x3xi32> + return %0 : tensor<1x3x3xi32> +} + +// ----- + +func.func @reduce_sum_constant() -> tensor<2x2x2x1xi32> { + // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<2x2x2x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}{{\[\[}}3], [7]], {{\[\[}}11], [15]]], {{\[\[}}[19], [23]], {{\[\[}}27], [31]]]]> : tensor<2x2x2x1xi32>}> : () -> tensor<2x2x2x1xi32> + // CHECK: return %[[VAL_0]] : tensor<2x2x2x1xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[9, 10], [11, 12]], [[13, 14], [15, 16]]]]> : tensor<2x2x2x2xi32>}> : () -> tensor<2x2x2x2xi32> + %0 = tosa.reduce_sum %const {axis = 3 : i32} : (tensor<2x2x2x2xi32>) -> tensor<2x2x2x1xi32> + return %0 : tensor<2x2x2x1xi32> +} + +// ----- + +func.func @reduce_sum_constant() -> tensor<1x1x1xi32> { + // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<1x1x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<42> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32> + // CHECK: return %[[VAL_0]] : tensor<1x1x1xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[42]]]> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32> + %0 = tosa.reduce_sum %const {axis = 0 : i32} : (tensor<1x1x1xi32>) -> tensor<1x1x1xi32> + return %0 : tensor<1x1x1xi32> +} + +// ----- + +func.func @reduce_sum_constant() -> tensor<2x3x1x5xi32> { + // 
CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<2x3x1x5xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}{{\[\[}}34, 38, 42, 46, 50]], {{\[\[}}114, 118, 122, 126, 130]], {{\[\[}}194, 198, 202, 206, 210]]], {{\[\[}}[274, 278, 282, 286, 290]], {{\[\[}}354, 358, 362, 366, 370]], {{\[\[}}434, 438, 442, 446, 450]]]]> : tensor<2x3x1x5xi32>}> : () -> tensor<2x3x1x5xi32> + // CHECK: return %[[VAL_0]] : tensor<2x3x1x5xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20]], [[21, 22, 23, 24, 25], [26, 27, 28, 29, 30], [31, 32, 33, 34, 35], [36, 37, 38, 39, 40]], [[41, 42, 43, 44, 45], [46, 47, 48, 49, 50], [51, 52, 53, 54, 55], [56, 57, 58, 59, 60]]], [[[61, 62, 63, 64, 65], [66, 67, 68, 69, 70], [71, 72, 73, 74, 75], [76, 77, 78, 79, 80]], [[81, 82, 83, 84, 85], [86, 87, 88, 89, 90], [91, 92, 93, 94, 95], [96, 97, 98, 99, 100]], [[101, 102, 103, 104, 105], [106, 107, 108, 109, 110], [111, 112, 113, 114, 115], [116, 117, 118, 119, 120]]]]> : tensor<2x3x4x5xi32>}> : () -> tensor<2x3x4x5xi32> + %0 = tosa.reduce_sum %const {axis = 2 : i32} : (tensor<2x3x4x5xi32>) -> tensor<2x3x1x5xi32> + return %0 : tensor<2x3x1x5xi32> +} + +// ----- + + func.func @reduce_prod_constant() -> tensor<1x3xi32> { + // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<1x3xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}4, 10, 18]]> : tensor<1x3xi32>}> : () -> tensor<1x3xi32> + // CHECK: return %[[VAL_0]] : tensor<1x3xi32> + + %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32> + %0 = tosa.reduce_prod %const {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32> + return %0 : tensor<1x3xi32> + } + +// ----- + + func.func @reduce_prod_constant() -> tensor<2x1xi32> { + // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<2x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}6], [120]]> : tensor<2x1xi32>}> : () -> tensor<2x1xi32> + // CHECK: return %[[VAL_0]] : tensor<2x1xi32> + // CHECK: } + + %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32> + %0 = tosa.reduce_prod %const {axis = 1 : i32} : (tensor<2x3xi32>) -> tensor<2x1xi32> + return %0 : tensor<2x1xi32> + } + +// ----- + +func.func @reduce_prod_constant() -> tensor<3x1xi32> { + // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<3x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}6], [120], [504]]> : tensor<3x1xi32>}> : () -> tensor<3x1xi32> + // CHECK: return %[[VAL_0]] : tensor<3x1xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[1, 2, 3], [4, 5, 6], [7, 8, 9]]> : tensor<3x3xi32>}> : () -> tensor<3x3xi32> + %0 = tosa.reduce_prod %const {axis = 1 : i32} : (tensor<3x3xi32>) -> tensor<3x1xi32> + return %0 : tensor<3x1xi32> +} + +// ----- + +func.func @reduce_prod_constant() -> tensor<2x1x4xi32> { + // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<2x1x4xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[45, 120, 231, 384]], {{\[\[}}4641, 5544, 6555, 7680]]]> : tensor<2x1x4xi32>}> : () -> tensor<2x1x4xi32> + // CHECK: return %[[VAL_0]] : tensor<2x1x4xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]> : tensor<2x3x4xi32>}> : () -> tensor<2x3x4xi32> + %0 = tosa.reduce_prod %const {axis = 1 : i32} : 
(tensor<2x3x4xi32>) -> tensor<2x1x4xi32> + return %0 : tensor<2x1x4xi32> +} + +// ----- + +func.func @reduce_prod_constant() -> tensor<1x3x3xi32> { + // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<1x3x3xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[190, 440, 756], [1144, 1610, 2160], [2800, 3536, 4374]]]> : tensor<1x3x3xi32>}> : () -> tensor<1x3x3xi32> + // CHECK: return %[[VAL_0]] : tensor<1x3x3xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]> : tensor<3x3x3xi32>}> : () -> tensor<3x3x3xi32> + %0 = tosa.reduce_prod %const {axis = 0 : i32} : (tensor<3x3x3xi32>) -> tensor<1x3x3xi32> + return %0 : tensor<1x3x3xi32> +} + +// ----- + +func.func @reduce_prod_constant() -> tensor<2x2x2x1xi32> { + // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<2x2x2x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}{{\[\[}}2], [12]], {{\[\[}}30], [56]]], {{\[\[}}[90], [132]], {{\[\[}}182], [240]]]]> : tensor<2x2x2x1xi32>}> : () -> tensor<2x2x2x1xi32> + // CHECK: return %[[VAL_0]] : tensor<2x2x2x1xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[9, 10], [11, 12]], [[13, 14], [15, 16]]]]> : tensor<2x2x2x2xi32>}> : () -> tensor<2x2x2x2xi32> + %0 = tosa.reduce_prod %const {axis = 3 : i32} : (tensor<2x2x2x2xi32>) -> tensor<2x2x2x1xi32> + return %0 : tensor<2x2x2x1xi32> +} + +// ----- + +func.func @reduce_prod_constant() -> tensor<1x1x1xi32> { + // CHECK-LABEL: func.func @reduce_prod_constant() -> tensor<1x1x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<42> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32> + // CHECK: return %[[VAL_0]] : tensor<1x1x1xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[42]]]> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32> + %0 = tosa.reduce_prod %const {axis = 0 : i32} : (tensor<1x1x1xi32>) -> tensor<1x1x1xi32> + return %0 : tensor<1x1x1xi32> +} + +// ----- + + func.func @reduce_max_constant() -> tensor<1x3xi32> { + // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<1x3xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}4, 5, 6]]> : tensor<1x3xi32>}> : () -> tensor<1x3xi32> + // CHECK: return %[[VAL_0]] : tensor<1x3xi32> + + %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32> + %0 = tosa.reduce_max %const {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32> + return %0 : tensor<1x3xi32> + } + +// ----- + + func.func @reduce_max_constant() -> tensor<2x1xi32> { + // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<2x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}3], [6]]> : tensor<2x1xi32>}> : () -> tensor<2x1xi32> + // CHECK: return %[[VAL_0]] : tensor<2x1xi32> + // CHECK: } + + %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32> + %0 = tosa.reduce_max %const {axis = 1 : i32} : (tensor<2x3xi32>) -> tensor<2x1xi32> + return %0 : tensor<2x1xi32> + } + +// ----- + +func.func @reduce_max_constant() -> tensor<3x1xi32> { + // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<3x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}3], [6], [9]]> : tensor<3x1xi32>}> : () -> tensor<3x1xi32> + // CHECK: return %[[VAL_0]] : tensor<3x1xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[1, 2, 
3], [4, 5, 6], [7, 8, 9]]> : tensor<3x3xi32>}> : () -> tensor<3x3xi32> + %0 = tosa.reduce_max %const {axis = 1 : i32} : (tensor<3x3xi32>) -> tensor<3x1xi32> + return %0 : tensor<3x1xi32> +} + +// ----- + +func.func @reduce_max_constant() -> tensor<2x1x4xi32> { + // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<2x1x4xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[9, 10, 11, 12]], {{\[\[}}21, 22, 23, 24]]]> : tensor<2x1x4xi32>}> : () -> tensor<2x1x4xi32> + // CHECK: return %[[VAL_0]] : tensor<2x1x4xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]> : tensor<2x3x4xi32>}> : () -> tensor<2x3x4xi32> + %0 = tosa.reduce_max %const {axis = 1 : i32} : (tensor<2x3x4xi32>) -> tensor<2x1x4xi32> + return %0 : tensor<2x1x4xi32> +} + +// ----- + +func.func @reduce_max_constant() -> tensor<1x3x3xi32> { + // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<1x3x3xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[19, 20, 21], [22, 23, 24], [25, 26, 27]]]> : tensor<1x3x3xi32>}> : () -> tensor<1x3x3xi32> + // CHECK: return %[[VAL_0]] : tensor<1x3x3xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]> : tensor<3x3x3xi32>}> : () -> tensor<3x3x3xi32> + %0 = tosa.reduce_max %const {axis = 0 : i32} : (tensor<3x3x3xi32>) -> tensor<1x3x3xi32> + return %0 : tensor<1x3x3xi32> +} + +// ----- + +func.func @reduce_max_constant() -> tensor<2x2x2x1xi32> { + // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<2x2x2x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}{{\[\[}}2], [4]], {{\[\[}}6], [8]]], {{\[\[}}[10], [12]], {{\[\[}}14], [16]]]]> : tensor<2x2x2x1xi32>}> : () -> tensor<2x2x2x1xi32> + // CHECK: return %[[VAL_0]] : tensor<2x2x2x1xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[9, 10], [11, 12]], [[13, 14], [15, 16]]]]> : tensor<2x2x2x2xi32>}> : () -> tensor<2x2x2x2xi32> + %0 = tosa.reduce_max %const {axis = 3 : i32} : (tensor<2x2x2x2xi32>) -> tensor<2x2x2x1xi32> + return %0 : tensor<2x2x2x1xi32> +} + +// ----- + +func.func @reduce_max_constant() -> tensor<1x1x1xi32> { + // CHECK-LABEL: func.func @reduce_max_constant() -> tensor<1x1x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<42> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32> + // CHECK: return %[[VAL_0]] : tensor<1x1x1xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[42]]]> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32> + %0 = tosa.reduce_max %const {axis = 0 : i32} : (tensor<1x1x1xi32>) -> tensor<1x1x1xi32> + return %0 : tensor<1x1x1xi32> +} + +// ----- + + func.func @reduce_min_constant() -> tensor<1x3xi32> { + // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<1x3xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}1, 2, 3]]> : tensor<1x3xi32>}> : () -> tensor<1x3xi32> + // CHECK: return %[[VAL_0]] : tensor<1x3xi32> + %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32> + %0 = tosa.reduce_min %const {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32> + return %0 : tensor<1x3xi32> + } + + +// ----- + + func.func @reduce_min_constant() -> tensor<2x1xi32> { + // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<2x1xi32> { + // CHECK: 
%[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}1], [4]]> : tensor<2x1xi32>}> : () -> tensor<2x1xi32> + // CHECK: return %[[VAL_0]] : tensor<2x1xi32> + // CHECK: } + + %const = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32> + %0 = tosa.reduce_min %const {axis = 1 : i32} : (tensor<2x3xi32>) -> tensor<2x1xi32> + return %0 : tensor<2x1xi32> + } + +// ----- + +func.func @reduce_min_constant() -> tensor<3x1xi32> { + // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<3x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}1], [4], [7]]> : tensor<3x1xi32>}> : () -> tensor<3x1xi32> + // CHECK: return %[[VAL_0]] : tensor<3x1xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[1, 2, 3], [4, 5, 6], [7, 8, 9]]> : tensor<3x3xi32>}> : () -> tensor<3x3xi32> + %0 = tosa.reduce_min %const {axis = 1 : i32} : (tensor<3x3xi32>) -> tensor<3x1xi32> + return %0 : tensor<3x1xi32> +} + +// ----- + +func.func @reduce_min_constant() -> tensor<2x1x4xi32> { + // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<2x1x4xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[1, 2, 3, 4]], {{\[\[}}13, 14, 15, 16]]]> : tensor<2x1x4xi32>}> : () -> tensor<2x1x4xi32> + // CHECK: return %[[VAL_0]] : tensor<2x1x4xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]> : tensor<2x3x4xi32>}> : () -> tensor<2x3x4xi32> + %0 = tosa.reduce_min %const {axis = 1 : i32} : (tensor<2x3x4xi32>) -> tensor<2x1x4xi32> + return %0 : tensor<2x1x4xi32> +} + +// ----- + +func.func @reduce_min_constant() -> tensor<1x3x3xi32> { + // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<1x3x3xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[1, 2, 3], [4, 5, 6], [7, 8, 9]]]> : tensor<1x3x3xi32>}> : () -> tensor<1x3x3xi32> + // CHECK: return %[[VAL_0]] : tensor<1x3x3xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]> : tensor<3x3x3xi32>}> : () -> tensor<3x3x3xi32> + %0 = tosa.reduce_min %const {axis = 0 : i32} : (tensor<3x3x3xi32>) -> tensor<1x3x3xi32> + return %0 : tensor<1x3x3xi32> +} + +// ----- + +func.func @reduce_min_constant() -> tensor<2x2x2x1xi32> { + // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<2x2x2x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}{{\[\[}}1], [3]], {{\[\[}}5], [7]]], {{\[\[}}[9], [11]], {{\[\[}}13], [15]]]]> : tensor<2x2x2x1xi32>}> : () -> tensor<2x2x2x1xi32> + // CHECK: return %[[VAL_0]] : tensor<2x2x2x1xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[9, 10], [11, 12]], [[13, 14], [15, 16]]]]> : tensor<2x2x2x2xi32>}> : () -> tensor<2x2x2x2xi32> + %0 = tosa.reduce_min %const {axis = 3 : i32} : (tensor<2x2x2x2xi32>) -> tensor<2x2x2x1xi32> + return %0 : tensor<2x2x2x1xi32> +} + +// ----- + +func.func @reduce_min_constant() -> tensor<1x1x1xi32> { + // CHECK-LABEL: func.func @reduce_min_constant() -> tensor<1x1x1xi32> { + // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<42> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32> + // CHECK: return %[[VAL_0]] : tensor<1x1x1xi32> + // CHECK: } + %const = "tosa.const"() <{value = dense<[[[42]]]> : tensor<1x1x1xi32>}> : () -> tensor<1x1x1xi32> + %0 = tosa.reduce_min %const {axis = 0 : i32} : 
(tensor<1x1x1xi32>) -> tensor<1x1x1xi32>
+  return %0 : tensor<1x1x1xi32>
+}
+
+// -----
+
+func.func @reduce_any_constant() -> tensor<1x3xi1> {
+  // CHECK-LABEL: func.func @reduce_any_constant() -> tensor<1x3xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<true> : tensor<1x3xi1>}> : () -> tensor<1x3xi1>
+  // CHECK: return %[[VAL_0]] : tensor<1x3xi1>
+
+  %const = "tosa.const"() <{value = dense<[[true,true,true], [true,false,true]]> : tensor<2x3xi1>}> : () -> tensor<2x3xi1>
+  %0 = tosa.reduce_any %const {axis = 0 : i32} : (tensor<2x3xi1>) -> tensor<1x3xi1>
+  return %0 : tensor<1x3xi1>
+}
+
+// -----
+
+func.func @reduce_any_constant() -> tensor<2x1xi1> {
+  // CHECK-LABEL: func.func @reduce_any_constant() -> tensor<2x1xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<true> : tensor<2x1xi1>}> : () -> tensor<2x1xi1>
+  // CHECK: return %[[VAL_0]] : tensor<2x1xi1>
+  // CHECK: }
+
+  %const = "tosa.const"() <{value = dense<[[true,true,true], [true,false,true]]> : tensor<2x3xi1>}> : () -> tensor<2x3xi1>
+  %0 = tosa.reduce_any %const {axis = 1 : i32} : (tensor<2x3xi1>) -> tensor<2x1xi1>
+  return %0 : tensor<2x1xi1>
+}
+
+// -----
+
+func.func @reduce_any_constant() -> tensor<3x1xi1> {
+  // CHECK-LABEL: func.func @reduce_any_constant() -> tensor<3x1xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}true], [false], [true]]> : tensor<3x1xi1>}> : () -> tensor<3x1xi1>
+  // CHECK: return %[[VAL_0]] : tensor<3x1xi1>
+  // CHECK: }
+  %const = "tosa.const"() <{value = dense<[[true, false, false], [false, false, false], [false, false, true]]> : tensor<3x3xi1>}> : () -> tensor<3x3xi1>
+  %0 = tosa.reduce_any %const {axis = 1 : i32} : (tensor<3x3xi1>) -> tensor<3x1xi1>
+  return %0 : tensor<3x1xi1>
+}
+
+// -----
+
+func.func @reduce_any_constant() -> tensor<2x1x4xi1> {
+  // CHECK-LABEL: func.func @reduce_any_constant() -> tensor<2x1x4xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}[true, false, true, true]], {{\[\[}}true, false, true, false]]]> : tensor<2x1x4xi1>}> : () -> tensor<2x1x4xi1>
+  // CHECK: return %[[VAL_0]] : tensor<2x1x4xi1>
+  // CHECK: }
+  %const = "tosa.const"() <{value = dense<[[[true, false, false, true], [false, false, true, false], [true, false, true, true]], [[false, false, false, false], [false, false, true, false], [true, false, true, false]]]> : tensor<2x3x4xi1>}> : () -> tensor<2x3x4xi1>
+  %0 = tosa.reduce_any %const {axis = 1 : i32} : (tensor<2x3x4xi1>) -> tensor<2x1x4xi1>
+  return %0 : tensor<2x1x4xi1>
+}
+
+// -----
+
+func.func @reduce_all_constant() -> tensor<1x3xi1> {
+  // CHECK-LABEL: func.func @reduce_all_constant() -> tensor<1x3xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}true, false, true]]> : tensor<1x3xi1>}> : () -> tensor<1x3xi1>
+  // CHECK: return %[[VAL_0]] : tensor<1x3xi1>
+  // CHECK: }
+  %const = "tosa.const"() <{value = dense<[[true,true,true], [true,false,true]]> : tensor<2x3xi1>}> : () -> tensor<2x3xi1>
+  %0 = tosa.reduce_all %const {axis = 0 : i32} : (tensor<2x3xi1>) -> tensor<1x3xi1>
+  return %0 : tensor<1x3xi1>
+}
+
+// -----
+
+func.func @reduce_all_constant() -> tensor<2x1xi1> {
+  // CHECK-LABEL: func.func @reduce_all_constant() -> tensor<2x1xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}true], [false]]> : tensor<2x1xi1>}> : () -> tensor<2x1xi1>
+  // CHECK: return %[[VAL_0]] : tensor<2x1xi1>
+  // CHECK: }
+  %const = "tosa.const"() <{value = dense<[[true,true,true], [true,false,true]]> : tensor<2x3xi1>}> : () -> tensor<2x3xi1>
+  %0 = tosa.reduce_all %const {axis = 1 : i32} : (tensor<2x3xi1>) -> tensor<2x1xi1>
+  return %0 : tensor<2x1xi1>
+}
+
+// -----
+
+func.func @reduce_all_constant() -> tensor<3x1xi1> {
+  // CHECK-LABEL: func.func @reduce_all_constant() -> tensor<3x1xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<false> : tensor<3x1xi1>}> : () -> tensor<3x1xi1>
+  // CHECK: return %[[VAL_0]] : tensor<3x1xi1>
+  // CHECK: }
+  %const = "tosa.const"() <{value = dense<[[true, false, false], [false, false, false], [false, false, true]]> : tensor<3x3xi1>}> : () -> tensor<3x3xi1>
+  %0 = tosa.reduce_all %const {axis = 1 : i32} : (tensor<3x3xi1>) -> tensor<3x1xi1>
+  return %0 : tensor<3x1xi1>
+}
+
+// -----
+
+func.func @reduce_all_constant() -> tensor<2x1x4xi1> {
+  // CHECK-LABEL: func.func @reduce_all_constant() -> tensor<2x1x4xi1> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<false> : tensor<2x1x4xi1>}> : () -> tensor<2x1x4xi1>
+  // CHECK: return %[[VAL_0]] : tensor<2x1x4xi1>
+  // CHECK: }
+  %const = "tosa.const"() <{value = dense<[[[true, false, false, true], [false, false, true, false], [true, false, true, true]], [[false, false, false, false], [false, false, true, false], [true, false, true, false]]]> : tensor<2x3x4xi1>}> : () -> tensor<2x3x4xi1>
+  %0 = tosa.reduce_all %const {axis = 1 : i32} : (tensor<2x3x4xi1>) -> tensor<2x1x4xi1>
+  return %0 : tensor<2x1x4xi1>
+}
+
+// -----
+
+func.func @reduce_sum_constant() -> tensor<1x3xi32> {
+  // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<1x3xi32> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<2> : tensor<1x3xi32>}> : () -> tensor<1x3xi32>
+  // CHECK: return %[[VAL_0]] : tensor<1x3xi32>
+  // CHECK: }
+  %const = "tosa.const"() <{value = dense<1> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+  %0 = tosa.reduce_sum %const {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32>
+  return %0 : tensor<1x3xi32>
+}
+
+// -----
+
+func.func @reduce_sum_constant() -> tensor<1x3xi32> {
+  // CHECK-LABEL: func.func @reduce_sum_constant() -> tensor<1x3xi32> {
+  // CHECK: %[[VAL_0:.*]] = "tosa.const"() <{value = dense<{{\[\[}}1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+  // CHECK: %[[VAL_1:.*]] = "tosa.const"() <{value = dense<{{\[\[}}1, 2, 3], [4, 5, 7]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+  // CHECK: %[[VAL_2:.*]] = tosa.add %[[VAL_0]], %[[VAL_1]] : (tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32>
+  // CHECK: %[[VAL_3:.*]] = tosa.reduce_sum %[[VAL_2]] {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32>
+  // CHECK: return %[[VAL_3]] : tensor<1x3xi32>
+  %arg0 = "tosa.const"() <{value = dense<[[1,2,3], [4,5,6]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+  %arg1 = "tosa.const"() <{value = dense<[[1,2,3], [4,5,7]]> : tensor<2x3xi32>}> : () -> tensor<2x3xi32>
+  %arg2 = tosa.add %arg0, %arg1 : (tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32>
+  %0 = tosa.reduce_sum %arg2 {axis = 0 : i32} : (tensor<2x3xi32>) -> tensor<1x3xi32>
+  return %0 : tensor<1x3xi32>
+}
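
Note for reviewers (not part of the patch): the core of `ReduceConstantOptimization` is the linear-index arithmetic in `getPositionFromIndex`, `getIndexFromPosition`, and `calculateReducedValue`. Each element of the static-shape output is visited by linear index, mapped to a multi-dimensional position with the reduction axis pinned to 0, mapped back to a linear index into the input, and the input is then walked along the reduction axis using the row-major stride, combining elements with the op-specific `calcOneElement`. The sketch below reproduces that walk on a plain `std::vector` so the indexing scheme can be sanity-checked in isolation; the `reduceAxis` helper, the `std::plus` reducer, and the shapes are assumptions made for this example, not code from this change.

```cpp
// Illustrative sketch of the reduction indexing used by the pattern above,
// on plain std::vector data instead of ElementsAttr/APInt.
#include <cassert>
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Reduce `data` (row-major, shape `shape`) along `axis` with binary op `reduce`.
template <typename T, typename Fn>
std::vector<T> reduceAxis(const std::vector<T> &data,
                          const std::vector<int64_t> &shape, int64_t axis,
                          Fn reduce) {
  std::vector<int64_t> newShape(shape);
  newShape[axis] = 1;
  int64_t numOut = std::accumulate(newShape.begin(), newShape.end(),
                                   int64_t{1}, std::multiplies<int64_t>());
  // Stride between consecutive elements along the reduction axis.
  int64_t stride = std::accumulate(shape.begin() + axis + 1, shape.end(),
                                   int64_t{1}, std::multiplies<int64_t>());

  std::vector<T> out(numOut);
  for (int64_t outIndex = 0; outIndex < numOut; ++outIndex) {
    // Linear output index -> position (mirrors getPositionFromIndex), with the
    // reduction axis pinned to 0, then back to a linear input index
    // (mirrors getIndexFromPosition).
    std::vector<int64_t> position(shape.size(), 0);
    int64_t remaining = outIndex;
    for (int64_t i = shape.size() - 1; i >= 0; --i) {
      position[i] = remaining % newShape[i];
      remaining /= newShape[i];
    }
    position[axis] = 0;
    int64_t inIndex = 0;
    int64_t multiplier = 1;
    for (int64_t i = shape.size() - 1; i >= 0; --i) {
      inIndex += position[i] * multiplier;
      multiplier *= shape[i];
    }
    // Walk the reduction axis with the precomputed stride, folding as we go.
    T acc = data[inIndex];
    for (int64_t k = 1; k < shape[axis]; ++k)
      acc = reduce(acc, data[inIndex + k * stride]);
    out[outIndex] = acc;
  }
  return out;
}

int main() {
  // Mirrors the tensor<2x3xi32> reduce_sum test with axis = 1: [[1,2,3],[4,5,6]].
  std::vector<int32_t> data = {1, 2, 3, 4, 5, 6};
  auto sums = reduceAxis(data, {2, 3}, /*axis=*/1, std::plus<int32_t>());
  assert((sums == std::vector<int32_t>{6, 15}));
  std::cout << sums[0] << ", " << sums[1] << "\n"; // prints: 6, 15
}
```

Running the sketch prints `6, 15`, matching the `dense<[[6], [15]]>` expectation of the `tensor<2x1xi32>` reduce_sum test above.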