diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td @@ -208,7 +208,7 @@ }]>; //===----------------------------------------------------------------------===// -// TOSA Operator. +// TOSA Operator Class. //===----------------------------------------------------------------------===// class Tosa_Op traits = []> : @@ -221,6 +221,20 @@ ["inferReturnTypeComponents"]>, ResultsBroadcastableShape, Pure])> { + let assemblyFormat = + "operands attr-dict `:` functional-type(operands, results)"; +} + +class Tosa_InferTensorTypeOp traits = []> + : Tosa_Op { + let assemblyFormat = + "operands attr-dict `:` functional-type(operands, results)"; +} + +class Tosa_InferShapedTypeOp traits = []> + : Tosa_Op { + let assemblyFormat = + "operands attr-dict `:` functional-type(operands, results)"; } #endif // TOSA_OP_BASE diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td @@ -32,7 +32,7 @@ //===----------------------------------------------------------------------===// // Operator: argmax //===----------------------------------------------------------------------===// -def Tosa_ArgMaxOp : Tosa_Op<"argmax", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_ArgMaxOp : Tosa_InferShapedTypeOp<"argmax"> { let summary = "Perform argmax on the input."; let description = [{ @@ -59,7 +59,7 @@ //===----------------------------------------------------------------------===// // Operator: avg_pool2d //===----------------------------------------------------------------------===// -def Tosa_AvgPool2dOp : Tosa_Op<"avg_pool2d", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_AvgPool2dOp : Tosa_InferShapedTypeOp<"avg_pool2d"> { let summary = "Performs max pooling on the input."; let description = 
[{ @@ -89,7 +89,7 @@ //===----------------------------------------------------------------------===// // Operator: conv2d //===----------------------------------------------------------------------===// -def Tosa_Conv2DOp : Tosa_Op<"conv2d", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_Conv2DOp : Tosa_InferShapedTypeOp<"conv2d"> { let summary = "2D Convolution Operator"; let description = [{ @@ -119,7 +119,7 @@ //===----------------------------------------------------------------------===// // Operator: conv3d //===----------------------------------------------------------------------===// -def Tosa_Conv3DOp : Tosa_Op<"conv3d", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_Conv3DOp : Tosa_InferShapedTypeOp<"conv3d"> { let summary = "3D Convolution operator"; let description = [{ @@ -148,8 +148,7 @@ //===----------------------------------------------------------------------===// // Operator: depthwise_conv2d //===----------------------------------------------------------------------===// -def Tosa_DepthwiseConv2DOp : Tosa_Op<"depthwise_conv2d", - [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_DepthwiseConv2DOp : Tosa_InferShapedTypeOp<"depthwise_conv2d"> { let summary = "Depthwise 2D Convolution operator"; let description = [{ @@ -179,7 +178,7 @@ //===----------------------------------------------------------------------===// // Operator: fft2d //===----------------------------------------------------------------------===// -def Tosa_FFT2dOp : Tosa_Op<"fft2d", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_FFT2dOp : Tosa_InferShapedTypeOp<"fft2d"> { let summary = "Performs FFT2D operation on the input."; let description = [{ @@ -188,6 +187,12 @@ input_real and input_imag tensors. The resulting values in the output are split into the output_real and output_imag tensors. No normalization is applied on either the forward or inverse versions of the operation. 
+ + Example: + + ```mlir + %out_real, %out_imag = tosa.fft2d %in_real, %in_imag : (tensor<8x9xf32>, tensor<8x9xf32>) -> (tensor<8x9xf32>, tensor<8x9xf32>) + ``` }]; let arguments = (ins @@ -201,13 +206,17 @@ Tosa_Tensor3D:$output_real, Tosa_Tensor3D:$output_imag ); + + let assemblyFormat = [{ + $input_real `,` $input_imag attr-dict `:` `(` type($input_real) `,` + type($input_imag) `)` `->` `(` type($output_real) `,` type($output_imag) `)` + }]; } //===----------------------------------------------------------------------===// // Operator: fully_connected //===----------------------------------------------------------------------===// -def Tosa_FullyConnectedOp : Tosa_Op<"fully_connected", [ - InferShapedTypeOpAdaptor, Pure]> { +def Tosa_FullyConnectedOp : Tosa_InferShapedTypeOp<"fully_connected"> { let summary = "Fully Connected operator"; let description = [{ @@ -232,7 +241,7 @@ //===----------------------------------------------------------------------===// // Operator: matmul //===----------------------------------------------------------------------===// -def Tosa_MatMulOp : Tosa_Op<"matmul", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_MatMulOp : Tosa_InferShapedTypeOp<"matmul"> { let summary = "Matrix multiplication with bias"; let description = [{ @@ -257,7 +266,7 @@ //===----------------------------------------------------------------------===// // Operator: max_pool2d //===----------------------------------------------------------------------===// -def Tosa_MaxPool2dOp : Tosa_Op<"max_pool2d", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_MaxPool2dOp : Tosa_InferShapedTypeOp<"max_pool2d"> { let summary = "Performs max pooling on the input."; let description = [{ @@ -285,7 +294,7 @@ //===----------------------------------------------------------------------===// // Operator: rfft2d //===----------------------------------------------------------------------===// -def Tosa_RFFT2dOp : Tosa_Op<"rfft2d", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_RFFT2dOp : 
Tosa_InferShapedTypeOp<"rfft2d"> { let summary = "Performs RFFT2D operation on the input."; let description = [{ @@ -295,6 +304,12 @@ tensor arguments. RFFT2D takes advantage of Hermitian symmetry to only calculate the first half of the final output axis. Imaginary values with locations (0,0), (0,W/2), (H/2,0) and (H/2,W/2) are zero. + + Example: + + ```mlir + %real, %imag = tosa.rfft2d %in : (tensor<8x16xf32>) -> (tensor<8x9xf32>, tensor<8x9xf32>) + ``` }]; let arguments = (ins @@ -305,13 +320,16 @@ Tosa_Tensor3D:$output_real, Tosa_Tensor3D:$output_imag ); + + let assemblyFormat = [{ + $input attr-dict `:` `(` type($input) `)` `->` `(` type($output_real) `,` type($output_imag) `)` + }]; } //===----------------------------------------------------------------------===// // Operator: transpose_conv2d //===----------------------------------------------------------------------===// -def Tosa_TransposeConv2DOp : Tosa_Op<"transpose_conv2d", - [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_TransposeConv2DOp : Tosa_InferShapedTypeOp<"transpose_conv2d"> { let summary = "Transpose 2D Convolution operator."; let description = [{ @@ -416,7 +434,6 @@ ); } - //===----------------------------------------------------------------------===// // Operator: erf //===----------------------------------------------------------------------===// @@ -440,6 +457,8 @@ let results = (outs Tosa_Tensor:$output ); + + let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)"; } //===----------------------------------------------------------------------===// @@ -459,6 +478,16 @@ let description = [{ Elementwise addition of input1 and input2. Axis of size 1 will be broadcast, as necessary. + + Example: + + ```mlir + // Elementwise addition. + %out = tosa.add %in1, %in2 : tensor<12x6xf32>, tensor<12x6xf32> -> tensor<12x6xf32> + + // Elementwise addition with broadcasting. 
+ %out = tosa.add %in1, %in2 : tensor<12x6xsi32>, tensor<1x1xsi32> -> tensor<12x6xsi32> + ``` }]; let arguments = (ins @@ -826,7 +855,7 @@ //===----------------------------------------------------------------------===// // Operator: table //===----------------------------------------------------------------------===// -def Tosa_TableOp : Tosa_Op<"table", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_TableOp : Tosa_InferShapedTypeOp<"table"> { let summary = "Table lookup op"; let description = [{ @@ -855,6 +884,9 @@ Tosa_Tensor:$output ); + let assemblyFormat = [{ + $input `,` $table attr-dict `:` `(` type($input) `,` type($table) `)` `->` type($output) + }]; } //===----------------------------------------------------------------------===// @@ -871,6 +903,12 @@ let description = [{ Elementwise absolute value operation + + Example: + + ```mlir + %out = tosa.abs(%in) : (tensor<21x3xf32>) -> tensor<21x3xf32> + ``` }]; let arguments = (ins @@ -1117,6 +1155,11 @@ ); let hasCanonicalizeMethod = 1; let hasFolder = 1; + + let assemblyFormat = [{ + operands attr-dict `:` `(` type($pred) `,` type($on_true) `,` type($on_false) + `)` `->` type($output) + }]; } //===----------------------------------------------------------------------===// @@ -1208,8 +1251,7 @@ //===----------------------------------------------------------------------===// // Operator: reduce_all //===----------------------------------------------------------------------===// -def Tosa_ReduceAllOp : Tosa_Op<"reduce_all", [ - InferTensorTypeAdaptor, Pure]> { +def Tosa_ReduceAllOp : Tosa_InferTensorTypeOp<"reduce_all"> { let summary = "Reduce All operator"; let description = [{ @@ -1237,8 +1279,7 @@ //===----------------------------------------------------------------------===// // Operator: reduce_any //===----------------------------------------------------------------------===// -def Tosa_ReduceAnyOp : Tosa_Op<"reduce_any", [ - InferTensorTypeAdaptor, Pure]> { +def Tosa_ReduceAnyOp : 
Tosa_InferTensorTypeOp<"reduce_any"> { let summary = "Reduce Any operator"; let description = [{ @@ -1266,8 +1307,7 @@ //===----------------------------------------------------------------------===// // Operator: reduce_max //===----------------------------------------------------------------------===// -def Tosa_ReduceMaxOp : Tosa_Op<"reduce_max", [ - InferTensorTypeAdaptor, Pure]> { +def Tosa_ReduceMaxOp : Tosa_InferTensorTypeOp<"reduce_max"> { let summary = "Reduce Max operator"; let description = [{ @@ -1295,8 +1335,7 @@ //===----------------------------------------------------------------------===// // Operator: reduce_min //===----------------------------------------------------------------------===// -def Tosa_ReduceMinOp : Tosa_Op<"reduce_min", [ - InferTensorTypeAdaptor, Pure]> { +def Tosa_ReduceMinOp : Tosa_InferTensorTypeOp<"reduce_min"> { let summary = "Reduce Min operator"; let description = [{ @@ -1324,8 +1363,7 @@ //===----------------------------------------------------------------------===// // Operator: reduce_prod //===----------------------------------------------------------------------===// -def Tosa_ReduceProdOp : Tosa_Op<"reduce_prod", [ - InferTensorTypeAdaptor, Pure]> { +def Tosa_ReduceProdOp : Tosa_InferTensorTypeOp<"reduce_prod"> { let summary = "Reduce Prod operator"; let description = [{ @@ -1353,8 +1391,7 @@ //===----------------------------------------------------------------------===// // Operator: reduce_sum //===----------------------------------------------------------------------===// -def Tosa_ReduceSumOp : Tosa_Op<"reduce_sum", [ - InferTensorTypeAdaptor, Pure]> { +def Tosa_ReduceSumOp : Tosa_InferTensorTypeOp<"reduce_sum"> { let summary = "Reduce Sum operator"; let description = [{ @@ -1387,8 +1424,7 @@ //===----------------------------------------------------------------------===// // Operator: concat //===----------------------------------------------------------------------===// -def Tosa_ConcatOp : Tosa_Op<"concat", [ - 
InferTensorTypeAdaptor, Pure]> { +def Tosa_ConcatOp : Tosa_InferTensorTypeOp<"concat"> { let summary = "Concatenates tensors along one dimension."; let description = [{ @@ -1418,7 +1454,7 @@ //===----------------------------------------------------------------------===// // Operator: pad //===----------------------------------------------------------------------===// -def Tosa_PadOp : Tosa_Op<"pad", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_PadOp : Tosa_InferShapedTypeOp<"pad"> { let summary = "Pads a tensor with value specified."; let description = [{ @@ -1430,14 +1466,14 @@ ```mlir %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32> - "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<4x9xf32>) + tosa.pad %arg0, %0 : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<4x9xf32>) ``` Example 2: ```mlir %0 = arith.constant dense<[[-1, 2], [3, 4]]> : tensor<2x2xi32> - "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor) + tosa.pad %arg0, %0 : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor) ``` }]; @@ -1462,8 +1498,7 @@ //===----------------------------------------------------------------------===// // Operator: reshape //===----------------------------------------------------------------------===// -def Tosa_ReshapeOp: Tosa_Op<"reshape", [ - InferTensorTypeAdaptor, Pure]> { +def Tosa_ReshapeOp : Tosa_InferTensorTypeOp<"reshape"> { let summary = "Reshape operator"; let description = [{ @@ -1489,6 +1524,8 @@ /// Method used by InferTypeOpInterface. 
static bool isCompatibleReturnTypes(TypeRange l, TypeRange r); }]; + + let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)"; } //===----------------------------------------------------------------------===// @@ -1515,12 +1552,14 @@ ); let hasFolder = 1; + + let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)"; } //===----------------------------------------------------------------------===// // Operator: slice //===----------------------------------------------------------------------===// -def Tosa_SliceOp: Tosa_Op<"slice", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_SliceOp : Tosa_InferShapedTypeOp<"slice"> { let summary = "Slice operator"; let description = [{ @@ -1546,7 +1585,7 @@ //===----------------------------------------------------------------------===// // Operator: tile //===----------------------------------------------------------------------===// -def Tosa_TileOp: Tosa_Op<"tile", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_TileOp : Tosa_InferShapedTypeOp<"tile"> { let summary = "Tile operator"; let description = [{ @@ -1567,7 +1606,7 @@ //===----------------------------------------------------------------------===// // Operator: transpose //===----------------------------------------------------------------------===// -def Tosa_TransposeOp : Tosa_Op<"transpose", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_TransposeOp : Tosa_InferShapedTypeOp<"transpose"> { let summary = "Transpose operator"; let description = [{ @@ -1599,7 +1638,7 @@ //===----------------------------------------------------------------------===// // Operator: gather //===----------------------------------------------------------------------===// -def Tosa_GatherOp : Tosa_Op<"gather", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_GatherOp : Tosa_InferShapedTypeOp<"gather"> { let summary = "Gather operation,"; let description = [{ @@ -1620,7 +1659,7 @@ 
//===----------------------------------------------------------------------===// // Operator: scatter //===----------------------------------------------------------------------===// -def Tosa_ScatterOp : Tosa_Op<"scatter", [InferShapedTypeOpAdaptor, Pure]> { +def Tosa_ScatterOp : Tosa_InferShapedTypeOp<"scatter"> { let summary = "Scatter operation,"; let description = [{ @@ -1647,8 +1686,7 @@ //===----------------------------------------------------------------------===// // Operator: resize //===----------------------------------------------------------------------===// -def Tosa_ResizeOp : Tosa_Op<"resize", [InferShapedTypeOpAdaptor, Pure]> { - +def Tosa_ResizeOp : Tosa_InferShapedTypeOp<"resize"> { let summary = "Resize operation, supports various resize/upsample modes"; let description = [{ @@ -1722,6 +1760,8 @@ Tosa_Tensor_Plus_F64:$output ); + let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)"; + let hasFolder = 1; } @@ -1766,6 +1806,8 @@ let results = (outs Tosa_Tensor:$output ); + + let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)"; } //===----------------------------------------------------------------------===// @@ -1784,6 +1826,13 @@ let description = [{ A node containing constant data for use as the input to an operation. May hold data in any of the supported data formats. + + Example: + + ```mlir + // Generic form + %out = "tosa.const"() {value = dense<0> : tensor<2x3xi32>} : () -> tensor<2x3xi32> + ``` }]; let arguments = (ins @@ -1793,6 +1842,7 @@ let results = (outs TensorOf<[AnyTypeOf<[Tosa_AnyNumber_Plus_F64, Tosa_Int4]>]>:$output ); + let hasFolder = 1; } @@ -1815,6 +1865,8 @@ let results = (outs Tosa_Tensor:$output ); + + let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)"; } //===----------------------------------------------------------------------===// @@ -1848,6 +1900,13 @@ `outputs is the list of tensors returned by the operator. 
The number of operators is backend specific. + + Example: + + ```mlir + %out = tosa.custom %in {config = "tosa_mlir_test", identifier = "custom_test", + implementation_attrs = ""} : (tensor<10xi32>) -> (tensor<10xi32>) + ``` }]; let arguments = (ins @@ -1860,6 +1919,8 @@ let results = (outs Variadic:$outputs ); + + let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)"; } //===----------------------------------------------------------------------===// @@ -1897,6 +1958,8 @@ SizedRegion<1>:$then_branch, SizedRegion<1>:$else_branch ); + + let hasCustomAssemblyFormat = 1; } //===----------------------------------------------------------------------===// @@ -1931,6 +1994,8 @@ SizedRegion<1>:$cond, SizedRegion<1>:$body ); + + let hasCustomAssemblyFormat = 1; } include "mlir/Dialect/Tosa/IR/TosaUtilOps.td" diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaUtilOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaUtilOps.td --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaUtilOps.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaUtilOps.td @@ -54,6 +54,8 @@ let extraClassDeclaration = [{ std::optional> getShapeForUnroll(); }]; + + let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)"; } //===----------------------------------------------------------------------===// @@ -73,6 +75,8 @@ let arguments = (ins Variadic:$inputs ); + + let assemblyFormat = "$inputs attr-dict `:` type($inputs)"; } #endif // TOSA_UTIL_OPS diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp --- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp @@ -1493,6 +1493,134 @@ return std::nullopt; } +// parse and print of IfOp refer to the implementation of SCF dialect. +ParseResult IfOp::parse(OpAsmParser &parser, OperationState &result) { + // Create the regions for 'then'. 
+ result.regions.reserve(2); + Region *thenRegion = result.addRegion(); + Region *elseRegion = result.addRegion(); + + auto &builder = parser.getBuilder(); + OpAsmParser::UnresolvedOperand cond; + // Create a i1 tensor type for the boolean condition. + Type i1Type = RankedTensorType::get({}, builder.getIntegerType(1)); + if (parser.parseOperand(cond) || + parser.resolveOperand(cond, i1Type, result.operands)) + return failure(); + // Parse optional results type list. + if (parser.parseOptionalArrowTypeList(result.types)) + return failure(); + // Parse the 'then' region. + if (parser.parseRegion(*thenRegion, /*arguments=*/{}, /*argTypes=*/{})) + return failure(); + + // If we find an 'else' keyword then parse the 'else' region. + if (!parser.parseOptionalKeyword("else")) { + if (parser.parseRegion(*elseRegion, /*arguments=*/{}, /*argTypes=*/{})) + return failure(); + } + + // Parse the optional attribute list. + if (parser.parseOptionalAttrDict(result.attributes)) + return failure(); + return success(); +} + +void IfOp::print(OpAsmPrinter &p) { + bool printBlockTerminators = false; + + p << " " << getCond(); + if (!getResults().empty()) { + p << " -> (" << getResultTypes() << ")"; + // Print yield explicitly if the op defines values. + printBlockTerminators = true; + } + p << ' '; + p.printRegion(getThenBranch(), + /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/printBlockTerminators); + + // Print the 'else' regions if it exists and has a block. + auto &elseRegion = getElseBranch(); + if (!elseRegion.empty()) { + p << " else "; + p.printRegion(elseRegion, + /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/printBlockTerminators); + } + + p.printOptionalAttrDict((*this)->getAttrs()); +} + +// parse and print of WhileOp refer to the implementation of SCF dialect. 
+ParseResult WhileOp::parse(OpAsmParser &parser, OperationState &result) { + SmallVector regionArgs; + SmallVector operands; + Region *cond = result.addRegion(); + Region *body = result.addRegion(); + + OptionalParseResult listResult = + parser.parseOptionalAssignmentList(regionArgs, operands); + if (listResult.has_value() && failed(listResult.value())) + return failure(); + + FunctionType functionType; + SMLoc typeLoc = parser.getCurrentLocation(); + if (failed(parser.parseColonType(functionType))) + return failure(); + + result.addTypes(functionType.getResults()); + + if (functionType.getNumInputs() != operands.size()) { + return parser.emitError(typeLoc) + << "expected as many input types as operands " + << "(expected " << operands.size() << " got " + << functionType.getNumInputs() << ")"; + } + + // Resolve input operands. + if (failed(parser.resolveOperands(operands, functionType.getInputs(), + parser.getCurrentLocation(), + result.operands))) + return failure(); + + // Propagate the types into the region arguments. 
+ for (size_t i = 0, e = regionArgs.size(); i != e; ++i) + regionArgs[i].type = functionType.getInput(i); + + return failure(parser.parseRegion(*cond, regionArgs) || + parser.parseKeyword("do") || parser.parseRegion(*body) || + parser.parseOptionalAttrDictWithKeyword(result.attributes)); +} + +static void printInitializationList(OpAsmPrinter &parser, + Block::BlockArgListType blocksArgs, + ValueRange initializers, + StringRef prefix = "") { + assert(blocksArgs.size() == initializers.size() && + "expected same length of arguments and initializers"); + if (initializers.empty()) + return; + + parser << prefix << '('; + llvm::interleaveComma( + llvm::zip(blocksArgs, initializers), parser, + [&](auto it) { parser << std::get<0>(it) << " = " << std::get<1>(it); }); + parser << ")"; +} + +void WhileOp::print(OpAsmPrinter &parser) { + printInitializationList(parser, getCond().front().getArguments(), getInputs(), + " "); + parser << " : "; + parser.printFunctionalType(getInputs().getTypes(), getResults().getTypes()); + parser << ' '; + parser.printRegion(getCond(), /*printEntryBlockArgs=*/false); + parser << " do "; + parser.printRegion(getBody()); + parser.printOptionalAttrDictWithKeyword((*this)->getAttrs()); +} + //===----------------------------------------------------------------------===// // TOSA Attribute Definitions. 
//===----------------------------------------------------------------------===// diff --git a/mlir/test/Conversion/TosaToArith/tosa-to-arith.mlir b/mlir/test/Conversion/TosaToArith/tosa-to-arith.mlir --- a/mlir/test/Conversion/TosaToArith/tosa-to-arith.mlir +++ b/mlir/test/Conversion/TosaToArith/tosa-to-arith.mlir @@ -13,7 +13,7 @@ // ----- // CHECK-LABEL: @apply_scale_test_i32 -// SCALE: "tosa.apply_scale" +// SCALE: tosa.apply_scale func.func @apply_scale_test_i32(%arg0 : i32, %arg1 : i32, %arg2 : i8) -> (i32) { // CHECK-DAG: %[[S32:.+]] = arith.extui %arg2 : i8 to i32 // CHECK-DAG: %[[C0:.+]] = arith.constant 0 : i32 @@ -67,24 +67,24 @@ // CHECK-DAG: %[[LOWALIGN:.+]] = arith.select %[[OVER31]], %[[C0]], %[[LOR]] // CHECK-DAG: %[[RESULT:.+]] = arith.addi %[[LOWALIGN]], %[[HIALIGN]] // CHECK: return %[[RESULT]] - %res = "tosa.apply_scale"(%arg0, %arg1, %arg2) {double_round = true} : (i32, i32, i8) -> i32 + %res = tosa.apply_scale %arg0, %arg1, %arg2 {double_round = true} : (i32, i32, i8) -> i32 return %res : i32 } // ----- // CHECK-LABEL: @apply_scale_test_vector -// SCALE: "tosa.apply_scale" +// SCALE: tosa.apply_scale func.func @apply_scale_test_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>, %arg2 : vector<4xi8>) -> (vector<4xi32>) { // CHECK-NOT: "tosa.apply_scale" - %res = "tosa.apply_scale"(%arg0, %arg1, %arg2) {double_round = true} : (vector<4xi32>, vector<4xi32>, vector<4xi8>) -> vector<4xi32> + %res = tosa.apply_scale %arg0, %arg1, %arg2 {double_round = true} : (vector<4xi32>, vector<4xi32>, vector<4xi8>) -> vector<4xi32> return %res : vector<4xi32> } // ----- // CHECK-LABEL: @apply_scale_test_i48 -// SCALE: "tosa.apply_scale" +// SCALE: tosa.apply_scale func.func @apply_scale_test_i48(%arg0 : i48, %arg1 : i32, %arg2 : i8) -> (i32) { // CHECK-DAG: %[[C0:.+]] = arith.constant 0 : i48 // CHECK-DAG: %[[C1:.+]] = arith.constant 1 : i64 @@ -115,6 +115,6 @@ // CHECK-DAG: %[[SHR:.+]] = arith.shrsi %[[RES64]], %[[S64]] // CHECK-DAG: %[[TRUNC:.+]] = 
arith.trunci %[[SHR]] : i64 to i32 // CHECK: return %[[TRUNC]] - %res = "tosa.apply_scale"(%arg0, %arg1, %arg2) {double_round = true} : (i48, i32, i8) -> i32 + %res = tosa.apply_scale %arg0, %arg1, %arg2 {double_round = true} : (i48, i32, i8) -> i32 return %res : i32 } diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir @@ -6,7 +6,7 @@ // CHECK: [[INIT:%.+]] = tensor.empty() // CHECK: [[FILLED:%.+]] = linalg.fill ins([[C0]] : f32) outs([[INIT]] : tensor<1x5x6xf32>) -> tensor<1x5x6xf32> // CHECK: linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x5x3xf32>, tensor<1x3x6xf32>) outs([[FILLED]] : tensor<1x5x6xf32>) -> tensor<1x5x6xf32> - %0 = "tosa.matmul"(%arg0, %arg1) : (tensor<1x5x3xf32>, tensor<1x3x6xf32>) -> (tensor<1x5x6xf32>) + %0 = tosa.matmul %arg0, %arg1 : (tensor<1x5x3xf32>, tensor<1x3x6xf32>) -> tensor<1x5x6xf32> return %0 : tensor<1x5x6xf32> } @@ -21,7 +21,7 @@ // CHECK: [[ONE:%.+]] = arith.constant 1 // CHECK: [[TWO:%.+]] = arith.constant 2 // CHECK: linalg.quantized_batch_matmul ins(%arg0, %arg1, [[ONE]], [[TWO]] : tensor<1x5x3xi8>, tensor<1x3x6xi8>, i32, i32) outs([[FILLED]] : tensor<1x5x6xi32>) -> tensor<1x5x6xi32> - %0 = "tosa.matmul"(%arg0, %arg1) {quantization_info = #tosa.matmul_quant} : (tensor<1x5x3xi8>, tensor<1x3x6xi8>) -> (tensor<1x5x6xi32>) + %0 = tosa.matmul %arg0, %arg1 {quantization_info = #tosa.matmul_quant} : (tensor<1x5x3xi8>, tensor<1x3x6xi8>) -> tensor<1x5x6xi32> return %0 : tensor<1x5x6xi32> } @@ -35,7 +35,7 @@ // CHECK: %[[INIT:.+]] = tensor.empty(%[[DIM]]) // CHECK: %[[FILLED:.+]] = linalg.fill ins(%[[C0_0]] : f32) outs(%[[INIT]] : tensor) -> tensor // CHECK: linalg.batch_matmul ins(%arg0, %arg1 : tensor, tensor) outs(%[[FILLED]] : tensor) -> tensor - %0 = "tosa.matmul"(%arg0, %arg1) : (tensor, tensor) -> (tensor) + %0 = 
tosa.matmul %arg0, %arg1 : (tensor, tensor) -> tensor return %0 : tensor } @@ -49,7 +49,7 @@ // CHECK: %[[INIT:.+]] = tensor.empty(%[[DIM]]) // CHECK: %[[FILLED:.+]] = linalg.fill ins(%[[C0]] : f32) outs(%[[INIT]] : tensor<1x5x?xf32>) -> tensor<1x5x?xf32> // CHECK: linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x5x3xf32>, tensor<1x3x?xf32>) outs(%[[FILLED]] : tensor<1x5x?xf32>) -> tensor<1x5x?xf32> - %0 = "tosa.matmul"(%arg0, %arg1) : (tensor<1x5x3xf32>, tensor<1x3x?xf32>) -> (tensor<1x5x?xf32>) + %0 = tosa.matmul %arg0, %arg1 : (tensor<1x5x3xf32>, tensor<1x3x?xf32>) -> tensor<1x5x?xf32> return %0 : tensor<1x5x?xf32> } @@ -61,7 +61,7 @@ // CHECK: %[[INIT:.+]] = tensor.empty() // CHECK: %[[FILLED:.+]] = linalg.fill ins(%[[C0]] : f32) outs(%[[INIT]] : tensor<1x5x6xf32>) -> tensor<1x5x6xf32> // CHECK: linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x5x?xf32>, tensor<1x?x6xf32>) outs(%[[FILLED]] : tensor<1x5x6xf32>) -> tensor<1x5x6xf32> - %0 = "tosa.matmul"(%arg0, %arg1) : (tensor<1x5x?xf32>, tensor<1x?x6xf32>) -> (tensor<1x5x6xf32>) + %0 = tosa.matmul %arg0, %arg1 : (tensor<1x5x?xf32>, tensor<1x?x6xf32>) -> tensor<1x5x6xf32> return %0 : tensor<1x5x6xf32> } @@ -76,7 +76,7 @@ // CHECK: [[ZERO:%.+]] = arith.constant 0 // CHECK: [[FILL:%.+]] = linalg.fill ins([[ZERO]]{{.*}}outs([[INITT]] // CHECK: [[PERM:%.+]] = arith.constant dense<[1, 0]> - // CHECK: [[TRANSPOSE:%.+]] = "tosa.transpose"(%arg1, [[PERM]]) + // CHECK: [[TRANSPOSE:%.+]] = tosa.transpose %arg1, [[PERM]] // CHECK: [[INITB:%.+]] = tensor.empty() // CHECK: [[MATMUL:%.+]] = linalg.matmul ins(%arg0, [[TRANSPOSE]] : tensor<5x3xf32>, tensor<3x6xf32>) outs([[FILL]] : tensor<5x6xf32>) -> tensor<5x6xf32> // CHECK: [[ADDED:%.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP2]]], iterator_types = ["parallel", "parallel"]} ins(%arg2, [[MATMUL]] : tensor<6xf32>, tensor<5x6xf32>) outs([[INITB]] : tensor<5x6xf32>) { @@ -84,7 +84,7 @@ // CHECK: [[ADD:%.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32 // CHECK: 
linalg.yield [[ADD]] : f32 - %0 = "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor<5x3xf32>, tensor<6x3xf32>, tensor<6xf32>) -> (tensor<5x6xf32>) + %0 = tosa.fully_connected %arg0, %arg1, %arg2 : (tensor<5x3xf32>, tensor<6x3xf32>, tensor<6xf32>) -> tensor<5x6xf32> return %0 : tensor<5x6xf32> } @@ -99,7 +99,7 @@ // CHECK: [[ZERO:%.+]] = arith.constant 0 // CHECK: [[FILL:%.+]] = linalg.fill ins([[ZERO]]{{.*}}outs([[INITT]] // CHECK: [[PERM:%.+]] = arith.constant dense<[1, 0]> - // CHECK: [[TRANSPOSE:%.+]] = "tosa.transpose"(%arg1, [[PERM]]) + // CHECK: [[TRANSPOSE:%.+]] = tosa.transpose %arg1, [[PERM]] // CHECK: [[INITB:%.+]] = tensor.empty() // CHECK: [[ONE:%.+]] = arith.constant 1 // CHECK: [[TWO:%.+]] = arith.constant 2 @@ -108,7 +108,7 @@ // CHECK: ^bb0([[IN1:%.+]]: i32, [[IN2:%.+]]: i32, [[UNUSED:%.+]]: i32): // CHECK: [[ADD:%.+]] = arith.addi // CHECK: linalg.yield [[ADD]] : i32 - %0 = "tosa.fully_connected"(%arg0, %arg1, %arg2) {quantization_info = #tosa.conv_quant} : (tensor<5x3xi8>, tensor<6x3xi8>, tensor<6xi32>) -> (tensor<5x6xi32>) + %0 = tosa.fully_connected %arg0, %arg1, %arg2 {quantization_info = #tosa.conv_quant} : (tensor<5x3xi8>, tensor<6x3xi8>, tensor<6xi32>) -> tensor<5x6xi32> return %0 : tensor<5x6xi32> } @@ -125,7 +125,7 @@ // CHECK: %[[ZERO:.+]] = arith.constant 0 // CHECK: %[[FILL:.+]] = linalg.fill ins(%[[ZERO]]{{.*}}outs(%[[INITT]] // CHECK: %[[PERM:.+]] = arith.constant dense<[1, 0]> - // CHECK: %[[TRANSPOSE:.+]] = "tosa.transpose"(%arg1, %[[PERM]]) + // CHECK: %[[TRANSPOSE:.+]] = tosa.transpose %arg1, %[[PERM]] // CHECK: %[[INITB:.+]] = tensor.empty(%[[DIM]]) // CHECK: %[[MATMUL:.+]] = linalg.matmul ins(%arg0, %[[TRANSPOSE]] : tensor, tensor<3x6xf32>) outs(%[[FILL]] : tensor) -> tensor // CHECK: %[[ADDED:.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP2]]], iterator_types = ["parallel", "parallel"]} ins(%arg2, %[[MATMUL]] : tensor<6xf32>, tensor) outs(%[[INITB]] : tensor) { @@ -133,7 +133,7 @@ // CHECK: 
%[[ADD:.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32 // CHECK: linalg.yield %[[ADD]] : f32 - %0 = "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor, tensor<6x3xf32>, tensor<6xf32>) -> (tensor) + %0 = tosa.fully_connected %arg0, %arg1, %arg2 : (tensor, tensor<6x3xf32>, tensor<6xf32>) -> tensor return %0 : tensor } @@ -146,7 +146,7 @@ // CHECK-DAG: [[FILL:%.+]] = linalg.fill ins([[CONST]]{{.*}}outs([[INIT]] // CHECK-DAG: [[KERNEL:%.+]] = tensor.empty() // CHECK: linalg.pooling_nhwc_max {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg0, [[KERNEL]] : tensor<1x6x34x62xf32>, tensor<3x3xf32>) outs([[FILL]] : tensor<1x4x32x62xf32>) - %0 = "tosa.max_pool2d"(%arg0) {pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xf32>) -> (tensor<1x4x32x62xf32>) + %0 = tosa.max_pool2d %arg0 {pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xf32>) -> tensor<1x4x32x62xf32> return } @@ -160,7 +160,7 @@ // CHECK-DAG: [[FILL:%.+]] = linalg.fill ins([[INITVAL]]{{.*}}outs([[INIT]] // CHECK-DAG: [[KERNEL:%.+]] = tensor.empty() // CHECK: linalg.pooling_nhwc_max {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins([[PAD]], [[KERNEL]] : tensor<1x6x35x62xf32>, tensor<3x3xf32>) outs([[FILL]] : tensor<1x4x33x62xf32>) - %0 = "tosa.max_pool2d"(%arg0) {pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xf32>) -> (tensor<1x4x33x62xf32>) + %0 = tosa.max_pool2d %arg0 {pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xf32>) -> tensor<1x4x33x62xf32> return } @@ -173,7 +173,7 @@ // CHECK: %[[FILL:.+]] = linalg.fill ins(%[[CONST]]{{.*}}outs(%[[INIT]] // CHECK: %[[KERNEL:.+]] = tensor.empty() // CHECK: linalg.pooling_nhwc_max {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg0, %[[KERNEL]] : tensor, tensor<3x3xf32>) outs(%[[FILL]] : tensor) - %0 = "tosa.max_pool2d"(%arg0) {pad = array, kernel = array, stride = array} : (tensor) -> (tensor) + %0 = tosa.max_pool2d 
%arg0 {pad = array, kernel = array, stride = array} : (tensor) -> tensor return } @@ -181,7 +181,7 @@ func.func @max_pool_i8(%arg0: tensor<1x6x34x62xi8>) -> () { // CHECK: arith.constant -128 // CHECK: linalg.pooling_nhwc_max - %0 = "tosa.max_pool2d"(%arg0) {pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xi8>) -> (tensor<1x4x32x62xi8>) + %0 = tosa.max_pool2d %arg0 {pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xi8>) -> tensor<1x4x32x62xi8> return } @@ -189,7 +189,7 @@ func.func @max_pool_i16(%arg0: tensor<1x6x34x62xi16>) -> () { // CHECK: arith.constant -32768 // CHECK: linalg.pooling_nhwc_max - %0 = "tosa.max_pool2d"(%arg0) {pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xi16>) -> (tensor<1x4x32x62xi16>) + %0 = tosa.max_pool2d %arg0 {pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xi16>) -> tensor<1x4x32x62xi16> return } @@ -197,7 +197,7 @@ func.func @max_pool_i32(%arg0: tensor<1x6x34x62xi32>) -> () { // CHECK: arith.constant -2147483648 // CHECK: linalg.pooling_nhwc_max - %0 = "tosa.max_pool2d"(%arg0) {pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xi32>) -> (tensor<1x4x32x62xi32>) + %0 = tosa.max_pool2d %arg0 {pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xi32>) -> tensor<1x4x32x62xi32> return } @@ -286,7 +286,7 @@ // CHECK: %[[FLT:.+]] = arith.sitofp %[[CAST]] // CHECK: %[[DIV:.+]] = arith.divf %[[IN]], %[[FLT]] // CHECK: linalg.yield %[[DIV]] - %0 = "tosa.avg_pool2d"(%arg0) {acc_type = f32, pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>) + %0 = tosa.avg_pool2d %arg0 {acc_type = f32, pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xf32>) -> tensor<1x5x33x62xf32> return %0 : tensor<1x5x33x62xf32> } @@ -318,7 +318,7 @@ // CHECK: %[[TRUNC_SHIFT:.+]] = arith.trunci %[[SUB]] // CHECK: %[[C30:.+]] = arith.constant 30 // CHECK: %[[SHIFT:.+]] = arith.addi %[[TRUNC_SHIFT]], %[[C30]] : i8 - // CHECK: 
%[[SCALED:.+]] = "tosa.apply_scale"(%[[IN]], %[[TRUNC_MUL]], %[[SHIFT]]) <{double_round = false}> + // CHECK: %[[SCALED:.+]] = tosa.apply_scale %[[IN]], %[[TRUNC_MUL]], %[[SHIFT]] {double_round = false} // Perform the normalization. // CHECK: %[[CMIN:.+]] = arith.constant -128 @@ -329,7 +329,7 @@ // CHECK: %[[CLAMP:.+]] = arith.select %[[CMP]], %[[CMAX]], %[[SEL]] // CHECK: %[[TRUNC:.+]] = arith.trunci %[[CLAMP]] // CHECK: linalg.yield %[[TRUNC]] - %0 = "tosa.avg_pool2d"(%arg0) {acc_type = i32, pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xi8>) -> (tensor<1x5x33x62xi8>) + %0 = tosa.avg_pool2d %arg0 {acc_type = i32, pad = array, kernel = array, stride = array} : (tensor<1x6x34x62xi8>) -> tensor<1x5x33x62xi8> return %0 : tensor<1x5x33x62xi8> } @@ -352,7 +352,7 @@ // CHECK-SAME: outs(%[[FILL]] : tensor) -> tensor // CHECK: %[[EMPTY:.+]] = tensor.empty(%[[BATCH]]) : tensor // CHECK: %[[GENERIC:.+]] = linalg.generic - %0 = "tosa.avg_pool2d"(%arg0) {acc_type = f32, pad = array, kernel = array, stride = array} : (tensor) -> (tensor) + %0 = tosa.avg_pool2d %arg0 {acc_type = f32, pad = array, kernel = array, stride = array} : (tensor) -> tensor return %0 : tensor } @@ -364,7 +364,7 @@ // CHECK-LABEL: @conv2d_i8 func.func @conv2d_i8(%input: tensor<1x49x42x27xi8>, %weights: tensor<28x1x1x27xi8>, %bias: tensor<28xi8>) -> () { // CHECK: %[[PERM:.+]] = arith.constant dense<[1, 2, 3, 0]> - // CHECK: %[[W:.+]] = "tosa.transpose"(%arg1, %[[PERM]]) + // CHECK: %[[W:.+]] = tosa.transpose %arg1, %[[PERM]] // CHECK: %[[M_IN:.+]] = tensor.empty() // CHECK: %[[CST:.+]] = arith.constant 0 // CHECK: %[[FILL:.+]] = linalg.fill @@ -374,7 +374,7 @@ // CHECK: arith.extsi // CHECK: arith.addi // CHECK: linalg.yield - %0 = "tosa.conv2d"(%input, %weights, %bias) {dilation = array, pad = array, quantization_info = #tosa.conv_quant, stride = array} : (tensor<1x49x42x27xi8>, tensor<28x1x1x27xi8>, tensor<28xi8>) -> (tensor<1x45x40x28xi32>) + %0 = tosa.conv2d %input, %weights, %bias 
{dilation = array, pad = array, quantization_info = #tosa.conv_quant, stride = array} : (tensor<1x49x42x27xi8>, tensor<28x1x1x27xi8>, tensor<28xi8>) -> tensor<1x45x40x28xi32> return } @@ -386,7 +386,7 @@ // CHECK-LABEL: @conv2d_f32 func.func @conv2d_f32(%input: tensor<1x49x42x27xf32>, %weights: tensor<28x3x3x27xf32>, %bias: tensor<28xf32>) -> () { // CHECK: %[[PERM:.+]] = arith.constant dense<[1, 2, 3, 0]> - // CHECK: %[[W:.+]] = "tosa.transpose"(%arg1, %[[PERM]]) + // CHECK: %[[W:.+]] = tosa.transpose %arg1, %[[PERM]] // CHECK: %[[M_IN:.+]] = tensor.empty() // CHECK: %[[CST:.+]] = arith.constant 0 // CHECK: %[[FILL:.+]] = linalg.fill @@ -395,7 +395,7 @@ // CHECK: %[[B:.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP2]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, %[[CONV]] : tensor<28xf32>, tensor<1x45x40x28xf32>) outs(%[[B_IN]] : tensor<1x45x40x28xf32>) // CHECK: arith.addf // CHECK: linalg.yield - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = array, stride = array, dilation = array} : (tensor<1x49x42x27xf32>, tensor<28x3x3x27xf32>, tensor<28xf32>) -> (tensor<1x45x40x28xf32>) + %0 = tosa.conv2d %input, %weights, %bias {pad = array, stride = array, dilation = array} : (tensor<1x49x42x27xf32>, tensor<28x3x3x27xf32>, tensor<28xf32>) -> tensor<1x45x40x28xf32> return } @@ -409,7 +409,7 @@ // CHECK: %[[C0:.+]] = arith.constant 0 // CHECK: %[[BATCH:.+]] = tensor.dim %arg0, %[[C0]] // CHECK: %[[PERM:.+]] = arith.constant dense<[1, 2, 3, 0]> - // CHECK: %[[W:.+]] = "tosa.transpose"(%arg1, %[[PERM]]) + // CHECK: %[[W:.+]] = tosa.transpose %arg1, %[[PERM]] // CHECK: %[[M_IN:.+]] = tensor.empty(%[[BATCH]]) // CHECK: %[[CST:.+]] = arith.constant 0 // CHECK: %[[FILL:.+]] = linalg.fill @@ -418,7 +418,7 @@ // CHECK: %[[B:.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP2]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, %[[CONV]] : tensor<28xf32>, tensor) 
outs(%[[B_IN]] : tensor) // CHECK: %[[ADD:.+]] = arith.addf // CHECK: linalg.yield %[[ADD]] : f32 - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = array, stride = array, dilation = array} : (tensor, tensor<28x3x3x27xf32>, tensor<28xf32>) -> (tensor) + %0 = tosa.conv2d %input, %weights, %bias {pad = array, stride = array, dilation = array} : (tensor, tensor<28x3x3x27xf32>, tensor<28xf32>) -> tensor return } @@ -469,7 +469,7 @@ // Running convolution // CHECK: %[[PERM:.+]] = arith.constant dense<[1, 2, 3, 0]> - // CHECK: %[[WEIGHT:.+]] = "tosa.transpose"(%arg1, %[[PERM]]) + // CHECK: %[[WEIGHT:.+]] = tosa.transpose %arg1, %[[PERM]] // CHECK: %[[M_IN:.+]] = tensor.empty(%[[H_OUT]], %[[W_OUT]]) // CHECK: %[[CST:.+]] = arith.constant 0 // CHECK: %[[FILL:.+]] = linalg.fill @@ -478,7 +478,7 @@ // CHECK: %[[B:.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP2]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, %[[CONV]] : tensor<28xf32>, tensor<1x?x?x28xf32>) outs(%[[B_IN]] : tensor<1x?x?x28xf32>) // CHECK: %[[ADD:.+]] = arith.addf // CHECK: linalg.yield %[[ADD]] : f32 - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = array, stride = array, dilation = array} : (tensor<1x?x?x27xf32>, tensor<28x3x3x27xf32>, tensor<28xf32>) -> (tensor<1x?x?x28xf32>) + %0 = tosa.conv2d %input, %weights, %bias {pad = array, stride = array, dilation = array} : (tensor<1x?x?x27xf32>, tensor<28x3x3x27xf32>, tensor<28xf32>) -> tensor<1x?x?x28xf32> return } @@ -490,7 +490,7 @@ // CHECK: tensor.pad %arg0 low[0, 1, 1, 0] high[0, 1, 1, 0] // CHECK: tensor.yield %[[C0]] // CHECK: linalg.conv_2d_nhwc_hwcf - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = array, stride = array, dilation = array} : (tensor<1x47x40x28xf32>, tensor<28x3x3x28xf32>, tensor<28xf32>) -> (tensor<1x45x40x28xf32>) + %0 = tosa.conv2d %input, %weights, %bias {pad = array, stride = array, dilation = array} : (tensor<1x47x40x28xf32>, tensor<28x3x3x28xf32>, tensor<28xf32>) -> 
tensor<1x45x40x28xf32> return } @@ -502,7 +502,7 @@ // CHECK: tensor.pad %arg0 low[0, 1, 1, 0] high[0, 1, 1, 0] // CHECK: tensor.yield %[[C22]] // CHECK: linalg.conv_2d_nhwc_hwcf_q - %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, quantization_info = #tosa.conv_quant, stride = array} : (tensor<1x12x12x1xi8>, tensor<1024x3x3x1xi8>, tensor<1024xi32>) -> tensor<1x12x12x1024xi32> + %0 = tosa.conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, quantization_info = #tosa.conv_quant, stride = array} : (tensor<1x12x12x1xi8>, tensor<1024x3x3x1xi8>, tensor<1024xi32>) -> tensor<1x12x12x1024xi32> return } @@ -524,7 +524,7 @@ // CHECK: [[ADD:%.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32 // CHECK: linalg.yield [[ADD]] : f32 // CHECK: } -> tensor<1x5x5x33xf32> - %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = array, stride = array, dilation = array } : (tensor<1x7x5x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>) -> (tensor<1x5x5x33xf32>) + %2 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 { pad = array, stride = array, dilation = array } : (tensor<1x7x5x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>) -> tensor<1x5x5x33xf32> return } @@ -548,7 +548,7 @@ // CHECK: %[[ADD:.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32 // CHECK: linalg.yield %[[ADD]] : f32 // CHECK: } -> tensor - %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = array, stride = array, dilation = array } : (tensor, tensor<3x1x3x11xf32>, tensor<33xf32>) -> (tensor) + %2 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 { pad = array, stride = array, dilation = array } : (tensor, tensor<3x1x3x11xf32>, tensor<33xf32>) -> tensor return } @@ -570,7 +570,7 @@ // CHECK: [[ADD:%.+]] = arith.addf %[[ARG3]], %[[ARG4]] : f32 // CHECK: linalg.yield [[ADD]] : f32 // CHECK: } -> tensor<1x5x5x33xf32> - %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = array, stride = array, dilation = array } : (tensor<1x11x9x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>) -> (tensor<1x5x5x33xf32>) + %2 = 
tosa.depthwise_conv2d %arg0, %arg1, %arg2 { pad = array, stride = array, dilation = array } : (tensor<1x11x9x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>) -> tensor<1x5x5x33xf32> return } @@ -598,7 +598,7 @@ // CHECK: [[ADD:%.+]] = arith.addi %[[ARG3]], %[[ARG4]] : i32 // CHECK: linalg.yield [[ADD]] : i32 // CHECK: } -> tensor<1x12x12x512xi32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = array, quantization_info = #tosa.conv_quant, stride = array, dilation = array } : (tensor<1x12x12x4xi8>, tensor<3x3x4x128xi8>, tensor<512xi32>) -> tensor<1x12x12x512xi32> + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {pad = array, quantization_info = #tosa.conv_quant, stride = array, dilation = array } : (tensor<1x12x12x4xi8>, tensor<3x3x4x128xi8>, tensor<512xi32>) -> tensor<1x12x12x512xi32> return } @@ -622,7 +622,7 @@ // CHECK: [[ADD:%.+]] = arith.addi %[[ARG3]], %[[ARG4]] : i32 // CHECK: linalg.yield [[ADD]] : i32 // CHECK: } -> tensor<1x10x10x512xi32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = array, quantization_info = #tosa.conv_quant, stride = array, dilation = array } : (tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, tensor<512xi32>) -> tensor<1x10x10x512xi32> + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {pad = array, quantization_info = #tosa.conv_quant, stride = array, dilation = array } : (tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, tensor<512xi32>) -> tensor<1x10x10x512xi32> return } @@ -638,7 +638,7 @@ // CHECK: } : tensor<2x?x?x3xf32> to tensor<2x?x?x3xf32> // CHECK: %[[CONV:.+]] = linalg.depthwise_conv_2d_nhwc_hwcm {dilations = dense<[2, 1]> : tensor<2xi64>, strides = dense<[1, 2]> : tensor<2xi64>} ins(%[[PADDED]], %arg1 : tensor<2x?x?x3xf32>, tensor<3x6x3x5xf32>) outs(%{{.*}} : tensor<2x?x?x3x5xf32>) -> tensor<2x?x?x3x5xf32> // CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[CONV]] {{\[}}[0], [1], [2], [3, 4]] - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = array, dilation = array, stride = array} : 
(tensor<2x?x?x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x?x?x15xf32> + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {pad = array, dilation = array, stride = array} : (tensor<2x?x?x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x?x?x15xf32> return } @@ -647,7 +647,7 @@ // CHECK-LABEL: @conv3d_f32 func.func @conv3d_f32(%input: tensor<1x49x48x47x27xf32>, %weights: tensor<28x3x4x5x27xf32>, %bias: tensor<28xf32>) -> () { // CHECK-DAG: %[[PERMS:.+]] = arith.constant dense<[1, 2, 3, 4, 0]> - // CHECK-DAG: %[[TRANSPOSE:.+]] = "tosa.transpose"(%arg1, %[[PERMS]]) + // CHECK-DAG: %[[TRANSPOSE:.+]] = tosa.transpose %arg1, %[[PERMS]] // CHECK-DAG: %[[EMPTY:.+]] = tensor.empty() // CHECK-DAG: %[[ZERO:.+]] = arith.constant 0 // CHECK-DAG: %[[FILL:.+]] = linalg.fill ins(%[[ZERO]] : f32) outs(%[[EMPTY]] : tensor<1x47x45x43x28xf32>) @@ -663,7 +663,7 @@ // CHECK: ^bb0(%[[A1:.+]]: f32, %[[A2:.+]]: f32, %{{.+}}: f32): // CHECK: %[[ADD:.+]] = arith.addf %[[A1]], %[[A2]] : f32 // CHECK: linalg.yield %[[ADD]] - %0 = "tosa.conv3d"(%input, %weights, %bias) {pad = array, stride = array, dilation = array} : (tensor<1x49x48x47x27xf32>, tensor<28x3x4x5x27xf32>, tensor<28xf32>) -> tensor<1x47x45x43x28xf32> + %0 = tosa.conv3d %input, %weights, %bias {pad = array, stride = array, dilation = array} : (tensor<1x49x48x47x27xf32>, tensor<28x3x4x5x27xf32>, tensor<28xf32>) -> tensor<1x47x45x43x28xf32> return } @@ -672,7 +672,7 @@ // CHECK-LABEL: @conv3d_i8 func.func @conv3d_i8(%input: tensor<1x49x48x47x27xi8>, %weights: tensor<28x3x4x5x27xi8>, %bias: tensor<28xi32>) -> () { // CHECK-DAG: %[[PERMS:.+]] = arith.constant dense<[1, 2, 3, 4, 0]> - // CHECK-DAG: %[[TRANSPOSE:.+]] = "tosa.transpose"(%arg1, %[[PERMS]]) + // CHECK-DAG: %[[TRANSPOSE:.+]] = tosa.transpose %arg1, %[[PERMS]] // CHECK-DAG: %[[EMPTY:.+]] = tensor.empty() // CHECK-DAG: %[[ZERO:.+]] = arith.constant 0 // CHECK-DAG: %[[FILL:.+]] = linalg.fill ins(%[[ZERO]] : i32) outs(%[[EMPTY]] : tensor<1x47x45x43x28xi32>) @@ 
-690,6 +690,6 @@ // CHECK: ^bb0(%[[A1:.+]]: i32, %[[A2:.+]]: i32, %{{.+}}: i32): // CHECK: %[[ADD:.+]] = arith.addi %[[A1]], %[[A2]] : i32 // CHECK: linalg.yield %[[ADD]] - %0 = "tosa.conv3d"(%input, %weights, %bias) {pad = array, quantization_info = #tosa.conv_quant, stride = array, dilation = array} : (tensor<1x49x48x47x27xi8>, tensor<28x3x4x5x27xi8>, tensor<28xi32>) -> tensor<1x47x45x43x28xi32> + %0 = tosa.conv3d %input, %weights, %bias {pad = array, quantization_info = #tosa.conv_quant, stride = array, dilation = array} : (tensor<1x49x48x47x27xi8>, tensor<28x3x4x5x27xi8>, tensor<28xi32>) -> tensor<1x47x45x43x28xi32> return } diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir @@ -11,7 +11,7 @@ // CHECK: [[ELEMENT:%.*]] = math.absf [[ARG1]] : f32 // CHECK: linalg.yield [[ELEMENT]] : f32 // CHECK: } -> tensor - %0 = "tosa.abs"(%arg0) : (tensor) -> tensor + %0 = tosa.abs %arg0 : (tensor) -> tensor // CHECK: return [[GENERIC]] : tensor return %0 : tensor @@ -72,7 +72,7 @@ // CHECK: [[ABSF:%.+]] = math.absf [[IN0]] : f32 // CHECK: linalg.yield [[ABSF]] : f32 // CHECK: } -> tensor - %0 = "tosa.abs"(%arg0) : (tensor) -> tensor + %0 = tosa.abs %arg0 : (tensor) -> tensor // CHECK: return [[RESULT]] : tensor return %0 : tensor @@ -92,7 +92,7 @@ // CHECK: [[ADDF:%.+]] = arith.addf [[IN0]], [[IN1]] : f32 // CHECK: linalg.yield [[ADDF]] : f32 // CHECK: } -> tensor - %0 = "tosa.add"(%arg0, %arg1) : (tensor, tensor) -> tensor + %0 = tosa.add %arg0, %arg1 : (tensor, tensor) -> tensor // CHECK: return [[RESULT]] : tensor return %0 : tensor @@ -142,7 +142,7 @@ // CHECK: %[[VAL_16:.*]] = arith.addf %[[VAL_13]], %[[VAL_14]] : f32 // CHECK: linalg.yield %[[VAL_16]] : f32 // CHECK: } -> tensor - %0 = "tosa.add"(%arg0, %arg1) : (tensor, tensor) -> tensor + %0 = tosa.add %arg0, %arg1 : (tensor, 
tensor) -> tensor // CHECK: return %[[RESULT]] : tensor return %0 : tensor @@ -178,7 +178,7 @@ // CHECK: %[[VAL_10:.*]] = arith.addf %[[VAL_7]], %[[VAL_8]] : f32 // CHECK: linalg.yield %[[VAL_10]] : f32 // CHECK: } -> tensor<5xf32> - %0 = "tosa.add"(%arg0, %arg1) : (tensor<5xf32>, tensor) -> tensor<5xf32> + %0 = tosa.add %arg0, %arg1 : (tensor<5xf32>, tensor) -> tensor<5xf32> // CHECK: return %[[RESULT]] : tensor<5xf32> return %0 : tensor<5xf32> @@ -201,7 +201,7 @@ // CHECK: %[[VAL_4:.*]] = arith.addf %[[VAL_1]], %[[VAL_2]] : f32 // CHECK: linalg.yield %[[VAL_4]] : f32 // CHECK: } -> tensor - %0 = "tosa.add"(%arg0, %arg1) : (tensor<1xf32>, tensor) -> tensor + %0 = tosa.add %arg0, %arg1 : (tensor<1xf32>, tensor) -> tensor // CHECK: return %[[RESULT]] : tensor return %0 : tensor @@ -222,7 +222,7 @@ // CHECK: %[[VAL_4:.*]] = arith.addf %[[VAL_1]], %[[VAL_2]] : f32 // CHECK: linalg.yield %[[VAL_4]] : f32 // CHECK: } -> tensor<3xf32> - %0 = "tosa.add"(%arg0, %arg1) : (tensor<1xf32>, tensor<3xf32>) -> tensor<3xf32> + %0 = tosa.add %arg0, %arg1 : (tensor<1xf32>, tensor<3xf32>) -> tensor<3xf32> // CHECK: return %[[RESULT]] : tensor<3xf32> return %0 : tensor<3xf32> @@ -242,7 +242,7 @@ // CHECK: %[[VAL_4:.*]] = arith.addf %[[VAL_1]], %[[VAL_2]] : f32 // CHECK: linalg.yield %[[VAL_4]] : f32 // CHECK: } -> tensor<3xf32> - %0 = "tosa.add"(%arg0, %arg1) : (tensor<3xf32>, tensor<3xf32>) -> tensor<3xf32> + %0 = tosa.add %arg0, %arg1 : (tensor<3xf32>, tensor<3xf32>) -> tensor<3xf32> // CHECK: return %[[RESULT]] : tensor<3xf32> return %0 : tensor<3xf32> @@ -329,7 +329,7 @@ // CHECK: %[[VAL_32:.*]] = arith.addf %[[VAL_29]], %[[VAL_30]] : f32 // CHECK: linalg.yield %[[VAL_32]] : f32 // CHECK: } -> tensor - %0 = "tosa.add"(%arg0, %arg1) : (tensor, tensor) -> tensor + %0 = tosa.add %arg0, %arg1 : (tensor, tensor) -> tensor // CHECK: return %[[RESULT]] : tensor return %0 : tensor @@ -351,7 +351,7 @@ // CHECK: %[[VAL_4:.*]] = arith.addf %[[VAL_1]], %[[VAL_2]] : f32 // CHECK: linalg.yield 
%[[VAL_4]] : f32 // CHECK: } -> tensor<2x3x4xf32> - %0 = "tosa.add"(%arg0, %arg1) : (tensor<3x4xf32>, tensor<2x3x4xf32>) -> tensor<2x3x4xf32> + %0 = tosa.add %arg0, %arg1 : (tensor<3x4xf32>, tensor<2x3x4xf32>) -> tensor<2x3x4xf32> // CHECK: return %[[RESULT]] : tensor<2x3x4xf32> return %0 : tensor<2x3x4xf32> @@ -419,7 +419,7 @@ // CHECK: %[[VAL_24:.*]] = arith.select %[[VAL_20]], %[[VAL_21]], %[[VAL_22]] : f32 // CHECK: linalg.yield %[[VAL_24]] : f32 // CHECK: } -> tensor<2x?xf32> - %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<2x?xi1>, tensor<2x?xf32>, tensor<2x?xf32>) -> tensor<2x?xf32> + %0 = tosa.select %arg0, %arg1, %arg2 : (tensor<2x?xi1>, tensor<2x?xf32>, tensor<2x?xf32>) -> tensor<2x?xf32> // CHECK: return %[[RESULT]] : tensor<2x?xf32> return %0 : tensor<2x?xf32> @@ -431,87 +431,87 @@ func.func @test_simple_f32(%arg0: tensor<1xf32>) -> () { // CHECK: linalg.generic // CHECK: tanh - %0 = "tosa.tanh"(%arg0) : (tensor<1xf32>) -> tensor<1xf32> + %0 = tosa.tanh %arg0 : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: math.absf - %1 = "tosa.abs"(%arg0) : (tensor<1xf32>) -> tensor<1xf32> + %1 = tosa.abs %arg0 : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: arith.addf - %2 = "tosa.add"(%0, %0) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> + %2 = tosa.add %0, %0 : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: arith.subf - %3 = "tosa.sub"(%0, %1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> + %3 = tosa.sub %0, %1 : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: arith.mulf - %4 = "tosa.mul"(%0, %1) {shift = 0 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> + %4 = tosa.mul %0, %1 {shift = 0 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: arith.negf - %5 = "tosa.negate"(%0) : (tensor<1xf32>) -> tensor<1xf32> + %5 = tosa.negate %0 : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // 
CHECK: pow - %6 = "tosa.pow"(%1, %2) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> + %6 = tosa.pow %1, %2 : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: rsqrt - %7 = "tosa.rsqrt"(%1) : (tensor<1xf32>) -> tensor<1xf32> + %7 = tosa.rsqrt %1 : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: log - %8 = "tosa.log"(%arg0) : (tensor<1xf32>) -> tensor<1xf32> + %8 = tosa.log %arg0 : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: exp - %9 = "tosa.exp"(%arg0) : (tensor<1xf32>) -> tensor<1xf32> + %9 = tosa.exp %arg0 : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: arith.cmpf - %10 = "tosa.greater"(%0, %1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xi1> + %10 = tosa.greater %0, %1 : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xi1> // CHECK: linalg.generic // CHECK: arith.cmpf - %11 = "tosa.greater_equal"(%0, %1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xi1> + %11 = tosa.greater_equal %0, %1 : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xi1> // CHECK: linalg.generic // CHECK: arith.cmpf - %12 = "tosa.equal"(%0, %1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xi1> + %12 = tosa.equal %0, %1 : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xi1> // CHECK: linalg.generic // CHECK: select - %13 = "tosa.select"(%10, %0, %1) : (tensor<1xi1>, tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> + %13 = tosa.select %10, %0, %1 : (tensor<1xi1>, tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: arith.maxf - %14 = "tosa.maximum"(%0, %1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> + %14 = tosa.maximum %0, %1 : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: arith.minf - %15 = "tosa.minimum"(%0, %1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> + %15 = tosa.minimum %0, %1 : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: ceil - %16 = "tosa.ceil"(%0) : (tensor<1xf32>) -> tensor<1xf32> + %16 = 
tosa.ceil %0 : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: floor - %17 = "tosa.floor"(%0) : (tensor<1xf32>) -> tensor<1xf32> + %17 = tosa.floor %0 : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: arith.minf // CHECK: arith.maxf - %18 = "tosa.clamp"(%0) {min_int = 1 : i64, max_int = 5 : i64, min_fp = 1.0 : f32, max_fp = 5.0 : f32} : (tensor<1xf32>) -> tensor<1xf32> + %18 = tosa.clamp %0 {min_int = 1 : i64, max_int = 5 : i64, min_fp = 1.0 : f32, max_fp = 5.0 : f32} : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: arith.negf // CHECK: exp // CHECK: arith.addf // CHECK: arith.divf - %19 = "tosa.sigmoid"(%0) : (tensor<1xf32>) -> tensor<1xf32> + %19 = tosa.sigmoid %0 : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: arith.constant -2.14748365E+9 @@ -520,24 +520,24 @@ // CHECK: arith.minf // CHECK: arith.maxf // CHECK: arith.fptosi - %20 = "tosa.cast"(%0) : (tensor<1xf32>) -> tensor<1xi32> + %20 = tosa.cast %0 : (tensor<1xf32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.constant 0 // CHECK: arith.cmpf - %21 = "tosa.cast"(%0) : (tensor<1xf32>) -> tensor<1xi1> + %21 = tosa.cast %0 : (tensor<1xf32>) -> tensor<1xi1> // CHECK: linalg.generic // CHECK: arith.truncf - %22 = "tosa.cast"(%0) : (tensor<1xf32>) -> tensor<1xf16> + %22 = tosa.cast %0 : (tensor<1xf32>) -> tensor<1xf16> // CHECK: linalg.generic // CHECK: arith.divf - %23 = "tosa.reciprocal"(%0) : (tensor<1xf32>) -> tensor<1xf32> + %23 = tosa.reciprocal %0 : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: math.erf - %24 = "tosa.erf"(%0) : (tensor<1xf32>) -> tensor<1xf32> + %24 = tosa.erf %0 : (tensor<1xf32>) -> tensor<1xf32> return } @@ -549,7 +549,7 @@ // CHECK: linalg.generic // CHECK: arith.extf - %0 = "tosa.cast"(%arg0) : (tensor<1xf16>) -> tensor<1xf32> + %0 = tosa.cast %arg0 : (tensor<1xf16>) -> tensor<1xf32> return } @@ -562,7 +562,7 @@ // CHECK: arith.extsi // CHECK: arith.extsi // CHECK: 
arith.muli - %0 = "tosa.mul"(%arg0, %arg0) {shift = 0 : i32} : (tensor<1xi16>, tensor<1xi16>) -> tensor<1xi32> + %0 = tosa.mul %arg0, %arg0 {shift = 0 : i32} : (tensor<1xi16>, tensor<1xi16>) -> tensor<1xi32> return } @@ -572,7 +572,7 @@ // CHECK-LABEL: @test_simple_ui8 func.func @test_simple_ui8(%arg0: tensor<1xui8>) -> () { // CHECK: arith.uitofp - %0 = "tosa.cast"(%arg0) : (tensor<1xui8>) -> tensor<1xf32> + %0 = tosa.cast %arg0 : (tensor<1xui8>) -> tensor<1xf32> return } @@ -582,54 +582,54 @@ func.func @test_simple_i32(%arg0: tensor<1xi32>) -> () { // CHECK: linalg.generic // CHECK: arith.addi - %0 = "tosa.add"(%arg0, %arg0) : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %0 = tosa.add %arg0, %arg0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.subi - %1 = "tosa.sub"(%arg0, %arg0) : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %1 = tosa.sub %arg0, %arg0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.muli - %2 = "tosa.mul"(%arg0, %arg0) {shift = 0 : i32} : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %2 = tosa.mul %arg0, %arg0 {shift = 0 : i32} : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.constant 2 // CHECK: apply_scale - %3 = "tosa.mul"(%arg0, %arg0) {shift = 2 : i32} : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %3 = tosa.mul %arg0, %arg0 {shift = 2 : i32} : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.divsi - %4 = "tosa.div"(%arg0, %arg0) : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %4 = tosa.div %arg0, %arg0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: ^bb0(%[[ARG1:.*]]: i32, %[[ARG2:.*]]: i32): // CHECK: [[ZERO:%.+]] = arith.constant 0 // CHECK: arith.subi [[ZERO]], %[[ARG1]] - %5 = "tosa.negate"(%arg0) : (tensor<1xi32>) -> tensor<1xi32> + %5 = tosa.negate %arg0 : (tensor<1xi32>) -> tensor<1xi32> // CHECK: 
linalg.generic // CHECK: and - %6 = "tosa.bitwise_and"(%arg0, %arg0) : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %6 = tosa.bitwise_and %arg0, %arg0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: or - %7 = "tosa.bitwise_or"(%arg0, %arg0) : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %7 = tosa.bitwise_or %arg0, %arg0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.xori - %8 = "tosa.bitwise_xor"(%arg0, %arg0) : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %8 = tosa.bitwise_xor %arg0, %arg0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.shli - %9 = "tosa.logical_left_shift"(%arg0, %arg0) : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %9 = tosa.logical_left_shift %arg0, %arg0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.shrui - %10 = "tosa.logical_right_shift"(%arg0, %arg0) : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %10 = tosa.logical_right_shift %arg0, %arg0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.shrsi - %11 = "tosa.arithmetic_right_shift"(%arg0, %arg0) {round = 0 : i1} : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %11 = tosa.arithmetic_right_shift %arg0, %arg0 {round = 0 : i1} : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.constant 1 @@ -643,61 +643,61 @@ // CHECK: and // CHECK: arith.extui // CHECK: arith.addi - %12 = "tosa.arithmetic_right_shift"(%arg0, %arg0) {round = 1 : i1} : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %12 = tosa.arithmetic_right_shift %arg0, %arg0 {round = 1 : i1} : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: math.ctlz - %13 = "tosa.clz"(%arg0) : (tensor<1xi32>) -> tensor<1xi32> + %13 = tosa.clz %arg0 : (tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.cmpi - %14 = "tosa.greater"(%0, %1) : 
(tensor<1xi32>, tensor<1xi32>) -> tensor<1xi1> + %14 = tosa.greater %0, %1 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi1> // CHECK: linalg.generic // CHECK: arith.cmpi - %15 = "tosa.greater_equal"(%0, %1) : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi1> + %15 = tosa.greater_equal %0, %1 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi1> // CHECK: linalg.generic // CHECK: select - %16 = "tosa.select"(%14, %0, %1) : (tensor<1xi1>, tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %16 = tosa.select %14, %0, %1 : (tensor<1xi1>, tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.cmpi // CHECK: select - %17 = "tosa.maximum"(%0, %1) : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %17 = tosa.maximum %0, %1 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.cmpi // CHECK: select - %18 = "tosa.minimum"(%0, %1) : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> + %18 = tosa.minimum %0, %1 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.cmpi // CHECK: select - %19 = "tosa.clamp"(%0) {min_int = 1 : i64, max_int = 5 : i64, min_fp = 1.0 : f32, max_fp = 5.0 : f32} : (tensor<1xi32>) -> tensor<1xi32> + %19 = tosa.clamp %0 {min_int = 1 : i64, max_int = 5 : i64, min_fp = 1.0 : f32, max_fp = 5.0 : f32} : (tensor<1xi32>) -> tensor<1xi32> // CHECK: linalg.generic // CHECK: arith.trunci - %20 = "tosa.cast"(%0) : (tensor<1xi32>) -> tensor<1xi16> + %20 = tosa.cast %0 : (tensor<1xi32>) -> tensor<1xi16> // CHECK: linalg.generic // CHECK: arith.extsi - %21 = "tosa.cast"(%0) : (tensor<1xi32>) -> tensor<1xi64> + %21 = tosa.cast %0 : (tensor<1xi32>) -> tensor<1xi64> // CHECK: linalg.generic // CHECK: arith.constant 0 // CHECK: arith.cmpi - %22 = "tosa.cast"(%0) : (tensor<1xi32>) -> tensor<1xi1> + %22 = tosa.cast %0 : (tensor<1xi32>) -> tensor<1xi1> // CHECK: linalg.generic // CHECK: arith.sitofp - %23 = "tosa.cast"(%0) : (tensor<1xi32>) -> tensor<1xf32> + %23 = tosa.cast %0 : 
(tensor<1xi32>) -> tensor<1xf32> // CHECK: linalg.generic // CHECK: arith.constant 0 // CHECK: arith.cmpi sgt // CHECK: arith.subi // CHECK: select - %24 = "tosa.abs"(%arg0) : (tensor<1xi32>) -> tensor<1xi32> + %24 = tosa.abs %arg0 : (tensor<1xi32>) -> tensor<1xi32> return } @@ -709,7 +709,7 @@ // CHECK: linalg.generic // CHECK: sitofp - %0 = "tosa.cast"(%arg0) : (tensor<1xi8>) -> tensor<1xf32> + %0 = tosa.cast %arg0 : (tensor<1xi8>) -> tensor<1xf32> return } @@ -726,7 +726,7 @@ // CHECK-DAG: %[[SEL1:.+]] = arith.select %[[CMP1]], %[[C127]] // CHECK-DAG: %[[CMP2:.+]] = arith.cmpi slt, %[[C126]], %[[ARG1]] // CHECK: %[[SEL2:.+]] = arith.select %[[CMP2]], %[[C126]], %[[SEL1]] - %0 = "tosa.clamp"(%arg0) {min_int = -127 : i64, max_int = 126 : i64, min_fp = 0.0 : f32, max_fp = 0.0 : f32} : (tensor<1xi8>) -> tensor<1xi8> + %0 = tosa.clamp %arg0 {min_int = -127 : i64, max_int = 126 : i64, min_fp = 0.0 : f32, max_fp = 0.0 : f32} : (tensor<1xi8>) -> tensor<1xi8> // CHECK: linalg.generic // CHECK: ^bb0(%[[ARG1:.+]]: i8, @@ -736,7 +736,7 @@ // CHECK-DAG: %[[SEL1:.+]] = arith.select %[[CMP1]], %[[C128]] // CHECK-DAG: %[[CMP2:.+]] = arith.cmpi slt, %[[C127]], %[[ARG1]] // CHECK: %[[SEL2:.+]] = arith.select %[[CMP2]], %[[C127]], %[[SEL1]] - %1 = "tosa.clamp"(%arg0) {min_int = -130 : i64, max_int = 130 : i64, min_fp = 0.0 : f32, max_fp = 0.0 : f32} : (tensor<1xi8>) -> tensor<1xi8> + %1 = tosa.clamp %arg0 {min_int = -130 : i64, max_int = 130 : i64, min_fp = 0.0 : f32, max_fp = 0.0 : f32} : (tensor<1xi8>) -> tensor<1xi8> return } @@ -751,7 +751,7 @@ // CHECK-DAG: %[[C6:.+]] = arith.constant 6.0 // CHECK-DAG: %[[MIN:.+]] = arith.minf %[[ARG1]], %[[C6]] // CHECK-DAG: %[[MAX:.+]] = arith.maxf %[[MIN]], %[[C0]] - %0 = "tosa.clamp"(%arg0) {min_int = 0 : i64, max_int = 0 : i64, min_fp = 0.0 : f32, max_fp = 6.0 : f32} : (tensor<1xf16>) -> tensor<1xf16> + %0 = tosa.clamp %arg0 {min_int = 0 : i64, max_int = 0 : i64, min_fp = 0.0 : f32, max_fp = 6.0 : f32} : (tensor<1xf16>) -> tensor<1xf16> 
return } @@ -762,20 +762,20 @@ func.func @test_bool(%arg0: tensor<1xi1>, %arg1: tensor<1xi1>) -> () { // CHECK: linalg.generic // CHECK: and - %0 = "tosa.logical_and"(%arg0, %arg1) : (tensor<1xi1>, tensor<1xi1>) -> tensor<1xi1> + %0 = tosa.logical_and %arg0, %arg1 : (tensor<1xi1>, tensor<1xi1>) -> tensor<1xi1> // CHECK: linalg.generic // CHECK: or - %1 = "tosa.logical_or"(%arg0, %arg1) : (tensor<1xi1>, tensor<1xi1>) -> tensor<1xi1> + %1 = tosa.logical_or %arg0, %arg1 : (tensor<1xi1>, tensor<1xi1>) -> tensor<1xi1> // CHECK: linalg.generic // CHECK: arith.xori - %2 = "tosa.logical_xor"(%arg0, %arg1) : (tensor<1xi1>, tensor<1xi1>) -> tensor<1xi1> + %2 = tosa.logical_xor %arg0, %arg1 : (tensor<1xi1>, tensor<1xi1>) -> tensor<1xi1> // CHECK: linalg.generic // CHECK: arith.constant true // CHECK: arith.xori - %3 = "tosa.logical_not"(%arg0) : (tensor<1xi1>) -> tensor<1xi1> + %3 = tosa.logical_not %arg0 : (tensor<1xi1>) -> tensor<1xi1> return } @@ -797,17 +797,17 @@ // CHECK: [[UBOUND:%.+]] = arith.select [[PRED2]], [[MAX]], [[LBOUND]] // CHECK: [[TRUNC:%.+]] = arith.trunci [[UBOUND]] // CHECK: linalg.yield [[TRUNC]] - %0 = "tosa.negate"(%arg0) {quantization_info = #tosa.unary_quant} : (tensor<1xi8>) -> tensor<1xi8> + %0 = tosa.negate %arg0 {quantization_info = #tosa.unary_quant} : (tensor<1xi8>) -> tensor<1xi8> // CHECK: linalg.generic // CHECK: ^bb0(%[[BBARG0:.+]]: i8, // CHECK: [[EXT:%.+]] = arith.extsi %[[BBARG0]] : i8 to i16 - %1 = "tosa.negate"(%arg0) {quantization_info = #tosa.unary_quant} : (tensor<1xi8>) -> tensor<1xi8> + %1 = tosa.negate %arg0 {quantization_info = #tosa.unary_quant} : (tensor<1xi8>) -> tensor<1xi8> // CHECK: linalg.generic // CHECK: ^bb0(%[[BBARG0:.+]]: i8, // CHECK: [[EXT:%.+]] = arith.extsi %[[BBARG0]] : i8 to i32 - %2 = "tosa.negate"(%arg0) {quantization_info = #tosa.unary_quant} : (tensor<1xi8>) -> tensor<1xi8> + %2 = tosa.negate %arg0 {quantization_info = #tosa.unary_quant} : (tensor<1xi8>) -> tensor<1xi8> return } @@ -819,8 +819,8 @@ // 
CHECK-SAME: %[[ARG0:[0-9a-zA-Z_]*]]: tensor<1xf32>, // CHECK-SAME: %[[ARG1:[0-9a-zA-Z_]*]]: tensor<1xi32> func.func @test_identity(%arg0: tensor<1xf32>, %arg1: tensor<1xi32>) -> (tensor<1xf32>, tensor<1xi32>) { - %0 = "tosa.identity"(%arg0) : (tensor<1xf32>) -> tensor<1xf32> - %1 = "tosa.identity"(%arg1) : (tensor<1xi32>) -> tensor<1xi32> + %0 = tosa.identity %arg0 : (tensor<1xf32>) -> tensor<1xf32> + %1 = tosa.identity %arg1 : (tensor<1xi32>) -> tensor<1xi32> // CHECK: return %[[ARG0]], %[[ARG1]] return %0, %1 : tensor<1xf32>, tensor<1xi32> @@ -840,7 +840,7 @@ // CHECK: ^bb0([[ARG1:%.+]]: i32, [[ARG2:%.+]]: i32) // CHECK: linalg.yield [[ARG1]] // CHECK: } - %1 = "tosa.transpose"(%arg0, %0) : (tensor<1x2x3xi32>, tensor<3xi32>) -> (tensor<2x3x1xi32>) + %1 = tosa.transpose %arg0, %0 : (tensor<1x2x3xi32>, tensor<3xi32>) -> tensor<2x3x1xi32> return } @@ -860,7 +860,7 @@ // CHECK: ^bb0([[ARG1:%.+]]: i32, [[ARG2:%.+]]: i32) // CHECK: linalg.yield [[ARG1]] // CHECK: } - %1 = "tosa.transpose"(%arg0, %0) : (tensor<1x?x3x4xi32>, tensor<4xi32>) -> (tensor) + %1 = tosa.transpose %arg0, %0 : (tensor<1x?x3x4xi32>, tensor<4xi32>) -> tensor return } @@ -882,7 +882,7 @@ // CHECK: ^bb0([[ARG1:%.+]]: f32, [[ARG2:%.+]]: f32) // CHECK: linalg.yield [[ARG1]] // CHECK: } - %1 = "tosa.transpose"(%arg0, %0) : (tensor, tensor<2xi32>) -> (tensor) + %1 = tosa.transpose %arg0, %0 : (tensor, tensor<2xi32>) -> tensor return } @@ -903,7 +903,7 @@ // CHECK: [[RES:%.+]] = arith.addf %[[ARG1]], %[[ARG2]] : f32 // CHECK: linalg.yield [[RES]] : f32 // CHECK: tensor.expand_shape [[GENERIC]] {{\[}}[0, 1]] : tensor<4xf32> into tensor<1x4xf32> - %0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor<5x4xf32>) -> tensor<1x4xf32> + %0 = tosa.reduce_sum %arg0 {axis = 0 : i64} : (tensor<5x4xf32>) -> tensor<1x4xf32> // CHECK: [[INIT:%.+]] = tensor.empty() : tensor<5xf32> // CHECK: [[CST0:%.+]] = arith.constant 0.0 @@ -913,25 +913,25 @@ // CHECK: [[RES:%.+]] = arith.addf %[[ARG1]], %[[ARG2]] : f32 // CHECK: 
linalg.yield [[RES]] : f32 // CHECK: tensor.expand_shape [[GENERIC]] {{\[}}[0, 1]] : tensor<5xf32> into tensor<5x1xf32> - %1 = "tosa.reduce_sum"(%arg0) {axis = 1 : i64} : (tensor<5x4xf32>) -> tensor<5x1xf32> + %1 = tosa.reduce_sum %arg0 {axis = 1 : i64} : (tensor<5x4xf32>) -> tensor<5x1xf32> // CHECK: arith.constant 1.0 // CHECK: linalg.fill // CHECK: linalg.generic // CHECK: arith.mulf - %2 = "tosa.reduce_prod"(%arg0) {axis = 0 : i64} : (tensor<5x4xf32>) -> tensor<1x4xf32> + %2 = tosa.reduce_prod %arg0 {axis = 0 : i64} : (tensor<5x4xf32>) -> tensor<1x4xf32> // CHECK: arith.constant 3.40282347E+38 : f32 // CHECK: linalg.fill // CHECK: linalg.generic // CHECK: arith.minf - %3 = "tosa.reduce_min"(%arg0) {axis = 0 : i64} : (tensor<5x4xf32>) -> tensor<1x4xf32> + %3 = tosa.reduce_min %arg0 {axis = 0 : i64} : (tensor<5x4xf32>) -> tensor<1x4xf32> // CHECK: arith.constant -3.40282347E+38 : f32 // CHECK: linalg.fill // CHECK: linalg.generic // CHECK: arith.maxf - %4 = "tosa.reduce_max"(%arg0) {axis = 0 : i64} : (tensor<5x4xf32>) -> tensor<1x4xf32> + %4 = tosa.reduce_max %arg0 {axis = 0 : i64} : (tensor<5x4xf32>) -> tensor<1x4xf32> return } @@ -953,7 +953,7 @@ // CHECK: %[[RES:.+]] = arith.addf %[[ARG1]], %[[ARG2]] : f32 // CHECK: linalg.yield %[[RES]] : f32 // CHECK: tensor.expand_shape %[[GENERIC]] {{\[}}[0], [1, 2]] : tensor into tensor - %0 = "tosa.reduce_sum"(%arg0) {axis = 1 : i64} : (tensor) -> tensor + %0 = tosa.reduce_sum %arg0 {axis = 1 : i64} : (tensor) -> tensor return } @@ -973,7 +973,7 @@ // CHECK: %[[RES:.+]] = arith.addf %[[ARG1]], %[[ARG2]] : f32 // CHECK: linalg.yield %[[RES]] : f32 // CHECK: tensor.expand_shape %[[GENERIC]] {{\[}}] : tensor into tensor<1xf32> - %0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor) -> tensor<1xf32> + %0 = tosa.reduce_sum %arg0 {axis = 0 : i64} : (tensor) -> tensor<1xf32> return } @@ -995,7 +995,7 @@ // CHECK: %[[RES:.+]] = arith.mulf %[[ARG1]], %[[ARG2]] : f32 // CHECK: linalg.yield %[[RES]] : f32 // CHECK: 
tensor.expand_shape %[[GENERIC]] {{\[}}[0], [1, 2]] : tensor<5x?xf32> into tensor<5x?x1xf32> - %0 = "tosa.reduce_prod"(%arg0) {axis = 2 : i64} : (tensor<5x?x4xf32>) -> tensor<5x?x1xf32> + %0 = tosa.reduce_prod %arg0 {axis = 2 : i64} : (tensor<5x?x4xf32>) -> tensor<5x?x1xf32> return } @@ -1017,7 +1017,7 @@ // CHECK: %[[MAX:.+]] = arith.maxf %[[ARG1]], %[[ARG2]] : f32 // CHECK: linalg.yield %[[MAX]] : f32 // CHECK: tensor.expand_shape %[[GENERIC]] {{\[}}[0, 1]] : tensor into tensor - %0 = "tosa.reduce_max"(%arg0) {axis = 1 : i64} : (tensor) -> tensor + %0 = tosa.reduce_max %arg0 {axis = 1 : i64} : (tensor) -> tensor return } @@ -1038,7 +1038,7 @@ // CHECK: [[RES:%.+]] = arith.addi %[[ARG1]], %[[ARG2]] : i32 // CHECK: linalg.yield [[RES]] : i32 // CHECK: tensor.expand_shape [[GENERIC]] {{\[}}[0, 1]] : tensor<4xi32> into tensor<1x4xi32> - %0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor<5x4xi32>) -> tensor<1x4xi32> + %0 = tosa.reduce_sum %arg0 {axis = 0 : i64} : (tensor<5x4xi32>) -> tensor<1x4xi32> // CHECK: [[INIT:%.+]] = tensor.empty() // CHECK: [[CST0:%.+]] = arith.constant 0 @@ -1048,27 +1048,27 @@ // CHECK: [[RES:%.+]] = arith.addi %[[ARG1]], %[[ARG2]] : i32 // CHECK: linalg.yield [[RES]] : i32 // CHECK: tensor.expand_shape [[GENERIC]] {{\[}}[0, 1]] : tensor<5xi32> into tensor<5x1xi32> - %1 = "tosa.reduce_sum"(%arg0) {axis = 1 : i64} : (tensor<5x4xi32>) -> tensor<5x1xi32> + %1 = tosa.reduce_sum %arg0 {axis = 1 : i64} : (tensor<5x4xi32>) -> tensor<5x1xi32> // CHECK: arith.constant 1 // CHECK: linalg.fill // CHECK: linalg.generic // CHECK: arith.muli - %2 = "tosa.reduce_prod"(%arg0) {axis = 0 : i64} : (tensor<5x4xi32>) -> tensor<1x4xi32> + %2 = tosa.reduce_prod %arg0 {axis = 0 : i64} : (tensor<5x4xi32>) -> tensor<1x4xi32> // CHECK: arith.constant 2147483647 : i32 // CHECK: linalg.fill // CHECK: linalg.generic // CHECK: arith.cmpi slt // CHECK: select - %3 = "tosa.reduce_min"(%arg0) {axis = 0 : i64} : (tensor<5x4xi32>) -> tensor<1x4xi32> + %3 = 
tosa.reduce_min %arg0 {axis = 0 : i64} : (tensor<5x4xi32>) -> tensor<1x4xi32> // CHECK: arith.constant -2147483648 : i32 // CHECK: linalg.fill // CHECK: linalg.generic // CHECK: arith.cmpi sgt // CHECK: select - %4 = "tosa.reduce_max"(%arg0) {axis = 0 : i64} : (tensor<5x4xi32>) -> tensor<1x4xi32> + %4 = tosa.reduce_max %arg0 {axis = 0 : i64} : (tensor<5x4xi32>) -> tensor<1x4xi32> return } @@ -1088,13 +1088,13 @@ // CHECK: [[RES:%.+]] = arith.andi %[[ARG1]], %[[ARG2]] : i1 // CHECK: linalg.yield [[RES]] : i1 // CHECK: tensor.expand_shape [[GENERIC]] {{\[}}[0, 1]] : tensor<4xi1> into tensor<1x4xi1> - %0 = "tosa.reduce_all"(%arg0) {axis = 0 : i64} : (tensor<5x4xi1>) -> tensor<1x4xi1> + %0 = tosa.reduce_all %arg0 {axis = 0 : i64} : (tensor<5x4xi1>) -> tensor<1x4xi1> // CHECK: arith.constant false // CHECK: linalg.fill // CHECK: linalg.generic // CHECK: or - %1 = "tosa.reduce_any"(%arg0) {axis = 0 : i64} : (tensor<5x4xi1>) -> tensor<1x4xi1> + %1 = tosa.reduce_any %arg0 {axis = 0 : i64} : (tensor<5x4xi1>) -> tensor<1x4xi1> return } @@ -1114,7 +1114,7 @@ // CHECK: [[C22:%.+]] = arith.constant 22 // CHECK-DAG: [[IN32:%.+]] = arith.extsi [[IN]] // CHECK-DAG: [[IN_ZEROED:%.+]] = arith.subi [[IN32]], [[C17]] - // CHECK-DAG: [[SCALED:%.+]] = "tosa.apply_scale"([[IN_ZEROED]], [[C0]], [[C1]]) <{double_round = false} + // CHECK-DAG: [[SCALED:%.+]] = tosa.apply_scale [[IN_ZEROED]], [[C0]], [[C1]] {double_round = false} // CHECK-DAG: [[SCALED_ZEROED:%.+]] = arith.addi [[SCALED]], [[C22]] // CHECK-DAG: [[CMIN:%.+]] = arith.constant -128 // CHECK-DAG: [[CMAX:%.+]] = arith.constant 127 @@ -1124,7 +1124,7 @@ // CHECK-DAG: [[BOUNDED:%.+]] = arith.select [[MAXLT]], [[CMAX]], [[LOWER]] // CHECK-DAG: [[TRUNC:%.+]] = arith.trunci [[BOUNDED]] // CHECK-DAG: linalg.yield [[TRUNC]] - %0 = "tosa.rescale"(%arg0) {input_zp = 17 : i32, output_zp = 22 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor<2xi8>) -> (tensor<2xi8>) + %0 = 
tosa.rescale %arg0 {input_zp = 17 : i32, output_zp = 22 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor<2xi8>) -> tensor<2xi8> // CHECK: [[C0:%.+]] = arith.constant 19689 // CHECK: [[C1:%.+]] = arith.constant 15 @@ -1135,7 +1135,7 @@ // CHECK: [[C22:%.+]] = arith.constant 22 // CHECK-DAG: [[IN32:%.+]] = arith.extsi [[IN]] // CHECK-DAG: [[IN_ZEROED:%.+]] = arith.subi [[IN32]], [[C17]] - // CHECK-DAG: [[SCALED:%.+]] = "tosa.apply_scale"([[IN_ZEROED]], [[C0]], [[C1]]) <{double_round = false} + // CHECK-DAG: [[SCALED:%.+]] = tosa.apply_scale [[IN_ZEROED]], [[C0]], [[C1]] {double_round = false} // CHECK-DAG: [[SCALED_ZEROED:%.+]] = arith.addi [[SCALED]], [[C22]] // CHECK-DAG: [[CMIN:%.+]] = arith.constant 0 // CHECK-DAG: [[CMAX:%.+]] = arith.constant 255 @@ -1146,7 +1146,7 @@ // CHECK-DAG: [[TRUNC:%.+]] = arith.trunci [[BOUNDED]] // CHECK-DAG: [[CAST:%.+]] = builtin.unrealized_conversion_cast [[TRUNC]] : i8 to ui8 // CHECK: linalg.yield [[CAST]] - %1 = "tosa.rescale"(%arg0) {input_zp = 17 : i32, output_zp = 22 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor<2xi8>) -> (tensor<2xui8>) + %1 = tosa.rescale %arg0 {input_zp = 17 : i32, output_zp = 22 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor<2xi8>) -> tensor<2xui8> // CHECK: return return @@ -1163,13 +1163,13 @@ // CHECK: %[[BATCH:.+]] = tensor.dim %[[ARG0]], %[[C0]] // CHECK: %[[INIT:.+]] = tensor.empty(%[[BATCH]]) : tensor // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel", "parallel"]} ins(%[[ARG0]] : tensor) outs(%[[INIT]] : tensor) - %0 = "tosa.rescale"(%arg0) {input_zp = 17 : i32, output_zp = 22 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor) -> (tensor) + %0 = tosa.rescale %arg0 {input_zp = 17 : 
i32, output_zp = 22 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor) -> tensor // CHECK: %[[C0:.+]] = arith.constant 0 // CHECK: %[[BATCH:.+]] = tensor.dim %[[ARG0]], %[[C0]] // CHECK: %[[INIT:.+]] = tensor.empty(%[[BATCH]]) : tensor // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel", "parallel"]} ins(%[[ARG0]] : tensor) outs(%[[INIT]] : tensor) - %1 = "tosa.rescale"(%arg0) {input_zp = 17 : i32, output_zp = 22 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor) -> (tensor) + %1 = tosa.rescale %arg0 {input_zp = 17 : i32, output_zp = 22 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor) -> tensor return } @@ -1187,7 +1187,7 @@ // CHECK: %[[DIM2:.+]] = tensor.dim %[[ARG0]], %[[C2]] // CHECK: %[[INIT:.+]] = tensor.empty(%[[DIM1]], %[[DIM2]]) // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<1x?x?x32xi32>) outs(%[[INIT]] : tensor<1x?x?x32xi8>) - %0 = "tosa.rescale"(%arg0) {double_round = true, input_zp = 0 : i32, multiplier = array, output_zp = 0 : i32, per_channel = false, scale32 = true, shift = array} : (tensor<1x?x?x32xi32>) -> tensor<1x?x?x32xi8> + %0 = tosa.rescale %arg0 {double_round = true, input_zp = 0 : i32, multiplier = array, output_zp = 0 : i32, per_channel = false, scale32 = true, shift = array} : (tensor<1x?x?x32xi32>) -> tensor<1x?x?x32xi8> return } @@ -1208,7 +1208,7 @@ // CHECK-DAG: [[CAST:%.+]] = builtin.unrealized_conversion_cast [[IN]] : ui8 to i8 // CHECK-DAG: [[IN32:%.+]] = arith.extui [[CAST]] // CHECK-DAG: [[IN_ZEROED:%.+]] = arith.subi [[IN32]], [[C17]] - // CHECK-DAG: [[SCALED:%.+]] = "tosa.apply_scale"([[IN_ZEROED]], [[C0]], [[C1]]) <{double_round = false} + // CHECK-DAG: 
[[SCALED:%.+]] = tosa.apply_scale [[IN_ZEROED]], [[C0]], [[C1]] {double_round = false} // CHECK-DAG: [[SCALED_ZEROED:%.+]] = arith.addi [[SCALED]], [[C22]] // CHECK-DAG: [[CMIN:%.+]] = arith.constant -128 // CHECK-DAG: [[CMAX:%.+]] = arith.constant 127 @@ -1218,7 +1218,7 @@ // CHECK-DAG: [[BOUNDED:%.+]] = arith.select [[MAXLT]], [[CMAX]], [[LOWER]] // CHECK-DAG: [[TRUNC:%.+]] = arith.trunci [[BOUNDED]] // CHECK: linalg.yield [[TRUNC]] - %0 = "tosa.rescale"(%arg0) {input_zp = 17 : i32, output_zp = 22 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor<2xui8>) -> (tensor<2xi8>) + %0 = tosa.rescale %arg0 {input_zp = 17 : i32, output_zp = 22 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor<2xui8>) -> tensor<2xi8> return } @@ -1240,7 +1240,7 @@ // CHECK-DAG: [[IN32:%.+]] = arith.extsi [[IN]] // CHECK-DAG: [[IN_ZEROED:%.+]] = arith.subi [[IN32]], [[C243]] - // CHECK-DAG: [[SCALED:%.+]] = "tosa.apply_scale"([[IN_ZEROED]], [[MULTIPLIER]], [[SHIFT]]) <{double_round = false} + // CHECK-DAG: [[SCALED:%.+]] = tosa.apply_scale [[IN_ZEROED]], [[MULTIPLIER]], [[SHIFT]] {double_round = false} // CHECK-DAG: [[SCALED_ZEROED:%.+]] = arith.addi [[SCALED]], [[C252]] // CHECK-DAG: [[CMIN:%.+]] = arith.constant -128 // CHECK-DAG: [[CMAX:%.+]] = arith.constant 127 @@ -1250,7 +1250,7 @@ // CHECK-DAG: [[BOUNDED:%.+]] = arith.select [[MAXLT]], [[CMAX]], [[LOWER]] // CHECK-DAG: [[TRUNC:%.+]] = arith.trunci [[BOUNDED]] // CHECK-DAG: linalg.yield [[TRUNC]] - %0 = "tosa.rescale"(%arg0) {input_zp = 243 : i32, output_zp = 252 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor<3xi8>) -> (tensor<3xi8>) + %0 = tosa.rescale %arg0 {input_zp = 243 : i32, output_zp = 252 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor<3xi8>) -> tensor<3xi8> // CHECK: return 
[[GENERIC]] return %0 : tensor<3xi8> @@ -1261,18 +1261,18 @@ // CHECK-LABEL: @rescaleDoubleRound func.func @rescaleDoubleRound(%arg0 : tensor<2xi8>) -> (tensor<2xi8>) { // CHECK: linalg.generic - // CHECK: "tosa.apply_scale" + // CHECK: tosa.apply_scale // CHECK-SAME: {double_round = true} - %0 = "tosa.rescale"(%arg0) {input_zp = 243 : i32, output_zp = 252 : i32, multiplier = array, shift = array, scale32 = true, double_round = true, per_channel = false} : (tensor<2xi8>) -> (tensor<2xi8>) + %0 = tosa.rescale %arg0 {input_zp = 243 : i32, output_zp = 252 : i32, multiplier = array, shift = array, scale32 = true, double_round = true, per_channel = false} : (tensor<2xi8>) -> tensor<2xi8> return %0 : tensor<2xi8> } // CHECK-LABEL: @rescaleUnnecessaryDoubleRound func.func @rescaleUnnecessaryDoubleRound(%arg0 : tensor<2xi8>) -> (tensor<2xi8>) { // CHECK: linalg.generic - // CHECK: "tosa.apply_scale" + // CHECK: tosa.apply_scale // CHECK-SAME: {double_round = false} - %0 = "tosa.rescale"(%arg0) {input_zp = 243 : i32, output_zp = 252 : i32, multiplier = array, shift = array, scale32 = true, double_round = true, per_channel = false} : (tensor<2xi8>) -> (tensor<2xi8>) + %0 = tosa.rescale %arg0 {input_zp = 243 : i32, output_zp = 252 : i32, multiplier = array, shift = array, scale32 = true, double_round = true, per_channel = false} : (tensor<2xi8>) -> tensor<2xi8> return %0 : tensor<2xi8> } @@ -1294,7 +1294,7 @@ // CHECK-DAG: %[[READ_DIM:.+]] = arith.subi %[[RDIM_MINUS_C1]], %[[I0]] // CHECK-DAG: %[[EXTRACT:.+]] = tensor.extract %arg0[%[[READ_DIM]], %[[I1]]] : tensor<5x4xi32> // CHECK: linalg.yield %[[EXTRACT]] - %0 = "tosa.reverse"(%arg0) {axis = 0 : i64} : (tensor<5x4xi32>) -> tensor<5x4xi32> + %0 = tosa.reverse %arg0 {axis = 0 : i64} : (tensor<5x4xi32>) -> tensor<5x4xi32> // CHECK: %[[C1:.+]] = arith.constant 1 // CHECK: %[[RDIM:.+]] = tensor.dim %[[ARG0]], %[[C1]] @@ -1307,7 +1307,7 @@ // CHECK-DAG: %[[READ_DIM:.+]] = arith.subi %[[RDIM_MINUS_C1]], %[[I1]] // CHECK-DAG: 
%[[EXTRACT:.+]] = tensor.extract %arg0[%[[I0]], %[[READ_DIM]]] : tensor<5x4xi32> // CHECK: linalg.yield %[[EXTRACT]] - %1 = "tosa.reverse"(%arg0) {axis = 1 : i64} : (tensor<5x4xi32>) -> tensor<5x4xi32> + %1 = tosa.reverse %arg0 {axis = 1 : i64} : (tensor<5x4xi32>) -> tensor<5x4xi32> return } @@ -1330,7 +1330,7 @@ // CHECK-DAG: %[[READ_DIM:.+]] = arith.subi %[[RDIM_MINUS_C1]], %[[I0]] // CHECK-DAG: %[[EXTRACT:.+]] = tensor.extract %arg0[%[[READ_DIM]]] : tensor // CHECK: linalg.yield %[[EXTRACT]] - %0 = "tosa.reverse"(%arg0) {axis = 0 : i64} : (tensor) -> tensor + %0 = tosa.reverse %arg0 {axis = 0 : i64} : (tensor) -> tensor return } @@ -1346,22 +1346,22 @@ // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs([[INIT]] : tensor<2x2x1x3xi8>) // CHECK: ^bb0(%[[ARG1:[0-9a-zA-Z_]+]]: i8 // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: "tosa.reshape"([[GENERIC]]) <{new_shape = array} - %0 = "tosa.tile"(%arg0) {multiples = array} : (tensor<2x3xi8>) -> (tensor<4x3xi8>) + // CHECK: tosa.reshape [[GENERIC]] {new_shape = array} + %0 = tosa.tile %arg0 {multiples = array} : (tensor<2x3xi8>) -> tensor<4x3xi8> // CHECK: [[INIT:%.+]] = tensor.empty() // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs([[INIT]] : tensor<1x2x2x3xi8>) // CHECK: ^bb0(%[[ARG1:[0-9a-zA-Z_]+]]: i8 // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: "tosa.reshape"([[GENERIC]]) <{new_shape = array} - %1 = "tosa.tile"(%arg0) {multiples = array} : (tensor<2x3xi8>) -> (tensor<2x6xi8>) + // CHECK: tosa.reshape [[GENERIC]] {new_shape = array} + %1 = tosa.tile %arg0 {multiples = array} : (tensor<2x3xi8>) -> tensor<2x6xi8> // CHECK: [[INIT:%.+]] = tensor.empty() // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], 
iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs([[INIT]] : tensor<5x2x7x3xi8>) // CHECK: ^bb0(%[[ARG1:[0-9a-zA-Z_]+]]: i8 // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: "tosa.reshape"([[GENERIC]]) <{new_shape = array} - %2 = "tosa.tile"(%arg0) {multiples = array} : (tensor<2x3xi8>) -> (tensor<10x21xi8>) + // CHECK: tosa.reshape [[GENERIC]] {new_shape = array} + %2 = tosa.tile %arg0 {multiples = array} : (tensor<2x3xi8>) -> tensor<10x21xi8> return } @@ -1380,8 +1380,8 @@ // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor) outs(%[[INIT]] : tensor<2x?x1x3xi8>) // CHECK: ^bb0(%[[ARG1:.+]]: i8, // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: "tosa.reshape"(%[[GENERIC]]) <{new_shape = array} - %0 = "tosa.tile"(%arg0) {multiples = array} : (tensor) -> (tensor) + // CHECK: tosa.reshape %[[GENERIC]] {new_shape = array} + %0 = tosa.tile %arg0 {multiples = array} : (tensor) -> tensor return } @@ -1400,8 +1400,8 @@ // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs(%[[INIT]] : tensor<2x2x?x3xi8>) // CHECK: ^bb0(%[[ARG1:.+]]: i8, // CHECK: linalg.yield %[[ARG1]] : i8 - // CHECK: "tosa.reshape"(%[[GENERIC]]) <{new_shape = array} - %0 = "tosa.tile"(%arg0) {multiples = array} : (tensor<2x3xi8>) -> (tensor<2x?xi8>) + // CHECK: tosa.reshape %[[GENERIC]] {new_shape = array} + %0 = tosa.tile %arg0 {multiples = array} : (tensor<2x3xi8>) -> tensor<2x?xi8> return } @@ -1429,7 +1429,7 @@ // CHECK: [[SELECT_VAL:%.+]] = arith.select [[CMP]], %[[ARG1]], %[[ARG3]] // CHECK: [[SELECT_IDX:%.+]] = arith.select [[CMP]], [[CAST]], %[[ARG2]] // CHECK: linalg.yield [[SELECT_IDX]], [[SELECT_VAL]] - %0 = "tosa.argmax"(%arg0) { axis = 0 : i64} : (tensor<3x2xi32>) -> (tensor<2xi32>) + 
%0 = tosa.argmax %arg0 { axis = 0 : i64} : (tensor<3x2xi32>) -> tensor<2xi32> // CHECK: [[IDX_INIT:%.+]] = tensor.empty() // CHECK: [[IDX_MIN:%.+]] = arith.constant 0 : i32 @@ -1445,7 +1445,7 @@ // CHECK: [[SELECT_VAL:%.+]] = arith.select [[CMP]], %[[ARG1]], %[[ARG3]] // CHECK: [[SELECT_IDX:%.+]] = arith.select [[CMP]], [[CAST]], %[[ARG2]] // CHECK: linalg.yield [[SELECT_IDX]], [[SELECT_VAL]] - %1 = "tosa.argmax"(%arg0) { axis = 1 : i64} : (tensor<3x2xi32>) -> (tensor<3xi32>) + %1 = tosa.argmax %arg0 { axis = 1 : i64} : (tensor<3x2xi32>) -> tensor<3xi32> // CHECK: arith.constant -3.40282347E+38 : f32 // CHECK: linalg.index @@ -1454,7 +1454,7 @@ // CHECK: select // CHECK: select // CHECK: linalg.yield - %2 = "tosa.argmax"(%arg1) { axis = 0 : i64} : (tensor<6xf32>) -> (tensor) + %2 = tosa.argmax %arg1 { axis = 0 : i64} : (tensor<6xf32>) -> tensor return } @@ -1481,7 +1481,7 @@ // CHECK: %[[SELECT_VAL:.+]] = arith.select %[[CMP]], %[[ARG1]], %[[ARG3]] // CHECK: %[[SELECT_IDX:.+]] = arith.select %[[CMP]], %[[CAST]], %[[ARG2]] // CHECK: linalg.yield %[[SELECT_IDX]], %[[SELECT_VAL]] - %0 = "tosa.argmax"(%arg0) { axis = 0 : i64} : (tensor<3x?xi32>) -> (tensor) + %0 = tosa.argmax %arg0 { axis = 0 : i64} : (tensor<3x?xi32>) -> tensor return } @@ -1504,7 +1504,7 @@ // CHECK: %[[SELECT_VAL:.+]] = arith.select %[[CMP]], %[[ARG1]], %[[ARG3]] // CHECK: %[[SELECT_IDX:.+]] = arith.select %[[CMP]], %[[CAST]], %[[ARG2]] // CHECK: linalg.yield %[[SELECT_IDX]], %[[SELECT_VAL]] - %0 = "tosa.argmax"(%arg0) { axis = 1 : i64} : (tensor<3x?xi32>) -> (tensor<3xi32>) + %0 = tosa.argmax %arg0 { axis = 1 : i64} : (tensor<3x?xi32>) -> tensor<3xi32> return } @@ -1522,7 +1522,7 @@ // CHECK: %[[IDX2:.+]] = linalg.index 2 // CHECK: %[[EXTRACT:.+]] = tensor.extract %[[ARG0]][%[[IDX0]], %[[CAST]], %[[IDX2]]] : tensor<2x3x2xf32> // CHECK: linalg.yield %[[EXTRACT]] - %0 = "tosa.gather"(%arg0, %arg1) : (tensor<2x3x2xf32>, tensor<2x3xi32>) -> (tensor<2x3x2xf32>) + %0 = tosa.gather %arg0, %arg1 : 
(tensor<2x3x2xf32>, tensor<2x3xi32>) -> tensor<2x3x2xf32> return } @@ -1542,7 +1542,7 @@ // CHECK: %[[IDX2:.+]] = linalg.index 2 // CHECK: %[[EXTRACT:.+]] = tensor.extract %[[ARG0]][%[[IDX0]], %[[CAST]], %[[IDX2]]] : tensor // CHECK: linalg.yield %[[EXTRACT]] - %0 = "tosa.gather"(%arg0, %arg1) : (tensor, tensor) -> (tensor) + %0 = tosa.gather %arg0, %arg1 : (tensor, tensor) -> tensor return } @@ -1566,7 +1566,7 @@ // CHECK: %[[IDX2:.+]] = linalg.index 2 // CHECK: %[[EXTRACT:.+]] = tensor.extract %[[ARG0]][%[[IDX0]], %[[CAST]], %[[IDX2]]] : tensor // CHECK: linalg.yield %[[EXTRACT]] - %0 = "tosa.gather"(%arg0, %arg1) : (tensor, tensor) -> (tensor) + %0 = tosa.gather %arg0, %arg1 : (tensor, tensor) -> tensor return } @@ -1584,7 +1584,7 @@ // CHECK: %[[IDX2:.+]] = linalg.index 2 // CHECK: %[[EXTRACT:.+]] = tensor.extract %[[ARG0]][%[[IDX0]], %[[CAST]], %[[IDX2]]] : tensor<2x3x2xi32> // CHECK: linalg.yield %[[EXTRACT]] - %0 = "tosa.gather"(%arg0, %arg1) : (tensor<2x3x2xi32>, tensor<2x3xi32>) -> (tensor<2x3x2xi32>) + %0 = tosa.gather %arg0, %arg1 : (tensor<2x3x2xi32>, tensor<2x3xi32>) -> tensor<2x3x2xi32> return } @@ -1602,7 +1602,7 @@ // CHECK: %[[ADD:.+]] = arith.addi %[[CAST]], %[[OFFSET]] // CHECK: %[[EXTRACT:.+]] = tensor.extract %[[ARG1]][%[[ADD]]] // CHECK: linalg.yield %[[EXTRACT]] - %0 = "tosa.table"(%arg0, %arg1) : (tensor<6xi8>, tensor<512xi8>) -> (tensor<6xi8>) + %0 = tosa.table %arg0, %arg1 : (tensor<6xi8>, tensor<512xi8>) -> tensor<6xi8> return } @@ -1635,7 +1635,7 @@ // CHECK: %[[DIFF_MUL:.+]] = arith.muli %[[DIFF]], %[[FRACTION]] // CHECK: %[[RESULT:.+]] = arith.addi %[[BASE_MUL]], %[[DIFF_MUL]] // CHECK: linalg.yield %[[RESULT]] - %0 = "tosa.table"(%arg0, %arg1) : (tensor<6xi16>, tensor<513xi16>) -> (tensor<6xi32>) + %0 = tosa.table %arg0, %arg1 : (tensor<6xi16>, tensor<513xi16>) -> tensor<6xi32> return } @@ -1655,7 +1655,7 @@ // CHECK: %[[ADD:.+]] = arith.addi %[[CAST]], %[[OFFSET]] // CHECK: %[[EXTRACT:.+]] = tensor.extract %[[ARG1]][%[[ADD]]] // 
CHECK: linalg.yield %[[EXTRACT]] - %0 = "tosa.table"(%arg0, %arg1) : (tensor, tensor<512xi8>) -> (tensor) + %0 = tosa.table %arg0, %arg1 : (tensor, tensor<512xi8>) -> tensor return } @@ -1673,7 +1673,7 @@ // CHECK: %[[ADD:.+]] = arith.addi %[[CAST]], %[[OFFSET]] // CHECK: %[[EXTRACT:.+]] = tensor.extract %[[ARG1]][%[[ADD]]] // CHECK: linalg.yield %[[EXTRACT]] - %0 = "tosa.table"(%arg0, %arg1) : (tensor<6xi8>, tensor) -> (tensor<6xi8>) + %0 = tosa.table %arg0, %arg1 : (tensor<6xi8>, tensor) -> tensor<6xi8> return } diff --git a/mlir/test/Conversion/TosaToSCF/tosa-to-scf.mlir b/mlir/test/Conversion/TosaToSCF/tosa-to-scf.mlir --- a/mlir/test/Conversion/TosaToSCF/tosa-to-scf.mlir +++ b/mlir/test/Conversion/TosaToSCF/tosa-to-scf.mlir @@ -4,30 +4,29 @@ // CHECK-SAME: ([[ARG0:%.+]]: tensor) func.func @while_test(%arg0 : tensor) -> (tensor) { // CHECK: [[WHILE:%.+]] = scf.while ([[ARG1:%.+]] = [[ARG0]]) - %1 = "tosa.while_loop"(%arg0) ({ - ^bb0(%arg2: tensor): - // CHECK: "tosa.const" - %2 = "tosa.const"() {value = dense<3> : tensor} : () -> tensor + %0 = tosa.while_loop (%arg1 = %arg0) : (tensor) -> tensor { + // CHECK: tosa.const + %1 = "tosa.const"() {value = dense<3> : tensor} : () -> tensor - // CHECK: [[COMPARE:%.+]] = "tosa.greater_equal" - %3 = "tosa.greater_equal"(%2, %arg2) : (tensor, tensor) -> tensor + // CHECK: [[COMPARE:%.+]] = tosa.greater_equal + %2 = tosa.greater_equal %1, %arg1 : (tensor, tensor) -> tensor // CHECK: [[EX:%.+]] = tensor.extract [[COMPARE]] // CHECK: scf.condition([[EX]]) [[ARG1]] - "tosa.yield"(%3) : (tensor) -> () - }, { + tosa.yield %2 : tensor + } do { // CHECK: ^bb0([[ARG1:%.+]]: tensor) - ^bb0(%arg2: tensor): + ^bb0(%arg1: tensor): // CHECK: tosa.const - %2 = "tosa.const"() {value = dense<1> : tensor} : () -> tensor + %1 = "tosa.const"() {value = dense<1> : tensor} : () -> tensor - // CHECK: [[ADD:%.+]] = "tosa.add" - %3 = "tosa.add"(%arg2, %2) : (tensor, tensor) -> tensor + // CHECK: [[ADD:%.+]] = tosa.add + %2 = tosa.add %arg1, %1 : 
(tensor, tensor) -> tensor // CHECK: scf.yield [[ADD]] - "tosa.yield"(%3) : (tensor) -> () - }) : (tensor) -> (tensor) - return %1 : tensor + tosa.yield %2 : tensor + } + return %0 : tensor } // ----- @@ -37,22 +36,20 @@ func.func @if_test(%arg0 : tensor, %arg1 : tensor, %arg2 : tensor) -> (tensor) { // CHECK: [[EX:%.+]] = tensor.extract [[ARG2]] // CHECK: [[IF:%.+]] = scf.if [[EX]] -> (tensor) { - %0 = "tosa.cond_if"(%arg2, %arg0, %arg1) ({ + %0 = tosa.cond_if %arg2 -> (tensor) { // CHECK: scf.yield [[ARG0]] - ^bb1(%arg3 : tensor, %arg4 : tensor): - "tosa.yield"(%arg3) : (tensor) -> () + tosa.yield %arg0 : tensor // CHECK: } else { - }, { + } else { // CHECK: scf.yield [[ARG1]] - ^bb1(%arg5 : tensor, %arg6 : tensor): - "tosa.yield"(%arg6) : (tensor) -> () + tosa.yield %arg1 : tensor // CHECK: } // CHECK: return [[IF]] - }) : (tensor, tensor, tensor) -> (tensor) + } return %0 : tensor } diff --git a/mlir/test/Dialect/Tosa/broadcast.mlir b/mlir/test/Dialect/Tosa/broadcast.mlir --- a/mlir/test/Dialect/Tosa/broadcast.mlir +++ b/mlir/test/Dialect/Tosa/broadcast.mlir @@ -4,282 +4,282 @@ // CHECK-LABEL: broadcast0 func.func @test_broadcast0(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<1xf32> { // CHECK-NOT: reshape - %0 = "tosa.add"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> + %0 = tosa.add %arg0, %arg1 : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32> return %0 : tensor<1xf32> } // ----- // CHECK-LABEL: broadcast1 func.func @test_broadcast1(%arg0: tensor<1xf32>, %arg1: tensor<2x1xf32>) -> tensor<2x1xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<1xf32>, tensor<2x1xf32>) -> tensor<2x1xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %[[VAR0]], %arg1 + %0 = tosa.add %arg0, %arg1 : (tensor<1xf32>, tensor<2x1xf32>) -> tensor<2x1xf32> return %0 : 
tensor<2x1xf32> } // ----- // CHECK-LABEL: broadcast2 func.func @test_broadcast2(%arg0: tensor<2x1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]]) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<2x1xf32>, tensor<1xf32>) -> tensor<2x1xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %arg0, %[[VAR0]] + %0 = tosa.add %arg0, %arg1 : (tensor<2x1xf32>, tensor<1xf32>) -> tensor<2x1xf32> return %0 : tensor<2x1xf32> } // ----- // CHECK-LABEL: broadcast3 func.func @test_broadcast3(%arg0: tensor<2x1x1x1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1x1x1xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]]) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<2x1x1x1xf32>, tensor<1xf32>) -> tensor<2x1x1x1xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %arg0, %[[VAR0]] + %0 = tosa.add %arg0, %arg1 : (tensor<2x1x1x1xf32>, tensor<1xf32>) -> tensor<2x1x1x1xf32> return %0 : tensor<2x1x1x1xf32> } // ----- // CHECK-LABEL: broadcast4 func.func @test_broadcast4(%arg0: tensor<1x1x1x2xf32>, %arg1: tensor<1xf32>) -> tensor<1x1x1x2xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]]) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<1x1x1x2xf32>, tensor<1xf32>) -> tensor<1x1x1x2xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %arg0, %[[VAR0]] + %0 = tosa.add %arg0, %arg1 : (tensor<1x1x1x2xf32>, tensor<1xf32>) -> tensor<1x1x1x2xf32> return %0 : tensor<1x1x1x2xf32> } // ----- // CHECK-LABEL: broadcast5 func.func @test_broadcast5(%arg0: tensor<1x1x2x1xf32>, %arg1: tensor<1xf32>) -> tensor<1x1x2x1xf32> { - // CHECK-DAG: %[[VAR0:.*]] = 
"tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]]) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<1x1x2x1xf32>, tensor<1xf32>) -> tensor<1x1x2x1xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %arg0, %[[VAR0]] + %0 = tosa.add %arg0, %arg1 : (tensor<1x1x2x1xf32>, tensor<1xf32>) -> tensor<1x1x2x1xf32> return %0 : tensor<1x1x2x1xf32> } // ----- // CHECK-LABEL: broadcast6 func.func @test_broadcast6(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<1xf32>) -> tensor<17x16x15x14xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]]) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<17x16x15x14xf32>, tensor<1xf32>) -> tensor<17x16x15x14xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %arg0, %[[VAR0]] + %0 = tosa.add %arg0, %arg1 : (tensor<17x16x15x14xf32>, tensor<1xf32>) -> tensor<17x16x15x14xf32> return %0 : tensor<17x16x15x14xf32> } // ----- // CHECK-LABEL: broadcast7 func.func @test_broadcast7(%arg0: tensor<17x16x1x14xf32>, %arg1: tensor<1x1xf32>) -> tensor<17x16x1x14xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]]) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<17x16x1x14xf32>, tensor<1x1xf32>) -> tensor<17x16x1x14xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %arg0, %[[VAR0]] + %0 = tosa.add %arg0, %arg1 : (tensor<17x16x1x14xf32>, tensor<1x1xf32>) -> tensor<17x16x1x14xf32> return %0 : tensor<17x16x1x14xf32> } // ----- // CHECK-LABEL: broadcast8 func.func @test_broadcast8(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<1x1xf32>) -> tensor<17x16x15x14xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]]) - %0 
= "tosa.add"(%arg0, %arg1) : (tensor<17x16x15x14xf32>, tensor<1x1xf32>) -> tensor<17x16x15x14xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %arg0, %[[VAR0]] + %0 = tosa.add %arg0, %arg1 : (tensor<17x16x15x14xf32>, tensor<1x1xf32>) -> tensor<17x16x15x14xf32> return %0 : tensor<17x16x15x14xf32> } // ----- // CHECK-LABEL: broadcast9 func.func @test_broadcast9(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<15x1xf32>) -> tensor<17x16x15x14xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]]) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<17x16x15x14xf32>, tensor<15x1xf32>) -> tensor<17x16x15x14xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %arg0, %[[VAR0]] + %0 = tosa.add %arg0, %arg1 : (tensor<17x16x15x14xf32>, tensor<15x1xf32>) -> tensor<17x16x15x14xf32> return %0 : tensor<17x16x15x14xf32> } // ----- // CHECK-LABEL: broadcast10 func.func @test_broadcast10(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<15x14xf32>) -> tensor<17x16x15x14xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]]) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<17x16x15x14xf32>, tensor<15x14xf32>) -> tensor<17x16x15x14xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %arg0, %[[VAR0]] + %0 = tosa.add %arg0, %arg1 : (tensor<17x16x15x14xf32>, tensor<15x14xf32>) -> tensor<17x16x15x14xf32> return %0 : tensor<17x16x15x14xf32> } // ----- // CHECK-LABEL: broadcast13 func.func @test_broadcast13(%arg0: tensor<1xf32>, %arg1: tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<1xf32>, 
tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %[[VAR0]], %arg1 + %0 = tosa.add %arg0, %arg1 : (tensor<1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> return %0 : tensor<17x16x15x14xf32> } // ----- // CHECK-LABEL: broadcast14 func.func @test_broadcast14(%arg0: tensor<1x1xf32>, %arg1: tensor<17x16x1x14xf32>) -> tensor<17x16x1x14xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<1x1xf32>, tensor<17x16x1x14xf32>) -> tensor<17x16x1x14xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %[[VAR0]], %arg1 + %0 = tosa.add %arg0, %arg1 : (tensor<1x1xf32>, tensor<17x16x1x14xf32>) -> tensor<17x16x1x14xf32> return %0 : tensor<17x16x1x14xf32> } // ----- // CHECK-LABEL: broadcast15 func.func @test_broadcast15(%arg0: tensor<1x1xf32>, %arg1: tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<1x1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %[[VAR0]], %arg1 + %0 = tosa.add %arg0, %arg1 : (tensor<1x1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> return %0 : tensor<17x16x15x14xf32> } // ----- // CHECK-LABEL: broadcast16 func.func @test_broadcast16(%arg0: tensor<15x1xf32>, %arg1: tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<15x1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> + // CHECK-DAG: 
%[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %[[VAR0]], %arg1 + %0 = tosa.add %arg0, %arg1 : (tensor<15x1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> return %0 : tensor<17x16x15x14xf32> } // ----- // CHECK-LABEL: broadcast17 func.func @test_broadcast17(%arg0: tensor<15x14xf32>, %arg1: tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<15x14xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %[[VAR0]], %arg1 + %0 = tosa.add %arg0, %arg1 : (tensor<15x14xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> return %0 : tensor<17x16x15x14xf32> } // ----- // CHECK-LABEL: broadcast18 func.func @test_broadcast18(%arg0: tensor<14x1xf32>, %arg1: tensor<1x15xf32>) -> tensor<14x15xf32> { - // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %arg1) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<14x1xf32>, tensor<1x15xf32>) -> tensor<14x15xf32> + // CHECK: %[[VAR1:.*]] = tosa.add %arg0, %arg1 + %0 = tosa.add %arg0, %arg1 : (tensor<14x1xf32>, tensor<1x15xf32>) -> tensor<14x15xf32> return %0 : tensor<14x15xf32> } // ----- // CHECK-LABEL: broadcast19 func.func @test_broadcast19(%arg0: tensor<64x64x1xf32>, %arg1: tensor<1x17xf32>) -> (tensor<64x64x17xf32> ) { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.sub"(%arg0, %[[VAR0]]) - %0 = "tosa.sub"(%arg0, %arg1) : (tensor<64x64x1xf32>, tensor<1x17xf32>) -> tensor<64x64x17xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.sub %arg0, %[[VAR0]] + %0 = tosa.sub %arg0, %arg1 : (tensor<64x64x1xf32>, tensor<1x17xf32>) -> tensor<64x64x17xf32> return %0 : tensor<64x64x17xf32> } // ----- // 
CHECK-LABEL: broadcast20 func.func @test_broadcast20(%arg0: tensor<3x3x4x1xf32>, %arg1: tensor<4x5xf32>) -> (tensor<3x3x4x5xf32> ) { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]]) - %0 = "tosa.add"(%arg0, %arg1) : (tensor<3x3x4x1xf32>, tensor<4x5xf32>) -> tensor<3x3x4x5xf32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %arg0, %[[VAR0]] + %0 = tosa.add %arg0, %arg1 : (tensor<3x3x4x1xf32>, tensor<4x5xf32>) -> tensor<3x3x4x5xf32> return %0 : tensor<3x3x4x5xf32> } // ----- // CHECK-LABEL: broadcast_mul func.func @test_broadcast_mul(%arg0: tensor<15x14xi32>, %arg1: tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.mul"(%[[VAR0]], %arg1) - %0 = "tosa.mul"(%arg0, %arg1) <{shift = 1 : i32 }> : (tensor<15x14xi32>, tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.mul %[[VAR0]], %arg1 + %0 = tosa.mul %arg0, %arg1 {shift = 1 : i32 } : (tensor<15x14xi32>, tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32> return %0 : tensor<17x16x15x14xi32> } // ----- // CHECK-LABEL: broadcast_arithmetic_right_shift func.func @test_broadcast_arithmetic_right_shift(%arg0: tensor<15x14xi32>, %arg1: tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.arithmetic_right_shift"(%[[VAR0]], %arg1) - %0 = "tosa.arithmetic_right_shift"(%arg0, %arg1) <{ round = true }> : (tensor<15x14xi32>, tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.arithmetic_right_shift %[[VAR0]], %arg1 + %0 = tosa.arithmetic_right_shift %arg0, %arg1 { round = true } 
: (tensor<15x14xi32>, tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32> return %0 : tensor<17x16x15x14xi32> } // ----- // CHECK-LABEL: broadcast_scalar func.func @test_broadcast_scalar(%arg0: tensor, %arg1: tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32> { - // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1) - %0 = "tosa.add"(%arg0, %arg1) : (tensor, tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32> + // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.add %[[VAR0]], %arg1 + %0 = tosa.add %arg0, %arg1 : (tensor, tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32> return %0 : tensor<17x16x15x14xi32> } // ----- // CHECK-LABEL: broadcast_select_both_input func.func @test_broadcast_select_both_input(%arg0: tensor<1x16x16xi1>, %arg1: tensor, %arg2: tensor) -> tensor<1x16x16xf32> { - // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) <{new_shape = array} - // CHECK: %[[VAL_2:.*]] = "tosa.select"(%arg0, %[[VAL_0]], %[[VAL_1]]) - %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x16x16xi1>, tensor, tensor) -> tensor<1x16x16xf32> + // CHECK-DAG: %[[VAL_0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK-DAG: %[[VAL_1:.*]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[VAL_2:.*]] = tosa.select %arg0, %[[VAL_0]], %[[VAL_1]] + %0 = tosa.select %arg0, %arg1, %arg2 : (tensor<1x16x16xi1>, tensor, tensor) -> tensor<1x16x16xf32> return %0 : tensor<1x16x16xf32> } // ----- // CHECK-LABEL: broadcast_select_one_input func.func @test_broadcast_select_one_input(%arg0: tensor<17x16x15x14xi1>, %arg1: tensor<17x16x15x14xf32>, %arg2: tensor) -> tensor<17x16x15x14xf32> { - // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg2) <{new_shape = array} - // CHECK: %[[VAL_1:.*]] = "tosa.select"(%arg0, %arg1, %[[VAL_0]]) - %0 = "tosa.select"(%arg0, %arg1, %arg2) : 
(tensor<17x16x15x14xi1>, tensor<17x16x15x14xf32>, tensor) -> tensor<17x16x15x14xf32> + // CHECK-DAG: %[[VAL_0:.*]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[VAL_1:.*]] = tosa.select %arg0, %arg1, %[[VAL_0]] + %0 = tosa.select %arg0, %arg1, %arg2 : (tensor<17x16x15x14xi1>, tensor<17x16x15x14xf32>, tensor) -> tensor<17x16x15x14xf32> return %0 : tensor<17x16x15x14xf32> } // ----- // CHECK-LABEL: broadcast_select_predicate func.func @test_broadcast_select_predicate(%arg0: tensor, %arg1: tensor<1x32x32x8xf32>, %arg2: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> { - // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: %[[VAL_1:.*]] = "tosa.select"(%[[VAL_0]], %arg1, %arg2) - %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor, tensor<1x32x32x8xf32>, tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> + // CHECK-DAG: %[[VAL_0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[VAL_1:.*]] = tosa.select %[[VAL_0]], %arg1, %arg2 + %0 = tosa.select %arg0, %arg1, %arg2 : (tensor, tensor<1x32x32x8xf32>, tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> return %0 : tensor<1x32x32x8xf32> } // ----- // CHECK-LABEL: broadcast_select_abc func.func @test_broadcast_select_abc(%arg0: tensor, %arg1: tensor<32x8xf32>, %arg2: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> { - // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAL_2:.*]] = "tosa.select"(%[[VAL_0]], %[[VAL_1]], %arg2) - %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor, tensor<32x8xf32>, tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> + // CHECK-DAG: %[[VAL_0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK-DAG: %[[VAL_1:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAL_2:.*]] = tosa.select %[[VAL_0]], %[[VAL_1]], %arg2 + %0 = tosa.select %arg0, %arg1, %arg2 : (tensor, tensor<32x8xf32>, tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> return 
%0 : tensor<1x32x32x8xf32> } // ----- // CHECK-LABEL: broadcast_select_acb func.func @test_broadcast_select_acb(%arg0: tensor, %arg1: tensor<1x32x32x8xf32>, %arg2: tensor<32x8xf32>) -> tensor<1x32x32x8xf32> { - // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) <{new_shape = array} - // CHECK: %[[VAL_2:.*]] = "tosa.select"(%[[VAL_0]], %arg1, %[[VAL_1]]) - %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor, tensor<1x32x32x8xf32>, tensor<32x8xf32>) -> tensor<1x32x32x8xf32> + // CHECK-DAG: %[[VAL_0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK-DAG: %[[VAL_1:.*]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[VAL_2:.*]] = tosa.select %[[VAL_0]], %arg1, %[[VAL_1]] + %0 = tosa.select %arg0, %arg1, %arg2 : (tensor, tensor<1x32x32x8xf32>, tensor<32x8xf32>) -> tensor<1x32x32x8xf32> return %0 : tensor<1x32x32x8xf32> } // ----- // CHECK-LABEL: broadcast_select_bac func.func @test_broadcast_select_bac(%arg0: tensor<32x8xi1>, %arg1: tensor, %arg2: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> { - // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[VAL_2:.*]] = "tosa.select"(%[[VAL_0]], %[[VAL_1]], %arg2) - %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<32x8xi1>, tensor, tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> + // CHECK-DAG: %[[VAL_0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK-DAG: %[[VAL_1:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[VAL_2:.*]] = tosa.select %[[VAL_0]], %[[VAL_1]], %arg2 + %0 = tosa.select %arg0, %arg1, %arg2 : (tensor<32x8xi1>, tensor, tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> return %0 : tensor<1x32x32x8xf32> } // ----- // CHECK-LABEL: broadcast_select_bca func.func @test_broadcast_select_bca(%arg0: tensor<32x8xi1>, %arg1: tensor<1x32x32x8xf32>, %arg2: tensor) -> tensor<1x32x32x8xf32> { - // CHECK-DAG: 
%[[VAL_0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) <{new_shape = array} - // CHECK: %[[VAL_2:.*]] = "tosa.select"(%[[VAL_0]], %arg1, %[[VAL_1]]) - %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<32x8xi1>, tensor<1x32x32x8xf32>, tensor) -> tensor<1x32x32x8xf32> + // CHECK-DAG: %[[VAL_0:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK-DAG: %[[VAL_1:.*]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[VAL_2:.*]] = tosa.select %[[VAL_0]], %arg1, %[[VAL_1]] + %0 = tosa.select %arg0, %arg1, %arg2 : (tensor<32x8xi1>, tensor<1x32x32x8xf32>, tensor) -> tensor<1x32x32x8xf32> return %0 : tensor<1x32x32x8xf32> } // ----- // CHECK-LABEL: broadcast_select_cab func.func @test_broadcast_select_cab(%arg0: tensor<1x32x32x8xi1>, %arg1: tensor, %arg2: tensor<32x8xf32>) -> tensor<1x32x32x8xf32> { - // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) <{new_shape = array} - // CHECK: %[[VAL_2:.*]] = "tosa.select"(%arg0, %[[VAL_0]], %[[VAL_1]]) - %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x32x32x8xi1>, tensor, tensor<32x8xf32>) -> tensor<1x32x32x8xf32> + // CHECK-DAG: %[[VAL_0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK-DAG: %[[VAL_1:.*]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[VAL_2:.*]] = tosa.select %arg0, %[[VAL_0]], %[[VAL_1]] + %0 = tosa.select %arg0, %arg1, %arg2 : (tensor<1x32x32x8xi1>, tensor, tensor<32x8xf32>) -> tensor<1x32x32x8xf32> return %0 : tensor<1x32x32x8xf32> } // ----- // CHECK-LABEL: broadcast_select_cba func.func @test_broadcast_select_cba(%arg0: tensor<1x32x32x8xi1>, %arg1: tensor<32x8xf32>, %arg2: tensor) -> tensor<1x32x32x8xf32> { - // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) <{new_shape = array} - // CHECK: %[[VAL_2:.*]] = "tosa.select"(%arg0, %[[VAL_0]], %[[VAL_1]]) - %0 = 
"tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x32x32x8xi1>, tensor<32x8xf32>, tensor) -> tensor<1x32x32x8xf32> + // CHECK-DAG: %[[VAL_0:.*]] = tosa.reshape %arg1 {new_shape = array} + // CHECK-DAG: %[[VAL_1:.*]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[VAL_2:.*]] = tosa.select %arg0, %[[VAL_0]], %[[VAL_1]] + %0 = tosa.select %arg0, %arg1, %arg2 : (tensor<1x32x32x8xi1>, tensor<32x8xf32>, tensor) -> tensor<1x32x32x8xf32> return %0 : tensor<1x32x32x8xf32> } diff --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir --- a/mlir/test/Dialect/Tosa/canonicalize.mlir +++ b/mlir/test/Dialect/Tosa/canonicalize.mlir @@ -2,8 +2,8 @@ // CHECK-LABEL: @argmax_nofold func.func @argmax_nofold(%arg0: tensor) -> tensor { - // CHECK: "tosa.argmax" - %0 = "tosa.argmax"(%arg0) {axis = 0 : i64}: (tensor) -> tensor + // CHECK: tosa.argmax + %0 = tosa.argmax %arg0 {axis = 0 : i64}: (tensor) -> tensor return %0 : tensor } @@ -12,7 +12,7 @@ // CHECK-NOT: tosa.add // CHECK: return %arg0 %zeros = "tosa.const"() {value = dense<0> : tensor<1x1x1xi32>} : () -> tensor<1x1x1xi32> - %1 = "tosa.add"(%arg0, %zeros) : (tensor<4x2x3xi32>, tensor<1x1x1xi32>) -> tensor<4x2x3xi32> + %1 = tosa.add %arg0, %zeros : (tensor<4x2x3xi32>, tensor<1x1x1xi32>) -> tensor<4x2x3xi32> return %1 : tensor<4x2x3xi32> } @@ -21,42 +21,42 @@ // CHECK: return %arg0 // CHECK-NOT: tosa.add %zeros = "tosa.const"() {value = dense<0> : tensor<2x3xi32>} : () -> tensor<2x3xi32> - %1 = "tosa.add"(%arg0, %zeros) : (tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32> + %1 = tosa.add %arg0, %zeros : (tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32> return %1 : tensor<2x3xi32> } // CHECK-LABEL: @cast_fold func.func @cast_fold(%arg0: tensor) -> tensor { // CHECK: return %arg0 - %0 = "tosa.cast"(%arg0) : (tensor) -> tensor + %0 = tosa.cast %arg0 : (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @cast_nofold func.func @cast_nofold(%arg0: tensor) -> tensor { - // CHECK: 
"tosa.cast" - %0 = "tosa.cast"(%arg0) : (tensor) -> tensor + // CHECK: tosa.cast + %0 = tosa.cast %arg0 : (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @clamp_i32_not_noop func.func @clamp_i32_not_noop(%arg0: tensor<4xi32>) -> tensor<4xi32> { - // CHECK: "tosa.clamp" - %0 = "tosa.clamp"(%arg0) {min_int = 1 : i64, max_int = 4 : i64, min_fp = 1.0 : f32, max_fp = 4.0 : f32} : (tensor<4xi32>) -> tensor<4xi32> + // CHECK: tosa.clamp + %0 = tosa.clamp %arg0 {min_int = 1 : i64, max_int = 4 : i64, min_fp = 1.0 : f32, max_fp = 4.0 : f32} : (tensor<4xi32>) -> tensor<4xi32> return %0 : tensor<4xi32> } // CHECK-LABEL: @clamp_f16_not_noop func.func @clamp_f16_not_noop(%arg0: tensor<4xf16>) -> tensor<4xf16> { - // CHECK: "tosa.clamp" - %0 = "tosa.clamp"(%arg0) {min_int = -128 : i64, max_int = 127 : i64, min_fp = -3.40282347E+38 : f32, max_fp = 3.40282347E+38 : f32} : (tensor<4xf16>) -> tensor<4xf16> + // CHECK: tosa.clamp + %0 = tosa.clamp %arg0 {min_int = -128 : i64, max_int = 127 : i64, min_fp = -3.40282347E+38 : f32, max_fp = 3.40282347E+38 : f32} : (tensor<4xf16>) -> tensor<4xf16> return %0 : tensor<4xf16> } // CHECK-LABEL: @clamp_f32_not_noop func.func @clamp_f32_not_noop(%arg0: tensor<4xf32>) -> tensor<4xf32> { - // CHECK: "tosa.clamp" - %0 = "tosa.clamp"(%arg0) {min_int = -128 : i64, max_int = 127 : i64, min_fp = -3.40282347E+38 : f32, max_fp = 3.40282347E+38 : f32} : (tensor<4xf32>) -> tensor<4xf32> + // CHECK: tosa.clamp + %0 = tosa.clamp %arg0 {min_int = -128 : i64, max_int = 127 : i64, min_fp = -3.40282347E+38 : f32, max_fp = 3.40282347E+38 : f32} : (tensor<4xf32>) -> tensor<4xf32> return %0 : tensor<4xf32> } @@ -65,7 +65,7 @@ // CHECK: return %arg0 // CHECK-NOT: "tosa.clamp" // 0xFF800000 and 0x7F800000 are respectively negative and positive F32 infinity. 
- %0 = "tosa.clamp"(%arg0) {min_int = -128 : i64, max_int = 127 : i64, min_fp = 0xFF800000 : f32, max_fp = 0x7F800000 : f32} : (tensor<4xf16>) -> tensor<4xf16> + %0 = tosa.clamp %arg0 {min_int = -128 : i64, max_int = 127 : i64, min_fp = 0xFF800000 : f32, max_fp = 0x7F800000 : f32} : (tensor<4xf16>) -> tensor<4xf16> return %0 : tensor<4xf16> } @@ -74,46 +74,46 @@ // CHECK: return %arg0 // CHECK-NOT: "tosa.clamp" // 0xFF800000 and 0x7F800000 are respectively negative and positive F32 infinity. - %0 = "tosa.clamp"(%arg0) {min_int = -128 : i64, max_int = 127 : i64, min_fp = 0xFF800000 : f32, max_fp = 0x7F800000 : f32} : (tensor<4xf32>) -> tensor<4xf32> + %0 = tosa.clamp %arg0 {min_int = -128 : i64, max_int = 127 : i64, min_fp = 0xFF800000 : f32, max_fp = 0x7F800000 : f32} : (tensor<4xf32>) -> tensor<4xf32> return %0 : tensor<4xf32> } // CHECK-LABEL: @clamp_int8_is_noop func.func @clamp_int8_is_noop(%arg0: tensor<4xi8>) -> tensor<4xi8> { // CHECK: return %arg0 - // CHECK-NOT: "tosa.clamp" - %0 = "tosa.clamp"(%arg0) {min_int = -128 : i64, max_int = 127 : i64, min_fp = -3.40282347E+38 : f32, max_fp = 3.40282347E+38 : f32} : (tensor<4xi8>) -> tensor<4xi8> + // CHECK-NOT: tosa.clamp + %0 = tosa.clamp %arg0 {min_int = -128 : i64, max_int = 127 : i64, min_fp = -3.40282347E+38 : f32, max_fp = 3.40282347E+38 : f32} : (tensor<4xi8>) -> tensor<4xi8> return %0 : tensor<4xi8> } // CHECK-LABEL: @clamp_int16_is_noop func.func @clamp_int16_is_noop(%arg0: tensor<4xi16>) -> tensor<4xi16> { // CHECK: return %arg0 - // CHECK-NOT: "tosa.clamp" - %0 = "tosa.clamp"(%arg0) {min_int = -32768 : i64, max_int = 32767 : i64, min_fp = -3.40282347E+38 : f32, max_fp = 3.40282347E+38 : f32} : (tensor<4xi16>) -> tensor<4xi16> + // CHECK-NOT: tosa.clamp + %0 = tosa.clamp %arg0 {min_int = -32768 : i64, max_int = 32767 : i64, min_fp = -3.40282347E+38 : f32, max_fp = 3.40282347E+38 : f32} : (tensor<4xi16>) -> tensor<4xi16> return %0 : tensor<4xi16> } // CHECK-LABEL: @clamp_uint8_is_noop func.func 
@clamp_uint8_is_noop(%arg0: tensor<4xui8>) -> tensor<4xui8> { // CHECK: return %arg0 - // CHECK-NOT: "tosa.clamp" - %0 = "tosa.clamp"(%arg0) {min_int = 0 : i64, max_int = 255 : i64, min_fp = -3.40282347E+38 : f32, max_fp = 3.40282347E+38 : f32} : (tensor<4xui8>) -> tensor<4xui8> + // CHECK-NOT: tosa.clamp + %0 = tosa.clamp %arg0 {min_int = 0 : i64, max_int = 255 : i64, min_fp = -3.40282347E+38 : f32, max_fp = 3.40282347E+38 : f32} : (tensor<4xui8>) -> tensor<4xui8> return %0 : tensor<4xui8> } // CHECK-LABEL: @clamp_twice_is_single_clamp func.func @clamp_twice_is_single_clamp(%arg0: tensor<4xi8>) -> tensor<4xi8> { - // CHECK: "tosa.clamp"(%arg0) <{max_fp = 3.000000e+00 : f32, max_int = 2 : i64, min_fp = -3.000000e+00 : f32, min_int = -2 : i64} - %0 = "tosa.clamp"(%arg0) {max_fp = 3.0 : f32, max_int = 4 : i64, min_fp = -5.0 : f32, min_int = -2 : i64} : (tensor<4xi8>) -> tensor<4xi8> - %1 = "tosa.clamp"(%0) {max_fp = 5.0 : f32, max_int = 2 : i64, min_fp = -3.0 : f32, min_int = -4 : i64} : (tensor<4xi8>) -> tensor<4xi8> + // CHECK: tosa.clamp %arg0 {max_fp = 3.000000e+00 : f32, max_int = 2 : i64, min_fp = -3.000000e+00 : f32, min_int = -2 : i64} + %0 = tosa.clamp %arg0 {max_fp = 3.0 : f32, max_int = 4 : i64, min_fp = -5.0 : f32, min_int = -2 : i64} : (tensor<4xi8>) -> tensor<4xi8> + %1 = tosa.clamp %0 {max_fp = 5.0 : f32, max_int = 2 : i64, min_fp = -3.0 : f32, min_int = -4 : i64} : (tensor<4xi8>) -> tensor<4xi8> return %1 : tensor<4xi8> } // CHECK-LABEL: @concat_fold func.func @concat_fold(%arg0: tensor) -> tensor { // CHECK: return %arg0 - %0 = "tosa.concat"(%arg0) {axis = 0 : i64}: (tensor) -> tensor + %0 = tosa.concat %arg0 {axis = 0 : i64}: (tensor) -> tensor return %0 : tensor } @@ -121,47 +121,47 @@ func.func @concat_fold_cast(%arg0: tensor) -> tensor { // CHECK: %[[VAR0:.*]] = tensor.cast %arg0 // CHECK: return %[[VAR0]] - %0 = "tosa.concat"(%arg0) {axis = 0 : i64}: (tensor) -> tensor + %0 = tosa.concat %arg0 {axis = 0 : i64}: (tensor) -> tensor return %0 : 
tensor } // CHECK-LABEL: @conv2d_stride_2 func.func @conv2d_stride_2(%arg0: tensor<4x10x10x2xf32>) -> tensor<4x10x10x3xf32> { - // CHECK: "tosa.conv2d" + // CHECK: tosa.conv2d %weight = "tosa.const"() {value = dense<[[[[1.0, 1.0]]], [[[1.0, 1.0]]], [[[1.0, 1.0]]]]> : tensor<3x1x1x2xf32>} : ()-> tensor<3x1x1x2xf32> %bias = "tosa.const"() {value = dense<0.0> : tensor<3xf32>} : ()-> tensor<3xf32> - %0 = "tosa.conv2d"(%arg0, %weight, %bias) {pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32> + %0 = tosa.conv2d %arg0, %weight, %bias {pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32> return %0 : tensor<4x10x10x3xf32> } // CHECK-LABEL: @conv2d_weight_2x2 func.func @conv2d_weight_2x2(%arg0: tensor<4x10x10x1xf32>) -> tensor<4x10x10x1xf32> { - // CHECK: "tosa.conv2d" + // CHECK: tosa.conv2d %weight = "tosa.const"() {value = dense<[[[[1.0], [1.0]], [[1.0], [1.0]]]]> : tensor<1x2x2x1xf32>} : ()-> tensor<1x2x2x1xf32> %bias = "tosa.const"() {value = dense<0.0> : tensor<1xf32>} : ()-> tensor<1xf32> - %0 = "tosa.conv2d"(%arg0, %weight, %bias) {pad = array, stride = array, dilation = array} : (tensor<4x10x10x1xf32>, tensor<1x2x2x1xf32>, tensor<1xf32>) -> tensor<4x10x10x1xf32> + %0 = tosa.conv2d %arg0, %weight, %bias {pad = array, stride = array, dilation = array} : (tensor<4x10x10x1xf32>, tensor<1x2x2x1xf32>, tensor<1xf32>) -> tensor<4x10x10x1xf32> return %0 : tensor<4x10x10x1xf32> } // CHECK-LABEL: @depthwise_conv2d_stride_2 func.func @depthwise_conv2d_stride_2(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1x1x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x10x10x6xf32> { - // CHECK: "tosa.depthwise_conv2d" - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32> + // CHECK: 
tosa.depthwise_conv2d + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32> return %0 : tensor<4x10x10x6xf32> } // CHECK-LABEL: @depthwise_conv2d_weight_2x2 func.func @depthwise_conv2d_weight_2x2(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<2x2x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x10x10x6xf32> { - // CHECK: "tosa.depthwise_conv2d" - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<2x2x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32> + // CHECK: tosa.depthwise_conv2d + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<2x2x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32> return %0 : tensor<4x10x10x6xf32> } // CHECK-LABEL: @max_pool2d_is_noop func.func @max_pool2d_is_noop(%arg0: tensor<10x1x1x3xf32>) -> tensor<10x1x1x3xf32> { - // CHECK-NOT: "tosa.max_pool2d" + // CHECK-NOT: tosa.max_pool2d // CHECK: return %arg0 - %0 = "tosa.max_pool2d"(%arg0) {kernel = array, pad = array, stride = array, dilation = array} : (tensor<10x1x1x3xf32>) -> tensor<10x1x1x3xf32> + %0 = tosa.max_pool2d %arg0 {kernel = array, pad = array, stride = array, dilation = array} : (tensor<10x1x1x3xf32>) -> tensor<10x1x1x3xf32> return %0 : tensor<10x1x1x3xf32> } @@ -169,34 +169,34 @@ func.func @pad_noop(%arg0: tensor) -> tensor { // CHECK: return %arg0 %0 = "tosa.const"() { value = dense<0> : tensor<2x2xi32>} : () -> tensor<2x2xi32> - %1 = "tosa.pad"(%arg0, %0) : (tensor, tensor<2x2xi32>) -> tensor + %1 = tosa.pad %arg0, %0 : (tensor, tensor<2x2xi32>) -> tensor return %1 : tensor } // CHECK-LABEL: @pad_determine_val_i32 func.func @pad_determine_val_i32(%arg0: tensor, %arg1 : tensor<2x2xi32>) -> tensor { // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> : tensor} - // CHECK: "tosa.pad"(%arg0, %arg1, 
%[[ZERO]]) + // CHECK: tosa.pad %arg0, %arg1, %[[ZERO]] %0 = "tosa.const"() { value = dense<[[1, 0], [0, 1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32> - %1 = "tosa.pad"(%arg0, %arg1) : (tensor, tensor<2x2xi32>) -> tensor + %1 = tosa.pad %arg0, %arg1 : (tensor, tensor<2x2xi32>) -> tensor return %1 : tensor } // CHECK-LABEL: @pad_determine_val_f32 func.func @pad_determine_val_f32(%arg0: tensor, %arg1 : tensor<2x2xi32>) -> tensor { // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor} - // CHECK: "tosa.pad"(%arg0, %arg1, %[[ZERO]]) + // CHECK: tosa.pad %arg0, %arg1, %[[ZERO]] %0 = "tosa.const"() { value = dense<[[1, 0], [0, 1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32> - %1 = "tosa.pad"(%arg0, %arg1) : (tensor, tensor<2x2xi32>) -> tensor + %1 = tosa.pad %arg0, %arg1 : (tensor, tensor<2x2xi32>) -> tensor return %1 : tensor } // CHECK-LABEL: @pad_determine_val_quant func.func @pad_determine_val_quant(%arg0: tensor, %arg1 : tensor<2x2xi32>) -> tensor { // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<42> : tensor} - // CHECK: "tosa.pad"(%arg0, %arg1, %[[ZERO]]) + // CHECK: tosa.pad %arg0, %arg1, %[[ZERO]] %0 = "tosa.const"() { value = dense<[[1, 0], [0, 1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32> - %1 = "tosa.pad"(%arg0, %arg1) {quantization_info = #tosa.pad_quant} : (tensor, tensor<2x2xi32>) -> tensor + %1 = tosa.pad %arg0, %arg1 {quantization_info = #tosa.pad_quant} : (tensor, tensor<2x2xi32>) -> tensor return %1 : tensor } @@ -205,7 +205,7 @@ // CHECK: return %arg0 // CHECK-NOT: tosa.mul %ones = "tosa.const"() {value = dense<1.0> : tensor<2x3xf32>} : () -> tensor<2x3xf32> - %1 = "tosa.mul"(%arg0, %ones) {shift = 0 : i32} : (tensor<2x3xf32>, tensor<2x3xf32>) -> tensor<2x3xf32> + %1 = tosa.mul %arg0, %ones {shift = 0 : i32} : (tensor<2x3xf32>, tensor<2x3xf32>) -> tensor<2x3xf32> return %1 : tensor<2x3xf32> } @@ -214,7 +214,7 @@ // CHECK: return %arg0 // CHECK-NOT: tosa.mul %ones = "tosa.const"() {value = dense<1.0> : 
tensor<1x1xf32>} : () -> tensor<1x1xf32> - %1 = "tosa.mul"(%ones, %arg0) {shift = 0 : i32} : (tensor<1x1xf32>, tensor<2x3xf32>) -> tensor<2x3xf32> + %1 = tosa.mul %ones, %arg0 {shift = 0 : i32} : (tensor<1x1xf32>, tensor<2x3xf32>) -> tensor<2x3xf32> return %1 : tensor<2x3xf32> } @@ -223,26 +223,26 @@ // CHECK: return %arg0 // CHECK-NOT: tosa.mul %ones = "tosa.const"() {value = dense<1> : tensor<2x3xi32>} : () -> tensor<2x3xi32> - %1 = "tosa.mul"(%arg0, %ones) {shift = 0 : i32} : (tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32> + %1 = tosa.mul %arg0, %ones {shift = 0 : i32} : (tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32> return %1 : tensor<2x3xi32> } // CHECK-LABEL: @mul_zero_broadcast func.func @mul_zero_broadcast(%arg0: tensor<2x3xf32>) -> (tensor<2x3xf32>, tensor<2x3xf32>) { - // CHECK: %[[ZERO:.*]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor<2x3xf32>}> : () -> tensor<2x3xf32> + // CHECK: %[[ZERO:.*]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor<2x3xf32>} // CHECK-NOT: tosa.mul %zeros = "tosa.const"() {value = dense<0.0> : tensor<1x1xf32>} : () -> tensor<1x1xf32> - %1 = "tosa.mul"(%arg0, %zeros) {shift = 0 : i32} : (tensor<2x3xf32>, tensor<1x1xf32>) -> tensor<2x3xf32> + %1 = tosa.mul %arg0, %zeros {shift = 0 : i32} : (tensor<2x3xf32>, tensor<1x1xf32>) -> tensor<2x3xf32> // CHECK-NOT: tosa.mul // CHECK: return %[[ZERO]], %[[ZERO]] - %2 = "tosa.mul"(%zeros, %arg0) {shift = 0 : i32} : (tensor<1x1xf32>, tensor<2x3xf32>) -> tensor<2x3xf32> + %2 = tosa.mul %zeros, %arg0 {shift = 0 : i32} : (tensor<1x1xf32>, tensor<2x3xf32>) -> tensor<2x3xf32> return %1, %2 : tensor<2x3xf32>, tensor<2x3xf32> } // CHECK-LABEL: @select_same_value func.func @select_same_value(%arg0: tensor<2x3xi1>, %arg1: tensor<2x3xi32>) -> tensor<2x3xi32> { - %0 = "tosa.select"(%arg0, %arg1, %arg1) : (tensor<2x3xi1>, tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32> + %0 = tosa.select %arg0, %arg1, %arg1 : (tensor<2x3xi1>, tensor<2x3xi32>, tensor<2x3xi32>) -> 
tensor<2x3xi32> // CHECK: return %arg1 // CHECK-NOT: tosa.select return %0 : tensor<2x3xi32> @@ -251,7 +251,7 @@ // CHECK-LABEL: @select_true_value func.func @select_true_value(%arg0: tensor<2x3xi32>, %arg1: tensor<2x3xi32>) -> tensor<2x3xi32> { %c1 = "tosa.const"() {value = dense<1> : tensor<2x3xi1>} : () -> tensor<2x3xi1> - %0 = "tosa.select"(%c1, %arg0, %arg1) : (tensor<2x3xi1>, tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32> + %0 = tosa.select %c1, %arg0, %arg1 : (tensor<2x3xi1>, tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32> // CHECK: return %arg0 // CHECK-NOT: tosa.select return %0 : tensor<2x3xi32> @@ -260,7 +260,7 @@ // CHECK-LABEL: @select_false_value func.func @select_false_value(%arg0: tensor<2x3xi32>, %arg1: tensor<2x3xi32>) -> tensor<2x3xi32> { %c0 = "tosa.const"() {value = dense<0> : tensor<2x3xi1>} : () -> tensor<2x3xi1> - %0 = "tosa.select"(%c0, %arg0, %arg1) : (tensor<2x3xi1>, tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32> + %0 = tosa.select %c0, %arg0, %arg1 : (tensor<2x3xi1>, tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32> // CHECK: return %arg1 // CHECK-NOT: tosa.select return %0 : tensor<2x3xi32> @@ -268,109 +268,109 @@ // CHECK-LABEL: @select_not_pred func.func @select_not_pred(%arg0: tensor<2x3xi1>, %arg1: tensor<2x3xi32>, %arg2: tensor<2x3xi32>) -> tensor<2x3xi32> { - %0 = "tosa.logical_not"(%arg0) : (tensor<2x3xi1>) -> tensor<2x3xi1> - %1 = "tosa.select"(%0, %arg1, %arg2) : (tensor<2x3xi1>, tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32> - // CHECK: "tosa.select"(%arg0, %arg2, %arg1) + %0 = tosa.logical_not %arg0 : (tensor<2x3xi1>) -> tensor<2x3xi1> + %1 = tosa.select %0, %arg1, %arg2 : (tensor<2x3xi1>, tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32> + // CHECK: tosa.select %arg0, %arg2, %arg1 return %1 : tensor<2x3xi32> } // CHECK-LABEL: @reduce_all_fold func.func @reduce_all_fold(%arg0: tensor) -> tensor { // CHECK: return %arg0 - %0 = "tosa.reduce_all"(%arg0) {axis = 1 : i64}: (tensor) -> tensor + %0 = 
tosa.reduce_all %arg0 {axis = 1 : i64}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reduce_all_nofold func.func @reduce_all_nofold(%arg0: tensor) -> tensor { - // CHECK: "tosa.reduce_all" - %0 = "tosa.reduce_all"(%arg0) {axis = 0 : i64}: (tensor) -> tensor + // CHECK: tosa.reduce_all + %0 = tosa.reduce_all %arg0 {axis = 0 : i64}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reduce_any_fold func.func @reduce_any_fold(%arg0: tensor) -> tensor { // CHECK: return %arg0 - %0 = "tosa.reduce_any"(%arg0) {axis = 1 : i64}: (tensor) -> tensor + %0 = tosa.reduce_any %arg0 {axis = 1 : i64}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reduce_any_nofold func.func @reduce_any_nofold(%arg0: tensor) -> tensor { - // CHECK: "tosa.reduce_any" - %0 = "tosa.reduce_any"(%arg0) {axis = 0 : i64}: (tensor) -> tensor + // CHECK: tosa.reduce_any + %0 = tosa.reduce_any %arg0 {axis = 0 : i64}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reduce_max_fold func.func @reduce_max_fold(%arg0: tensor) -> tensor { // CHECK: return %arg0 - %0 = "tosa.reduce_max"(%arg0) {axis = 1 : i64}: (tensor) -> tensor + %0 = tosa.reduce_max %arg0 {axis = 1 : i64}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reduce_max_nofold func.func @reduce_max_nofold(%arg0: tensor) -> tensor { - // CHECK: "tosa.reduce_max" - %0 = "tosa.reduce_max"(%arg0) {axis = 0 : i64}: (tensor) -> tensor + // CHECK: tosa.reduce_max + %0 = tosa.reduce_max %arg0 {axis = 0 : i64}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reduce_min_fold func.func @reduce_min_fold(%arg0: tensor) -> tensor { // CHECK: return %arg0 - %0 = "tosa.reduce_min"(%arg0) {axis = 1 : i64}: (tensor) -> tensor + %0 = tosa.reduce_min %arg0 {axis = 1 : i64}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reduce_min_nofold func.func @reduce_min_nofold(%arg0: tensor) -> tensor { - // CHECK: "tosa.reduce_min" - %0 = "tosa.reduce_min"(%arg0) {axis = 0 : i64}: (tensor) -> tensor + // CHECK: 
tosa.reduce_min + %0 = tosa.reduce_min %arg0 {axis = 0 : i64}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reduce_prod_fold func.func @reduce_prod_fold(%arg0: tensor) -> tensor { // CHECK: return %arg0 - %0 = "tosa.reduce_prod"(%arg0) {axis = 1 : i64}: (tensor) -> tensor + %0 = tosa.reduce_prod %arg0 {axis = 1 : i64}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reduce_prod_nofold func.func @reduce_prod_nofold(%arg0: tensor) -> tensor { - // CHECK: "tosa.reduce_prod" - %0 = "tosa.reduce_prod"(%arg0) {axis = 0 : i64}: (tensor) -> tensor + // CHECK: tosa.reduce_prod + %0 = tosa.reduce_prod %arg0 {axis = 0 : i64}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reduce_sum_fold func.func @reduce_sum_fold(%arg0: tensor) -> tensor { // CHECK: return %arg0 - %0 = "tosa.reduce_sum"(%arg0) {axis = 1 : i64}: (tensor) -> tensor + %0 = tosa.reduce_sum %arg0 {axis = 1 : i64}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reduce_sum_nofold func.func @reduce_sum_nofold(%arg0: tensor) -> tensor { - // CHECK: "tosa.reduce_sum" - %0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64}: (tensor) -> tensor + // CHECK: tosa.reduce_sum + %0 = tosa.reduce_sum %arg0 {axis = 0 : i64}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reshape_canonicalize func.func @reshape_canonicalize(%arg0: tensor) -> tensor { // CHECK: return %arg0 - %0 = "tosa.reshape"(%arg0) {new_shape = array}: (tensor) -> tensor + %0 = tosa.reshape %arg0 {new_shape = array}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @reshape_canonicalize_double func.func @reshape_canonicalize_double(%arg0: tensor) -> tensor { - // CHECK: %[[VAR0:.+]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: return %[[VAR0]] - %0 = "tosa.reshape"(%arg0) {new_shape = array}: (tensor) -> tensor<5x?xf32> - %1 = "tosa.reshape"(%0) {new_shape = array}: (tensor<5x?xf32>) -> tensor + // CHECK: %[[VAL_1:.*]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: return %[[VAL_1]] + %0 = 
tosa.reshape %arg0 {new_shape = array}: (tensor) -> tensor<5x?xf32> + %1 = tosa.reshape %0 {new_shape = array}: (tensor<5x?xf32>) -> tensor return %1 : tensor } @@ -379,7 +379,7 @@ // CHECK: %[[VAR0:.+]] = "tosa.const"() <{value = dense<{{\[\[}}0, 1, 2, 3, 4]]> : tensor<1x5xi32>} // CHECK: return %[[VAR0]] %0 = "tosa.const"() {value = dense<[0, 1, 2, 3, 4]> : tensor<5xi32>} : () -> tensor<5xi32> - %1 = "tosa.reshape"(%0) {new_shape = array} : (tensor<5xi32>) -> tensor<1x5xi32> + %1 = tosa.reshape %0 {new_shape = array} : (tensor<5xi32>) -> tensor<1x5xi32> return %1 : tensor<1x5xi32> } @@ -387,7 +387,7 @@ func.func @reshape_canonicalize_const_dynamic() -> tensor<1x?xi32> { // CHECK: tosa.reshape %0 = "tosa.const"() {value = dense<[0, 1, 2, 3, 4]> : tensor<5xi32>} : () -> tensor<5xi32> - %1 = "tosa.reshape"(%0) {new_shape = array} : (tensor<5xi32>) -> tensor<1x?xi32> + %1 = tosa.reshape %0 {new_shape = array} : (tensor<5xi32>) -> tensor<1x?xi32> return %1 : tensor<1x?xi32> } @@ -397,15 +397,15 @@ // CHECK-DAG: %[[VAR1:.+]] = "tosa.const"() <{value = dense<0> : tensor<1x10xi32>} // CHECK: return %[[VAR0]], %[[VAR1]] %0 = "tosa.const"() {value = dense<0> : tensor<10xi32>} : () -> tensor<10xi32> - %1 = "tosa.reshape"(%0) {new_shape = array} : (tensor<10xi32>) -> tensor<1x10xi32> + %1 = tosa.reshape %0 {new_shape = array} : (tensor<10xi32>) -> tensor<1x10xi32> return %0 , %1 : tensor<10xi32>, tensor<1x10xi32> } // CHECK-LABEL: @reshape_canonicalize_const_sparse func.func @reshape_canonicalize_const_sparse() -> (tensor<3xi32>, tensor<1x3xi32>) { - // CHECK: "tosa.reshape" + // CHECK: tosa.reshape %0 = "tosa.const"() {value = dense<[1, 2, 3]> : tensor<3xi32>} : ()-> tensor<3xi32> - %1 = "tosa.reshape"(%0) {new_shape = array} : (tensor<3xi32>) -> tensor<1x3xi32> + %1 = tosa.reshape %0 {new_shape = array} : (tensor<3xi32>) -> tensor<1x3xi32> return %0 , %1 : tensor<3xi32>, tensor<1x3xi32> } @@ -413,7 +413,7 @@ func.func @reshape_canonicalize_quant() -> 
(tensor<1x3x!quant.uniform>) { // CHECK{LITERAL}: "tosa.const"() <{value = dense<[[1, 2, 3]]> : tensor<1x3xi8>}> : () -> tensor<1x3x!quant.uniform> %0 = "tosa.const"() {value = dense<[1, 2, 3]> : tensor<3xi8>} : ()-> tensor<3x!quant.uniform> - %1 = "tosa.reshape"(%0) {new_shape = array} : (tensor<3x!quant.uniform>) -> tensor<1x3x!quant.uniform> + %1 = tosa.reshape %0 {new_shape = array} : (tensor<3x!quant.uniform>) -> tensor<1x3x!quant.uniform> return %1 : tensor<1x3x!quant.uniform> } @@ -422,35 +422,35 @@ // CHECK: "tosa.const"() <{value = dense<0> : tensor<2x1x3xi8>}> : () -> tensor<2x1x3xi8> %perms = "tosa.const"() {value = dense<[1, 0, 2]> : tensor<3xi32>} : () -> tensor<3xi32> %0 = "tosa.const"() {value = dense<0> : tensor<1x2x3xi8>} : ()-> tensor<1x2x3x!quant.uniform> - %1 = "tosa.transpose"(%0, %perms) : (tensor<1x2x3x!quant.uniform>, tensor<3xi32>) -> tensor<2x1x3xi8> + %1 = tosa.transpose %0, %perms : (tensor<1x2x3x!quant.uniform>, tensor<3xi32>) -> tensor<2x1x3xi8> return %1 : tensor<2x1x3xi8> } // CHECK-LABEL: @slice_fold func.func @slice_fold(%arg0: tensor<3x4xf32>) -> tensor<3x4xf32> { // CHECK: return %arg0 - %0 = "tosa.slice"(%arg0) { size = array, start = array}: (tensor<3x4xf32>) -> tensor<3x4xf32> + %0 = tosa.slice %arg0 { size = array, start = array}: (tensor<3x4xf32>) -> tensor<3x4xf32> return %0 : tensor<3x4xf32> } // CHECK-LABEL: @slice_nofold func.func @slice_nofold(%arg0: tensor) -> tensor { - // CHECK: "tosa.slice" - %0 = "tosa.slice"(%arg0) { size = array, start = array}: (tensor) -> tensor + // CHECK: tosa.slice + %0 = tosa.slice %arg0 { size = array, start = array}: (tensor) -> tensor return %0 : tensor } // CHECK-LABEL: @tile_fold func.func @tile_fold(%arg0: tensor<3x4xf32>) -> tensor<3x4xf32> { // CHECK: return %arg0 - %0 = "tosa.tile"(%arg0) { multiples = array }: (tensor<3x4xf32>) -> tensor<3x4xf32> + %0 = tosa.tile %arg0 { multiples = array }: (tensor<3x4xf32>) -> tensor<3x4xf32> return %0 : tensor<3x4xf32> } // CHECK-LABEL: 
@tile_nofold func.func @tile_nofold(%arg0: tensor<3x4xf32>) -> tensor<3x8xf32> { - // CHECK: "tosa.tile" - %0 = "tosa.tile"(%arg0) { multiples = array }: (tensor<3x4xf32>) -> tensor<3x8xf32> + // CHECK: tosa.tile + %0 = tosa.tile %arg0 { multiples = array }: (tensor<3x4xf32>) -> tensor<3x8xf32> return %0 : tensor<3x8xf32> } @@ -459,15 +459,15 @@ // CHECK: return %arg0 // CHECK-NOT: tosa.transpose %perms = "tosa.const"() {value = dense<[0, 1, 2, 3]> : tensor<4xi32>} : () -> tensor<4xi32> - %1 = "tosa.transpose"(%arg0, %perms) : (tensor<3x4x5x6xf32>, tensor<4xi32>) -> tensor<3x4x5x6xf32> + %1 = tosa.transpose %arg0, %perms : (tensor<3x4x5x6xf32>, tensor<4xi32>) -> tensor<3x4x5x6xf32> return %1 : tensor<3x4x5x6xf32> } // CHECK-LABEL: @transpose_is_reshape func.func @transpose_is_reshape(%arg0: tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf32> { - // CHECK: "tosa.reshape"(%arg0) <{new_shape = array}> : (tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf32> + // CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf32> %perms = "tosa.const"() <{value = dense<[3, 1, 0, 2]> : tensor<4xi32>}> : () -> tensor<4xi32> - %0 = "tosa.transpose"(%arg0, %perms) : (tensor<1x4x5x1xf32>, tensor<4xi32>) -> tensor<1x4x1x5xf32> + %0 = tosa.transpose %arg0, %perms : (tensor<1x4x5x1xf32>, tensor<4xi32>) -> tensor<1x4x1x5xf32> return %0 : tensor<1x4x1x5xf32> } @@ -476,7 +476,7 @@ func.func @single_bit_reshape() -> tensor<1xi1> { // CHECK: "tosa.const"() <{value = dense : tensor<1xi1>} %0 = arith.constant dense : tensor<1x1xi1> - %1 = "tosa.reshape"(%0) <{new_shape = array}> : (tensor<1x1xi1>) -> tensor<1xi1> + %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x1xi1>) -> tensor<1xi1> return %1 : tensor<1xi1> } @@ -485,7 +485,7 @@ // CHECK-LABEL: @fold_resize_nearest func.func @fold_resize_nearest(%arg0 : tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> { // CHECK: return %arg0 - %resize = "tosa.resize"(%arg0) <{mode = "NEAREST_NEIGHBOR", scale = array, offset = array, 
border = array}> : (tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> + %resize = tosa.resize %arg0 {mode = "NEAREST_NEIGHBOR" , scale = array, offset = array, border = array} : (tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> return %resize : tensor<1x15x13x1xi8> } @@ -494,7 +494,7 @@ // CHECK-LABEL: @fold_resize_bilinear func.func @fold_resize_bilinear(%arg0 : tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> { // CHECK: return %arg0 - %resize = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array, offset = array, border = array} : (tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> + %resize = tosa.resize %arg0 {mode = "BILINEAR" , scale = array, offset = array, border = array} : (tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> return %resize : tensor<1x15x13x1xi8> } @@ -504,9 +504,9 @@ // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x12x12x1xf32>, %[[VAL_1:.*]]: tensor<1x12x12x1xf32> // CHECK: return %[[VAL_0]], %[[VAL_1]] : tensor<1x12x12x1xf32>, tensor<1x12x12x1xf32> func.func @canonicalize_concat_slice_final_axis(%arg0 : tensor<1x12x12x1xf32>, %arg1 : tensor<1x12x12x1xf32>) -> (tensor<1x12x12x1xf32>, tensor<1x12x12x1xf32>) { - %0 = "tosa.concat"(%arg0, %arg1) {axis = 3 : i64} : (tensor<1x12x12x1xf32>, tensor<1x12x12x1xf32>) -> tensor<1x12x12x2xf32> - %1 = "tosa.slice"(%0) {size = array, start = array} : (tensor<1x12x12x2xf32>) -> tensor<1x12x12x1xf32> - %2 = "tosa.slice"(%0) {size = array, start = array} : (tensor<1x12x12x2xf32>) -> tensor<1x12x12x1xf32> + %0 = tosa.concat %arg0, %arg1 {axis = 3 : i64} : (tensor<1x12x12x1xf32>, tensor<1x12x12x1xf32>) -> tensor<1x12x12x2xf32> + %1 = tosa.slice %0 {size = array, start = array} : (tensor<1x12x12x2xf32>) -> tensor<1x12x12x1xf32> + %2 = tosa.slice %0 {size = array, start = array} : (tensor<1x12x12x2xf32>) -> tensor<1x12x12x1xf32> return %1, %2 : tensor<1x12x12x1xf32>, tensor<1x12x12x1xf32> } @@ -516,9 +516,9 @@ // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x12x12xf32>, %[[VAL_1:.*]]: tensor<1x12x12xf32> // CHECK: return %[[VAL_0]], %[[VAL_1]] : 
tensor<1x12x12xf32>, tensor<1x12x12xf32> func.func @canonicalize_concat_slice_middle_axis(%arg0 : tensor<1x12x12xf32>, %arg1 : tensor<1x12x12xf32>) -> (tensor<1x12x12xf32>, tensor<1x12x12xf32>) { - %0 = "tosa.concat"(%arg0, %arg1) {axis = 1 : i64} : (tensor<1x12x12xf32>, tensor<1x12x12xf32>) -> tensor<1x24x12xf32> - %1 = "tosa.slice"(%0) {size = array, start = array} : (tensor<1x24x12xf32>) -> tensor<1x12x12xf32> - %2 = "tosa.slice"(%0) {size = array, start = array} : (tensor<1x24x12xf32>) -> tensor<1x12x12xf32> + %0 = tosa.concat %arg0, %arg1 {axis = 1 : i64} : (tensor<1x12x12xf32>, tensor<1x12x12xf32>) -> tensor<1x24x12xf32> + %1 = tosa.slice %0 {size = array, start = array} : (tensor<1x24x12xf32>) -> tensor<1x12x12xf32> + %2 = tosa.slice %0 {size = array, start = array} : (tensor<1x24x12xf32>) -> tensor<1x12x12xf32> return %1, %2 : tensor<1x12x12xf32>, tensor<1x12x12xf32> } @@ -526,14 +526,14 @@ // CHECK-LABEL: @canonicalize_cross_concat_inputs // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x12x12xf32>, %[[VAL_1:.*]]: tensor<1x12x12xf32> -// CHECK: %[[VAL_2:.*]] = "tosa.concat"(%[[VAL_0]], %[[VAL_1]]) <{axis = 2 : i64}> : (tensor<1x12x12xf32>, tensor<1x12x12xf32>) -> tensor<1x12x24xf32> -// CHECK: %[[VAL_3:.*]] = "tosa.slice"(%[[VAL_2]]) <{size = array, start = array}> : (tensor<1x12x24xf32>) -> tensor<1x12x15xf32> -// CHECK: %[[VAL_4:.*]] = "tosa.slice"(%[[VAL_2]]) <{size = array, start = array}> : (tensor<1x12x24xf32>) -> tensor<1x12x20xf32> +// CHECK: %[[VAL_2:.*]] = tosa.concat %[[VAL_0]], %[[VAL_1]] {axis = 2 : i64} : (tensor<1x12x12xf32>, tensor<1x12x12xf32>) -> tensor<1x12x24xf32> +// CHECK: %[[VAL_3:.*]] = tosa.slice %[[VAL_2]] {size = array, start = array} : (tensor<1x12x24xf32>) -> tensor<1x12x15xf32> +// CHECK: %[[VAL_4:.*]] = tosa.slice %[[VAL_2]] {size = array, start = array} : (tensor<1x12x24xf32>) -> tensor<1x12x20xf32> // CHECK: return %[[VAL_3]], %[[VAL_4]] : tensor<1x12x15xf32>, tensor<1x12x20xf32> func.func @canonicalize_cross_concat_inputs(%arg0 : 
tensor<1x12x12xf32>, %arg1 : tensor<1x12x12xf32>) -> (tensor<1x12x15xf32>, tensor<1x12x20xf32>) { - %0 = "tosa.concat"(%arg0, %arg1) {axis = 2 : i64} : (tensor<1x12x12xf32>, tensor<1x12x12xf32>) -> tensor<1x12x24xf32> - %1 = "tosa.slice"(%0) {size = array, start = array} : (tensor<1x12x24xf32>) -> tensor<1x12x15xf32> - %2 = "tosa.slice"(%0) {size = array, start = array} : (tensor<1x12x24xf32>) -> tensor<1x12x20xf32> + %0 = tosa.concat %arg0, %arg1 {axis = 2 : i64} : (tensor<1x12x12xf32>, tensor<1x12x12xf32>) -> tensor<1x12x24xf32> + %1 = tosa.slice %0 {size = array, start = array} : (tensor<1x12x24xf32>) -> tensor<1x12x15xf32> + %2 = tosa.slice %0 {size = array, start = array} : (tensor<1x12x24xf32>) -> tensor<1x12x20xf32> return %1, %2 : tensor<1x12x15xf32>, tensor<1x12x20xf32> } @@ -541,13 +541,13 @@ // CHECK-LABEL: @canonicalize_concat_slice_on_non_concat_axis // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x12x12xf32>, %[[VAL_1:.*]]: tensor<1x12x12xf32> -// CHECK: %[[VAL_2:.*]] = "tosa.slice"(%[[VAL_0]]) <{size = array, start = array}> : (tensor<1x12x12xf32>) -> tensor<1x6x12xf32> -// CHECK: %[[VAL_3:.*]] = "tosa.slice"(%[[VAL_1]]) <{size = array, start = array}> : (tensor<1x12x12xf32>) -> tensor<1x3x12xf32> +// CHECK: %[[VAL_2:.*]] = tosa.slice %[[VAL_0]] {size = array, start = array} : (tensor<1x12x12xf32>) -> tensor<1x6x12xf32> +// CHECK: %[[VAL_3:.*]] = tosa.slice %[[VAL_1]] {size = array, start = array} : (tensor<1x12x12xf32>) -> tensor<1x3x12xf32> // CHECK: return %[[VAL_2]], %[[VAL_3]] : tensor<1x6x12xf32>, tensor<1x3x12xf32> func.func @canonicalize_concat_slice_on_non_concat_axis(%arg0 : tensor<1x12x12xf32>, %arg1 : tensor<1x12x12xf32>) -> (tensor<1x6x12xf32>, tensor<1x3x12xf32>) { - %0 = "tosa.concat"(%arg0, %arg1) {axis = 2 : i64} : (tensor<1x12x12xf32>, tensor<1x12x12xf32>) -> tensor<1x12x24xf32> - %1 = "tosa.slice"(%0) {size = array, start = array} : (tensor<1x12x24xf32>) -> tensor<1x6x12xf32> - %2 = "tosa.slice"(%0) {size = array, start = array} : 
(tensor<1x12x24xf32>) -> tensor<1x3x12xf32> + %0 = tosa.concat %arg0, %arg1 {axis = 2 : i64} : (tensor<1x12x12xf32>, tensor<1x12x12xf32>) -> tensor<1x12x24xf32> + %1 = tosa.slice %0 {size = array, start = array} : (tensor<1x12x24xf32>) -> tensor<1x6x12xf32> + %2 = tosa.slice %0 {size = array, start = array} : (tensor<1x12x24xf32>) -> tensor<1x3x12xf32> return %1, %2 : tensor<1x6x12xf32>, tensor<1x3x12xf32> } @@ -556,8 +556,8 @@ // CHECK-LABEL func.func @fold_log_exp(%arg0: tensor) -> tensor { // CHECK: return %arg{{.*}} : tensor - %0 = "tosa.exp"(%arg0) : (tensor) -> tensor - %1 = "tosa.log"(%0) : (tensor) -> tensor + %0 = tosa.exp %arg0 : (tensor) -> tensor + %1 = tosa.log %0 : (tensor) -> tensor return %1 : tensor } @@ -566,8 +566,8 @@ // CHECK-LABEL: @fold_exp_log func.func @fold_exp_log(%arg0: tensor) -> tensor { // CHECK: return %arg{{.*}} : tensor - %0 = "tosa.log"(%arg0) : (tensor) -> tensor - %1 = "tosa.exp"(%0) : (tensor) -> tensor + %0 = tosa.log %arg0 : (tensor) -> tensor + %1 = tosa.exp %0 : (tensor) -> tensor return %1 : tensor } @@ -576,8 +576,8 @@ // CHECK-LABEL: @fold_negate_negate func.func @fold_negate_negate(%arg0: tensor) -> tensor { // CHECK: return %arg{{.*}} : tensor - %0 = "tosa.negate"(%arg0) : (tensor) -> tensor - %1 = "tosa.negate"(%0) : (tensor) -> tensor + %0 = tosa.negate %arg0 : (tensor) -> tensor + %1 = tosa.negate %0 : (tensor) -> tensor return %1 : tensor } @@ -585,9 +585,9 @@ // CHECK-LABEL: @fold_abs_abs func.func @fold_abs_abs(%arg0: tensor) -> tensor { - // CHECK: %[[ABS:.*]] = "tosa.abs"(%arg{{.*}}) : (tensor) -> tensor + // CHECK: %[[ABS:.*]] = tosa.abs %arg{{.*}} : (tensor) -> tensor // CHECK: return %[[ABS]] : tensor - %0 = "tosa.abs"(%arg0) : (tensor) -> tensor - %1 = "tosa.abs"(%0) : (tensor) -> tensor + %0 = tosa.abs %arg0 : (tensor) -> tensor + %1 = tosa.abs %0 : (tensor) -> tensor return %1 : tensor } diff --git a/mlir/test/Dialect/Tosa/constant-op-fold.mlir b/mlir/test/Dialect/Tosa/constant-op-fold.mlir --- 
a/mlir/test/Dialect/Tosa/constant-op-fold.mlir +++ b/mlir/test/Dialect/Tosa/constant-op-fold.mlir @@ -4,23 +4,23 @@ func.func @transpose_fold(%arg0: tensor<3x4xf32>) -> tensor<3x4xf32> { // CHECK: return %arg0 %0 = arith.constant dense<[0, 1]> : tensor<2xi32> - %1 = "tosa.transpose"(%arg0, %0) { perms = [1, 0] }: (tensor<3x4xf32>, tensor<2xi32>) -> tensor<3x4xf32> + %1 = tosa.transpose %arg0, %0 { perms = [1, 0] }: (tensor<3x4xf32>, tensor<2xi32>) -> tensor<3x4xf32> return %1 : tensor<3x4xf32> } // CHECK-LABEL: @transpose_nofold func.func @transpose_nofold(%arg0: tensor<3x3xf32>) -> tensor<3x3xf32> { - // CHECK: "tosa.transpose" + // CHECK: tosa.transpose %0 = arith.constant dense<[1, 0]> : tensor<2xi32> - %1 = "tosa.transpose"(%arg0, %0) { perms = [1, 0] }: (tensor<3x3xf32>, tensor<2xi32>) -> tensor<3x3xf32> + %1 = tosa.transpose %arg0, %0 { perms = [1, 0] }: (tensor<3x3xf32>, tensor<2xi32>) -> tensor<3x3xf32> return %1 : tensor<3x3xf32> } // CHECK-LABEL: @transpose_nofold_shape func.func @transpose_nofold_shape(%arg0: tensor<3x4xf32>) -> tensor { - // CHECK: "tosa.transpose" + // CHECK: tosa.transpose %0 = arith.constant dense<[1, 0]> : tensor<2xi32> - %1 = "tosa.transpose"(%arg0, %0) { perms = [1, 0] }: (tensor<3x4xf32>, tensor<2xi32>) -> tensor + %1 = tosa.transpose %arg0, %0 { perms = [1, 0] }: (tensor<3x4xf32>, tensor<2xi32>) -> tensor return %1 : tensor } @@ -28,9 +28,9 @@ func.func @transpose_fold_splat() -> tensor<3x2xf32> { %input = "tosa.const"() {value = dense<4.0> : tensor<2x3xf32>} : () -> tensor<2x3xf32> %perms = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32> - // CHECK: %[[CST:.+]] = "tosa.const"() + // CHECK: %[[CST:.+]] = "tosa.const"() <{ // CHECK-SAME{LITERAL}: value = dense<4.000000e+00> : tensor<3x2xf32> - %1 = "tosa.transpose"(%input, %perms) : (tensor<2x3xf32>, tensor<2xi32>) -> tensor<3x2xf32> + %1 = tosa.transpose %input, %perms : (tensor<2x3xf32>, tensor<2xi32>) -> tensor<3x2xf32> // CHECK: return %[[CST]] 
return %1 : tensor<3x2xf32> } @@ -39,9 +39,9 @@ func.func @transpose_fold_2d_float() -> tensor<3x2xf32> { %input = "tosa.const"() {value = dense<[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]> : tensor<2x3xf32>} : () -> tensor<2x3xf32> %perms = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32> - // CHECK: %[[CST:.+]] = "tosa.const"() + // CHECK: %[[CST:.+]] = "tosa.const"() <{ // CHECK-SAME{LITERAL}: value = dense<[[0.000000e+00, 3.000000e+00], [1.000000e+00, 4.000000e+00], [2.000000e+00, 5.000000e+00]]> : tensor<3x2xf32> - %1 = "tosa.transpose"(%input, %perms) : (tensor<2x3xf32>, tensor<2xi32>) -> tensor<3x2xf32> + %1 = tosa.transpose %input, %perms : (tensor<2x3xf32>, tensor<2xi32>) -> tensor<3x2xf32> // CHECK: return %[[CST]] return %1 : tensor<3x2xf32> } @@ -50,9 +50,9 @@ func.func @transpose_fold_2d_bool() -> tensor<3x2xi1> { %input = "tosa.const"() {value = dense<[[true, false, false], [false, false, true]]> : tensor<2x3xi1>} : () -> tensor<2x3xi1> %perms = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32> - // CHECK: %[[CST:.+]] = "tosa.const"() + // CHECK: %[[CST:.+]] = "tosa.const"() <{ // CHECK-SAME{LITERAL}: value = dense<[[true, false], [false, false], [false, true]]> : tensor<3x2xi1> - %1 = "tosa.transpose"(%input, %perms) : (tensor<2x3xi1>, tensor<2xi32>) -> tensor<3x2xi1> + %1 = tosa.transpose %input, %perms : (tensor<2x3xi1>, tensor<2xi32>) -> tensor<3x2xi1> // CHECK: return %[[CST]] return %1 : tensor<3x2xi1> } @@ -64,13 +64,13 @@ [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]] ]]> : tensor<1x2x3x4xi32>} : () -> tensor<1x2x3x4xi32> %perms = "tosa.const"() {value = dense<[2, 0, 3, 1]> : tensor<4xi64>} : () -> tensor<4xi64> - // CHECK: %[[CST:.+]] = "tosa.const"() + // CHECK: %[[CST:.+]] = "tosa.const"() <{ // CHECK-SAME{LITERAL}: value = dense<[ // CHECK-SAME{LITERAL}: [[[0, 12], [1, 13], [2, 14], [3, 15]]], // CHECK-SAME{LITERAL}: [[[4, 16], [5, 17], [6, 18], [7, 19]]], // CHECK-SAME{LITERAL}: [[[8, 
20], [9, 21], [10, 22], [11, 23]]] // CHECK-SAME{LITERAL}: ]> - %1 = "tosa.transpose"(%input, %perms) : (tensor<1x2x3x4xi32>, tensor<4xi64>) -> tensor<3x1x4x2xi32> + %1 = tosa.transpose %input, %perms : (tensor<1x2x3x4xi32>, tensor<4xi64>) -> tensor<3x1x4x2xi32> // CHECK: return %[[CST]] return %1 : tensor<3x1x4x2xi32> } @@ -79,7 +79,7 @@ func.func @transpose_nofold_non_cst_input(%input: tensor<2x3xf32>) -> tensor<3x2xf32> { %perms = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32> // CHECK: tosa.transpose - %1 = "tosa.transpose"(%input, %perms) : (tensor<2x3xf32>, tensor<2xi32>) -> tensor<3x2xf32> + %1 = tosa.transpose %input, %perms : (tensor<2x3xf32>, tensor<2xi32>) -> tensor<3x2xf32> return %1 : tensor<3x2xf32> } @@ -87,7 +87,7 @@ func.func @transpose_nofold_non_cst_perms(%perms: tensor<2xi32>) -> tensor<3x2xf32> { %input = "tosa.const"() {value = dense<[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]> : tensor<2x3xf32>} : () -> tensor<2x3xf32> // CHECK: tosa.transpose - %1 = "tosa.transpose"(%input, %perms) : (tensor<2x3xf32>, tensor<2xi32>) -> tensor<3x2xf32> + %1 = tosa.transpose %input, %perms : (tensor<2x3xf32>, tensor<2xi32>) -> tensor<3x2xf32> return %1 : tensor<3x2xf32> } @@ -96,7 +96,7 @@ %input = "tosa.const"() {value = dense<[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]> : tensor<2x3xf32>} : () -> tensor<2x3xf32> %perms = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32> // CHECK: tosa.transpose - %1 = "tosa.transpose"(%input, %perms) : (tensor<2x3xf32>, tensor<2xi32>) -> tensor<3x2xf32> + %1 = tosa.transpose %input, %perms : (tensor<2x3xf32>, tensor<2xi32>) -> tensor<3x2xf32> return %1, %input : tensor<3x2xf32>, tensor<2x3xf32> } @@ -105,7 +105,7 @@ %perms = "tosa.const"() {value = dense<[1, 2, 3, 0]> : tensor<4xi32>} : () -> tensor<4xi32> %input = "tosa.const"() {value = dense<-127> : tensor<2x1x1x2xi8>} : () -> tensor<2x1x1x2xi8> // CHECK: tosa.transpose - %0 = "tosa.transpose"(%input, %perms) : (tensor<2x1x1x2xi8>, 
tensor<4xi32>) -> tensor<1x1x2x2x!quant.uniform:f32:3, {1.000000e-01,1.000000e-01}>> + %0 = tosa.transpose %input, %perms : (tensor<2x1x1x2xi8>, tensor<4xi32>) -> tensor<1x1x2x2x!quant.uniform:f32:3, {1.000000e-01,1.000000e-01}>> return %0: tensor<1x1x2x2x!quant.uniform:f32:3, {1.000000e-01,1.000000e-01}>> } @@ -114,7 +114,7 @@ // CHECK-LABEL: @fold_add_zero_rhs_f32 func.func @fold_add_zero_rhs_f32(%arg0: tensor) -> tensor { %zero = "tosa.const"() {value = dense<0.0> : tensor} : () -> tensor - %add = "tosa.add"(%arg0, %zero) : (tensor, tensor) -> tensor + %add = tosa.add %arg0, %zero : (tensor, tensor) -> tensor // CHECK: return %arg0 return %add : tensor } @@ -124,7 +124,7 @@ // CHECK-LABEL: @fold_add_zero_lhs_f32 func.func @fold_add_zero_lhs_f32(%arg0: tensor) -> tensor { %zero = "tosa.const"() {value = dense<0.0> : tensor} : () -> tensor - %add = "tosa.add"(%zero, %arg0) : (tensor, tensor) -> tensor + %add = tosa.add %zero, %arg0 : (tensor, tensor) -> tensor // CHECK: return %arg0 return %add : tensor } @@ -134,7 +134,7 @@ // CHECK-LABEL: @fold_add_zero_rhs_i32 func.func @fold_add_zero_rhs_i32(%arg0: tensor) -> tensor { %zero = "tosa.const"() {value = dense<0> : tensor} : () -> tensor - %add = "tosa.add"(%arg0, %zero) : (tensor, tensor) -> tensor + %add = tosa.add %arg0, %zero : (tensor, tensor) -> tensor // CHECK: return %arg0 return %add : tensor } @@ -144,7 +144,7 @@ // CHECK-LABEL: @fold_add_zero_lhs_i32 func.func @fold_add_zero_lhs_i32(%arg0: tensor) -> tensor { %zero = "tosa.const"() {value = dense<0> : tensor} : () -> tensor - %add = "tosa.add"(%zero, %arg0) : (tensor, tensor) -> tensor + %add = tosa.add %zero, %arg0 : (tensor, tensor) -> tensor // CHECK: return %arg0 return %add : tensor } @@ -155,7 +155,7 @@ func.func @fold_add_splat_i32() -> tensor<10xi32> { %one = "tosa.const"() {value = dense<1> : tensor<10xi32>} : () -> tensor<10xi32> %two = "tosa.const"() {value = dense<2> : tensor<10xi32>} : () -> tensor<10xi32> - %add = "tosa.add"(%one, %two) : 
(tensor<10xi32>, tensor<10xi32>) -> tensor<10xi32> + %add = tosa.add %one, %two : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi32> // CHECK: %[[THREE:.+]] = "tosa.const"() <{value = dense<3> : tensor<10xi32>} // CHECK: return %[[THREE]] return %add : tensor<10xi32> @@ -167,7 +167,7 @@ func.func @fold_add_splat_f32() -> tensor<10xf32> { %one = "tosa.const"() {value = dense<1.0> : tensor<10xf32>} : () -> tensor<10xf32> %two = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32> - %add = "tosa.add"(%one, %two) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32> + %add = tosa.add %one, %two : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32> // CHECK: %[[THREE:.+]] = "tosa.const"() <{value = dense<3.000000e+00> // CHECK: return %[[THREE]] return %add : tensor<10xf32> @@ -179,7 +179,7 @@ func.func @fold_div_zero_lhs_i32(%arg0: tensor) -> tensor { %zero = "tosa.const"() {value = dense<0> : tensor} : () -> tensor // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> - %div = "tosa.div"(%zero, %arg0) : (tensor, tensor) -> tensor + %div = tosa.div %zero, %arg0 : (tensor, tensor) -> tensor // CHECK: return %[[ZERO]] return %div : tensor } @@ -189,7 +189,7 @@ // CHECK-LABEL: @fold_div_one_rhs_i32 func.func @fold_div_one_rhs_i32(%arg0: tensor) -> tensor { %one = "tosa.const"() {value = dense<1> : tensor} : () -> tensor - %div = "tosa.div"(%arg0, %one) : (tensor, tensor) -> tensor + %div = tosa.div %arg0, %one : (tensor, tensor) -> tensor // CHECK: return %arg0 return %div : tensor } @@ -201,7 +201,7 @@ %lhs = "tosa.const"() {value = dense<10> : tensor} : () -> tensor %rhs = "tosa.const"() {value = dense<-3> : tensor} : () -> tensor // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<-3> - %div = "tosa.div"(%lhs, %rhs) : (tensor, tensor) -> tensor + %div = tosa.div %lhs, %rhs : (tensor, tensor) -> tensor // CHECK: return %[[SPLAT]] return %div : tensor } @@ -213,7 +213,7 @@ func.func @fold_mul_zero_rhs_f32(%arg0: tensor) -> tensor { %zero = 
"tosa.const"() {value = dense<0.0> : tensor} : () -> tensor // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0.000000e+00> - %mul = "tosa.mul"(%arg0, %zero) {shift = 0 : i32} : (tensor, tensor) -> tensor + %mul = tosa.mul %arg0, %zero {shift = 0 : i32} : (tensor, tensor) -> tensor // CHECK: return %[[ZERO]] return %mul : tensor } @@ -224,7 +224,7 @@ func.func @fold_mul_zero_lhs_f32(%arg0: tensor) -> tensor { %zero = "tosa.const"() {value = dense<0.0> : tensor} : () -> tensor // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0.000000e+00> - %mul = "tosa.mul"(%zero, %arg0) {shift = 0 : i32} : (tensor, tensor) -> tensor + %mul = tosa.mul %zero, %arg0 {shift = 0 : i32} : (tensor, tensor) -> tensor // CHECK: return %[[ZERO]] return %mul : tensor } @@ -235,7 +235,7 @@ func.func @fold_mul_zero_rhs_i32(%arg0: tensor) -> tensor { %zero = "tosa.const"() {value = dense<0> : tensor} : () -> tensor // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> - %mul = "tosa.mul"(%arg0, %zero) {shift = 0 : i32} : (tensor, tensor) -> tensor + %mul = tosa.mul %arg0, %zero {shift = 0 : i32} : (tensor, tensor) -> tensor // CHECK: return %[[ZERO]] return %mul : tensor } @@ -246,7 +246,7 @@ func.func @fold_mul_zero_lhs_i32(%arg0: tensor) -> tensor { %zero = "tosa.const"() {value = dense<0> : tensor} : () -> tensor // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> - %mul = "tosa.mul"(%zero, %arg0) {shift = 0 : i32} : (tensor, tensor) -> tensor + %mul = tosa.mul %zero, %arg0 {shift = 0 : i32} : (tensor, tensor) -> tensor // CHECK: return %[[ZERO]] return %mul : tensor } @@ -256,7 +256,7 @@ // CHECK-LABEL: @fold_mul_one_rhs_f32 func.func @fold_mul_one_rhs_f32(%arg0: tensor) -> tensor { %one = "tosa.const"() {value = dense<1.0> : tensor} : () -> tensor - %mul = "tosa.mul"(%arg0, %one) {shift = 0 : i32} : (tensor, tensor) -> tensor + %mul = tosa.mul %arg0, %one {shift = 0 : i32} : (tensor, tensor) -> tensor // CHECK: return %arg0 return %mul : tensor } @@ -266,7 +266,7 
@@ // CHECK-LABEL: @fold_mul_one_lhs_f32 func.func @fold_mul_one_lhs_f32(%arg0: tensor) -> tensor { %one = "tosa.const"() {value = dense<1.0> : tensor} : () -> tensor - %mul = "tosa.mul"(%one, %arg0) {shift = 0 : i32} : (tensor, tensor) -> tensor + %mul = tosa.mul %one, %arg0 {shift = 0 : i32} : (tensor, tensor) -> tensor // CHECK: return %arg0 return %mul : tensor } @@ -276,7 +276,7 @@ // CHECK-LABEL: @fold_mul_one_rhs_i32 func.func @fold_mul_one_rhs_i32(%arg0: tensor) -> tensor { %one = "tosa.const"() {value = dense<64> : tensor} : () -> tensor - %mul = "tosa.mul"(%arg0, %one) {shift = 6 : i32} : (tensor, tensor) -> tensor + %mul = tosa.mul %arg0, %one {shift = 6 : i32} : (tensor, tensor) -> tensor // CHECK: return %arg0 return %mul : tensor } @@ -286,7 +286,7 @@ // CHECK-LABEL: @fold_mul_one_lhs_i32 func.func @fold_mul_one_lhs_i32(%arg0: tensor) -> tensor { %one = "tosa.const"() {value = dense<64> : tensor} : () -> tensor - %mul = "tosa.mul"(%one, %arg0) {shift = 6 : i32} : (tensor, tensor) -> tensor + %mul = tosa.mul %one, %arg0 {shift = 6 : i32} : (tensor, tensor) -> tensor // CHECK: return %arg0 return %mul : tensor } @@ -297,7 +297,7 @@ func.func @fold_mul_splat_i8() -> tensor<10xi32> { %one = "tosa.const"() {value = dense<17> : tensor<10xi8>} : () -> tensor<10xi8> %two = "tosa.const"() {value = dense<32> : tensor<10xi8>} : () -> tensor<10xi8> - %mul = "tosa.mul"(%one, %two) {shift = 3 : i32} : (tensor<10xi8>, tensor<10xi8>) -> tensor<10xi32> + %mul = tosa.mul %one, %two {shift = 3 : i32} : (tensor<10xi8>, tensor<10xi8>) -> tensor<10xi32> // CHECK: %[[THREE:.+]] = "tosa.const"() <{value = dense<68> : tensor<10xi32>} // CHECK: return %[[THREE]] return %mul : tensor<10xi32> @@ -309,7 +309,7 @@ func.func @fold_mul_splat_f32() -> tensor<10xf32> { %one = "tosa.const"() {value = dense<3.0> : tensor<10xf32>} : () -> tensor<10xf32> %two = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32> - %mul = "tosa.mul"(%one, %two) {shift = 0 : i32} : 
(tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32> + %mul = tosa.mul %one, %two {shift = 0 : i32} : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32> // CHECK: %[[THREE:.+]] = "tosa.const"() <{value = dense<6.000000e+00> : tensor<10xf32>} // CHECK: return %[[THREE]] return %mul : tensor<10xf32> @@ -320,7 +320,7 @@ // CHECK-LABEL: @fold_sub_zero_rhs_f32 func.func @fold_sub_zero_rhs_f32(%arg0: tensor) -> tensor { %zero = "tosa.const"() {value = dense<0.0> : tensor} : () -> tensor - %sub = "tosa.sub"(%arg0, %zero) : (tensor, tensor) -> tensor + %sub = tosa.sub %arg0, %zero : (tensor, tensor) -> tensor // CHECK: return %arg0 return %sub : tensor } @@ -330,7 +330,7 @@ // CHECK-LABEL: @fold_sub_zero_rhs_i32 func.func @fold_sub_zero_rhs_i32(%arg0: tensor) -> tensor { %zero = "tosa.const"() {value = dense<0> : tensor} : () -> tensor - %sub = "tosa.sub"(%arg0, %zero) : (tensor, tensor) -> tensor + %sub = tosa.sub %arg0, %zero : (tensor, tensor) -> tensor // CHECK: return %arg0 return %sub : tensor } @@ -341,7 +341,7 @@ func.func @fold_sub_splat_i32() -> tensor<10xi32> { %one = "tosa.const"() {value = dense<1> : tensor<10xi32>} : () -> tensor<10xi32> %two = "tosa.const"() {value = dense<2> : tensor<10xi32>} : () -> tensor<10xi32> - %sub = "tosa.sub"(%one, %two) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi32> + %sub = tosa.sub %one, %two : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi32> // CHECK: %[[THREE:.+]] = "tosa.const"() <{value = dense<-1> : tensor<10xi32>} // CHECK: return %[[THREE]] return %sub : tensor<10xi32> @@ -353,7 +353,7 @@ func.func @fold_sub_splat_f32() -> tensor<10xf32> { %one = "tosa.const"() {value = dense<1.0> : tensor<10xf32>} : () -> tensor<10xf32> %two = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32> - %sub = "tosa.sub"(%one, %two) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32> + %sub = tosa.sub %one, %two : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32> // CHECK: %[[THREE:.+]] = "tosa.const"() <{value = 
dense<-1.000000e+00> : tensor<10xf32>} // CHECK: return %[[THREE]] return %sub : tensor<10xf32> @@ -367,8 +367,8 @@ %1 = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32> %2 = "tosa.const"() {value = dense<1.0> : tensor<10xf32>} : () -> tensor<10xf32> %3 = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32> - %true = "tosa.greater"(%0, %1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1> - %false = "tosa.greater"(%2, %3) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1> + %true = tosa.greater %0, %1 : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1> + %false = tosa.greater %2, %3 : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1> // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} // CHECK: return %[[TRUE]], %[[FALSE]] @@ -383,8 +383,8 @@ %1 = "tosa.const"() {value = dense<8> : tensor<10xi32>} : () -> tensor<10xi32> %2 = "tosa.const"() {value = dense<-10> : tensor<10xi32>} : () -> tensor<10xi32> %3 = "tosa.const"() {value = dense<-12> : tensor<10xi32>} : () -> tensor<10xi32> - %false = "tosa.greater"(%0, %1) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> - %true = "tosa.greater"(%2, %3) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> + %false = tosa.greater %0, %1 : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> + %true = tosa.greater %2, %3 : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} // CHECK: return %[[FALSE]], %[[TRUE]] @@ -399,8 +399,8 @@ %1 = "tosa.const"() {value = dense<4.0> : tensor<10xf32>} : () -> tensor<10xf32> %2 = "tosa.const"() {value = dense<1.0> : tensor<10xf32>} : () -> tensor<10xf32> %3 = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32> - %true = "tosa.greater_equal"(%0, %1) : 
(tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1> - %false = "tosa.greater_equal"(%2, %3) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1> + %true = tosa.greater_equal %0, %1 : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1> + %false = tosa.greater_equal %2, %3 : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1> // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} // CHECK: return %[[TRUE]], %[[FALSE]] @@ -415,8 +415,8 @@ %1 = "tosa.const"() {value = dense<8> : tensor<10xi32>} : () -> tensor<10xi32> %2 = "tosa.const"() {value = dense<-10> : tensor<10xi32>} : () -> tensor<10xi32> %3 = "tosa.const"() {value = dense<-10> : tensor<10xi32>} : () -> tensor<10xi32> - %true = "tosa.greater_equal"(%2, %3) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> - %false = "tosa.greater_equal"(%0, %1) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> + %true = tosa.greater_equal %2, %3 : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> + %false = tosa.greater_equal %0, %1 : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} // CHECK: return %[[TRUE]], %[[FALSE]] @@ -431,8 +431,8 @@ %1 = "tosa.const"() {value = dense<4.0> : tensor<10xf32>} : () -> tensor<10xf32> %2 = "tosa.const"() {value = dense<1.0> : tensor<10xf32>} : () -> tensor<10xf32> %3 = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32> - %true = "tosa.equal"(%0, %1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1> - %false = "tosa.equal"(%2, %3) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1> + %true = tosa.equal %0, %1 : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1> + %false = tosa.equal %2, %3 : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1> // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} // 
CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} // CHECK: return %[[TRUE]], %[[FALSE]] @@ -447,8 +447,8 @@ %1 = "tosa.const"() {value = dense<8> : tensor<10xi32>} : () -> tensor<10xi32> %2 = "tosa.const"() {value = dense<-10> : tensor<10xi32>} : () -> tensor<10xi32> %3 = "tosa.const"() {value = dense<-10> : tensor<10xi32>} : () -> tensor<10xi32> - %true = "tosa.equal"(%2, %3) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> - %false = "tosa.equal"(%0, %1) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> + %true = tosa.equal %2, %3 : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> + %false = tosa.equal %0, %1 : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} // CHECK: return %[[TRUE]], %[[FALSE]] @@ -460,7 +460,7 @@ // CHECK-LABEL: @fold_eq_i32 func.func @fold_eq_i32(%arg0 : tensor<10xi32>) -> (tensor<10xi1>) { // CHECK: %[[TRUE:.+]] = "tosa.const"() <{value = dense : tensor<10xi1>} - %0 = "tosa.equal"(%arg0, %arg0) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> + %0 = tosa.equal %arg0, %arg0 : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1> // CHECK: return %[[TRUE]] return %0 : tensor<10xi1> } @@ -470,7 +470,7 @@ func.func @reshape_splat() -> tensor<6x5x4xi32> { // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<42> : tensor<6x5x4xi32>} %splat = "tosa.const"() {value = dense<42> : tensor<4x5x6xi32>} : () -> tensor<4x5x6xi32> - %reshape = "tosa.reshape"(%splat) { new_shape = array } : (tensor<4x5x6xi32>) -> tensor<6x5x4xi32> + %reshape = tosa.reshape %splat { new_shape = array } : (tensor<4x5x6xi32>) -> tensor<6x5x4xi32> // CHECK: return %[[SPLAT]] return %reshape : tensor<6x5x4xi32> } @@ -481,7 +481,7 @@ func.func @slice_splat() -> tensor<1x1x1xi32> { // CHECK: %[[SLICE:.+]] = "tosa.const"() <{value = dense<42> : tensor<1x1x1xi32>} %splat = "tosa.const"() {value 
= dense<42> : tensor<4x5x6xi32>} : () -> tensor<4x5x6xi32> - %slice = "tosa.slice"(%splat) { size = array, start = array } : (tensor<4x5x6xi32>) -> tensor<1x1x1xi32> + %slice = tosa.slice %splat { size = array, start = array } : (tensor<4x5x6xi32>) -> tensor<1x1x1xi32> // CHECK: return %[[SLICE]] return %slice : tensor<1x1x1xi32> } @@ -492,7 +492,7 @@ func.func @slice_singleton() -> tensor<1x1xi32> { %splat = "tosa.const"() {value = dense<[[0, 1, 2], [3, 4, 5], [6, 7 ,8]]> : tensor<3x3xi32>} : () -> tensor<3x3xi32> // CHECK: %[[SLICE:.+]] = "tosa.const"() <{value = dense<4> : tensor<1x1xi32>} - %slice = "tosa.slice"(%splat) { size = array, start = array } : (tensor<3x3xi32>) -> tensor<1x1xi32> + %slice = tosa.slice %splat { size = array, start = array } : (tensor<3x3xi32>) -> tensor<1x1xi32> // CHECK: return %[[SLICE]] return %slice : tensor<1x1xi32> } @@ -503,7 +503,7 @@ func.func @cast_float_to_float() -> tensor { %splat = "tosa.const"() {value = dense<42.0> : tensor} : () -> tensor // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<4.200000e+01> : tensor} - %cast = "tosa.cast"(%splat) : (tensor) -> tensor + %cast = tosa.cast %splat : (tensor) -> tensor // CHECK: return %[[SPLAT]] return %cast : tensor } @@ -514,7 +514,7 @@ func.func @cast_int_to_float() -> tensor { %splat = "tosa.const"() {value = dense<4> : tensor} : () -> tensor // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<4.000000e+00> : tensor} - %cast = "tosa.cast"(%splat) : (tensor) -> tensor + %cast = tosa.cast %splat : (tensor) -> tensor // CHECK: return %[[SPLAT]] return %cast : tensor } @@ -525,7 +525,7 @@ func.func @cast_float_to_int() -> tensor { %splat = "tosa.const"() {value = dense<-4.0> : tensor} : () -> tensor // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<-4> : tensor} - %cast = "tosa.cast"(%splat) : (tensor) -> tensor + %cast = tosa.cast %splat : (tensor) -> tensor // CHECK: return %[[SPLAT]] return %cast : tensor } @@ -536,7 +536,7 @@ func.func 
@cast_int_to_int_trunc() -> tensor { %splat = "tosa.const"() {value = dense<-1> : tensor} : () -> tensor // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<-1> : tensor} - %cast = "tosa.cast"(%splat) : (tensor) -> tensor + %cast = tosa.cast %splat : (tensor) -> tensor // CHECK: return %[[SPLAT]] return %cast : tensor } @@ -547,7 +547,7 @@ func.func @cast_int_to_int_sign() -> tensor { %splat = "tosa.const"() {value = dense<-1> : tensor} : () -> tensor // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<-1> : tensor} - %cast = "tosa.cast"(%splat) : (tensor) -> tensor + %cast = tosa.cast %splat : (tensor) -> tensor // CHECK: return %[[SPLAT]] return %cast : tensor } @@ -558,7 +558,7 @@ func.func @reverse_splat() -> tensor<10xi32> { // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<42> : tensor<10xi32>} %splat = "tosa.const"() {value = dense<42> : tensor<10xi32>} : () -> tensor<10xi32> - %reverse = "tosa.reverse"(%splat) { axis = 0 : i64 } : (tensor<10xi32>) -> tensor<10xi32> + %reverse = tosa.reverse %splat { axis = 0 : i64 } : (tensor<10xi32>) -> tensor<10xi32> // CHECK: return %[[SPLAT]] return %reverse : tensor<10xi32> } @@ -567,9 +567,9 @@ // CHECK-LABEL: @reverse_length_one func.func @reverse_length_one(%arg0 : tensor<10x1xi32>) -> (tensor<10x1xi32>, tensor<10x1xi32>) { - %nofold = "tosa.reverse"(%arg0) { axis = 0 : i64 } : (tensor<10x1xi32>) -> tensor<10x1xi32> - %fold = "tosa.reverse"(%arg0) { axis = 1 : i64 } : (tensor<10x1xi32>) -> tensor<10x1xi32> - // CHECK: %[[NOFOLD:.+]] = "tosa.reverse"(%arg0) <{axis = 0 : i64} + %nofold = tosa.reverse %arg0 { axis = 0 : i64 } : (tensor<10x1xi32>) -> tensor<10x1xi32> + %fold = tosa.reverse %arg0 { axis = 1 : i64 } : (tensor<10x1xi32>) -> tensor<10x1xi32> + // CHECK: %[[NOFOLD:.+]] = tosa.reverse %arg0 {axis = 0 : i64} // CHECK: return %[[NOFOLD]], %arg0 return %nofold, %fold : tensor<10x1xi32>, tensor<10x1xi32> } diff --git a/mlir/test/Dialect/Tosa/constant_folding.mlir 
b/mlir/test/Dialect/Tosa/constant_folding.mlir --- a/mlir/test/Dialect/Tosa/constant_folding.mlir +++ b/mlir/test/Dialect/Tosa/constant_folding.mlir @@ -2,22 +2,22 @@ // CHECK-LABEL: func @test_const func.func @test_const(%arg0 : index) -> tensor<4xi32> { - // CHECK: "tosa.const" + // CHECK: tosa.const %0 = "tosa.const"() {value = dense<[3, 0, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32> return %0 : tensor<4xi32> } // CHECK-LABEL: func @test_const_i64 func.func @test_const_i64(%arg0 : index) -> tensor<4xi64> { - // CHECK: "tosa.const" + // CHECK: tosa.const %0 = "tosa.const"() {value = dense<[3, 0, 1, 2]> : tensor<4xi64>} : () -> tensor<4xi64> return %0 : tensor<4xi64> } // CHECK-LABEL: func @try_fold_equal_with_unranked_tensor func.func @try_fold_equal_with_unranked_tensor(%arg0: tensor<4xi32>, %arg1: tensor) { - // CHECK: "tosa.equal" + // CHECK: tosa.equal // CHECK-NEXT: return - %0 = "tosa.equal"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<*xi1> + %0 = tosa.equal %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<*xi1> return } diff --git a/mlir/test/Dialect/Tosa/fold_concats.mlir b/mlir/test/Dialect/Tosa/fold_concats.mlir --- a/mlir/test/Dialect/Tosa/fold_concats.mlir +++ b/mlir/test/Dialect/Tosa/fold_concats.mlir @@ -1,28 +1,28 @@ // RUN: mlir-opt --split-input-file --canonicalize %s | FileCheck %s func.func @single_concat(%arg0: tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> { - %0 = "tosa.concat"(%arg0, %arg0) {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> + %0 = tosa.concat %arg0, %arg0 {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> return %0 : tensor<1x2x7x7xf32> } // CHECK-LABEL: func.func @single_concat( // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> { -// CHECK: %[[VAL_1:.*]] = "tosa.concat"(%[[VAL_0]], %[[VAL_0]]) <{axis = 1 : i64}> : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> +// CHECK: %[[VAL_1:.*]] = tosa.concat %[[VAL_0]], %[[VAL_0]] 
{axis = 1 : i64} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> // CHECK: return %[[VAL_1]] : tensor<1x2x7x7xf32> // CHECK: } // ----- func.func @concat_different_axis(%arg0: tensor<1x1x7x7xf32>) -> tensor<2x2x7x7xf32> { - %0 = "tosa.concat"(%arg0, %arg0) {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> - %1 = "tosa.concat"(%0, %0) {axis = 0} : (tensor<1x2x7x7xf32>, tensor<1x2x7x7xf32>) -> tensor<2x2x7x7xf32> + %0 = tosa.concat %arg0, %arg0 {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> + %1 = tosa.concat %0, %0 {axis = 0} : (tensor<1x2x7x7xf32>, tensor<1x2x7x7xf32>) -> tensor<2x2x7x7xf32> return %1 : tensor<2x2x7x7xf32> } // CHECK-LABEL: func.func @concat_different_axis( // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x1x7x7xf32>) -> tensor<2x2x7x7xf32> { -// CHECK: %[[VAL_1:.*]] = "tosa.concat"(%[[VAL_0]], %[[VAL_0]]) <{axis = 1 : i64}> : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> -// CHECK: %[[VAL_2:.*]] = "tosa.concat"(%[[VAL_1]], %[[VAL_1]]) <{axis = 0 : i64}> : (tensor<1x2x7x7xf32>, tensor<1x2x7x7xf32>) -> tensor<2x2x7x7xf32> +// CHECK: %[[VAL_1:.*]] = tosa.concat %[[VAL_0]], %[[VAL_0]] {axis = 1 : i64} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> +// CHECK: %[[VAL_2:.*]] = tosa.concat %[[VAL_1]], %[[VAL_1]] {axis = 0 : i64} : (tensor<1x2x7x7xf32>, tensor<1x2x7x7xf32>) -> tensor<2x2x7x7xf32> // CHECK: return %[[VAL_2]] : tensor<2x2x7x7xf32> // CHECK: } @@ -30,15 +30,15 @@ func.func @fold_concats(%arg0: tensor<1x1x7x7xf32>) -> tensor<1x4x7x7xf32> { %tmp = tensor.empty() : tensor<1x1x7x7xf32> - %0 = "tosa.concat"(%arg0, %arg0) {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> - %1 = "tosa.concat"(%tmp, %0, %tmp) {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x2x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x4x7x7xf32> + %0 = tosa.concat %arg0, %arg0 {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> 
tensor<1x2x7x7xf32> + %1 = tosa.concat %tmp, %0, %tmp {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x2x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x4x7x7xf32> return %1 : tensor<1x4x7x7xf32> } // CHECK-LABEL: func.func @fold_concats( // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x1x7x7xf32>) -> tensor<1x4x7x7xf32> { // CHECK: %[[VAL_1:.*]] = tensor.empty() : tensor<1x1x7x7xf32> -// CHECK: %[[VAL_2:.*]] = "tosa.concat"(%[[VAL_1]], %[[VAL_0]], %[[VAL_0]], %[[VAL_1]]) <{axis = 1 : i64}> : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x4x7x7xf32> +// CHECK: %[[VAL_2:.*]] = tosa.concat %[[VAL_1]], %[[VAL_0]], %[[VAL_0]], %[[VAL_1]] {axis = 1 : i64} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x4x7x7xf32> // CHECK: return %[[VAL_2]] : tensor<1x4x7x7xf32> // CHECK: } @@ -46,48 +46,48 @@ func.func @nested_fold(%arg0: tensor<1x1x7x7xf32>) -> tensor<1x8x7x7xf32> { %tmp = tensor.empty() : tensor<1x1x7x7xf32> - %0 = "tosa.concat"(%arg0, %arg0) {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> - %1 = "tosa.concat"(%tmp, %0, %tmp) {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x2x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x4x7x7xf32> - %2 = "tosa.concat"(%1, %1) {axis = 1} : (tensor<1x4x7x7xf32>, tensor<1x4x7x7xf32>) -> tensor<1x8x7x7xf32> + %0 = tosa.concat %arg0, %arg0 {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> + %1 = tosa.concat %tmp, %0, %tmp {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x2x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x4x7x7xf32> + %2 = tosa.concat %1, %1 {axis = 1} : (tensor<1x4x7x7xf32>, tensor<1x4x7x7xf32>) -> tensor<1x8x7x7xf32> return %2 : tensor<1x8x7x7xf32> } // CHECK-LABEL: func.func @nested_fold( // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x1x7x7xf32>) -> tensor<1x8x7x7xf32> { // CHECK: %[[VAL_1:.*]] = tensor.empty() : tensor<1x1x7x7xf32> -// CHECK: %[[VAL_2:.*]] = "tosa.concat"(%[[VAL_1]], %[[VAL_0]], %[[VAL_0]], 
%[[VAL_1]], %[[VAL_1]], %[[VAL_0]], %[[VAL_0]], %[[VAL_1]]) <{axis = 1 : i64}> : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x8x7x7xf32> +// CHECK: %[[VAL_2:.*]] = tosa.concat %[[VAL_1]], %[[VAL_0]], %[[VAL_0]], %[[VAL_1]], %[[VAL_1]], %[[VAL_0]], %[[VAL_0]], %[[VAL_1]] {axis = 1 : i64} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x8x7x7xf32> // CHECK: return %[[VAL_2]] : tensor<1x8x7x7xf32> // CHECK: } // ----- func.func @wide_fold(%arg0: tensor<1x1x7x7xf32>, %arg1: tensor<1x1x7x7xf32>) -> tensor<1x4x7x7xf32> { - %0 = "tosa.concat"(%arg0, %arg0) {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> - %1 = "tosa.concat"(%arg1, %arg1) {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> - %2 = "tosa.concat"(%0, %1) {axis = 1} : (tensor<1x2x7x7xf32>, tensor<1x2x7x7xf32>) -> tensor<1x4x7x7xf32> + %0 = tosa.concat %arg0, %arg0 {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> + %1 = tosa.concat %arg1, %arg1 {axis = 1} : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x2x7x7xf32> + %2 = tosa.concat %0, %1 {axis = 1} : (tensor<1x2x7x7xf32>, tensor<1x2x7x7xf32>) -> tensor<1x4x7x7xf32> return %2 : tensor<1x4x7x7xf32> } // CHECK-LABEL: func.func @wide_fold( // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x1x7x7xf32>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<1x1x7x7xf32>) -> tensor<1x4x7x7xf32> { -// CHECK: %[[VAL_2:.*]] = "tosa.concat"(%[[VAL_0]], %[[VAL_0]], %[[VAL_1]], %[[VAL_1]]) <{axis = 1 : i64}> : (tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x4x7x7xf32> +// CHECK: %[[VAL_2:.*]] = tosa.concat %[[VAL_0]], %[[VAL_0]], %[[VAL_1]], %[[VAL_1]] {axis = 1 : i64} : (tensor<1x1x7x7xf32>, 
tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>, tensor<1x1x7x7xf32>) -> tensor<1x4x7x7xf32> // CHECK: return %[[VAL_2]] : tensor<1x4x7x7xf32> // CHECK: } // ----- func.func @partially_foldable(%arg0: tensor<1x1x8x8xf32>, %arg1: tensor<1x2x4x8xf32>) -> tensor<1x4x8x8xf32> { - %0 = "tosa.concat"(%arg0, %arg0) {axis = 1} : (tensor<1x1x8x8xf32>, tensor<1x1x8x8xf32>) -> tensor<1x2x8x8xf32> - %1 = "tosa.concat"(%arg1, %arg1) {axis = 2} : (tensor<1x2x4x8xf32>, tensor<1x2x4x8xf32>) -> tensor<1x2x8x8xf32> - %2 = "tosa.concat"(%0, %1) {axis = 1} : (tensor<1x2x8x8xf32>, tensor<1x2x8x8xf32>) -> tensor<1x4x8x8xf32> + %0 = tosa.concat %arg0, %arg0 {axis = 1} : (tensor<1x1x8x8xf32>, tensor<1x1x8x8xf32>) -> tensor<1x2x8x8xf32> + %1 = tosa.concat %arg1, %arg1 {axis = 2} : (tensor<1x2x4x8xf32>, tensor<1x2x4x8xf32>) -> tensor<1x2x8x8xf32> + %2 = tosa.concat %0, %1 {axis = 1} : (tensor<1x2x8x8xf32>, tensor<1x2x8x8xf32>) -> tensor<1x4x8x8xf32> return %2 : tensor<1x4x8x8xf32> } // CHECK-LABEL: func.func @partially_foldable( // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x1x8x8xf32>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<1x2x4x8xf32>) -> tensor<1x4x8x8xf32> { -// CHECK: %[[VAL_2:.*]] = "tosa.concat"(%[[VAL_1]], %[[VAL_1]]) <{axis = 2 : i64}> : (tensor<1x2x4x8xf32>, tensor<1x2x4x8xf32>) -> tensor<1x2x8x8xf32> -// CHECK: %[[VAL_3:.*]] = "tosa.concat"(%[[VAL_0]], %[[VAL_0]], %[[VAL_2]]) <{axis = 1 : i64}> : (tensor<1x1x8x8xf32>, tensor<1x1x8x8xf32>, tensor<1x2x8x8xf32>) -> tensor<1x4x8x8xf32> +// CHECK: %[[VAL_2:.*]] = tosa.concat %[[VAL_1]], %[[VAL_1]] {axis = 2 : i64} : (tensor<1x2x4x8xf32>, tensor<1x2x4x8xf32>) -> tensor<1x2x8x8xf32> +// CHECK: %[[VAL_3:.*]] = tosa.concat %[[VAL_0]], %[[VAL_0]], %[[VAL_2]] {axis = 1 : i64} : (tensor<1x1x8x8xf32>, tensor<1x1x8x8xf32>, tensor<1x2x8x8xf32>) -> tensor<1x4x8x8xf32> // CHECK: return %[[VAL_3]] : tensor<1x4x8x8xf32> // CHECK: } diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir --- a/mlir/test/Dialect/Tosa/invalid.mlir +++ 
b/mlir/test/Dialect/Tosa/invalid.mlir @@ -3,7 +3,7 @@ func.func @test_conv2d(%arg0: tensor<1x29x29x4xf32>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> { // expected-error@+1 {{expect both input and weight to be float or not together, got 'f32' and 'i8'}} - %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} + %0 = tosa.conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<1x29x29x4xf32>, tensor<16x3x3x4xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8> return %0 : tensor<1x27x27x16xi8> } @@ -12,7 +12,7 @@ func.func @test_conv2d(%arg0: tensor<*xi8>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> { // expected-error@+1 {{expect a ranked tensor for input, got of type 'tensor<*xi8>' at index: 0}} - %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} + %0 = tosa.conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<*xi8>, tensor<16x3x3x4xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8> return %0 : tensor<1x27x27x16xi8> } @@ -21,7 +21,7 @@ func.func @test_conv2d(%arg0: tensor<1x29x29x4xi8>, %arg1: tensor<*xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> { // expected-error@+1 {{'tosa.conv2d' op operand #1 must be 4D tensor of 4-bit signless integer or 8-bit signless integer or Quint8 type or Qint4 type or Qint8 type or Qint16 type or Qint32 type or 32-bit float or 16-bit float or bfloat16 type values, but got 'tensor<*xi8>'}} - %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} + %0 = tosa.conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<1x29x29x4xi8>, tensor<*xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8> return %0 : tensor<1x27x27x16xi8> } @@ -30,7 +30,7 @@ func.func @test_conv2d(%arg0: tensor<1x29x29x4xi8>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> { // expected-error@+1 
{{'tosa.conv2d' op quantizationattr is required for quantized type, and not allowed for float type}} - %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} + %0 = tosa.conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<1x29x29x4xi8>, tensor<16x3x3x4xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8> return %0 : tensor<1x27x27x16xi8> } @@ -40,7 +40,7 @@ func.func @test_concat(%arg0 : tensor<2x1xf32>, %arg1 : tensor<2x2xf32>) -> tensor { // expected-error@+2 {{failed to infer returned types}} // expected-error@+1 {{Cannot concat tensors with different sizes on the non-axis dimension 1}} - %0 = "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<2x1xf32>, tensor<2x2xf32>) -> tensor + %0 = tosa.concat %arg0, %arg1 {axis = 0 : i64} : (tensor<2x1xf32>, tensor<2x2xf32>) -> tensor return %0 : tensor } @@ -49,7 +49,7 @@ func.func @test_concat_element_type_mismatch(%arg0 : tensor<1x2xf32>, %arg1 : tensor<2x2xf32>) -> tensor { // expected-error@+2 {{failed to infer returned types}} // expected-error@+1 {{'tosa.concat' op inferred type(s) 'tensor<3x2xf32>' are incompatible with return type(s) of operation 'tensor}} - %0 = "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x2xf32>) -> tensor + %0 = tosa.concat %arg0, %arg1 {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x2xf32>) -> tensor return %0 : tensor } @@ -57,7 +57,7 @@ func.func @test_pad_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3x2xi32>) -> tensor<13x21x3xf32> { // expected-error@+1 {{'tosa.pad' op padding of pad is not constant}} - %0 = "tosa.pad"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<13x21x3xf32> + %0 = tosa.pad %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } @@ -66,7 +66,7 @@ func.func @test_pad_non_const(%arg0: tensor<13x21x3xi8>, %arg1: tensor) -> tensor<13x21x3xi8> { %0 = "tosa.const"() {value = dense<[[0, 0], [0, 1], [0, 1]]> : 
tensor<3x2xi32>} : () -> tensor<3x2xi32> // expected-error@+1 {{'tosa.pad' op pad_const of pad is not constant}} - %1 = "tosa.pad"(%arg0, %0, %arg1) : (tensor<13x21x3xi8>, tensor<3x2xi32>, tensor) -> tensor<13x21x3xi8> + %1 = tosa.pad %arg0, %0, %arg1 : (tensor<13x21x3xi8>, tensor<3x2xi32>, tensor) -> tensor<13x21x3xi8> return %1 : tensor<13x21x3xi8> } @@ -74,7 +74,7 @@ func.func @test_transpose_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3xi32>) -> tensor<3x13x21xf32> { // expected-error@+1 {{'tosa.transpose' op perms of transpose is not constant}} - %0 = "tosa.transpose"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<3xi32>) -> tensor<3x13x21xf32> + %0 = tosa.transpose %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<3xi32>) -> tensor<3x13x21xf32> return %0 : tensor<3x13x21xf32> } @@ -82,9 +82,9 @@ func.func @test_fully_connected_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<2x3xf32>) -> tensor<273x2xf32> { %0 = "tosa.const"() {value = dense<0.000000e+00> : tensor<2xf32>} : () -> tensor<2xf32> - %1 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<13x21x3xf32>) -> tensor<273x3xf32> + %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<13x21x3xf32>) -> tensor<273x3xf32> // expected-error@+1 {{'tosa.fully_connected' op weight of fully_connected is not constant}} - %2 = "tosa.fully_connected"(%1, %arg1, %0) : (tensor<273x3xf32>, tensor<2x3xf32>, tensor<2xf32>) -> tensor<273x2xf32> + %2 = tosa.fully_connected %1, %arg1, %0 : (tensor<273x3xf32>, tensor<2x3xf32>, tensor<2xf32>) -> tensor<273x2xf32> return %2 : tensor<273x2xf32> } @@ -92,9 +92,9 @@ func.func @test_fully_connected_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<2xf32>) -> tensor<273x2xf32> { %0 = "tosa.const"() {value = dense<[[-0.613216758, -0.63714242, -0.73500061], [0.180762768, 0.773053169, -0.933686495]]> : tensor<2x3xf32>} : () -> tensor<2x3xf32> - %1 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<13x21x3xf32>) -> tensor<273x3xf32> + %1 = tosa.reshape %arg0 {new_shape = 
array} : (tensor<13x21x3xf32>) -> tensor<273x3xf32> // expected-error@+1 {{'tosa.fully_connected' op bias of fully_connected is not constant}} - %2 = "tosa.fully_connected"(%1, %0, %arg1) : (tensor<273x3xf32>, tensor<2x3xf32>, tensor<2xf32>) -> tensor<273x2xf32> + %2 = tosa.fully_connected %1, %0, %arg1 : (tensor<273x3xf32>, tensor<2x3xf32>, tensor<2xf32>) -> tensor<273x2xf32> return %2 : tensor<273x2xf32> } @@ -103,7 +103,7 @@ func.func @test_reduce_sum_type_mismatch(%arg0 : tensor<2x3x4x5xf32>) -> () { // expected-error@+2 {{failed to infer returned types}} // expected-error@+1 {{'tosa.reduce_sum' op inferred type(s) 'tensor<1x3x4x5xf32>' are incompatible with return type(s) of operation 'tensor<1x3x4x5xi32>'}} - %0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor<2x3x4x5xf32>) -> tensor<1x3x4x5xi32> + %0 = tosa.reduce_sum %arg0 {axis = 0 : i64} : (tensor<2x3x4x5xf32>) -> tensor<1x3x4x5xi32> return } @@ -112,7 +112,7 @@ func.func @test_reduce_max_type_mismatch(%arg0 : tensor<2x3x4x5xf32>) -> () { // expected-error@+2 {{failed to infer returned types}} // expected-error@+1 {{'tosa.reduce_max' op inferred type(s) 'tensor<2x3x4x1xf32>' are incompatible with return type(s) of operation 'tensor<2x3x4x1xi32>'}} - %0 = "tosa.reduce_max"(%arg0) {axis = 3 : i64} : (tensor<2x3x4x5xf32>) -> tensor<2x3x4x1xi32> + %0 = tosa.reduce_max %arg0 {axis = 3 : i64} : (tensor<2x3x4x5xf32>) -> tensor<2x3x4x1xi32> return } @@ -121,7 +121,7 @@ func.func @test_reduce_min_type_mismatch(%arg0 : tensor<2x3x4x5xf32>) -> () { // expected-error@+2 {{failed to infer returned types}} // expected-error@+1 {{'tosa.reduce_min' op inferred type(s) 'tensor<2x1x4x5xf32>' are incompatible with return type(s) of operation 'tensor<2x1x4x5xi32>'}} - %0 = "tosa.reduce_min"(%arg0) {axis = 1 : i64} : (tensor<2x3x4x5xf32>) -> tensor<2x1x4x5xi32> + %0 = tosa.reduce_min %arg0 {axis = 1 : i64} : (tensor<2x3x4x5xf32>) -> tensor<2x1x4x5xi32> return } @@ -130,7 +130,7 @@ func.func 
@test_reduce_prod_type_mismatch(%arg0 : tensor<2x3x4x5xf32>) -> () { // expected-error@+2 {{failed to infer returned types}} // expected-error@+1 {{'tosa.reduce_prod' op inferred type(s) 'tensor<2x1x4x5xf32>' are incompatible with return type(s) of operation 'tensor<2x3x4x5xf32>'}} - %0 = "tosa.reduce_prod"(%arg0) {axis = 1 : i64} : (tensor<2x3x4x5xf32>) -> tensor<2x3x4x5xf32> + %0 = tosa.reduce_prod %arg0 {axis = 1 : i64} : (tensor<2x3x4x5xf32>) -> tensor<2x3x4x5xf32> return } @@ -139,7 +139,7 @@ func.func @test_reshape_type_mismatch(%arg0 : tensor<13x21x3xf32>) -> () { // expected-error@+2 {{failed to infer returned types}} // expected-error@+1 {{'tosa.reshape' op inferred type(s) 'tensor<13x21x3x1xf32>' are incompatible with return type(s) of operation 'tensor<13x21x3x1xi32>'}} - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<13x21x3xf32>) -> tensor<13x21x3x1xi32> + %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<13x21x3xf32>) -> tensor<13x21x3x1xi32> return } diff --git a/mlir/test/Dialect/Tosa/ops.mlir b/mlir/test/Dialect/Tosa/ops.mlir --- a/mlir/test/Dialect/Tosa/ops.mlir +++ b/mlir/test/Dialect/Tosa/ops.mlir @@ -5,43 +5,43 @@ // ----- // CHECK-LABEL: argmax func.func @test_argmax(%arg0: tensor<14x19xf32>) -> tensor<14xi32> { - %0 = "tosa.argmax"(%arg0) {axis = 1 : i64} : (tensor<14x19xf32>) -> tensor<14xi32> + %0 = tosa.argmax %arg0 {axis = 1 : i64} : (tensor<14x19xf32>) -> tensor<14xi32> return %0 : tensor<14xi32> } // ----- // CHECK-LABEL: avg_pool2d_f32 func.func @test_avg_pool2d_f32(%arg0: tensor<1x7x7x9xf32>) -> tensor<1x7x7x9xf32> { - %0 = "tosa.avg_pool2d"(%arg0) {acc_type = f32, kernel = array, pad = array, stride = array} : (tensor<1x7x7x9xf32>) -> tensor<1x7x7x9xf32> - return %0 : tensor<1x7x7x9xf32> + %0 = tosa.avg_pool2d %arg0 {acc_type = f32, kernel = array, pad = array, stride = array} : (tensor<1x7x7x9xf32>) -> tensor<1x7x7x9xf32> + return %0 : tensor<1x7x7x9xf32> } // ----- // CHECK-LABEL: avg_pool2d_i8 func.func 
@test_avg_pool2d_i8(%arg0: tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi8> { - %0 = "tosa.avg_pool2d"(%arg0) {acc_type = i32, kernel = array, pad = array, stride = array} : (tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi8> - return %0 : tensor<1x7x7x9xi8> + %0 = tosa.avg_pool2d %arg0 {acc_type = i32, kernel = array, pad = array, stride = array} : (tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi8> + return %0 : tensor<1x7x7x9xi8> } // ----- // CHECK-LABEL: avg_pool2d_i16 func.func @test_avg_pool2d_i16(%arg0: tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi16> { - %0 = "tosa.avg_pool2d"(%arg0) {acc_type = i32, kernel = array, pad = array, stride = array} : (tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi16> - return %0 : tensor<1x7x7x9xi16> + %0 = tosa.avg_pool2d %arg0 {acc_type = i32, kernel = array, pad = array, stride = array} : (tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi16> + return %0 : tensor<1x7x7x9xi16> } // ----- // CHECK-LABEL: avg_pool2d_q8 func.func @test_avg_pool2d_q8(%arg0: tensor<1x7x7x9x!quant.uniform>) -> tensor<1x7x7x9x!quant.uniform> { - %0 = "tosa.avg_pool2d"(%arg0) {acc_type = i32, kernel = array, pad = array, stride = array} : (tensor<1x7x7x9x!quant.uniform>) -> tensor<1x7x7x9x!quant.uniform> - return %0 : tensor<1x7x7x9x!quant.uniform> + %0 = tosa.avg_pool2d %arg0 {acc_type = i32, kernel = array, pad = array, stride = array} : (tensor<1x7x7x9x!quant.uniform>) -> tensor<1x7x7x9x!quant.uniform> + return %0 : tensor<1x7x7x9x!quant.uniform> } // ----- // CHECK-LABEL: conv2d func.func @test_conv2d(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %arg2: tensor<8xf32>) -> tensor<1x4x4x8xf32> { - %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32> - return %0 : tensor<1x4x4x8xf32> + %0 = tosa.conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32> + return %0 : 
tensor<1x4x4x8xf32> } // ----- @@ -57,175 +57,174 @@ // ----- // CHECK-LABEL: depthwise_conv2d func.func @test_depthwise_conv2d(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<1x1x4x2xf32>, %arg2: tensor<8xf32>) -> tensor<1x4x4x8xf32> { - %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor<1x4x4x4xf32>, tensor<1x1x4x2xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32> - return %2 : tensor<1x4x4x8xf32> + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<1x4x4x4xf32>, tensor<1x1x4x2xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32> + return %0 : tensor<1x4x4x8xf32> } // ----- // CHECK-LABEL: fft2d func.func @test_fft2d(%arg0: tensor<1x4x8xf32>, %arg1: tensor<1x4x8xf32>) -> (tensor<1x4x8xf32>, tensor<1x4x8xf32>) { - %0, %1 = "tosa.fft2d"(%arg0, %arg1) {inverse = false} : (tensor<1x4x8xf32>, tensor<1x4x8xf32>) -> (tensor<1x4x8xf32>, tensor<1x4x8xf32>) + %0, %1 = tosa.fft2d %arg0, %arg1 {inverse = false} : (tensor<1x4x8xf32>, tensor<1x4x8xf32>) -> (tensor<1x4x8xf32>, tensor<1x4x8xf32>) return %0, %1 : tensor<1x4x8xf32>, tensor<1x4x8xf32> } // ----- // CHECK-LABEL: fully_connected func.func @test_fully_connected(%arg0: tensor<14x19xf32>, %arg1: tensor<19x28xf32>, %arg2: tensor<28xf32>) -> tensor<14x28xf32> { - %0 = "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor<14x19xf32>, tensor<19x28xf32>, tensor<28xf32>) -> tensor<14x28xf32> + %0 = tosa.fully_connected %arg0, %arg1, %arg2 : (tensor<14x19xf32>, tensor<19x28xf32>, tensor<28xf32>) -> tensor<14x28xf32> return %0 : tensor<14x28xf32> } // ----- // CHECK-LABEL: test_matmul func.func @test_matmul(%arg0: tensor<1x14x19xf32>, %arg1: tensor<1x19x28xf32>) -> tensor<1x14x28xf32> { - %0 = "tosa.matmul"(%arg0, %arg1) : (tensor<1x14x19xf32>, tensor<1x19x28xf32>) -> tensor<1x14x28xf32> + %0 = tosa.matmul %arg0, %arg1 : (tensor<1x14x19xf32>, tensor<1x19x28xf32>) -> tensor<1x14x28xf32> return %0 : tensor<1x14x28xf32> } // ----- // 
CHECK-LABEL: max_pool2d func.func @test_max_pool2d(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> { - %0 = "tosa.max_pool2d"(%arg0) {kernel = array, pad = array, stride = array} : (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> + %0 = tosa.max_pool2d %arg0 {kernel = array, pad = array, stride = array} : (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> return %0 : tensor<1x32x32x8xf32> } // ----- // CHECK-LABEL: rfft2d func.func @test_rfft2d(%arg0: tensor<13x8x16xf32>) -> (tensor<13x8x9xf32>, tensor<13x8x9xf32>) { - %0, %1 = "tosa.rfft2d"(%arg0) {} : (tensor<13x8x16xf32>) -> (tensor<13x8x9xf32>, tensor<13x8x9xf32>) + %0, %1 = tosa.rfft2d %arg0 : (tensor<13x8x16xf32>) -> (tensor<13x8x9xf32>, tensor<13x8x9xf32>) return %0, %1 : tensor<13x8x9xf32>, tensor<13x8x9xf32> } // ----- // CHECK-LABEL: transpose_conv2d func.func @test_transpose_conv2d(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> { - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, out_shape = array, stride = array} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32> + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, out_shape = array, stride = array} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32> return %0 : tensor<1x32x32x16xf32> } // ----- // CHECK-LABEL: clamp func.func @test_clamp(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.clamp"(%arg0) {min_fp = 0.0 : f32, max_fp = 1.0: f32, min_int = 0 : i64, max_int = 1 : i64} : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.clamp %arg0 {min_fp = 0.0 : f32, max_fp = 1.0: f32, min_int = 0 : i64, max_int = 1 : i64} : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: sigmoid func.func @test_sigmoid(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.sigmoid"(%arg0) : (tensor<13x21x3xf32>) 
-> tensor<13x21x3xf32> + %0 = tosa.sigmoid %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: tanh func.func @test_tanh(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.tanh"(%arg0) : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.tanh %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } - // ----- // CHECK-LABEL: erf func.func @test_erf(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.erf"(%arg0) : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.erf %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: add func.func @test_add(%arg0: tensor<13x21x1xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.add"(%arg0, %arg1) : (tensor<13x21x1xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.add %arg0, %arg1 : (tensor<13x21x1xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: arithmetic_right_shift func.func @test_arithmetic_right_shift(%arg0: tensor<13x21x1xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.arithmetic_right_shift"(%arg0, %arg1) { round = false } : (tensor<13x21x1xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.arithmetic_right_shift %arg0, %arg1 {round = false} : (tensor<13x21x1xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: bitwise_and func.func @test_bitwise_and(%arg0: tensor<13x21x3xi32>, %arg1: tensor<13x21x1xi32>) -> tensor<13x21x3xi32> { - %0 = "tosa.bitwise_and"(%arg0, %arg1) : (tensor<13x21x3xi32>, tensor<13x21x1xi32>) -> tensor<13x21x3xi32> + %0 = tosa.bitwise_and %arg0, %arg1 : (tensor<13x21x3xi32>, tensor<13x21x1xi32>) -> tensor<13x21x3xi32> return %0 : tensor<13x21x3xi32> } // ----- // CHECK-LABEL: bitwise_or func.func @test_bitwise_or(%arg0: 
tensor<13x21x3xi32>, %arg1: tensor<13x1x3xi32>) -> tensor<13x21x3xi32> { - %0 = "tosa.bitwise_or"(%arg0, %arg1) : (tensor<13x21x3xi32>, tensor<13x1x3xi32>) -> tensor<13x21x3xi32> + %0 = tosa.bitwise_or %arg0, %arg1 : (tensor<13x21x3xi32>, tensor<13x1x3xi32>) -> tensor<13x21x3xi32> return %0 : tensor<13x21x3xi32> } // ----- // CHECK-LABEL: bitwise_xor func.func @test_bitwise_xor(%arg0: tensor<13x21x1xi32>, %arg1: tensor<13x21x3xi32>) -> tensor<13x21x3xi32> { - %0 = "tosa.bitwise_xor"(%arg0, %arg1) : (tensor<13x21x1xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32> + %0 = tosa.bitwise_xor %arg0, %arg1 : (tensor<13x21x1xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32> return %0 : tensor<13x21x3xi32> } // ----- // CHECK-LABEL: div func.func @test_div(%arg0: tensor<13x21x1xi32>, %arg1: tensor<13x21x3xi32>) -> tensor<13x21x3xi32> { - %0 = "tosa.div"(%arg0, %arg1) : (tensor<13x21x1xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32> + %0 = tosa.div %arg0, %arg1 : (tensor<13x21x1xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32> return %0 : tensor<13x21x3xi32> } // ----- // CHECK-LABEL: logical_and func.func @test_logical_and(%arg0: tensor<13x21x3xi1>, %arg1: tensor<13x21x1xi1>) -> tensor<13x21x3xi1> { - %0 = "tosa.logical_and"(%arg0, %arg1) : (tensor<13x21x3xi1>, tensor<13x21x1xi1>) -> tensor<13x21x3xi1> + %0 = tosa.logical_and %arg0, %arg1 : (tensor<13x21x3xi1>, tensor<13x21x1xi1>) -> tensor<13x21x3xi1> return %0 : tensor<13x21x3xi1> } // ----- // CHECK-LABEL: logical_left_shift func.func @test_logical_left_shift(%arg0: tensor<13x21x3xi32>, %arg1: tensor<13x21x1xi32>) -> tensor<13x21x3xi32> { - %0 = "tosa.logical_left_shift"(%arg0, %arg1) : (tensor<13x21x3xi32>, tensor<13x21x1xi32>) -> tensor<13x21x3xi32> + %0 = tosa.logical_left_shift %arg0, %arg1 : (tensor<13x21x3xi32>, tensor<13x21x1xi32>) -> tensor<13x21x3xi32> return %0 : tensor<13x21x3xi32> } // ----- // CHECK-LABEL: logical_right_shift func.func @test_logical_right_shift(%arg0: tensor<13x21x3xi32>, %arg1: 
tensor<13x21x1xi32>) -> tensor<13x21x3xi32> { - %0 = "tosa.logical_right_shift"(%arg0, %arg1) : (tensor<13x21x3xi32>, tensor<13x21x1xi32>) -> tensor<13x21x3xi32> + %0 = tosa.logical_right_shift %arg0, %arg1 : (tensor<13x21x3xi32>, tensor<13x21x1xi32>) -> tensor<13x21x3xi32> return %0 : tensor<13x21x3xi32> } // ----- // CHECK-LABEL: logical_or func.func @test_logical_or(%arg0: tensor<13x1x3xi1>, %arg1: tensor<13x21x3xi1>) -> tensor<13x21x3xi1> { - %0 = "tosa.logical_or"(%arg0, %arg1) : (tensor<13x1x3xi1>, tensor<13x21x3xi1>) -> tensor<13x21x3xi1> + %0 = tosa.logical_or %arg0, %arg1 : (tensor<13x1x3xi1>, tensor<13x21x3xi1>) -> tensor<13x21x3xi1> return %0 : tensor<13x21x3xi1> } // ----- // CHECK-LABEL: logical_xor func.func @test_logical_xor(%arg0: tensor<13x1x3xi1>, %arg1: tensor<13x21x3xi1>) -> tensor<13x21x3xi1> { - %0 = "tosa.logical_xor"(%arg0, %arg1) : (tensor<13x1x3xi1>, tensor<13x21x3xi1>) -> tensor<13x21x3xi1> + %0 = tosa.logical_xor %arg0, %arg1 : (tensor<13x1x3xi1>, tensor<13x21x3xi1>) -> tensor<13x21x3xi1> return %0 : tensor<13x21x3xi1> } // ----- // CHECK-LABEL: maximum func.func @test_max(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x1xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.maximum"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<13x21x1xf32>) -> tensor<13x21x3xf32> + %0 = tosa.maximum %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<13x21x1xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: minimum func.func @test_min(%arg0: tensor<13x21x3xf32>, %arg1: tensor<1x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.minimum"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<1x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.minimum %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<1x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: mul func.func @test_mul(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x1x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.mul"(%arg0, %arg1) { shift = 1 : i32 } : 
(tensor<13x21x3xf32>, tensor<13x1x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.mul %arg0, %arg1 {shift = 1 : i32} : (tensor<13x21x3xf32>, tensor<13x1x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } @@ -239,105 +238,105 @@ // ----- // CHECK-LABEL: pow func.func @test_pow(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x1xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.pow"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<13x21x1xf32>) -> tensor<13x21x3xf32> + %0 = tosa.pow %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<13x21x1xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: sub func.func @test_sub(%arg0: tensor<1x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.sub"(%arg0, %arg1) : (tensor<1x21x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.sub %arg0, %arg1 : (tensor<1x21x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: table func.func @main(%arg0: tensor<64xi32>, %arg1: tensor<513x!quant.uniform>) -> tensor<64x!quant.uniform> { - %0 = "tosa.table"(%arg0, %arg1) : (tensor<64xi32>, tensor<513x!quant.uniform>) -> tensor<64x!quant.uniform> + %0 = tosa.table %arg0, %arg1 : (tensor<64xi32>, tensor<513x!quant.uniform>) -> tensor<64x!quant.uniform> return %0 : tensor<64x!quant.uniform> } // ----- // CHECK-LABEL: abs func.func @test_abs(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.abs"(%arg0) : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.abs %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: bitwise_not func.func @test_bitwise_not(%arg0: tensor<13x21x1xi32>) -> tensor<13x21x1xi32> { - %0 = "tosa.bitwise_not"(%arg0) : (tensor<13x21x1xi32>) -> tensor<13x21x1xi32> + %0 = tosa.bitwise_not %arg0 : (tensor<13x21x1xi32>) -> tensor<13x21x1xi32> return %0 : tensor<13x21x1xi32> } // ----- // CHECK-LABEL: ceil func.func @test_ceil(%arg0: 
tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.ceil"(%arg0) : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.ceil %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: clz func.func @test_clz(%arg0: tensor<13x21x3xi32>) -> tensor<13x21x3xi32> { - %0 = "tosa.clz"(%arg0) : (tensor<13x21x3xi32>) -> tensor<13x21x3xi32> + %0 = tosa.clz %arg0 : (tensor<13x21x3xi32>) -> tensor<13x21x3xi32> return %0 : tensor<13x21x3xi32> } // ----- // CHECK-LABEL: exp func.func @test_exp(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.exp"(%arg0) : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.exp %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: floor func.func @test_floor(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.floor"(%arg0) : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.floor %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: log func.func @test_log(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.log"(%arg0) : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.log %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: logical_not func.func @test_logical_not(%arg0: tensor<1x21x3xi1>) -> tensor<1x21x3xi1> { - %0 = "tosa.logical_not"(%arg0) : (tensor<1x21x3xi1>) -> tensor<1x21x3xi1> + %0 = tosa.logical_not %arg0 : (tensor<1x21x3xi1>) -> tensor<1x21x3xi1> return %0 : tensor<1x21x3xi1> } // ----- // CHECK-LABEL: negate func.func @test_negate(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.negate"(%arg0) : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.negate %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: reciprocal func.func @test_reciprocal(%arg0: 
tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.reciprocal"(%arg0) : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.reciprocal %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: rsqrt func.func @test_rsqrt(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.rsqrt"(%arg0) : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.rsqrt %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: select func.func @test_select(%arg0: tensor<1x1x1xi1>, %arg1: tensor<13x21x3xf32>, %arg2: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x1xi1>, tensor<13x21x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.select %arg0, %arg1, %arg2 : (tensor<1x1x1xi1>, tensor<13x21x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } @@ -345,83 +344,83 @@ // ----- // CHECK-LABEL: equal func.func @test_equal(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x1x3xf32>) -> tensor<13x21x3xi1> { - %0 = "tosa.equal"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<13x1x3xf32>) -> tensor<13x21x3xi1> + %0 = tosa.equal %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<13x1x3xf32>) -> tensor<13x21x3xi1> return %0 : tensor<13x21x3xi1> } // ----- // CHECK-LABEL: greater func.func @test_greater(%arg0: tensor<13x21x1xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xi1> { - %0 = "tosa.greater"(%arg0, %arg1) : (tensor<13x21x1xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xi1> + %0 = tosa.greater %arg0, %arg1 : (tensor<13x21x1xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xi1> return %0 : tensor<13x21x3xi1> } // ----- // CHECK-LABEL: greater_equal func.func @test_greater_equal(%arg0: tensor<13x1x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xi1> { - %0 = "tosa.greater_equal"(%arg0, %arg1) : (tensor<13x1x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xi1> + %0 = 
tosa.greater_equal %arg0, %arg1 : (tensor<13x1x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xi1> return %0 : tensor<13x21x3xi1> } // ----- // CHECK-LABEL: reduce_all func.func @test_reduce_all(%arg0: tensor<13x21x3xi1>) -> tensor<21x3xi1> { - %0 = "tosa.reduce_all"(%arg0) {axis = 0 : i64} : (tensor<13x21x3xi1>) -> tensor<1x21x3xi1> - %1 = "tosa.reshape"(%0) {new_shape = array} : (tensor<1x21x3xi1>) -> tensor<21x3xi1> + %0 = tosa.reduce_all %arg0 {axis = 0 : i64} : (tensor<13x21x3xi1>) -> tensor<1x21x3xi1> + %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x21x3xi1>) -> tensor<21x3xi1> return %1 : tensor<21x3xi1> } // ----- // CHECK-LABEL: reduce_any func.func @test_reduce_any(%arg0: tensor<13x21x3xi1>) -> tensor<21x3xi1> { - %0 = "tosa.reduce_any"(%arg0) {axis = 0 : i64} : (tensor<13x21x3xi1>) -> tensor<1x21x3xi1> - %1 = "tosa.reshape"(%0) {new_shape = array} : (tensor<1x21x3xi1>) -> tensor<21x3xi1> + %0 = tosa.reduce_any %arg0 {axis = 0 : i64} : (tensor<13x21x3xi1>) -> tensor<1x21x3xi1> + %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x21x3xi1>) -> tensor<21x3xi1> return %1 : tensor<21x3xi1> } // ----- // CHECK-LABEL: reduce_max func.func @test_reduce_max(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> { - %0 = "tosa.reduce_max"(%arg0) {axis = 0 : i64} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32> - %1 = "tosa.reshape"(%0) {new_shape = array} : (tensor<1x21x3xf32>) -> tensor<21x3xf32> + %0 = tosa.reduce_max %arg0 {axis = 0 : i64} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32> + %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x21x3xf32>) -> tensor<21x3xf32> return %1 : tensor<21x3xf32> } // ----- // CHECK-LABEL: reduce_min func.func @test_reduce_min(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> { - %0 = "tosa.reduce_min"(%arg0) {axis = 0 : i64} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32> - %1 = "tosa.reshape"(%0) {new_shape = array} : (tensor<1x21x3xf32>) -> tensor<21x3xf32> + %0 = tosa.reduce_min %arg0 {axis = 0 : i64} : (tensor<13x21x3xf32>) -> 
tensor<1x21x3xf32> + %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x21x3xf32>) -> tensor<21x3xf32> return %1 : tensor<21x3xf32> } // ----- // CHECK-LABEL: reduce_product func.func @test_reduce_product(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> { - %0 = "tosa.reduce_prod"(%arg0) {axis = 0 : i64} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32> - %1 = "tosa.reshape"(%0) {new_shape = array} : (tensor<1x21x3xf32>) -> tensor<21x3xf32> + %0 = tosa.reduce_prod %arg0 {axis = 0 : i64} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32> + %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x21x3xf32>) -> tensor<21x3xf32> return %1 : tensor<21x3xf32> } // ----- // CHECK-LABEL: reduce_sum func.func @test_reduce_sum(%arg0: tensor<13x21x3xf32>) -> tensor<21x3xf32> { - %0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32> - %1 = "tosa.reshape"(%0) {new_shape = array} : (tensor<1x21x3xf32>) -> tensor<21x3xf32> + %0 = tosa.reduce_sum %arg0 {axis = 0 : i64} : (tensor<13x21x3xf32>) -> tensor<1x21x3xf32> + %1 = tosa.reshape %0 {new_shape = array} : (tensor<1x21x3xf32>) -> tensor<21x3xf32> return %1 : tensor<21x3xf32> } // ----- // CHECK-LABEL: concat func.func @test_concat(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<26x21x3xf32> { - %0 = "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<13x21x3xf32>, tensor<13x21x3xf32>) -> tensor<26x21x3xf32> + %0 = tosa.concat %arg0, %arg1 {axis = 0 : i64} : (tensor<13x21x3xf32>, tensor<13x21x3xf32>) -> tensor<26x21x3xf32> return %0 : tensor<26x21x3xf32> } // ----- // CHECK-LABEL: pad func.func @test_pad(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3x2xi32>) -> tensor<13x21x3xf32> { - %0 = "tosa.pad"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<13x21x3xf32> + %0 = tosa.pad %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } @@ -429,35 +428,35 @@ // CHECK-LABEL: pad_explicit_value func.func 
@test_pad_explicit_value(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3x2xi32>) -> tensor<13x21x3xf32> { %0 = "tosa.const"() {value = dense<3.14> : tensor} : () -> tensor - %1 = "tosa.pad"(%arg0, %arg1, %0) : (tensor<13x21x3xf32>, tensor<3x2xi32>, tensor) -> tensor<13x21x3xf32> + %1 = tosa.pad %arg0, %arg1, %0 : (tensor<13x21x3xf32>, tensor<3x2xi32>, tensor) -> tensor<13x21x3xf32> return %1 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: reshape func.func @test_reshape(%arg0: tensor<13x21x3xf32>) -> tensor<1x819xf32> { - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<13x21x3xf32>) -> tensor<1x819xf32> + %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<13x21x3xf32>) -> tensor<1x819xf32> return %0 : tensor<1x819xf32> } // ----- // CHECK-LABEL: reverse func.func @test_reverse(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.reverse"(%arg0) {axis = 0 : i64} : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.reverse %arg0 {axis = 0 : i64} : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: slice func.func @test_slice(%arg0: tensor<13x21x3xf32>) -> tensor<4x11x1xf32> { - %0 = "tosa.slice"(%arg0) {start = array, size = array} : (tensor<13x21x3xf32>) -> tensor<4x11x1xf32> + %0 = tosa.slice %arg0 {size = array, start = array} : (tensor<13x21x3xf32>) -> tensor<4x11x1xf32> return %0 : tensor<4x11x1xf32> } // ----- // CHECK-LABEL: tile func.func @test_tile(%arg0: tensor<13x21x3xf32>) -> tensor<39x21x6xf32> { - %0 = "tosa.tile"(%arg0) {multiples = array} : (tensor<13x21x3xf32>) -> tensor<39x21x6xf32> + %0 = tosa.tile %arg0 {multiples = array} : (tensor<13x21x3xf32>) -> tensor<39x21x6xf32> return %0 : tensor<39x21x6xf32> } @@ -465,56 +464,56 @@ // CHECK-LABEL: transpose func.func @test_transpose(%arg0: tensor<13x21x3xf32>) -> tensor<3x13x21xf32> { %0 = "tosa.const"() {value = dense<[2, 0, 1]> : tensor<3xi32>} : () -> tensor<3xi32> - %1 = "tosa.transpose"(%arg0, %0) : (tensor<13x21x3xf32>, 
tensor<3xi32>) -> tensor<3x13x21xf32> + %1 = tosa.transpose %arg0, %0 : (tensor<13x21x3xf32>, tensor<3xi32>) -> tensor<3x13x21xf32> return %1 : tensor<3x13x21xf32> } // ----- // CHECK-LABEL: gather func.func @test_gather(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x26xi32>) -> tensor<13x26x3xf32> { - %0 = "tosa.gather"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<13x26xi32>) -> tensor<13x26x3xf32> + %0 = tosa.gather %arg0, %arg1 : (tensor<13x21x3xf32>, tensor<13x26xi32>) -> tensor<13x26x3xf32> return %0 : tensor<13x26x3xf32> } // ----- // CHECK-LABEL: scatter func.func @test_scatter(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x26xi32>, %arg2: tensor<13x26x3xf32>) -> tensor<13x21x3xf32> { - %0 = "tosa.scatter"(%arg0, %arg1, %arg2) : (tensor<13x21x3xf32>, tensor<13x26xi32>, tensor<13x26x3xf32>) -> tensor<13x21x3xf32> + %0 = tosa.scatter %arg0, %arg1, %arg2 : (tensor<13x21x3xf32>, tensor<13x26xi32>, tensor<13x26x3xf32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: resize func.func @test_resize(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32> { - %1 = "tosa.resize"(%arg0) { scale = array, offset = array, border = array, mode = "BILINEAR"} : (tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32> + %1 = tosa.resize %arg0 { scale = array, offset = array, border = array, mode = "BILINEAR" } : (tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32> return %1 : tensor<1x64x64x8xf32> } // ----- // CHECK-LABEL: cast func.func @test_cast1(%arg0: tensor<13x21x3xi32>) -> tensor<13x21x3xf32> { - %0 = "tosa.cast"(%arg0) : (tensor<13x21x3xi32>) -> tensor<13x21x3xf32> + %0 = tosa.cast %arg0 : (tensor<13x21x3xi32>) -> tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32> } // ----- // CHECK-LABEL: cast2 func.func @test_cast2(%arg0: tensor<13x21x3xi32>) -> tensor<13x21x3x!quant.uniform> { - %0 = "tosa.cast"(%arg0) : (tensor<13x21x3xi32>) -> tensor<13x21x3x!quant.uniform> + %0 = tosa.cast %arg0 : (tensor<13x21x3xi32>) -> tensor<13x21x3x!quant.uniform> return 
%0 : tensor<13x21x3x!quant.uniform> } // ----- // CHECK-LABEL: cast3 func.func @test_cast3(%arg0: tensor<13x21x3xi32>) -> tensor<13x21x3x!quant.uniform> { - %0 = "tosa.cast"(%arg0) : (tensor<13x21x3xi32>) -> tensor<13x21x3x!quant.uniform> + %0 = tosa.cast %arg0 : (tensor<13x21x3xi32>) -> tensor<13x21x3x!quant.uniform> return %0 : tensor<13x21x3x!quant.uniform> } // ----- // CHECK-LABEL: rescale func.func @test_rescale(%arg0: tensor<13x21x3x!quant.uniform>) -> tensor<13x21x3x!quant.uniform> { - %0 = "tosa.rescale"(%arg0) {double_round = false, input_zp = 127 : i32, multiplier = array, output_zp = -1 : i32, per_channel = false, scale32 = true, shift = array} : (tensor<13x21x3x!quant.uniform>) -> tensor<13x21x3x!quant.uniform> + %0 = tosa.rescale %arg0 {double_round = false, input_zp = 127 : i32, multiplier = array, output_zp = -1 : i32, per_channel = false, scale32 = true, shift = array} : (tensor<13x21x3x!quant.uniform>) -> tensor<13x21x3x!quant.uniform> return %0 : tensor<13x21x3x!quant.uniform> } @@ -528,22 +527,20 @@ // ----- // CHECK-LABEL: identity func.func @test_identity(%arg0: tensor<13x21x3xi32>) -> tensor<13x21x3xi32> { - %0 = "tosa.identity"(%arg0) : (tensor<13x21x3xi32>) -> tensor<13x21x3xi32> + %0 = tosa.identity %arg0 : (tensor<13x21x3xi32>) -> tensor<13x21x3xi32> return %0 : tensor<13x21x3xi32> } // ----- // CHECK-LABEL: cond_if func.func @test_cond_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - %0 = "tosa.cond_if"(%arg2, %arg0, %arg1) ({ - ^bb0(%arg3: tensor, %arg4: tensor): - %1 = "tosa.add"(%arg3, %arg4) : (tensor, tensor) -> tensor - "tosa.yield"(%1) : (tensor) -> () - }, { - ^bb0(%arg3: tensor, %arg4: tensor): - %1 = "tosa.sub"(%arg3, %arg4) : (tensor, tensor) -> tensor - "tosa.yield"(%1) : (tensor) -> () - }) : (tensor, tensor, tensor) -> tensor + %0 = tosa.cond_if %arg2 -> (tensor) { + %1 = tosa.add %arg0, %arg1 : (tensor, tensor) -> tensor + tosa.yield %1 : tensor + } else { + %1 = tosa.sub %arg0, %arg1 : (tensor, tensor) -> 
tensor + tosa.yield %1 : tensor + } return %0 : tensor } @@ -551,26 +548,25 @@ // CHECK-LABEL: while_loop func.func @test_while_loop(%arg0: tensor<10xi32>, %arg1: tensor) { %0 = "tosa.const"() {value = dense<0> : tensor} : () -> tensor - %1:3 = "tosa.while_loop"(%0, %0, %arg0) ({ - ^bb0(%arg2: tensor, %arg3: tensor, %arg4: tensor<10xi32>): - %2 = "tosa.greater_equal"(%arg3, %arg1) : (tensor, tensor) -> tensor - %3 = "tosa.logical_not"(%2) : (tensor) -> tensor - "tosa.yield"(%3) : (tensor) -> () - }, { + %1:3 = tosa.while_loop (%arg2 = %0, %arg3 = %0, %arg4 = %arg0) : (tensor, tensor, tensor<10xi32>) -> (tensor, tensor, tensor<10xi32>) { + %2 = tosa.greater_equal %arg3, %arg1 : (tensor, tensor) -> tensor + %3 = tosa.logical_not %2 : (tensor) -> tensor + tosa.yield %3 : tensor + } do { ^bb0(%arg2: tensor, %arg3: tensor, %arg4: tensor<10xi32>): %2 = "tosa.const"() {value = dense<1> : tensor} : () -> tensor - %3 = "tosa.add"(%arg3, %2) : (tensor, tensor) -> tensor - %4 = "tosa.reshape"(%2) {new_shape = array} : (tensor) -> tensor<1xi32> - %5 = "tosa.add"(%arg4, %4) : (tensor<10xi32>, tensor<1xi32>) -> tensor<10xi32> - %6 = "tosa.add"(%arg2, %2) : (tensor, tensor) -> tensor - "tosa.yield"(%6, %3, %5) : (tensor, tensor, tensor<10xi32>) -> () - }) : (tensor, tensor, tensor<10xi32>) -> (tensor, tensor, tensor<10xi32>) + %3 = tosa.add %arg3, %2 : (tensor, tensor) -> tensor + %4 = tosa.reshape %2 {new_shape = array} : (tensor) -> tensor<1xi32> + %5 = tosa.add %arg4, %4 : (tensor<10xi32>, tensor<1xi32>) -> tensor<10xi32> + %6 = tosa.add %arg2, %2 : (tensor, tensor) -> tensor + tosa.yield %6, %3, %5 : tensor, tensor, tensor<10xi32> + } return } // ----- // CHECK-LABEL: custom func.func @test_custom(%arg0: tensor<10xi32>) -> tensor<10xi32> { - %0 = "tosa.custom"(%arg0) {identifier="custom_test", config="tosa_mlir_test", implementation_attrs=""} : (tensor<10xi32>) -> (tensor<10xi32>) + %0 = tosa.custom %arg0 {identifier="custom_test", config="tosa_mlir_test", 
implementation_attrs="" } : (tensor<10xi32>) -> (tensor<10xi32>) return %0 : tensor<10xi32> } diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir --- a/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir +++ b/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir @@ -4,17 +4,17 @@ // CHECK-LABEL: @conv2d_as_fully_connected func.func @conv2d_as_fully_connected(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<3x1x1x2xf32>, %arg2: tensor<3xf32>) -> tensor<4x10x10x3xf32> { - // CHECK-NOT: "tosa.conv2d" - // CHECK: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} + // CHECK-NOT: tosa.conv2d + // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} // CHECK-SAME: -> tensor<400x2xf32> - // CHECK: %[[VAR1:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1 {new_shape = array} // CHECK-SAME: -> tensor<3x2xf32> - // CHECK: %[[VAR2:.*]] = "tosa.fully_connected"(%[[VAR0]], %[[VAR1]], %arg2) + // CHECK: %[[VAR2:.*]] = tosa.fully_connected %[[VAR0]], %[[VAR1]], %arg2 // CHECK-SAME: -> tensor<400x3xf32> - // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) <{new_shape = array} + // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]] {new_shape = array} // CHECK-SAME: -> tensor<4x10x10x3xf32> // CHECK: return %[[VAR3]] - %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32> + %0 = tosa.conv2d %arg0, %arg1, %arg2 {pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32> return %0 : tensor<4x10x10x3xf32> } @@ -22,18 +22,18 @@ // CHECK-LABEL: @conv2d_as_fully_connected_quant func.func @conv2d_as_fully_connected_quant(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<3x1x1x2xi8>, %arg2: tensor<3xi32>) -> tensor<4x10x10x3xi32> { - // CHECK-NOT: "tosa.conv2d" - // CHECK: %[[VAR0:.*]] = 
"tosa.reshape"(%arg0) <{new_shape = array} + // CHECK-NOT: tosa.conv2d + // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} // CHECK-SAME: -> tensor<400x2xi8> - // CHECK: %[[VAR1:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1 {new_shape = array} // CHECK-SAME: -> tensor<3x2xi8> - // CHECK: %[[VAR2:.*]] = "tosa.fully_connected"(%[[VAR0]], %[[VAR1]], %arg2) + // CHECK: %[[VAR2:.*]] = tosa.fully_connected %[[VAR0]], %[[VAR1]], %arg2 // CHECK-SAME: quantization_info = #tosa.conv_quant // CHECK-SAME: -> tensor<400x3xi32> - // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) <{new_shape = array} + // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]] {new_shape = array} // CHECK-SAME: -> tensor<4x10x10x3xi32> // CHECK: return %[[VAR3]] - %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = array, stride = array, dilation = array, quantization_info = #tosa.conv_quant} : (tensor<4x10x10x2xi8>, tensor<3x1x1x2xi8>, tensor<3xi32>) -> tensor<4x10x10x3xi32> + %0 = tosa.conv2d %arg0, %arg1, %arg2 {pad = array, stride = array, dilation = array, quantization_info = #tosa.conv_quant} : (tensor<4x10x10x2xi8>, tensor<3x1x1x2xi8>, tensor<3xi32>) -> tensor<4x10x10x3xi32> return %0 : tensor<4x10x10x3xi32> } @@ -44,13 +44,13 @@ // CHECK-SAME: %[[VAL_1:.*]]: tensor<384x1x1x64xi8>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<384xi32>) -> tensor { func.func @conv_with_dynamic_dim(%arg0: tensor, %arg1: tensor<384x1x1x64xi8>, %arg2: tensor<384xi32>) -> tensor { -// CHECK: %[[VAL_3:.*]] = "tosa.reshape"(%[[VAL_0]]) <{new_shape = array}> : (tensor) -> tensor -// CHECK: %[[VAL_4:.*]] = "tosa.reshape"(%[[VAL_1]]) <{new_shape = array}> : (tensor<384x1x1x64xi8>) -> tensor<384x64xi8> -// CHECK: %[[VAL_5:.*]] = "tosa.fully_connected"(%[[VAL_3]], %[[VAL_4]], %[[VAL_2]]) <{quantization_info = #tosa.conv_quant}> : (tensor, tensor<384x64xi8>, tensor<384xi32>) -> tensor -// CHECK: %[[VAL_6:.*]] = "tosa.reshape"(%[[VAL_5]]) <{new_shape = array}> : (tensor) -> 
tensor +// CHECK: %[[VAL_3:.*]] = tosa.reshape %[[VAL_0]] {new_shape = array} : (tensor) -> tensor +// CHECK: %[[VAL_4:.*]] = tosa.reshape %[[VAL_1]] {new_shape = array} : (tensor<384x1x1x64xi8>) -> tensor<384x64xi8> +// CHECK: %[[VAL_5:.*]] = tosa.fully_connected %[[VAL_3]], %[[VAL_4]], %[[VAL_2]] {quantization_info = #tosa.conv_quant} : (tensor, tensor<384x64xi8>, tensor<384xi32>) -> tensor +// CHECK: %[[VAL_6:.*]] = tosa.reshape %[[VAL_5]] {new_shape = array} : (tensor) -> tensor // CHECK: return %[[VAL_6]] : tensor // CHECK: } - %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, quantization_info = #tosa.conv_quant, stride = array} : (tensor, tensor<384x1x1x64xi8>, tensor<384xi32>) -> tensor + %0 = tosa.conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, quantization_info = #tosa.conv_quant, stride = array} : (tensor, tensor<384x1x1x64xi8>, tensor<384xi32>) -> tensor return %0 : tensor } @@ -60,11 +60,11 @@ func.func @conv2d_as_fully_connected_padded(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<3x1x1x2xi8>, %arg2: tensor<3xi32>) -> tensor<4x12x12x3xi32> { // CHECK-DAG: %[[PAD_SHAPE:.+]] = "tosa.const"() <{value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi64>} // CHECK-DAG: %[[PAD_VAL:.+]] = "tosa.const"() <{value = dense<42> : tensor} - // CHECK-DAG: %[[PAD:.+]] = "tosa.pad"(%arg0, %[[PAD_SHAPE]], %[[PAD_VAL]]) : (tensor<4x10x10x2xi8>, tensor<4x2xi64>, tensor) -> tensor<4x12x12x2xi8> - // CHECK-DAG: %[[RESHAPE_INPUT:.+]] = "tosa.reshape"(%[[PAD]]) <{new_shape = array} - // CHECK-DAG: %[[RESHAPE_FILTER:.+]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK-DAG: %[[FULLY:.+]] = "tosa.fully_connected"(%[[RESHAPE_INPUT]], %[[RESHAPE_FILTER]], %arg2) <{quantization_info = #tosa.conv_quant} - // CHECK: %[[RESHAPE:.+]] = "tosa.reshape"(%[[FULLY]]) <{new_shape = array} - %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = array, stride = array, dilation = array, quantization_info = #tosa.conv_quant} : (tensor<4x10x10x2xi8>, 
tensor<3x1x1x2xi8>, tensor<3xi32>) -> tensor<4x12x12x3xi32> + // CHECK-DAG: %[[PAD:.+]] = tosa.pad %arg0, %[[PAD_SHAPE]], %[[PAD_VAL]] : (tensor<4x10x10x2xi8>, tensor<4x2xi64>, tensor) -> tensor<4x12x12x2xi8> + // CHECK-DAG: %[[RESHAPE_INPUT:.+]] = tosa.reshape %[[PAD]] {new_shape = array} + // CHECK-DAG: %[[RESHAPE_FILTER:.+]] = tosa.reshape %arg1 {new_shape = array} + // CHECK-DAG: %[[FULLY:.+]] = tosa.fully_connected %[[RESHAPE_INPUT]], %[[RESHAPE_FILTER]], %arg2 {quantization_info = #tosa.conv_quant} + // CHECK: %[[RESHAPE:.+]] = tosa.reshape %[[FULLY]] {new_shape = array} + %0 = tosa.conv2d %arg0, %arg1, %arg2 {pad = array, stride = array, dilation = array, quantization_info = #tosa.conv_quant} : (tensor<4x10x10x2xi8>, tensor<3x1x1x2xi8>, tensor<3xi32>) -> tensor<4x12x12x3xi32> return %0 : tensor<4x12x12x3xi32> } diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir --- a/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir +++ b/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir @@ -4,21 +4,21 @@ // CHECK-LABEL: @depthwise_conv2d_as_mul func.func @depthwise_conv2d_as_mul(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1x1x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x10x10x6xf32> { - // CHECK-NOT: "tosa.depthwise_conv2d" - // CHECK: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array} + // CHECK-NOT: tosa.depthwise_conv2d + // CHECK: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} // CHECK-SAME: -> tensor<4x10x10x2x1xf32> - // CHECK: %[[VAR1:.*]] = "tosa.reshape"(%arg1) <{new_shape = array} + // CHECK: %[[VAR1:.*]] = tosa.reshape %arg1 {new_shape = array} // CHECK-SAME: -> tensor<1x1x1x2x3xf32> - // CHECK: %[[VAR2:.*]] = "tosa.mul"(%[[VAR0]], %[[VAR1]]) + // CHECK: %[[VAR2:.*]] = tosa.mul %[[VAR0]], %[[VAR1]] // CHECK-SAME: -> tensor<4x10x10x2x3xf32> - // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) <{new_shape = array} + // CHECK: %[[VAR3:.*]] = tosa.reshape %[[VAR2]] {new_shape = 
array} // CHECK-SAME: -> tensor<4x10x10x6xf32> - // CHECK: %[[VAR4:.*]] = "tosa.reshape"(%arg2) <{new_shape = array} + // CHECK: %[[VAR4:.*]] = tosa.reshape %arg2 {new_shape = array} // CHECK-SAME: -> tensor<1x1x1x6xf32> - // CHECK: %[[VAR5:.*]] = "tosa.add"(%[[VAR3]], %[[VAR4]]) + // CHECK: %[[VAR5:.*]] = tosa.add %[[VAR3]], %[[VAR4]] // CHECK-SAME: -> tensor<4x10x10x6xf32> // CHECK: return %[[VAR5]] - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32> + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32> return %0 : tensor<4x10x10x6xf32> } @@ -28,17 +28,17 @@ func.func @depthwise_conv2d_as_mul_q(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<1x1x2x3xi8>, %arg2: tensor<6xi32>) -> tensor<4x10x10x6xi32> { // CHECK: %[[iZp:.+]] = "tosa.const"() <{value = dense<7> : tensor<1x1x1x1x1xi32>} // CHECK: %[[wZp:.+]] = "tosa.const"() <{value = dense<11> : tensor<1x1x1x1xi32>} - // CHECK: %[[rIn:.+]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: %[[cIn:.+]] = "tosa.cast"(%[[rIn]]) : (tensor<4x10x10x2x1xi8>) -> tensor<4x10x10x2x1xi32> - // CHECK: %[[cWe:.+]] = "tosa.cast"(%arg1) : (tensor<1x1x2x3xi8>) -> tensor<1x1x2x3xi32> - // CHECK: %[[sIn:.+]] = "tosa.sub"(%[[cIn]], %[[iZp]]) - // CHECK: %[[sWe:.+]] = "tosa.sub"(%[[cWe]], %[[wZp]]) - // CHECK: %[[resWe:.+]] = "tosa.reshape"(%[[sWe]]) <{new_shape = array} - // CHECK: %[[mul:.+]] = "tosa.mul"(%[[sIn]], %[[resWe]]) <{shift = 0 : i32} - // CHECK: %[[reO:.+]] = "tosa.reshape"(%[[mul]]) <{new_shape = array} - // CHECK: %[[reArg2:.+]] = "tosa.reshape"(%arg2) <{new_shape = array} - // CHECK: %[[add:.+]] = "tosa.add"(%[[reO]], %[[reArg2]]) - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = array, stride = array, dilation = array, quantization_info = 
#tosa.conv_quant} : (tensor<4x10x10x2xi8>, tensor<1x1x2x3xi8>, tensor<6xi32>) -> tensor<4x10x10x6xi32> + // CHECK: %[[rIn:.+]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[cIn:.+]] = tosa.cast %[[rIn]] : (tensor<4x10x10x2x1xi8>) -> tensor<4x10x10x2x1xi32> + // CHECK: %[[cWe:.+]] = tosa.cast %arg1 : (tensor<1x1x2x3xi8>) -> tensor<1x1x2x3xi32> + // CHECK: %[[sIn:.+]] = tosa.sub %[[cIn]], %[[iZp]] + // CHECK: %[[sWe:.+]] = tosa.sub %[[cWe]], %[[wZp]] + // CHECK: %[[resWe:.+]] = tosa.reshape %[[sWe]] {new_shape = array} + // CHECK: %[[mul:.+]] = tosa.mul %[[sIn]], %[[resWe]] {shift = 0 : i32} + // CHECK: %[[reO:.+]] = tosa.reshape %[[mul]] {new_shape = array} + // CHECK: %[[reArg2:.+]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[add:.+]] = tosa.add %[[reO]], %[[reArg2]] + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {pad = array, stride = array, dilation = array, quantization_info = #tosa.conv_quant} : (tensor<4x10x10x2xi8>, tensor<1x1x2x3xi8>, tensor<6xi32>) -> tensor<4x10x10x6xi32> return %0 : tensor<4x10x10x6xi32> } @@ -48,13 +48,13 @@ func.func @depthwise_conv2d_as_mul_padded(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1x1x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x12x12x6xf32> { // CHECK: %[[pad:.+]] = "tosa.const"() <{value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0], [0, 0]]> : tensor<5x2xi64>} // CHECK: %[[zero:.+]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor} - // CHECK: %[[reIn:.+]] = "tosa.reshape"(%arg0) <{new_shape = array} - // CHECK: %[[padded:.+]] = "tosa.pad"(%[[reIn]], %[[pad]], %[[zero]]) : (tensor<4x10x10x2x1xf32>, tensor<5x2xi64>, tensor) -> tensor<4x12x12x2x1xf32> - // CHECK: %[[reArg1:.+]] = "tosa.reshape"(%arg1) <{new_shape = array} - // CHECK: %[[mul:.+]] = "tosa.mul"(%3, %[[reArg1]]) <{shift = 0 : i32} - // CHECK: %[[reOut:.+]] = "tosa.reshape"(%[[mul]]) <{new_shape = array} - // CHECK: %[[reArg2:.+]] = "tosa.reshape"(%arg2) <{new_shape = array} - // CHECK: %[[add:.+]] = "tosa.add"(%[[reOut]], 
%[[reArg2]]) - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x12x12x6xf32> + // CHECK: %[[reIn:.+]] = tosa.reshape %arg0 {new_shape = array} + // CHECK: %[[padded:.+]] = tosa.pad %[[reIn]], %[[pad]], %[[zero]] : (tensor<4x10x10x2x1xf32>, tensor<5x2xi64>, tensor) -> tensor<4x12x12x2x1xf32> + // CHECK: %[[reArg1:.+]] = tosa.reshape %arg1 {new_shape = array} + // CHECK: %[[mul:.+]] = tosa.mul %3, %[[reArg1]] {shift = 0 : i32} + // CHECK: %[[reOut:.+]] = tosa.reshape %[[mul]] {new_shape = array} + // CHECK: %[[reArg2:.+]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[add:.+]] = tosa.add %[[reOut]], %[[reArg2]] + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {pad = array, stride = array, dilation = array} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x12x12x6xf32> return %0 : tensor<4x12x12x6xf32> } diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir --- a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir +++ b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir @@ -2,11 +2,11 @@ // CHECK-LABEL: @transpose_conv2d func.func @transpose_conv2d(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) -> tensor<2x18x19x5xf32> { - // CHECK: %[[REV1:.+]] = "tosa.reverse"(%arg1) <{axis = 1 : i64} - // CHECK: %[[REV2:.+]] = "tosa.reverse"(%[[REV1]]) <{axis = 2 : i64} - // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) + // CHECK: %[[REV1:.+]] = tosa.reverse %arg1 {axis = 1 : i64} + // CHECK: %[[REV2:.+]] = tosa.reverse %[[REV1]] {axis = 2 : i64} + // CHECK: tosa.conv2d %arg0, %[[REV2]], %arg2 // CHECK-SAME: dilation = array, pad = array, stride = array - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, out_shape = array, stride = array} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, 
tensor<5xf32>) -> tensor<2x18x19x5xf32> + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, out_shape = array, stride = array} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x18x19x5xf32> return %0 : tensor<2x18x19x5xf32> } @@ -15,10 +15,10 @@ // CHECK-LABEL: @transpose_conv2d_quantized func.func @transpose_conv2d_quantized(%arg0: tensor<2x16x14x3xi8>, %arg1: tensor<5x3x6x3xi8>, %arg2: tensor<5xi32>) -> (tensor<2x18x19x5xi32>) { - // CHECK: %[[REV1:.+]] = "tosa.reverse"(%arg1) <{axis = 1 : i64} - // CHECK: %[[REV2:.+]] = "tosa.reverse"(%[[REV1]]) <{axis = 2 : i64} - // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) <{dilation = array, pad = array, quantization_info = #tosa.conv_quant, stride = array} - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, quantization_info = #tosa.conv_quant, out_shape = array, stride = array} : (tensor<2x16x14x3xi8>, tensor<5x3x6x3xi8>, tensor<5xi32>) -> tensor<2x18x19x5xi32> + // CHECK: %[[REV1:.+]] = tosa.reverse %arg1 {axis = 1 : i64} + // CHECK: %[[REV2:.+]] = tosa.reverse %[[REV1]] {axis = 2 : i64} + // CHECK: tosa.conv2d %arg0, %[[REV2]], %arg2 {dilation = array, pad = array, quantization_info = #tosa.conv_quant, stride = array} + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, quantization_info = #tosa.conv_quant, out_shape = array, stride = array} : (tensor<2x16x14x3xi8>, tensor<5x3x6x3xi8>, tensor<5xi32>) -> tensor<2x18x19x5xi32> return %0 : tensor<2x18x19x5xi32> } @@ -26,12 +26,12 @@ // CHECK-LABEL: @transpose_conv2d_quantized_padded func.func @transpose_conv2d_quantized_padded(%arg0: tensor<2x16x14x3xi8>, %arg1: tensor<5x3x6x3xi8>, %arg2: tensor<5xi32>) -> (tensor<2x21x26x5xi32>) { - // CHECK-DAG: %[[REV0:.+]] = "tosa.reverse"(%0) <{axis = 2 : i64} - // CHECK-DAG: %[[REV1:.+]] = "tosa.reverse"(%arg1) <{axis = 1 : i64} - // CHECK: "tosa.conv2d"(%arg0, %1, %arg2) + // CHECK-DAG: %[[REV0:.+]] = tosa.reverse %0 {axis = 2 : i64} + // CHECK-DAG: 
%[[REV1:.+]] = tosa.reverse %arg1 {axis = 1 : i64} + // CHECK: tosa.conv2d %arg0, %1, %arg2 // CHECK-SAME: dilation = array, pad = array, // CHECK-SAME: quantization_info = #tosa.conv_quant, stride = array} - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) { + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 { out_pad = array, quantization_info = #tosa.conv_quant, out_shape = array, @@ -46,28 +46,28 @@ // Manipulate the weight matrix to handle striding. // CHECK-DAG: %[[PADV:.+]] = "tosa.const"() <{value = dense<{{\[\[}}0, 0], [0, 1], [0, 1], [0, 0]]> : tensor<4x2xi32>} // CHECK-DAG: %[[TRANSV:.+]] = "tosa.const"() <{value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>} - // CHECK-DAG: %[[PADW:.+]] = "tosa.pad"(%arg1, %[[PADV]]) - // CHECK-DAG: %[[RESW1:.+]] = "tosa.reshape"(%[[PADW]]) <{new_shape = array} - // CHECK-DAG: %[[TRANS:.+]] = "tosa.transpose"(%[[RESW1]], %[[TRANSV]]) - // CHECK-DAG: %[[RESW2:.+]] = "tosa.reshape"(%[[TRANS]]) <{new_shape = array} - // CHECK-DAG: %[[REV1:.+]] = "tosa.reverse"(%[[RESW2]]) <{axis = 1 : i64} - // CHECK-DAG: %[[NEWWEIGHT:.+]] = "tosa.reverse"(%[[REV1]]) <{axis = 2 : i64} + // CHECK-DAG: %[[PADW:.+]] = tosa.pad %arg1, %[[PADV]] + // CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]] {new_shape = array} + // CHECK-DAG: %[[TRANS:.+]] = tosa.transpose %[[RESW1]], %[[TRANSV]] + // CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]] {new_shape = array} + // CHECK-DAG: %[[REV1:.+]] = tosa.reverse %[[RESW2]] {axis = 1 : i64} + // CHECK-DAG: %[[NEWWEIGHT:.+]] = tosa.reverse %[[REV1]] {axis = 2 : i64} // Pad out the input matrix to handle the transpose conv. // CHECK-DAG: %[[PAD:.+]] = "tosa.const"() <{value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi32>} // CHECK-DAG: %[[TRANS2:.+]] = "tosa.const"() <{value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>} - // CHECK-DAG: %[[NEWINPUT:.+]] = "tosa.pad"(%arg0, %[[PAD]]) + // CHECK-DAG: %[[NEWINPUT:.+]] = tosa.pad %arg0, %[[PAD]] // Manipulate the final shape. 
// CHECK-DAG: %[[BIAS:.+]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor<30xf32>} - // CHECK-DAG: %[[CONV:.+]] = "tosa.conv2d"(%[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]]) <{dilation = array, pad = array, stride = array} - // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = "tosa.reshape"(%[[CONV]]) <{new_shape = array} - // CHECK-DAG: %[[TRANS_OUT:.+]] = "tosa.transpose"(%[[RESHAPE_OUT_1]], %[[TRANS2]]) - // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = "tosa.reshape"(%[[TRANS_OUT]]) <{new_shape = array} - // CHECK-DAG: %[[SLICE:.+]] = "tosa.slice"(%[[RESHAPE_OUT_2]]) <{size = array, start = array} - // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = "tosa.reshape"(%arg2) <{new_shape = array} - // CHECK: %[[ADD:.+]] = "tosa.add"(%[[SLICE]], %[[RESHAPE_ARG2]]) - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, out_shape = array, stride = array} : (tensor<2x17x15x3xf32>, tensor<5x3x5x3xf32>, tensor<5xf32>) -> tensor<2x35x47x5xf32> + // CHECK-DAG: %[[CONV:.+]] = tosa.conv2d %[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]] {dilation = array, pad = array, stride = array} + // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]] {new_shape = array} + // CHECK-DAG: %[[TRANS_OUT:.+]] = tosa.transpose %[[RESHAPE_OUT_1]], %[[TRANS2]] + // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]] {new_shape = array} + // CHECK-DAG: %[[SLICE:.+]] = tosa.slice %[[RESHAPE_OUT_2]] {size = array, start = array} + // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[ADD:.+]] = tosa.add %[[SLICE]], %[[RESHAPE_ARG2]] + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, out_shape = array, stride = array} : (tensor<2x17x15x3xf32>, tensor<5x3x5x3xf32>, tensor<5xf32>) -> tensor<2x35x47x5xf32> %1 = tensor.cast %0 : tensor<2x35x47x5xf32> to tensor<2x?x?x5xf32> return %1 : tensor<2x?x?x5xf32> } @@ -79,28 +79,28 @@ // Manipulate the weight matrix to handle striding. 
// CHECK-DAG: %[[PADV:.+]] = "tosa.const"() <{value = dense<{{\[\[}}0, 0], [0, 1], [0, 1], [0, 0]]> : tensor<4x2xi32>} // CHECK-DAG: %[[TRANSV:.+]] = "tosa.const"() <{value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>} - // CHECK-DAG: %[[PADW:.+]] = "tosa.pad"(%arg1, %[[PADV]]) <{quantization_info = #tosa.pad_quant} - // CHECK-DAG: %[[RESW1:.+]] = "tosa.reshape"(%[[PADW]]) <{new_shape = array} - // CHECK-DAG: %[[TRANS:.+]] = "tosa.transpose"(%[[RESW1]], %[[TRANSV]]) - // CHECK-DAG: %[[RESW2:.+]] = "tosa.reshape"(%[[TRANS]]) <{new_shape = array} - // CHECK-DAG: %[[REV1:.+]] = "tosa.reverse"(%[[RESW2]]) <{axis = 1 : i64} - // CHECK-DAG: %[[NEWWEIGHT:.+]] = "tosa.reverse"(%[[REV1]]) <{axis = 2 : i64} + // CHECK-DAG: %[[PADW:.+]] = tosa.pad %arg1, %[[PADV]] {quantization_info = #tosa.pad_quant} + // CHECK-DAG: %[[RESW1:.+]] = tosa.reshape %[[PADW]] {new_shape = array} + // CHECK-DAG: %[[TRANS:.+]] = tosa.transpose %[[RESW1]], %[[TRANSV]] + // CHECK-DAG: %[[RESW2:.+]] = tosa.reshape %[[TRANS]] {new_shape = array} + // CHECK-DAG: %[[REV1:.+]] = tosa.reverse %[[RESW2]] {axis = 1 : i64} + // CHECK-DAG: %[[NEWWEIGHT:.+]] = tosa.reverse %[[REV1]] {axis = 2 : i64} // Pad out the input matrix to handle the transpose conv. // CHECK-DAG: %[[PAD:.+]] = "tosa.const"() <{value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi32>} // CHECK-DAG: %[[TRANS2:.+]] = "tosa.const"() <{value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>} - // CHECK-DAG: %[[NEWINPUT:.+]] = "tosa.pad"(%arg0, %[[PAD]]) <{quantization_info = #tosa.pad_quant} + // CHECK-DAG: %[[NEWINPUT:.+]] = tosa.pad %arg0, %[[PAD]] {quantization_info = #tosa.pad_quant} // Manipulate the final shape. 
// CHECK-DAG: %[[BIAS:.+]] = "tosa.const"() <{value = dense<0> : tensor<30xi32>} - // CHECK-DAG: %[[CONV:.+]] = "tosa.conv2d"(%[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]]) <{dilation = array, pad = array, quantization_info = #tosa.conv_quant, stride = array} - // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = "tosa.reshape"(%[[CONV]]) <{new_shape = array} - // CHECK-DAG: %[[TRANS_OUT:.+]] = "tosa.transpose"(%[[RESHAPE_OUT_1]], %[[TRANS2]]) - // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = "tosa.reshape"(%[[TRANS_OUT]]) <{new_shape = array} - // CHECK-DAG: %[[SLICE:.+]] = "tosa.slice"(%[[RESHAPE_OUT_2]]) <{size = array, start = array} - // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = "tosa.reshape"(%arg2) <{new_shape = array} - // CHECK: %[[ADD:.+]] = "tosa.add"(%[[SLICE]], %[[RESHAPE_ARG2]]) - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, quantization_info = #tosa.conv_quant, out_shape = array, stride = array} : (tensor<2x17x15x3xi8>, tensor<5x3x5x3xi8>, tensor<5xi32>) -> tensor<2x35x47x5xi32> + // CHECK-DAG: %[[CONV:.+]] = tosa.conv2d %[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]] {dilation = array, pad = array, quantization_info = #tosa.conv_quant, stride = array} + // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = tosa.reshape %[[CONV]] {new_shape = array} + // CHECK-DAG: %[[TRANS_OUT:.+]] = tosa.transpose %[[RESHAPE_OUT_1]], %[[TRANS2]] + // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = tosa.reshape %[[TRANS_OUT]] {new_shape = array} + // CHECK-DAG: %[[SLICE:.+]] = tosa.slice %[[RESHAPE_OUT_2]] {size = array, start = array} + // CHECK-DAG: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[ADD:.+]] = tosa.add %[[SLICE]], %[[RESHAPE_ARG2]] + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, quantization_info = #tosa.conv_quant, out_shape = array, stride = array} : (tensor<2x17x15x3xi8>, tensor<5x3x5x3xi8>, tensor<5xi32>) -> tensor<2x35x47x5xi32> return %0 : tensor<2x35x47x5xi32> } @@ -117,25 +117,25 @@ // CHECK: %[[RESULT_PERMS:.+]] = "tosa.const"() <{value = 
dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>} // CHECK: %[[RESULT_PAD:.+]] = "tosa.const"() // CHECK-SAME{literal}: value = dense<[[0, 0], [2, 0], [0, 0], [0, 0]]> : tensor<4x2xi32>} - // CHECK: %[[PAD_WEIGHT:.+]] = "tosa.pad"(%arg1, %[[WEIGHT_PAD]]) <{quantization_info = #tosa.pad_quant} - // CHECK: %[[RESHAPE_WEIGHT_0:.+]] = "tosa.reshape"(%[[PAD_WEIGHT]]) <{new_shape = array} - // CHECK: %[[TRANSPOSE_WEIGHT:.+]] = "tosa.transpose"(%[[RESHAPE_WEIGHT_0]], %[[WEIGHT_PERMS]]) - // CHECK: %[[RESHAPE_WEIGHT_1:.+]] = "tosa.reshape"(%[[TRANSPOSE_WEIGHT]]) <{new_shape = array} - // CHECK: %[[REVERSE:.+]] = "tosa.reverse"(%[[RESHAPE_WEIGHT_1]]) <{axis = 1 : i64} - // CHECK: %[[PAD_INPUT:.+]] = "tosa.pad"(%arg0, %[[INPUT_PAD]]) <{quantization_info = #tosa.pad_quant} - // CHECK: %[[CONV:.+]] = "tosa.conv2d"(%[[PAD_INPUT]], %[[REVERSE]], %[[ZERO]]) + // CHECK: %[[PAD_WEIGHT:.+]] = tosa.pad %arg1, %[[WEIGHT_PAD]] {quantization_info = #tosa.pad_quant} + // CHECK: %[[RESHAPE_WEIGHT_0:.+]] = tosa.reshape %[[PAD_WEIGHT]] {new_shape = array} + // CHECK: %[[TRANSPOSE_WEIGHT:.+]] = tosa.transpose %[[RESHAPE_WEIGHT_0]], %[[WEIGHT_PERMS]] + // CHECK: %[[RESHAPE_WEIGHT_1:.+]] = tosa.reshape %[[TRANSPOSE_WEIGHT]] {new_shape = array} + // CHECK: %[[REVERSE:.+]] = tosa.reverse %[[RESHAPE_WEIGHT_1]] {axis = 1 : i64} + // CHECK: %[[PAD_INPUT:.+]] = tosa.pad %arg0, %[[INPUT_PAD]] {quantization_info = #tosa.pad_quant} + // CHECK: %[[CONV:.+]] = tosa.conv2d %[[PAD_INPUT]], %[[REVERSE]], %[[ZERO]] // CHECK-SAME{literal}: dilation = [1, 1], pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant, stride = [1, 1]} - // CHECK: %[[RESHAPE_RESULT_0:.+]] = "tosa.reshape"(%[[CONV]]) <{new_shape = array} - // CHECK: %[[TRANSPOSE_RESULT:.+]] = "tosa.transpose"(%[[RESHAPE_RESULT_0]], %[[RESULT_PERMS]]) - // CHECK: %[[RESHAPE_RESULT_1:.+]] = "tosa.reshape"(%[[TRANSPOSE_RESULT]]) <{new_shape = array} - // CHECK: %[[PAD_RESULT:.+]] = "tosa.pad"(%[[RESHAPE_RESULT_1]], %[[RESULT_PAD]]) - // CHECK: 
%[[RESHAPE_ARG2:.+]] = "tosa.reshape"(%arg2) <{new_shape = array} - // CHECK: %[[ADD:.+]] = "tosa.add"(%[[PAD_RESULT]], %[[RESHAPE_ARG2]]) - %2 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) { + // CHECK: %[[RESHAPE_RESULT_0:.+]] = tosa.reshape %[[CONV]] {new_shape = array} + // CHECK: %[[TRANSPOSE_RESULT:.+]] = tosa.transpose %[[RESHAPE_RESULT_0]], %[[RESULT_PERMS]] + // CHECK: %[[RESHAPE_RESULT_1:.+]] = tosa.reshape %[[TRANSPOSE_RESULT]] {new_shape = array} + // CHECK: %[[PAD_RESULT:.+]] = tosa.pad %[[RESHAPE_RESULT_1]], %[[RESULT_PAD]] + // CHECK: %[[RESHAPE_ARG2:.+]] = tosa.reshape %arg2 {new_shape = array} + // CHECK: %[[ADD:.+]] = tosa.add %[[PAD_RESULT]], %[[RESHAPE_ARG2]] + %2 = tosa.transpose_conv2d %arg0, %arg1, %arg2 { out_pad = array, out_shape = array, stride = array, quantization_info = #tosa.conv_quant} : - (tensor<1x16x1x1xi8>, tensor<1x2x1x1xi8>, tensor<1xi32>) -> (tensor<1x19x2x1xi32>) + (tensor<1x16x1x1xi8>, tensor<1x2x1x1xi8>, tensor<1xi32>) -> tensor<1x19x2x1xi32> "func.return" (%2) : (tensor<1x19x2x1xi32>) -> () } diff --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir --- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir +++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir @@ -2,9 +2,9 @@ // CHECK-LABEL: @test_return func.func @test_return(%arg0 : tensor<4xf32>) -> tensor<*xf32> { - // CHECK: [[LOG:%.+]] = "tosa.log"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> + // CHECK: [[LOG:%.+]] = tosa.log %arg0 : (tensor<4xf32>) -> tensor<4xf32> // CHECK: tensor.cast [[LOG]] : tensor<4xf32> to tensor<*xf32> - %0 = "tosa.log"(%arg0) : (tensor<4xf32>) -> tensor<*xf32> + %0 = tosa.log %arg0 : (tensor<4xf32>) -> tensor<*xf32> return %0 : tensor<*xf32> } @@ -12,14 +12,14 @@ // CHECK-LABEL: @test_multiple func.func @test_multiple(%arg0 : tensor<4xf32>, %arg1 : tensor<1xf32>, %arg2 : tensor) -> tensor<*xf32> { - // CHECK: [[ADD:%.+]] = "tosa.add"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> - %0 
= "tosa.add"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> + // CHECK: [[ADD:%.+]] = tosa.add %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> + %0 = tosa.add %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> - // CHECK: [[LOG:%.+]] = "tosa.log"(%0) : (tensor<4xf32>) -> tensor<4xf32> - %1 = "tosa.log"(%0) : (tensor<*xf32>) -> tensor<*xf32> + // CHECK: [[LOG:%.+]] = tosa.log %0 : (tensor<4xf32>) -> tensor<4xf32> + %1 = tosa.log %0 : (tensor<*xf32>) -> tensor<*xf32> - // CHECK: [[SUB:%.+]] = "tosa.sub"(%0, %arg2) : (tensor<4xf32>, tensor) -> tensor<4xf32> - %2 = "tosa.sub"(%0, %arg2) : (tensor<*xf32>, tensor) -> tensor<*xf32> + // CHECK: [[SUB:%.+]] = tosa.sub %0, %arg2 : (tensor<4xf32>, tensor) -> tensor<4xf32> + %2 = tosa.sub %0, %arg2 : (tensor<*xf32>, tensor) -> tensor<*xf32> return %0 : tensor<*xf32> } @@ -27,47 +27,47 @@ // CHECK-LABEL: @test_unary_f32 func.func @test_unary_f32(%arg0 : tensor<4xf32>) -> () { - // CHECK: "tosa.abs"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> - %0 = "tosa.abs"(%arg0) : (tensor<4xf32>) -> tensor<*xf32> + // CHECK: tosa.abs %arg0 : (tensor<4xf32>) -> tensor<4xf32> + %0 = tosa.abs %arg0 : (tensor<4xf32>) -> tensor<*xf32> - // CHECK: "tosa.ceil"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> - %1 = "tosa.ceil"(%arg0) : (tensor<4xf32>) -> tensor<*xf32> + // CHECK: tosa.ceil %arg0 : (tensor<4xf32>) -> tensor<4xf32> + %1 = tosa.ceil %arg0 : (tensor<4xf32>) -> tensor<*xf32> - // CHECK: "tosa.clamp"(%arg0) {{.+}} : (tensor<4xf32>) -> tensor<4xf32> - %2 = "tosa.clamp"(%arg0) { max_int = 10 : i64, min_int = 0 : i64, min_fp = 0.0 : f32, max_fp = 10.0 : f32 } : (tensor<4xf32>) -> tensor<*xf32> + // CHECK: tosa.clamp %arg0 {{.+}} : (tensor<4xf32>) -> tensor<4xf32> + %2 = tosa.clamp %arg0 { max_int = 10 : i64, min_int = 0 : i64, min_fp = 0.0 : f32, max_fp = 10.0 : f32 } : (tensor<4xf32>) -> tensor<*xf32> - // CHECK: "tosa.exp"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> - %3 = "tosa.exp"(%arg0) : 
(tensor<4xf32>) -> tensor<*xf32> + // CHECK: tosa.exp %arg0 : (tensor<4xf32>) -> tensor<4xf32> + %3 = tosa.exp %arg0 : (tensor<4xf32>) -> tensor<*xf32> - // CHECK: "tosa.floor"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> - %4 = "tosa.floor"(%arg0) : (tensor<4xf32>) -> tensor<*xf32> + // CHECK: tosa.floor %arg0 : (tensor<4xf32>) -> tensor<4xf32> + %4 = tosa.floor %arg0 : (tensor<4xf32>) -> tensor<*xf32> - // CHECK: "tosa.log"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> - %5 = "tosa.log"(%arg0) : (tensor<4xf32>) -> tensor<*xf32> + // CHECK: tosa.log %arg0 : (tensor<4xf32>) -> tensor<4xf32> + %5 = tosa.log %arg0 : (tensor<4xf32>) -> tensor<*xf32> - // CHECK: "tosa.negate"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> - %6 = "tosa.negate"(%arg0) : (tensor<4xf32>) -> tensor<*xf32> + // CHECK: tosa.negate %arg0 : (tensor<4xf32>) -> tensor<4xf32> + %6 = tosa.negate %arg0 : (tensor<4xf32>) -> tensor<*xf32> - // CHECK: "tosa.reciprocal"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> - %7 = "tosa.reciprocal"(%arg0) : (tensor<4xf32>) -> tensor<*xf32> + // CHECK: tosa.reciprocal %arg0 : (tensor<4xf32>) -> tensor<4xf32> + %7 = tosa.reciprocal %arg0 : (tensor<4xf32>) -> tensor<*xf32> - // CHECK: "tosa.reverse"(%arg0) <{axis = 0 : i64}> : (tensor<4xf32>) -> tensor<4xf32> - %8 = "tosa.reverse"(%arg0) { axis = 0 : i64 } : (tensor<4xf32>) -> tensor + // CHECK: tosa.reverse %arg0 {axis = 0 : i64} : (tensor<4xf32>) -> tensor<4xf32> + %8 = tosa.reverse %arg0 { axis = 0 : i64 } : (tensor<4xf32>) -> tensor - // CHECK: "tosa.rsqrt"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> - %9 = "tosa.rsqrt"(%arg0) : (tensor<4xf32>) -> tensor<*xf32> + // CHECK: tosa.rsqrt %arg0 : (tensor<4xf32>) -> tensor<4xf32> + %9 = tosa.rsqrt %arg0 : (tensor<4xf32>) -> tensor<*xf32> - // CHECK: "tosa.tanh"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> - %10 = "tosa.tanh"(%arg0) : (tensor<4xf32>) -> tensor<*xf32> + // CHECK: tosa.tanh %arg0 : (tensor<4xf32>) -> tensor<4xf32> + %10 = tosa.tanh %arg0 : (tensor<4xf32>) -> tensor<*xf32> - 
// CHECK: "tosa.sigmoid"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> - %11 = "tosa.sigmoid"(%arg0) : (tensor<4xf32>) -> tensor<*xf32> + // CHECK: tosa.sigmoid %arg0 : (tensor<4xf32>) -> tensor<4xf32> + %11 = tosa.sigmoid %arg0 : (tensor<4xf32>) -> tensor<*xf32> - // CHECK: "tosa.cast"(%arg0) : (tensor<4xf32>) -> tensor<4xi32> - %12 = "tosa.cast"(%arg0) : (tensor<4xf32>) -> tensor<*xi32> + // CHECK: tosa.cast %arg0 : (tensor<4xf32>) -> tensor<4xi32> + %12 = tosa.cast %arg0 : (tensor<4xf32>) -> tensor<*xi32> - // CHECK: "tosa.erf"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> - %13 = "tosa.erf"(%arg0) : (tensor<4xf32>) -> tensor<*xf32> + // CHECK: tosa.erf %arg0 : (tensor<4xf32>) -> tensor<4xf32> + %13 = tosa.erf %arg0 : (tensor<4xf32>) -> tensor<*xf32> return } @@ -75,29 +75,29 @@ // CHECK-LABEL: @test_unary_i32 func.func @test_unary_i32(%arg0 : tensor<4xi32>) -> () { - // CHECK: "tosa.abs"(%arg0) : (tensor<4xi32>) -> tensor<4xi32> - %0 = "tosa.abs"(%arg0) : (tensor<4xi32>) -> tensor<*xi32> + // CHECK: tosa.abs %arg0 : (tensor<4xi32>) -> tensor<4xi32> + %0 = tosa.abs %arg0 : (tensor<4xi32>) -> tensor<*xi32> - // CHECK: "tosa.bitwise_not"(%arg0) : (tensor<4xi32>) -> tensor<4xi32> - %1 = "tosa.bitwise_not"(%arg0) : (tensor<4xi32>) -> tensor<*xi32> + // CHECK: tosa.bitwise_not %arg0 : (tensor<4xi32>) -> tensor<4xi32> + %1 = tosa.bitwise_not %arg0 : (tensor<4xi32>) -> tensor<*xi32> - // CHECK: "tosa.clamp"(%arg0) {{.+}} : (tensor<4xi32>) -> tensor<4xi32> - %2 = "tosa.clamp"(%arg0) { max_int = 10 : i64, min_int = 0 : i64, min_fp = 0.0 : f32, max_fp = 10.0 : f32 } : (tensor<4xi32>) -> tensor<*xi32> + // CHECK: tosa.clamp %arg0 {{.+}} : (tensor<4xi32>) -> tensor<4xi32> + %2 = tosa.clamp %arg0 { max_int = 10 : i64, min_int = 0 : i64, min_fp = 0.0 : f32, max_fp = 10.0 : f32 } : (tensor<4xi32>) -> tensor<*xi32> - // CHECK: "tosa.clz"(%arg0) : (tensor<4xi32>) -> tensor<4xi32> - %3 = "tosa.clz"(%arg0) : (tensor<4xi32>) -> tensor<*xi32> + // CHECK: tosa.clz %arg0 : (tensor<4xi32>) -> 
tensor<4xi32> + %3 = tosa.clz %arg0 : (tensor<4xi32>) -> tensor<*xi32> - // CHECK: "tosa.negate"(%arg0) : (tensor<4xi32>) -> tensor<4xi32> - %4 = "tosa.negate"(%arg0) : (tensor<4xi32>) -> tensor<*xi32> + // CHECK: tosa.negate %arg0 : (tensor<4xi32>) -> tensor<4xi32> + %4 = tosa.negate %arg0 : (tensor<4xi32>) -> tensor<*xi32> - // CHECK: "tosa.reverse"(%arg0) <{axis = 0 : i64}> : (tensor<4xi32>) -> tensor<4xi32> - %5 = "tosa.reverse"(%arg0) { axis = 0 : i64 } : (tensor<4xi32>) -> tensor + // CHECK: tosa.reverse %arg0 {axis = 0 : i64} : (tensor<4xi32>) -> tensor<4xi32> + %5 = tosa.reverse %arg0 { axis = 0 : i64 } : (tensor<4xi32>) -> tensor - // CHECK: "tosa.rescale"(%arg0) {{.+}} : (tensor<4xi32>) -> tensor<4xi16> - %6 = "tosa.rescale"(%arg0) {input_zp = 243 : i32, output_zp = 252 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor<4xi32>) -> (tensor<*xi16>) + // CHECK: tosa.rescale %arg0 {{.+}} : (tensor<4xi32>) -> tensor<4xi16> + %6 = tosa.rescale %arg0 {input_zp = 243 : i32, output_zp = 252 : i32, multiplier = array, shift = array, scale32 = false, double_round = false, per_channel = false} : (tensor<4xi32>) -> tensor<*xi16> - // CHECK: "tosa.identity"(%arg0) : (tensor<4xi32>) -> tensor<4xi32> - %7 = "tosa.identity"(%arg0) : (tensor<4xi32>) -> tensor + // CHECK: tosa.identity %arg0 : (tensor<4xi32>) -> tensor<4xi32> + %7 = tosa.identity %arg0 : (tensor<4xi32>) -> tensor return } @@ -105,32 +105,32 @@ // CHECK-LABEL: @test_binary_scalar_f32 func.func @test_binary_scalar_f32(%arg0 : tensor<4xf32>, %arg1 : tensor) -> () { - // CHECK: "tosa.add"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<4xf32> - %0 = "tosa.add"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<*xf32> + // CHECK: tosa.add %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<4xf32> + %0 = tosa.add %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<*xf32> - // CHECK: "tosa.maximum"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<4xf32> - %1 
= "tosa.maximum"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<*xf32> + // CHECK: tosa.maximum %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<4xf32> + %1 = tosa.maximum %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<*xf32> - // CHECK: "tosa.minimum"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<4xf32> - %2 = "tosa.minimum"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<*xf32> + // CHECK: tosa.minimum %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<4xf32> + %2 = tosa.minimum %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<*xf32> - // CHECK: "tosa.mul"(%arg0, %arg1) <{shift = 0 : i32}> : (tensor<4xf32>, tensor) -> tensor<4xf32> - %3 = "tosa.mul"(%arg0, %arg1) { shift = 0 : i32 }: (tensor<4xf32>, tensor) -> tensor<*xf32> + // CHECK: tosa.mul %arg0, %arg1 {shift = 0 : i32} : (tensor<4xf32>, tensor) -> tensor<4xf32> + %3 = tosa.mul %arg0, %arg1 { shift = 0 : i32 } : (tensor<4xf32>, tensor) -> tensor<*xf32> - // CHECK: "tosa.pow"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<4xf32> - %4 = "tosa.pow"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<*xf32> + // CHECK: tosa.pow %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<4xf32> + %4 = tosa.pow %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<*xf32> - // CHECK: "tosa.sub"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<4xf32> - %5 = "tosa.sub"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<*xf32> + // CHECK: tosa.sub %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<4xf32> + %5 = tosa.sub %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<*xf32> - // CHECK: "tosa.equal"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<4xi1> - %6 = "tosa.equal"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<*xi1> + // CHECK: tosa.equal %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<4xi1> + %6 = tosa.equal %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<*xi1> - // CHECK: "tosa.greater"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<4xi1> - %7 = "tosa.greater"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> 
tensor<*xi1> + // CHECK: tosa.greater %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<4xi1> + %7 = tosa.greater %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<*xi1> - // CHECK: "tosa.greater_equal"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<4xi1> - %8 = "tosa.greater_equal"(%arg0, %arg1) : (tensor<4xf32>, tensor) -> tensor<*xi1> + // CHECK: tosa.greater_equal %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<4xi1> + %8 = tosa.greater_equal %arg0, %arg1 : (tensor<4xf32>, tensor) -> tensor<*xi1> return } @@ -139,32 +139,32 @@ // CHECK-LABEL: @test_binary_broadcast_f32 func.func @test_binary_broadcast_f32(%arg0 : tensor<4xf32>, %arg1 : tensor<1xf32>) -> () { - // CHECK: "tosa.add"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> - %0 = "tosa.add"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> + // CHECK: tosa.add %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> + %0 = tosa.add %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> - // CHECK: "tosa.maximum"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> - %1 = "tosa.maximum"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> + // CHECK: tosa.maximum %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> + %1 = tosa.maximum %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> - // CHECK: "tosa.minimum"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> - %2 = "tosa.minimum"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> + // CHECK: tosa.minimum %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> + %2 = tosa.minimum %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> - // CHECK: "tosa.mul"(%arg0, %arg1) <{shift = 0 : i32}> : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> - %3 = "tosa.mul"(%arg0, %arg1) { shift = 0 : i32 } : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> + // CHECK: tosa.mul %arg0, %arg1 {shift = 0 : i32} : (tensor<4xf32>, tensor<1xf32>) -> 
tensor<4xf32> + %3 = tosa.mul %arg0, %arg1 { shift = 0 : i32 } : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> - // CHECK: "tosa.pow"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> - %4 = "tosa.pow"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> + // CHECK: tosa.pow %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> + %4 = tosa.pow %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> - // CHECK: "tosa.sub"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> - %5 = "tosa.sub"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> + // CHECK: tosa.sub %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32> + %5 = tosa.sub %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32> - // CHECK: "tosa.equal"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xi1> - %6 = "tosa.equal"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xi1> + // CHECK: tosa.equal %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xi1> + %6 = tosa.equal %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xi1> - // CHECK: "tosa.greater"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xi1> - %7 = "tosa.greater"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xi1> + // CHECK: tosa.greater %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xi1> + %7 = tosa.greater %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xi1> - // CHECK: "tosa.greater_equal"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xi1> - %8 = "tosa.greater_equal"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xi1> + // CHECK: tosa.greater_equal %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xi1> + %8 = tosa.greater_equal %arg0, %arg1 : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xi1> return } @@ -173,47 +173,47 @@ // CHECK-LABEL: @test_binary_i32 func.func @test_binary_i32(%arg0 : tensor<4xi32>, %arg1 : tensor) -> () { - // CHECK: "tosa.add"(%arg0, %arg1) 
: (tensor<4xi32>, tensor) -> tensor<4xi32> - %0 = "tosa.add"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<*xi32> + // CHECK: tosa.add %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<4xi32> + %0 = tosa.add %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<*xi32> - // CHECK: "tosa.bitwise_and"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<4xi32> - %1 = "tosa.bitwise_and"(%arg0, %arg1): (tensor<4xi32>, tensor) -> tensor<*xi32> + // CHECK: tosa.bitwise_and %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<4xi32> + %1 = tosa.bitwise_and %arg0, %arg1: (tensor<4xi32>, tensor) -> tensor<*xi32> - // CHECK: "tosa.bitwise_or"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<4xi32> - %2 = "tosa.bitwise_or"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<*xi32> + // CHECK: tosa.bitwise_or %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<4xi32> + %2 = tosa.bitwise_or %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<*xi32> - // CHECK: "tosa.bitwise_xor"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<4xi32> - %3 = "tosa.bitwise_xor"(%arg0, %arg1): (tensor<4xi32>, tensor) -> tensor<*xi32> + // CHECK: tosa.bitwise_xor %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<4xi32> + %3 = tosa.bitwise_xor %arg0, %arg1: (tensor<4xi32>, tensor) -> tensor<*xi32> - // CHECK: "tosa.equal"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<4xi1> - %4 = "tosa.equal"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<*xi1> + // CHECK: tosa.equal %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<4xi1> + %4 = tosa.equal %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<*xi1> - // CHECK: "tosa.greater"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<4xi1> - %5 = "tosa.greater"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<*xi1> + // CHECK: tosa.greater %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<4xi1> + %5 = tosa.greater %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<*xi1> - // CHECK: "tosa.greater_equal"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<4xi1> - %6 = 
"tosa.greater_equal"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<*xi1> + // CHECK: tosa.greater_equal %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<4xi1> + %6 = tosa.greater_equal %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<*xi1> - // CHECK: "tosa.logical_left_shift"(%arg0, %arg1) {shift = 0 : i32} : (tensor<4xi32>, tensor) -> tensor<4xi32> - %7 = "tosa.logical_left_shift"(%arg0, %arg1) { shift = 0 : i32 }: (tensor<4xi32>, tensor) -> tensor<*xi32> + // CHECK: tosa.logical_left_shift %arg0, %arg1 {shift = 0 : i32} : (tensor<4xi32>, tensor) -> tensor<4xi32> + %7 = tosa.logical_left_shift %arg0, %arg1 { shift = 0 : i32 }: (tensor<4xi32>, tensor) -> tensor<*xi32> - // CHECK: "tosa.logical_right_shift"(%arg0, %arg1) {shift = 0 : i32} : (tensor<4xi32>, tensor) -> tensor<4xi32> - %8 = "tosa.logical_right_shift"(%arg0, %arg1) { shift = 0 : i32 }: (tensor<4xi32>, tensor) -> tensor<*xi32> + // CHECK: tosa.logical_right_shift %arg0, %arg1 {shift = 0 : i32} : (tensor<4xi32>, tensor) -> tensor<4xi32> + %8 = tosa.logical_right_shift %arg0, %arg1 { shift = 0 : i32 }: (tensor<4xi32>, tensor) -> tensor<*xi32> - // CHECK: "tosa.maximum"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<4xi32> - %9 = "tosa.maximum"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<*xi32> + // CHECK: tosa.maximum %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<4xi32> + %9 = tosa.maximum %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<*xi32> - // CHECK: "tosa.minimum"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<4xi32> - %10 = "tosa.minimum"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<*xi32> + // CHECK: tosa.minimum %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<4xi32> + %10 = tosa.minimum %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<*xi32> - // CHECK: "tosa.mul"(%arg0, %arg1) <{shift = 0 : i32}> : (tensor<4xi32>, tensor) -> tensor<4xi32> - %11 = "tosa.mul"(%arg0, %arg1) { shift = 0 : i32 }: (tensor<4xi32>, tensor) -> tensor<*xi32> + // CHECK: tosa.mul %arg0, %arg1 
{shift = 0 : i32} : (tensor<4xi32>, tensor) -> tensor<4xi32> + %11 = tosa.mul %arg0, %arg1 { shift = 0 : i32 }: (tensor<4xi32>, tensor) -> tensor<*xi32> - // CHECK: "tosa.pow"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<4xi32> - %12 = "tosa.pow"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<*xi32> + // CHECK: tosa.pow %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<4xi32> + %12 = tosa.pow %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<*xi32> - // CHECK: "tosa.sub"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<4xi32> - %13 = "tosa.sub"(%arg0, %arg1) : (tensor<4xi32>, tensor) -> tensor<*xi32> + // CHECK: tosa.sub %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<4xi32> + %13 = tosa.sub %arg0, %arg1 : (tensor<4xi32>, tensor) -> tensor<*xi32> return } @@ -222,14 +222,14 @@ // CHECK-LABEL: @test_binary_i1 func.func @test_binary_i1(%arg0 : tensor<4xi1>, %arg1 : tensor) -> () { - // CHECK "tosa.logical_and"(%arg0, %arg1) : (tensor<4xi1>, tensor) -> tensor<4xi1> - %0 = "tosa.logical_and"(%arg0, %arg1): (tensor<4xi1>, tensor) -> tensor<*xi1> + // CHECK tosa.logical_and %arg0, %arg1 : (tensor<4xi1>, tensor) -> tensor<4xi1> + %0 = tosa.logical_and %arg0, %arg1 : (tensor<4xi1>, tensor) -> tensor<*xi1> - // CHECK "tosa.logical_or"(%arg0, %arg1) : (tensor<4xi1>, tensor) -> tensor<4xi1> - %1 = "tosa.logical_or"(%arg0, %arg1): (tensor<4xi1>, tensor) -> tensor<*xi1> + // CHECK tosa.logical_or %arg0, %arg1 : (tensor<4xi1>, tensor) -> tensor<4xi1> + %1 = tosa.logical_or %arg0, %arg1 : (tensor<4xi1>, tensor) -> tensor<*xi1> - // CHECK "tosa.logical_xor"(%arg0, %arg1) : (tensor<4xi1>, tensor) -> tensor<*4i1> - %2 = "tosa.logical_xor"(%arg0, %arg1): (tensor<4xi1>, tensor) -> tensor<*xi1> + // CHECK tosa.logical_xor %arg0, %arg1 : (tensor<4xi1>, tensor) -> tensor<*4i1> + %2 = tosa.logical_xor %arg0, %arg1 : (tensor<4xi1>, tensor) -> tensor<*xi1> return } @@ -238,8 +238,8 @@ // CHECK-LABEL: @test_select_i32 func.func @test_select_i32(%arg0 : tensor<4xi1>, %arg1 : tensor, 
%arg2 : tensor<4xi32>) -> () { - // CHECK: "tosa.select"(%arg0, %arg1, %arg2) : (tensor<4xi1>, tensor, tensor<4xi32>) -> tensor<4xi32> - %0 = "tosa.select"(%arg0, %arg1, %arg2): (tensor<4xi1>, tensor, tensor<4xi32>) -> tensor<*xi32> + // CHECK: tosa.select %arg0, %arg1, %arg2 : (tensor<4xi1>, tensor, tensor<4xi32>) -> tensor<4xi32> + %0 = tosa.select %arg0, %arg1, %arg2 : (tensor<4xi1>, tensor, tensor<4xi32>) -> tensor<*xi32> return } @@ -248,11 +248,11 @@ // CHECK-LABEL: @test_static_argmax func.func @test_static_argmax(%arg0 : tensor<2x3xi32>) -> () { - // CHECK: "tosa.argmax"(%arg0) <{axis = 0 : i64}> : (tensor<2x3xi32>) -> tensor<3xi32> - %0 = "tosa.argmax"(%arg0) {axis = 0 : i64} : (tensor<2x3xi32>) -> tensor + // CHECK: tosa.argmax %arg0 {axis = 0 : i64} : (tensor<2x3xi32>) -> tensor<3xi32> + %0 = tosa.argmax %arg0 {axis = 0 : i64} : (tensor<2x3xi32>) -> tensor - // CHECK: "tosa.argmax"(%arg0) <{axis = 1 : i64}> : (tensor<2x3xi32>) -> tensor<2xi32> - %1 = "tosa.argmax"(%arg0) {axis = 1 : i64} : (tensor<2x3xi32>) -> tensor + // CHECK: tosa.argmax %arg0 {axis = 1 : i64} : (tensor<2x3xi32>) -> tensor<2xi32> + %1 = tosa.argmax %arg0 {axis = 1 : i64} : (tensor<2x3xi32>) -> tensor return } @@ -260,11 +260,11 @@ // CHECK-LABEL: @test_dynamic_argmax func.func @test_dynamic_argmax(%arg0 : tensor<2x?xi32>) -> () { - // CHECK: "tosa.argmax"(%arg0) <{axis = 0 : i64}> : (tensor<2x?xi32>) -> tensor - %0 = "tosa.argmax"(%arg0) {axis = 0 : i64} : (tensor<2x?xi32>) -> tensor + // CHECK: tosa.argmax %arg0 {axis = 0 : i64} : (tensor<2x?xi32>) -> tensor + %0 = tosa.argmax %arg0 {axis = 0 : i64} : (tensor<2x?xi32>) -> tensor - // CHECK: "tosa.argmax"(%arg0) <{axis = 1 : i64}> : (tensor<2x?xi32>) -> tensor<2xi32> - %1 = "tosa.argmax"(%arg0) {axis = 1 : i64} : (tensor<2x?xi32>) -> tensor + // CHECK: tosa.argmax %arg0 {axis = 1 : i64} : (tensor<2x?xi32>) -> tensor<2xi32> + %1 = tosa.argmax %arg0 {axis = 1 : i64} : (tensor<2x?xi32>) -> tensor return } @@ -272,8 +272,8 @@ // 
CHECK-LABEL: @test_static_fully_connected func.func @test_static_fully_connected(%arg0 : tensor<3x4xf32>, %arg1 : tensor<5x4xf32>, %arg2 : tensor<5xf32>) -> () { - // CHECK: "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor<3x4xf32>, tensor<5x4xf32>, tensor<5xf32>) -> tensor<3x5xf32> - %0 = "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor<3x4xf32>, tensor<5x4xf32>, tensor<5xf32>) -> tensor + // CHECK: tosa.fully_connected %arg0, %arg1, %arg2 : (tensor<3x4xf32>, tensor<5x4xf32>, tensor<5xf32>) -> tensor<3x5xf32> + %0 = tosa.fully_connected %arg0, %arg1, %arg2 : (tensor<3x4xf32>, tensor<5x4xf32>, tensor<5xf32>) -> tensor return } @@ -281,8 +281,8 @@ // CHECK-LABEL: @test_static_input_fully_connected func.func @test_static_input_fully_connected(%arg0 : tensor<3x4xf32>, %arg1 : tensor, %arg2 : tensor) -> () { - // CHECK: "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor<3x4xf32>, tensor, tensor) -> tensor<3x?xf32> - %0 = "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor<3x4xf32>, tensor, tensor) -> tensor + // CHECK: tosa.fully_connected %arg0, %arg1, %arg2 : (tensor<3x4xf32>, tensor, tensor) -> tensor<3x?xf32> + %0 = tosa.fully_connected %arg0, %arg1, %arg2 : (tensor<3x4xf32>, tensor, tensor) -> tensor return } @@ -290,8 +290,8 @@ // CHECK-LABEL: @test_static_weight_fully_connected func.func @test_static_weight_fully_connected(%arg0 : tensor, %arg1 : tensor<5x4xf32>, %arg2 : tensor) -> () { - // CHECK: "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor, tensor<5x4xf32>, tensor) -> tensor - %0 = "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor, tensor<5x4xf32>, tensor) -> tensor + // CHECK: tosa.fully_connected %arg0, %arg1, %arg2 : (tensor, tensor<5x4xf32>, tensor) -> tensor + %0 = tosa.fully_connected %arg0, %arg1, %arg2 : (tensor, tensor<5x4xf32>, tensor) -> tensor return } @@ -299,8 +299,8 @@ // CHECK-LABEL: @test_static_bias_fully_connected func.func @test_static_bias_fully_connected(%arg0 : tensor, %arg1 : tensor, %arg2 : tensor<5xf32>) 
-> () { - // CHECK: "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor<5xf32>) -> tensor - %0 = "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor<5xf32>) -> tensor + // CHECK: tosa.fully_connected %arg0, %arg1, %arg2 : (tensor, tensor, tensor<5xf32>) -> tensor + %0 = tosa.fully_connected %arg0, %arg1, %arg2 : (tensor, tensor, tensor<5xf32>) -> tensor return } @@ -308,8 +308,8 @@ // CHECK-LABEL: @test_static_out_fully_connected func.func @test_static_out_fully_connected(%arg0 : tensor<3x?xf32>, %arg1 : tensor, %arg2 : tensor<5xf32>) -> () { - // CHECK: "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor<3x?xf32>, tensor, tensor<5xf32>) -> tensor<3x5xf32> - %0 = "tosa.fully_connected"(%arg0, %arg1, %arg2) : (tensor<3x?xf32>, tensor, tensor<5xf32>) -> tensor + // CHECK: tosa.fully_connected %arg0, %arg1, %arg2 : (tensor<3x?xf32>, tensor, tensor<5xf32>) -> tensor<3x5xf32> + %0 = tosa.fully_connected %arg0, %arg1, %arg2 : (tensor<3x?xf32>, tensor, tensor<5xf32>) -> tensor return } @@ -317,8 +317,8 @@ // CHECK-LABEL: @test_static_matmul func.func @test_static_matmul(%arg0 : tensor<2x3x4xi32>, %arg1 : tensor<2x4x5xi32>) -> () { - // CHECK: "tosa.matmul"(%arg0, %arg1) : (tensor<2x3x4xi32>, tensor<2x4x5xi32>) -> tensor<2x3x5xi32> - %0 = "tosa.matmul"(%arg0, %arg1) : (tensor<2x3x4xi32>, tensor<2x4x5xi32>) -> tensor + // CHECK: tosa.matmul %arg0, %arg1 : (tensor<2x3x4xi32>, tensor<2x4x5xi32>) -> tensor<2x3x5xi32> + %0 = tosa.matmul %arg0, %arg1 : (tensor<2x3x4xi32>, tensor<2x4x5xi32>) -> tensor return } @@ -327,8 +327,8 @@ // CHECK-LABEL: @test_dynamic_lhs_matmul func.func @test_dynamic_lhs_matmul(%arg0 : tensor, %arg1 : tensor<2x4x5xi32>) -> () { - // CHECK: "tosa.matmul"(%arg0, %arg1) : (tensor, tensor<2x4x5xi32>) -> tensor<2x?x5xi32> - %0 = "tosa.matmul"(%arg0, %arg1) : (tensor, tensor<2x4x5xi32>) -> tensor + // CHECK: tosa.matmul %arg0, %arg1 : (tensor, tensor<2x4x5xi32>) -> tensor<2x?x5xi32> + %0 = tosa.matmul %arg0, %arg1 : 
(tensor, tensor<2x4x5xi32>) -> tensor return } @@ -337,8 +337,8 @@ // CHECK-LABEL: @test_dynamic_rhs_matmul func.func @test_dynamic_rhs_matmul(%arg0 : tensor<2x3x4xi32>, %arg1 : tensor) -> () { - // CHECK: "tosa.matmul"(%arg0, %arg1) : (tensor<2x3x4xi32>, tensor) -> tensor<2x3x?xi32> - %0 = "tosa.matmul"(%arg0, %arg1) : (tensor<2x3x4xi32>, tensor) -> tensor + // CHECK: tosa.matmul %arg0, %arg1 : (tensor<2x3x4xi32>, tensor) -> tensor<2x3x?xi32> + %0 = tosa.matmul %arg0, %arg1 : (tensor<2x3x4xi32>, tensor) -> tensor return } @@ -347,8 +347,8 @@ // CHECK-LABEL: @test_dynamic_mixed_matmul func.func @test_dynamic_mixed_matmul(%arg0 : tensor, %arg1 : tensor) -> () { - // CHECK: "tosa.matmul"(%arg0, %arg1) : (tensor, tensor) -> tensor - %0 = "tosa.matmul"(%arg0, %arg1) : (tensor, tensor) -> tensor + // CHECK: tosa.matmul %arg0, %arg1 : (tensor, tensor) -> tensor + %0 = tosa.matmul %arg0, %arg1 : (tensor, tensor) -> tensor return } @@ -357,8 +357,8 @@ // CHECK-LABEL: @test_table_static func.func @test_table_static(%arg0 : tensor<4x5xi16>, %arg1 : tensor<513xi16>) -> () { - // CHECK:"tosa.table"(%arg0, %arg1) : (tensor<4x5xi16>, tensor<513xi16>) -> tensor<4x5xi16> - %0 = "tosa.table"(%arg0, %arg1) : (tensor<4x5xi16>, tensor<513xi16>) -> tensor + // CHECK:tosa.table %arg0, %arg1 : (tensor<4x5xi16>, tensor<513xi16>) -> tensor<4x5xi16> + %0 = tosa.table %arg0, %arg1 : (tensor<4x5xi16>, tensor<513xi16>) -> tensor return } @@ -366,8 +366,8 @@ // CHECK-LABEL: @test_table_dynamic func.func @test_table_dynamic(%arg0 : tensor<4x?xi16>, %arg1 : tensor<513xi16>) -> () { - // CHECK:"tosa.table"(%arg0, %arg1) : (tensor<4x?xi16>, tensor<513xi16>) -> tensor<4x?xi16> - %0 = "tosa.table"(%arg0, %arg1) : (tensor<4x?xi16>, tensor<513xi16>) -> tensor + // CHECK:tosa.table %arg0, %arg1 : (tensor<4x?xi16>, tensor<513xi16>) -> tensor<4x?xi16> + %0 = tosa.table %arg0, %arg1 : (tensor<4x?xi16>, tensor<513xi16>) -> tensor return } @@ -375,14 +375,14 @@ // CHECK-LABEL: @test_static_reshape func.func 
@test_static_reshape(%arg0 : tensor<4x4xi32>) -> () { - // CHECK: "tosa.reshape"(%arg0) <{new_shape = array}> : (tensor<4x4xi32>) -> tensor<16xi32> - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<4x4xi32>) -> tensor + // CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor<16xi32> + %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor - // CHECK: "tosa.reshape"(%arg0) <{new_shape = array}> : (tensor<4x4xi32>) -> tensor<16xi32> - %1 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<4x4xi32>) -> tensor + // CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor<16xi32> + %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor - // CHECK: "tosa.reshape"(%arg0) <{new_shape = array}> : (tensor<4x4xi32>) -> tensor<2x8xi32> - %2 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<4x4xi32>) -> tensor + // CHECK: tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor<2x8xi32> + %2 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x4xi32>) -> tensor return } @@ -390,14 +390,14 @@ // CHECK-LABEL: @test_dynamic_reshape func.func @test_dynamic_reshape(%arg0 : tensor<4x?xi32>) -> () { - // CHECK: %0 = "tosa.reshape"(%arg0) <{new_shape = array}> : (tensor<4x?xi32>) -> tensor<16xi32> - %0 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<4x?xi32>) -> tensor + // CHECK: %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor<16xi32> + %0 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor - // CHECK: %1 = "tosa.reshape"(%arg0) <{new_shape = array}> : (tensor<4x?xi32>) -> tensor - %1 = "tosa.reshape"(%arg0) {new_shape = array} : (tensor<4x?xi32>) -> tensor + // CHECK: %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor + %1 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor - // CHECK: %2 = "tosa.reshape"(%arg0) <{new_shape = array}> : (tensor<4x?xi32>) -> tensor<2x?xi32> - %2 = 
"tosa.reshape"(%arg0) {new_shape = array} : (tensor<4x?xi32>) -> tensor + // CHECK: %2 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor<2x?xi32> + %2 = tosa.reshape %arg0 {new_shape = array} : (tensor<4x?xi32>) -> tensor return } @@ -406,20 +406,20 @@ // CHECK: @test_reduce_binary func.func @test_reduce_binary(%arg0 : tensor<2x3x?x?xi1>) -> () { - // CHECK: "tosa.reduce_all"(%arg0) <{axis = 0 : i64}> : (tensor<2x3x?x?xi1>) -> tensor<1x3x?x?xi1> - %0 = "tosa.reduce_all"(%arg0) {axis = 0 : i64} : (tensor<2x3x?x?xi1>) -> tensor + // CHECK: tosa.reduce_all %arg0 {axis = 0 : i64} : (tensor<2x3x?x?xi1>) -> tensor<1x3x?x?xi1> + %0 = tosa.reduce_all %arg0 {axis = 0 : i64} : (tensor<2x3x?x?xi1>) -> tensor - // CHECK: "tosa.reduce_all"(%arg0) <{axis = 1 : i64}> : (tensor<2x3x?x?xi1>) -> tensor<2x1x?x?xi1> - %1 = "tosa.reduce_all"(%arg0) {axis = 1 : i64} : (tensor<2x3x?x?xi1>) -> tensor + // CHECK: tosa.reduce_all %arg0 {axis = 1 : i64} : (tensor<2x3x?x?xi1>) -> tensor<2x1x?x?xi1> + %1 = tosa.reduce_all %arg0 {axis = 1 : i64} : (tensor<2x3x?x?xi1>) -> tensor - // CHECK: "tosa.reduce_all"(%arg0) <{axis = 2 : i64}> : (tensor<2x3x?x?xi1>) -> tensor<2x3x1x?xi1> - %2 = "tosa.reduce_all"(%arg0) {axis = 2 : i64} : (tensor<2x3x?x?xi1>) -> tensor + // CHECK: tosa.reduce_all %arg0 {axis = 2 : i64} : (tensor<2x3x?x?xi1>) -> tensor<2x3x1x?xi1> + %2 = tosa.reduce_all %arg0 {axis = 2 : i64} : (tensor<2x3x?x?xi1>) -> tensor - // CHECK: "tosa.reduce_all"(%arg0) <{axis = 3 : i64}> : (tensor<2x3x?x?xi1>) -> tensor<2x3x?x1xi1> - %3 = "tosa.reduce_all"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xi1>) -> tensor + // CHECK: tosa.reduce_all %arg0 {axis = 3 : i64} : (tensor<2x3x?x?xi1>) -> tensor<2x3x?x1xi1> + %3 = tosa.reduce_all %arg0 {axis = 3 : i64} : (tensor<2x3x?x?xi1>) -> tensor - // CHECK: "tosa.reduce_any"(%arg0) <{axis = 0 : i64}> : (tensor<2x3x?x?xi1>) -> tensor<1x3x?x?xi1> - %4 = "tosa.reduce_any"(%arg0) {axis = 0 : i64} : (tensor<2x3x?x?xi1>) -> tensor + // CHECK: 
tosa.reduce_any %arg0 {axis = 0 : i64} : (tensor<2x3x?x?xi1>) -> tensor<1x3x?x?xi1> + %4 = tosa.reduce_any %arg0 {axis = 0 : i64} : (tensor<2x3x?x?xi1>) -> tensor return } @@ -428,26 +428,26 @@ // CHECK: @test_reduce_float func.func @test_reduce_float(%arg0 : tensor<2x3x?x?xf32>) -> () { - // CHECK: "tosa.reduce_sum"(%arg0) <{axis = 0 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<1x3x?x?xf32> - %0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor<2x3x?x?xf32>) -> tensor + // CHECK: tosa.reduce_sum %arg0 {axis = 0 : i64} : (tensor<2x3x?x?xf32>) -> tensor<1x3x?x?xf32> + %0 = tosa.reduce_sum %arg0 {axis = 0 : i64} : (tensor<2x3x?x?xf32>) -> tensor - // CHECK: "tosa.reduce_sum"(%arg0) <{axis = 1 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<2x1x?x?xf32> - %1 = "tosa.reduce_sum"(%arg0) {axis = 1 : i64} : (tensor<2x3x?x?xf32>) -> tensor + // CHECK: tosa.reduce_sum %arg0 {axis = 1 : i64} : (tensor<2x3x?x?xf32>) -> tensor<2x1x?x?xf32> + %1 = tosa.reduce_sum %arg0 {axis = 1 : i64} : (tensor<2x3x?x?xf32>) -> tensor - // CHECK: "tosa.reduce_sum"(%arg0) <{axis = 2 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<2x3x1x?xf32> - %2 = "tosa.reduce_sum"(%arg0) {axis = 2 : i64} : (tensor<2x3x?x?xf32>) -> tensor + // CHECK: tosa.reduce_sum %arg0 {axis = 2 : i64} : (tensor<2x3x?x?xf32>) -> tensor<2x3x1x?xf32> + %2 = tosa.reduce_sum %arg0 {axis = 2 : i64} : (tensor<2x3x?x?xf32>) -> tensor - // CHECK: "tosa.reduce_sum"(%arg0) <{axis = 3 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32> - %3 = "tosa.reduce_sum"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor + // CHECK: tosa.reduce_sum %arg0 {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32> + %3 = tosa.reduce_sum %arg0 {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor - // CHECK: "tosa.reduce_max"(%arg0) <{axis = 3 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32> - %4 = "tosa.reduce_max"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor + // CHECK: tosa.reduce_max %arg0 {axis = 3 : i64} : 
(tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32> + %4 = tosa.reduce_max %arg0 {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor - // CHECK: "tosa.reduce_min"(%arg0) <{axis = 3 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32> - %5 = "tosa.reduce_min"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor + // CHECK: tosa.reduce_min %arg0 {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32> + %5 = tosa.reduce_min %arg0 {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor - // CHECK: "tosa.reduce_prod"(%arg0) <{axis = 3 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32> - %6 = "tosa.reduce_prod"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor + // CHECK: tosa.reduce_prod %arg0 {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32> + %6 = tosa.reduce_prod %arg0 {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor return } @@ -456,8 +456,8 @@ // CHECK-LABEL: @test_concat func.func @test_concat(%arg0 : tensor<1x2xf32>, %arg1 : tensor<2x2xf32>) -> () { - // CHECK: "tosa.concat"(%arg0, %arg1) <{axis = 0 : i64}> : (tensor<1x2xf32>, tensor<2x2xf32>) -> tensor<3x2xf32> - %0 = "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x2xf32>) -> tensor + // CHECK: tosa.concat %arg0, %arg1 {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x2xf32>) -> tensor<3x2xf32> + %0 = tosa.concat %arg0, %arg1 {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x2xf32>) -> tensor return } @@ -466,8 +466,8 @@ // CHECK-LABEL: @test_concat_dynamic func.func @test_concat_dynamic(%arg0 : tensor<1x2xf32>, %arg1 : tensor<2x?xf32>) -> () { - // CHECK: "tosa.concat"(%arg0, %arg1) <{axis = 0 : i64}> : (tensor<1x2xf32>, tensor<2x?xf32>) -> tensor<3x2xf32> - %0 = "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x?xf32>) -> tensor + // CHECK: tosa.concat %arg0, %arg1 {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x?xf32>) -> tensor<3x2xf32> + %0 = tosa.concat %arg0, %arg1 {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x?xf32>) -> tensor return } 
@@ -476,8 +476,8 @@ // CHECK-LABEL: @test_concat_dynamic_axis func.func @test_concat_dynamic_axis(%arg0 : tensor, %arg1 : tensor<2x2xf32>) -> () { - // CHECK: "tosa.concat"(%arg0, %arg1) <{axis = 0 : i64}> : (tensor, tensor<2x2xf32>) -> tensor - %0 = "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor, tensor<2x2xf32>) -> tensor + // CHECK: tosa.concat %arg0, %arg1 {axis = 0 : i64} : (tensor, tensor<2x2xf32>) -> tensor + %0 = tosa.concat %arg0, %arg1 {axis = 0 : i64} : (tensor, tensor<2x2xf32>) -> tensor return } @@ -486,8 +486,8 @@ // CHECK-LABEL: @test_concat_axis_1 func.func @test_concat_axis_1(%arg0 : tensor<2x1xf32>, %arg1 : tensor<2x2xf32>) -> () { - // CHECK: "tosa.concat"(%arg0, %arg1) <{axis = 1 : i64}> : (tensor<2x1xf32>, tensor<2x2xf32>) -> tensor<2x3xf32> - %0 = "tosa.concat"(%arg0, %arg1) {axis = 1 : i64} : (tensor<2x1xf32>, tensor<2x2xf32>) -> tensor + // CHECK: tosa.concat %arg0, %arg1 {axis = 1 : i64} : (tensor<2x1xf32>, tensor<2x2xf32>) -> tensor<2x3xf32> + %0 = tosa.concat %arg0, %arg1 {axis = 1 : i64} : (tensor<2x1xf32>, tensor<2x2xf32>) -> tensor return } @@ -496,8 +496,8 @@ // CHECK-LABEL: @test_padding_no_const func.func @test_padding_no_const(%arg0 : tensor<1x2xf32>, %arg1 : tensor<2x2xi32>) -> () { - // CHECK: "tosa.pad"(%arg0, %arg1) : (tensor<1x2xf32>, tensor<2x2xi32>) -> tensor - %0 = "tosa.pad"(%arg0, %arg1) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor) + // CHECK: tosa.pad %arg0, %arg1 : (tensor<1x2xf32>, tensor<2x2xi32>) -> tensor + %0 = tosa.pad %arg0, %arg1 : (tensor<1x2xf32>, tensor<2x2xi32>) -> tensor return } @@ -506,8 +506,8 @@ // CHECK-LABEL:@test_padding_dynamic_input func.func @test_padding_dynamic_input(%arg0 : tensor<1x?xf32>) -> () { %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32> - // CHECK: "tosa.pad"(%arg0, %cst) : (tensor<1x?xf32>, tensor<2x2xi32>) -> tensor<4x?xf32> - %1 = "tosa.pad"(%arg0, %0) : (tensor<1x?xf32>, tensor<2x2xi32>) -> (tensor) + // CHECK: tosa.pad %arg0, %cst : (tensor<1x?xf32>, 
tensor<2x2xi32>) -> tensor<4x?xf32> + %1 = tosa.pad %arg0, %0 : (tensor<1x?xf32>, tensor<2x2xi32>) -> tensor return } @@ -516,8 +516,8 @@ // CHECK-LABEL: @test_padding_simple func.func @test_padding_simple(%arg0 : tensor<1x2xf32>) -> () { %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32> - // CHECK: "tosa.pad"(%arg0, %cst) : (tensor<1x2xf32>, tensor<2x2xi32>) -> tensor<4x9xf32> - %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor) + // CHECK: tosa.pad %arg0, %cst : (tensor<1x2xf32>, tensor<2x2xi32>) -> tensor<4x9xf32> + %1 = tosa.pad %arg0, %0 : (tensor<1x2xf32>, tensor<2x2xi32>) -> tensor return } @@ -525,8 +525,8 @@ // CHECK-LABEL: @test_slice func.func @test_slice(%arg0 : tensor) -> () { - // CHECK: "tosa.slice"(%arg0) <{size = array, start = array}> : (tensor) -> tensor<2xi32> - %0 = "tosa.slice"(%arg0) { size = array, start = array } : (tensor) -> tensor + // CHECK: tosa.slice %arg0 {size = array, start = array} : (tensor) -> tensor<2xi32> + %0 = tosa.slice %arg0 { size = array, start = array } : (tensor) -> tensor return } @@ -534,8 +534,8 @@ // CHECK-LABEL: @test_slice_dynamic func.func @test_slice_dynamic(%arg0 : tensor<10x?x2xf32>) -> () { - // CHECK: "tosa.slice"(%arg0) <{size = array, start = array}> : (tensor<10x?x2xf32>) -> tensor<7x?x1xf32> - %0 = "tosa.slice"(%arg0) {size = array, start = array} : (tensor<10x?x2xf32>) -> tensor + // CHECK: tosa.slice %arg0 {size = array, start = array} : (tensor<10x?x2xf32>) -> tensor<7x?x1xf32> + %0 = tosa.slice %arg0 {size = array, start = array} : (tensor<10x?x2xf32>) -> tensor return } @@ -543,8 +543,8 @@ // CHECK-LABEL: @test_tile func.func @test_tile(%arg0 : tensor<2x3x?xi32>) -> () { - // CHECK: "tosa.tile"(%arg0) <{multiples = array}> : (tensor<2x3x?xi32>) -> tensor<4x3x?xi32> - %0 = "tosa.tile"(%arg0) <{multiples = array}> : (tensor<2x3x?xi32>) -> (tensor) + // CHECK: tosa.tile %arg0 {multiples = array} : (tensor<2x3x?xi32>) -> tensor<4x3x?xi32> + %0 = tosa.tile %arg0 
{multiples = array} : (tensor<2x3x?xi32>) -> tensor return } @@ -552,8 +552,8 @@ // CHECK-LABEL: @test_transpose_same func.func @test_transpose_same(%arg0 : tensor<4x4x4xi32>, %arg1 : tensor<3xi32>) -> () { - // CHECK: "tosa.transpose"(%arg0, %arg1) : (tensor<4x4x4xi32>, tensor<3xi32>) -> tensor<4x4x4xi32> - %0 = "tosa.transpose"(%arg0, %arg1) : (tensor<4x4x4xi32>, tensor<3xi32>) -> (tensor) + // CHECK: tosa.transpose %arg0, %arg1 : (tensor<4x4x4xi32>, tensor<3xi32>) -> tensor<4x4x4xi32> + %0 = tosa.transpose %arg0, %arg1 : (tensor<4x4x4xi32>, tensor<3xi32>) -> tensor return } @@ -561,8 +561,8 @@ // CHECK-LABEL: @test_transpose_perm_unknown func.func @test_transpose_perm_unknown(%arg0 : tensor<4x4x5xi32>, %arg1 : tensor<3xi32>) -> () { - // CHECK: "tosa.transpose"(%arg0, %arg1) : (tensor<4x4x5xi32>, tensor<3xi32>) -> tensor - %0 = "tosa.transpose"(%arg0, %arg1) : (tensor<4x4x5xi32>, tensor<3xi32>) -> (tensor) + // CHECK: tosa.transpose %arg0, %arg1 : (tensor<4x4x5xi32>, tensor<3xi32>) -> tensor + %0 = tosa.transpose %arg0, %arg1 : (tensor<4x4x5xi32>, tensor<3xi32>) -> tensor return } @@ -571,8 +571,8 @@ // CHECK-LABEL: @test_transpose_static func.func @test_transpose_static(%arg0 : tensor<3x4x5xi32>) -> () { %0 = arith.constant dense<[2, 1, 0]> : tensor<3xi32> - // CHECK: "tosa.transpose"(%arg0, %cst) : (tensor<3x4x5xi32>, tensor<3xi32>) -> tensor<5x4x3xi32> - %1 = "tosa.transpose"(%arg0, %0) : (tensor<3x4x5xi32>, tensor<3xi32>) -> (tensor) + // CHECK: tosa.transpose %arg0, %cst : (tensor<3x4x5xi32>, tensor<3xi32>) -> tensor<5x4x3xi32> + %1 = tosa.transpose %arg0, %0 : (tensor<3x4x5xi32>, tensor<3xi32>) -> tensor return } @@ -580,8 +580,8 @@ // CHECK-LABEL: @gather_static func.func @gather_static(%arg0 : tensor<3x4x5xi32>, %arg1 : tensor<3x6xi32>) { - // CHECK: "tosa.gather"(%arg0, %arg1) : (tensor<3x4x5xi32>, tensor<3x6xi32>) -> tensor<3x6x5xi32> - %0 = "tosa.gather"(%arg0, %arg1) : (tensor<3x4x5xi32>, tensor<3x6xi32>) -> (tensor) + // CHECK: tosa.gather %arg0, 
%arg1 : (tensor<3x4x5xi32>, tensor<3x6xi32>) -> tensor<3x6x5xi32> + %0 = tosa.gather %arg0, %arg1 : (tensor<3x4x5xi32>, tensor<3x6xi32>) -> tensor return } @@ -589,8 +589,8 @@ // CHECK-LABEL: @gather_dynamic_values func.func @gather_dynamic_values(%arg0 : tensor, %arg1 : tensor<3x6xi32>) { - // CHECK: "tosa.gather"(%arg0, %arg1) : (tensor, tensor<3x6xi32>) -> tensor<3x6x?xi32> - %0 = "tosa.gather"(%arg0, %arg1) : (tensor, tensor<3x6xi32>) -> (tensor) + // CHECK: tosa.gather %arg0, %arg1 : (tensor, tensor<3x6xi32>) -> tensor<3x6x?xi32> + %0 = tosa.gather %arg0, %arg1 : (tensor, tensor<3x6xi32>) -> tensor return } @@ -598,8 +598,8 @@ // CHECK-LABEL: @gather_dynamic_indices func.func @gather_dynamic_indices(%arg0 : tensor<3x4x5xi32>, %arg1 : tensor) { - // CHECK: "tosa.gather"(%arg0, %arg1) : (tensor<3x4x5xi32>, tensor) -> tensor<3x?x5xi32> - %0 = "tosa.gather"(%arg0, %arg1) : (tensor<3x4x5xi32>, tensor) -> (tensor) + // CHECK: tosa.gather %arg0, %arg1 : (tensor<3x4x5xi32>, tensor) -> tensor<3x?x5xi32> + %0 = tosa.gather %arg0, %arg1 : (tensor<3x4x5xi32>, tensor) -> tensor return } @@ -607,8 +607,8 @@ // CHECK-LABEL: @gather_minimum_info func.func @gather_minimum_info(%arg0 : tensor<3x?x5xi32>, %arg1 : tensor) { - // CHECK: "tosa.gather"(%arg0, %arg1) : (tensor<3x?x5xi32>, tensor) -> tensor<3x6x5xi32> - %0 = "tosa.gather"(%arg0, %arg1) : (tensor<3x?x5xi32>, tensor) -> (tensor) + // CHECK: tosa.gather %arg0, %arg1 : (tensor<3x?x5xi32>, tensor) -> tensor<3x6x5xi32> + %0 = tosa.gather %arg0, %arg1 : (tensor<3x?x5xi32>, tensor) -> tensor return } @@ -616,8 +616,8 @@ // CHECK-LABEL: @scatter_static func.func @scatter_static(%arg0 : tensor<3x4x5xi32>, %arg1 : tensor<3x6xi32>, %arg2 : tensor<3x6x5xi32>) { - // CHECK: "tosa.scatter"(%arg0, %arg1, %arg2) : (tensor<3x4x5xi32>, tensor<3x6xi32>, tensor<3x6x5xi32>) -> tensor<3x4x5xi32> - %0 = "tosa.scatter"(%arg0, %arg1, %arg2) : (tensor<3x4x5xi32>, tensor<3x6xi32>, tensor<3x6x5xi32>) -> (tensor) + // CHECK: tosa.scatter %arg0, 
%arg1, %arg2 : (tensor<3x4x5xi32>, tensor<3x6xi32>, tensor<3x6x5xi32>) -> tensor<3x4x5xi32> + %0 = tosa.scatter %arg0, %arg1, %arg2 : (tensor<3x4x5xi32>, tensor<3x6xi32>, tensor<3x6x5xi32>) -> tensor return } @@ -625,8 +625,8 @@ // CHECK-LABEL: @scatter_static_values func.func @scatter_static_values(%arg0 : tensor<3x4x5xi32>, %arg1 : tensor, %arg2 : tensor) { - // CHECK: "tosa.scatter"(%arg0, %arg1, %arg2) : (tensor<3x4x5xi32>, tensor, tensor) -> tensor<3x4x5xi32> - %0 = "tosa.scatter"(%arg0, %arg1, %arg2) : (tensor<3x4x5xi32>, tensor, tensor) -> (tensor) + // CHECK: tosa.scatter %arg0, %arg1, %arg2 : (tensor<3x4x5xi32>, tensor, tensor) -> tensor<3x4x5xi32> + %0 = tosa.scatter %arg0, %arg1, %arg2 : (tensor<3x4x5xi32>, tensor, tensor) -> tensor return } @@ -634,8 +634,8 @@ // CHECK-LABEL: @scatter_static_indices func.func @scatter_static_indices(%arg0 : tensor, %arg1 : tensor<3x6xi32>, %arg2 : tensor) { - // CHECK: "tosa.scatter"(%arg0, %arg1, %arg2) : (tensor, tensor<3x6xi32>, tensor) -> tensor<3x?x?xi32> - %0 = "tosa.scatter"(%arg0, %arg1, %arg2) : (tensor, tensor<3x6xi32>, tensor) -> (tensor) + // CHECK: tosa.scatter %arg0, %arg1, %arg2 : (tensor, tensor<3x6xi32>, tensor) -> tensor<3x?x?xi32> + %0 = tosa.scatter %arg0, %arg1, %arg2 : (tensor, tensor<3x6xi32>, tensor) -> tensor return } @@ -643,8 +643,8 @@ // CHECK-LABEL: @scatter_static_input func.func @scatter_static_input(%arg0 : tensor, %arg1 : tensor, %arg2 : tensor<3x6x5xi32>) { - // CHECK: "tosa.scatter"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor<3x6x5xi32>) -> tensor<3x?x5xi32> - %0 = "tosa.scatter"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor<3x6x5xi32>) -> (tensor) + // CHECK: tosa.scatter %arg0, %arg1, %arg2 : (tensor, tensor, tensor<3x6x5xi32>) -> tensor<3x?x5xi32> + %0 = tosa.scatter %arg0, %arg1, %arg2 : (tensor, tensor, tensor<3x6x5xi32>) -> tensor return } @@ -652,8 +652,8 @@ // CHECK-LABEL: @scatter_minimum_static func.func @scatter_minimum_static(%arg0 : tensor, %arg1 : tensor<3x?xi32>, 
%arg2 : tensor) { - // CHECK: "tosa.scatter"(%arg0, %arg1, %arg2) : (tensor, tensor<3x?xi32>, tensor) -> tensor<3x4x5xi32> - %0 = "tosa.scatter"(%arg0, %arg1, %arg2) : (tensor, tensor<3x?xi32>, tensor) -> (tensor) + // CHECK: tosa.scatter %arg0, %arg1, %arg2 : (tensor, tensor<3x?xi32>, tensor) -> tensor<3x4x5xi32> + %0 = tosa.scatter %arg0, %arg1, %arg2 : (tensor, tensor<3x?xi32>, tensor) -> tensor return } @@ -662,10 +662,10 @@ // CHECK-LABEL: @test_pool_static func.func @test_pool_static(%arg0: tensor<3x5x6x7xf32>) { // CHECK: -> tensor<3x2x4x7xf32> - %0 = "tosa.avg_pool2d"(%arg0) {acc_type = f32, kernel = array, pad = array, stride = array} : (tensor<3x5x6x7xf32>) -> tensor + %0 = tosa.avg_pool2d %arg0 {acc_type = f32, kernel = array, pad = array, stride = array} : (tensor<3x5x6x7xf32>) -> tensor // CHECK: -> tensor<3x2x4x7xf32> - %1 = "tosa.max_pool2d"(%arg0) {kernel = array, pad = array, stride = array} : (tensor<3x5x6x7xf32>) -> tensor + %1 = tosa.max_pool2d %arg0 {kernel = array, pad = array, stride = array} : (tensor<3x5x6x7xf32>) -> tensor return } @@ -674,7 +674,7 @@ // CHECK-LABEL: @conv2d_static func.func @conv2d_static(%input: tensor<2x8x9x3xf32>, %weights: tensor<5x3x6x3xf32>, %bias: tensor<5xf32>) -> () { // CHECK: -> tensor<2x6x4x5xf32> - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = array, stride = array, dilation = array} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> (tensor) + %0 = tosa.conv2d %input, %weights, %bias {pad = array, stride = array, dilation = array} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor return } @@ -683,7 +683,7 @@ // CHECK-LABEL: @conv2d_dynamic_input func.func @conv2d_dynamic_input(%input: tensor, %weights: tensor<5x3x6x3xf32>, %bias: tensor<5xf32>) -> () { // CHECK: -> tensor - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = array, stride = array, dilation = array} : (tensor, tensor<5x3x6x3xf32>, tensor<5xf32>) -> (tensor) + %0 = tosa.conv2d %input, %weights, %bias {pad = 
array, stride = array, dilation = array} : (tensor, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor return } @@ -692,10 +692,10 @@ // CHECK-LABEL: @test_pool_dynamic_input func.func @test_pool_dynamic_input(%arg0: tensor) { // CHECK: -> tensor - %0 = "tosa.avg_pool2d"(%arg0) {acc_type = f32, kernel = array, pad = array, stride = array} : (tensor) -> tensor + %0 = tosa.avg_pool2d %arg0 {acc_type = f32, kernel = array, pad = array, stride = array} : (tensor) -> tensor // CHECK: -> tensor - %1 = "tosa.max_pool2d"(%arg0) {kernel = array, pad = array, stride = array} : (tensor) -> tensor + %1 = tosa.max_pool2d %arg0 {kernel = array, pad = array, stride = array} : (tensor) -> tensor return } @@ -704,10 +704,10 @@ // CHECK-LABEL: @test_pool_padded func.func @test_pool_padded(%arg0: tensor<3x5x6x7xf32>) { // CHECK: -> tensor<3x5x11x7xf32> - %0 = "tosa.avg_pool2d"(%arg0) {acc_type = f32, kernel = array, pad = array, stride = array} : (tensor<3x5x6x7xf32>) -> tensor + %0 = tosa.avg_pool2d %arg0 {acc_type = f32, kernel = array, pad = array, stride = array} : (tensor<3x5x6x7xf32>) -> tensor // CHECK: -> tensor<3x5x11x7xf32> - %1 = "tosa.max_pool2d"(%arg0) {kernel = array, pad = array, stride = array} : (tensor<3x5x6x7xf32>) -> tensor + %1 = tosa.max_pool2d %arg0 {kernel = array, pad = array, stride = array} : (tensor<3x5x6x7xf32>) -> tensor return } @@ -716,7 +716,7 @@ // CHECK-LABEL: @conv2d_dynamic_weight func.func @conv2d_dynamic_weight(%input: tensor<2x8x9x3xf32>, %weights: tensor, %bias: tensor<5xf32>) -> () { // CHECK: -> tensor<2x?x?x5xf32> - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = array, stride = array, dilation = array} : (tensor<2x8x9x3xf32>, tensor, tensor<5xf32>) -> (tensor) + %0 = tosa.conv2d %input, %weights, %bias {pad = array, stride = array, dilation = array} : (tensor<2x8x9x3xf32>, tensor, tensor<5xf32>) -> tensor return } @@ -725,7 +725,7 @@ // CHECK-LABEL: @conv2d_dynamic_bias func.func @conv2d_dynamic_bias(%input: tensor<2x8x9x3xf32>, %weights: 
tensor<5x3x6x3xf32>, %bias: tensor) -> () { // CHECK: -> tensor<2x6x4x5xf32> - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = array, stride = array, dilation = array} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor) -> (tensor) + %0 = tosa.conv2d %input, %weights, %bias {pad = array, stride = array, dilation = array} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor) -> tensor return } @@ -734,10 +734,10 @@ // CHECK-LABEL: @test_pool_stride func.func @test_pool_stride(%arg0: tensor<3x11x12x7xf32>) { // CHECK: -> tensor<3x4x4x7xf32> - %0 = "tosa.avg_pool2d"(%arg0) {acc_type = f32, kernel = array, pad = array, stride = array} : (tensor<3x11x12x7xf32>) -> tensor + %0 = tosa.avg_pool2d %arg0 {acc_type = f32, kernel = array, pad = array, stride = array} : (tensor<3x11x12x7xf32>) -> tensor // CHECK: -> tensor<3x4x4x7xf32> - %1 = "tosa.max_pool2d"(%arg0) {kernel = array, pad = array, stride = array} : (tensor<3x11x12x7xf32>) -> tensor + %1 = tosa.max_pool2d %arg0 {kernel = array, pad = array, stride = array} : (tensor<3x11x12x7xf32>) -> tensor return } @@ -746,7 +746,7 @@ // CHECK-LABEL: @conv2d_padded func.func @conv2d_padded(%input: tensor<2x8x9x3xf32>, %weights: tensor<5x3x6x3xf32>, %bias: tensor<5xf32>) -> () { // CHECK: -> tensor<2x9x11x5xf32> - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = array, stride = array, dilation = array} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> (tensor) + %0 = tosa.conv2d %input, %weights, %bias {pad = array, stride = array, dilation = array} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor return } @@ -755,7 +755,7 @@ // CHECK-LABEL: @conv2d_dilated func.func @conv2d_dilated(%input: tensor<2x12x14x3xf32>, %weights: tensor<5x3x6x3xf32>, %bias: tensor<5xf32>) -> () { // CHECK: -> tensor<2x6x4x5xf32> - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = array, stride = array, dilation = array} : (tensor<2x12x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> (tensor) + %0 = tosa.conv2d 
%input, %weights, %bias {pad = array, stride = array, dilation = array} : (tensor<2x12x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor return } @@ -764,7 +764,7 @@ // CHECK-LABEL: @conv2d_strided func.func @conv2d_strided(%input: tensor<1x13x14x1xf32>, %weights: tensor<1x1x1x1xf32>, %bias: tensor<1xf32>) -> () { // CHECK: -> tensor<1x5x7x1xf32> - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = array, stride = array, dilation = array} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> (tensor) + %0 = tosa.conv2d %input, %weights, %bias {pad = array, stride = array, dilation = array} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor return } @@ -773,7 +773,7 @@ // CHECK-LABEL: @conv3d_static func.func @conv3d_static(%input: tensor<2x8x9x10x3xf32>, %weights: tensor<5x3x6x4x3xf32>, %bias: tensor<5xf32>) -> () { // CHECK: -> tensor<2x6x4x7x5xf32> - %0 = "tosa.conv3d"(%input, %weights, %bias) {dilation = array, pad = array, stride = array} : (tensor<2x8x9x10x3xf32>, tensor<5x3x6x4x3xf32>, tensor<5xf32>) -> (tensor) + %0 = tosa.conv3d %input, %weights, %bias {dilation = array, pad = array, stride = array} : (tensor<2x8x9x10x3xf32>, tensor<5x3x6x4x3xf32>, tensor<5xf32>) -> tensor return } @@ -782,7 +782,7 @@ // CHECK-LABEL: @conv3d_dynamic_input func.func @conv3d_dynamic_input(%arg0: tensor, %arg1: tensor<5x3x6x4x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor - %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor, tensor<5x3x6x4x3xf32>, tensor<5xf32>) -> tensor + %0 = tosa.conv3d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor, tensor<5x3x6x4x3xf32>, tensor<5xf32>) -> tensor return } @@ -791,7 +791,7 @@ // CHECK-LABEL: @conv3d_dynamic_weight func.func @conv3d_dynamic_weight(%arg0: tensor<2x8x9x10x3xf32>, %arg1: tensor, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x?x?x?x5xf32> - %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = array, pad = 
array, stride = array} : (tensor<2x8x9x10x3xf32>, tensor, tensor<5xf32>) -> tensor + %0 = tosa.conv3d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<2x8x9x10x3xf32>, tensor, tensor<5xf32>) -> tensor return } @@ -800,7 +800,7 @@ // CHECK-LABEL: @conv3d_dynamic_bias func.func @conv3d_dynamic_bias(%arg0: tensor<2x8x9x10x3xf32>, %arg1: tensor<5x3x6x4x3xf32>, %arg2: tensor) { // CHECK: -> tensor<2x6x4x7x5xf32> - %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor<2x8x9x10x3xf32>, tensor<5x3x6x4x3xf32>, tensor) -> tensor + %0 = tosa.conv3d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<2x8x9x10x3xf32>, tensor<5x3x6x4x3xf32>, tensor) -> tensor return } @@ -809,7 +809,7 @@ // CHECK-LABEL: @conv3d_padded func.func @conv3d_padded(%arg0: tensor<2x8x9x10x3xf32>, %arg1: tensor<5x3x6x4x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x9x11x18x5xf32> - %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor<2x8x9x10x3xf32>, tensor<5x3x6x4x3xf32>, tensor<5xf32>) -> tensor + %0 = tosa.conv3d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<2x8x9x10x3xf32>, tensor<5x3x6x4x3xf32>, tensor<5xf32>) -> tensor return } @@ -818,7 +818,7 @@ // CHECK-LABEL: @conv3d_dilated func.func @conv3d_dilated(%arg0: tensor<2x12x14x16x3xf32>, %arg1: tensor<5x3x6x2x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x6x4x12x5xf32> - %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor<2x12x14x16x3xf32>, tensor<5x3x6x2x3xf32>, tensor<5xf32>) -> tensor + %0 = tosa.conv3d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<2x12x14x16x3xf32>, tensor<5x3x6x2x3xf32>, tensor<5xf32>) -> tensor return } @@ -827,7 +827,7 @@ // CHECK-LABEL: @conv3d_strided func.func @conv3d_strided(%arg0: tensor<1x13x14x15x1xf32>, %arg1: tensor<1x1x1x1x1xf32>, %arg2: tensor<1xf32>) 
{ // CHECK: -> tensor<1x5x7x4x1xf32> - %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor<1x13x14x15x1xf32>, tensor<1x1x1x1x1xf32>, tensor<1xf32>) -> tensor + %0 = tosa.conv3d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<1x13x14x15x1xf32>, tensor<1x1x1x1x1xf32>, tensor<1xf32>) -> tensor return } @@ -836,7 +836,7 @@ // CHECK-LABEL: @depthwise_conv2d_static func.func @depthwise_conv2d_static(%arg0: tensor<2x8x9x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) { // CHECK: -> tensor<2x6x4x15xf32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x6x4x15xf32> + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x6x4x15xf32> return } @@ -845,7 +845,7 @@ // CHECK-LABEL: @depthwise_conv2d_dynamic_input func.func @depthwise_conv2d_dynamic_input(%arg0: tensor, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) { // CHECK: -> tensor - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor return } @@ -854,7 +854,7 @@ // CHECK-LABEL: @depthwise_conv2d_dynamic_weight func.func @depthwise_conv2d_dynamic_weight(%arg0: tensor<2x8x9x3xf32>, %arg1: tensor, %arg2: tensor<15xf32>) { // CHECK: -> tensor<2x?x?x15xf32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor<2x8x9x3xf32>, tensor, tensor<15xf32>) -> tensor<2x?x?x15xf32> + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<2x8x9x3xf32>, tensor, 
tensor<15xf32>) -> tensor<2x?x?x15xf32> return } @@ -863,7 +863,7 @@ // CHECK-LABEL: @depthwise_conv2d_dynamic_bias func.func @depthwise_conv2d_dynamic_bias(%arg0: tensor<2x8x9x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor) { // CHECK: -> tensor<2x6x4x15xf32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor) -> tensor<2x6x4x15xf32> + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor) -> tensor<2x6x4x15xf32> return } @@ -872,7 +872,7 @@ // CHECK-LABEL: @depthwise_conv2d_padded func.func @depthwise_conv2d_padded(%arg0: tensor<2x8x9x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) { // CHECK: -> tensor<2x9x11x15xf32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x9x11x15xf32> + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x9x11x15xf32> return } @@ -881,7 +881,7 @@ // CHECK-LABEL: @depthwise_conv2d_dilated func.func @depthwise_conv2d_dilated(%arg0: tensor<2x12x14x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) { // CHECK: -> tensor<2x6x4x15xf32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor<2x12x14x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x6x4x15xf32> + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<2x12x14x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x6x4x15xf32> return } @@ -890,7 +890,7 @@ // CHECK-LABEL: @depthwise_conv2d_strided func.func @depthwise_conv2d_strided(%arg0: tensor<1x13x14x1xf32>, %arg1: tensor<1x1x1x1xf32>, %arg2: tensor<1xf32>) { // CHECK: 
-> tensor<1x5x7x1xf32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = array, pad = array, stride = array} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x5x7x1xf32> + %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2 {dilation = array, pad = array, stride = array} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x5x7x1xf32> return } @@ -899,7 +899,7 @@ // CHECK-LABEL: @transpose_conv2d_out_shape func.func @transpose_conv2d_out_shape(%arg0: tensor<2x?x?x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x8x9x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, out_shape = array, stride = array} : (tensor<2x?x?x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x8x9x5xf32> + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, out_shape = array, stride = array} : (tensor<2x?x?x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x8x9x5xf32> return } @@ -908,7 +908,7 @@ // CHECK-LABEL: @transpose_conv2d_static func.func @transpose_conv2d_static(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x18x19x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, out_shape = array, stride = array} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32> + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, out_shape = array, stride = array} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32> return } @@ -917,7 +917,7 @@ // CHECK-LABEL: @transpose_conv2d_static_strided func.func @transpose_conv2d_static_strided(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x33x45x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, out_shape = array, stride = array} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> 
tensor<2x?x?x5xf32> + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, out_shape = array, stride = array} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32> return } @@ -926,7 +926,7 @@ // CHECK-LABEL: @transpose_conv2d_dynamic_input func.func @transpose_conv2d_dynamic_input(%arg0: tensor, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, out_shape = array, stride = array} : (tensor, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, out_shape = array, stride = array} : (tensor, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor return } @@ -935,7 +935,7 @@ // CHECK-LABEL: @transpose_conv2d_dynamic_weights func.func @transpose_conv2d_dynamic_weights(%arg0: tensor<2x6x4x3xf32>, %arg1: tensor, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x?x?x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, out_shape = array, stride = array} : (tensor<2x6x4x3xf32>, tensor, tensor<5xf32>) -> tensor<2x?x?x5xf32> + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, out_shape = array, stride = array} : (tensor<2x6x4x3xf32>, tensor, tensor<5xf32>) -> tensor<2x?x?x5xf32> return } @@ -944,7 +944,7 @@ // CHECK-LABEL: @transpose_conv2d_dynamic_bias func.func @transpose_conv2d_dynamic_bias(%arg0: tensor<2x6x4x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor) { // CHECK: -> tensor<2x8x9x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, out_shape = array, stride = array} : (tensor<2x6x4x3xf32>, tensor<5x3x6x3xf32>, tensor) -> tensor<2x8x9x5xf32> + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, out_shape = array, stride = array} : (tensor<2x6x4x3xf32>, tensor<5x3x6x3xf32>, tensor) -> tensor<2x8x9x5xf32> return } @@ -953,14 +953,14 @@ // CHECK-LABEL: @transpose_conv2d_padded func.func @transpose_conv2d_padded(%arg0: 
tensor<2x9x11x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x10x13x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, out_shape = array, stride = array} : (tensor<2x9x11x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x10x13x5xf32> + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, out_shape = array, stride = array} : (tensor<2x9x11x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x10x13x5xf32> return } // CHECK-LABEL: @transpose_conv2d_strided func.func @transpose_conv2d_strided(%arg0: tensor<1x5x7x1xf32>, %arg1: tensor<1x1x1x1xf32>, %arg2: tensor<1xf32>) { // CHECK: -> tensor<1x13x13x1xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array, out_shape = array, stride = array} : (tensor<1x5x7x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x13x13x1xf32> + %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2 {out_pad = array, out_shape = array, stride = array} : (tensor<1x5x7x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x13x13x1xf32> return } @@ -969,7 +969,7 @@ // CHECK-LABEL: @resize_int_horizontal func.func @resize_int_horizontal(%arg0: tensor<1x15x13x1xi8>) { // CHECK: -> tensor<1x23x179x1xi8> - %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = array, offset = array, border = array} : (tensor<1x15x13x1xi8>) -> tensor + %0 = tosa.resize %arg0 {mode = "NEAREST_NEIGHBOR", scale = array, offset = array, border = array} : (tensor<1x15x13x1xi8>) -> tensor return } @@ -978,7 +978,7 @@ // CHECK-LABEL: @resize_int_vertical func.func @resize_int_vertical(%arg0: tensor<1x49x42x1xi16>) { // CHECK: -> tensor<1x112x220x1xi16> - %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = array, offset = array, border = array} : (tensor<1x49x42x1xi16>) -> tensor + %0 = tosa.resize %arg0 {mode = "NEAREST_NEIGHBOR", scale = array, offset = array, border = array} : (tensor<1x49x42x1xi16>) -> tensor return } @@ -987,7 +987,7 @@ // CHECK-LABEL: 
@resize_int_power_of_two_upscale func.func @resize_int_power_of_two_upscale(%arg0: tensor<1x23x19x1xi8>) { // CHECK: -> tensor<1x353x289x1xi32> - %0 = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array, offset = array, border = array} : (tensor<1x23x19x1xi8>) -> tensor + %0 = tosa.resize %arg0 {mode = "BILINEAR", scale = array, offset = array, border = array} : (tensor<1x23x19x1xi8>) -> tensor return } @@ -996,7 +996,7 @@ // CHECK-LABEL: @resize_int_power_of_two_upscale_offsetted func.func @resize_int_power_of_two_upscale_offsetted(%arg0: tensor<1x41x26x1xi16>) { // CHECK: -> tensor<1x328x208x1xi48> - %0 = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array, offset = array, border = array} : (tensor<1x41x26x1xi16>) -> tensor + %0 = tosa.resize %arg0 {mode = "BILINEAR", scale = array, offset = array, border = array} : (tensor<1x41x26x1xi16>) -> tensor return } @@ -1004,7 +1004,7 @@ // CHECK-LABEL: @resize_fp_horizontal func.func @resize_fp_horizontal(%arg0: tensor<1x50x48x1xf32>) { // CHECK: -> tensor<1x106x85x1xf32> - %0 = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array, offset = array, border = array} : (tensor<1x50x48x1xf32>) -> tensor + %0 = tosa.resize %arg0 {mode = "BILINEAR", scale = array, offset = array, border = array} : (tensor<1x50x48x1xf32>) -> tensor return } @@ -1012,7 +1012,7 @@ // CHECK-LABEL: @resize_fp_vertical func.func @resize_fp_vertical(%arg0: tensor<1x50x48x1xf32>) { // CHECK: -> tensor<1x128x13x1xf32> - %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = array, offset = array, border = array} : (tensor<1x50x48x1xf32>) -> tensor + %0 = tosa.resize %arg0 {mode = "NEAREST_NEIGHBOR", scale = array, offset = array, border = array} : (tensor<1x50x48x1xf32>) -> tensor return } @@ -1021,7 +1021,7 @@ // CHECK-LABEL: @resize_fp_power_of_two_upscale func.func @resize_fp_power_of_two_upscale(%arg0: tensor<1x23x23x1xf32>) { // CHECK: -> tensor<1x89x89x1xf32> - %0 = "tosa.resize"(%arg0) {mode = "BILINEAR", scale = array, offset = 
array, border = array} : (tensor<1x23x23x1xf32>) -> tensor + %0 = tosa.resize %arg0 {mode = "BILINEAR", scale = array, offset = array, border = array} : (tensor<1x23x23x1xf32>) -> tensor return } @@ -1030,7 +1030,7 @@ // CHECK-LABEL: @resize_fp_power_of_two_upscale_offsetted func.func @resize_fp_power_of_two_upscale_offsetted(%arg0: tensor<1x50x48x1xf32>) { // CHECK: -> tensor<1x1600x1536x1xf32> - %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = array, offset = array, border = array} : (tensor<1x50x48x1xf32>) -> tensor + %0 = tosa.resize %arg0 {mode = "NEAREST_NEIGHBOR", scale = array, offset = array, border = array} : (tensor<1x50x48x1xf32>) -> tensor return } @@ -1038,16 +1038,16 @@ // CHECK-LABEL: @if_test_simple func.func @if_test_simple(%arg0 : tensor, %arg1 : tensor, %arg2 : tensor) -> () { - %a = "tosa.log"(%arg0) : (tensor) -> tensor<*xf32> - %b = "tosa.log"(%arg1) : (tensor) -> tensor<*xf32> - // CHECK: (tensor, tensor, tensor) -> tensor - %0 = "tosa.cond_if"(%arg2, %a, %b) ({ - ^bb1(%arg3 : tensor<*xf32>, %arg4 : tensor<*xf32>): - "tosa.yield"(%arg3) : (tensor<*xf32>) -> () - }, { - ^bb1(%arg5 : tensor<*xf32>, %arg6 : tensor<*xf32>): - "tosa.yield"(%arg6) : (tensor<*xf32>) -> () - }) : (tensor, tensor<*xf32>, tensor<*xf32>) -> (tensor<*xf32>) + %a = tosa.log %arg0 : (tensor) -> tensor + %b = tosa.log %arg1 : (tensor) -> tensor + + // CHECK: tosa.cond_if + // CHECK: -> (tensor) + %0 = tosa.cond_if %arg2 -> (tensor) { + tosa.yield %a : tensor + } else { + tosa.yield %b : tensor + } return } @@ -1055,14 +1055,13 @@ // CHECK-LABEL: @if_test_dynamic func.func @if_test_dynamic(%arg0 : tensor<2xf32>, %arg1 : tensor<3xf32>, %arg2 : tensor) -> () { - // CHECK: (tensor, tensor<2xf32>, tensor<3xf32>) -> tensor - %0 = "tosa.cond_if"(%arg2, %arg0, %arg1) ({ - ^bb1(%arg3 : tensor<2xf32>, %arg4 : tensor<3xf32>): - "tosa.yield"(%arg3) : (tensor<2xf32>) -> () - }, { - ^bb1(%arg5 : tensor<2xf32>, %arg6 : tensor<3xf32>): - "tosa.yield"(%arg6) : (tensor<3xf32>) 
-> () - }) : (tensor, tensor<2xf32>, tensor<3xf32>) -> (tensor<*xf32>) + // CHECK: tosa.cond_if + // CHECK: -> (tensor) + %0 = tosa.cond_if %arg2 -> (tensor) { + tosa.yield %arg0 : tensor<2xf32> + } else { + tosa.yield %arg1 : tensor<3xf32> + } return } @@ -1070,14 +1069,13 @@ // CHECK-LABEL: @if_test_unranked func.func @if_test_unranked(%arg0 : tensor, %arg1 : tensor<3xf32>, %arg2 : tensor) -> () { - // CHECK: (tensor, tensor, tensor<3xf32>) -> tensor<*xf32> - %0 = "tosa.cond_if"(%arg2, %arg0, %arg1) ({ - ^bb1(%arg3 : tensor, %arg4 : tensor<3xf32>): - "tosa.yield"(%arg3) : (tensor) -> () - }, { - ^bb1(%arg5 : tensor, %arg6 : tensor<3xf32>): - "tosa.yield"(%arg6) : (tensor<3xf32>) -> () - }) : (tensor, tensor, tensor<3xf32>) -> (tensor<*xf32>) + // CHECK: tosa.cond_if + // CHECK: -> (tensor<*xf32>) + %0 = tosa.cond_if %arg2 -> (tensor<*xf32>) { + tosa.yield %arg0 : tensor + } else { + tosa.yield %arg1 : tensor<3xf32> + } return } @@ -1085,16 +1083,15 @@ // CHECK-LABEL: @if_test_propagate func.func @if_test_propagate(%arg0 : tensor, %arg1 : tensor, %arg2 : tensor) -> () { - // CHECK: (tensor, tensor, tensor) -> tensor - %0 = "tosa.cond_if"(%arg2, %arg0, %arg1) ({ - ^bb1(%arg3 : tensor<*xf32>, %arg4 : tensor<*xf32>): - %1 = "tosa.add"(%arg3, %arg4) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> - "tosa.yield"(%1) : (tensor<*xf32>) -> () - }, { - ^bb1(%arg5 : tensor<*xf32>, %arg6 : tensor<*xf32>): - %1 = "tosa.sub"(%arg5, %arg6) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> - "tosa.yield"(%1) : (tensor<*xf32>) -> () - }) : (tensor, tensor, tensor) -> (tensor<*xf32>) + // CHECK: tosa.cond_if + // CHECK: -> (tensor) + %0 = tosa.cond_if %arg2 -> (tensor) { + %1 = tosa.add %arg0, %arg1 : (tensor, tensor) -> tensor + tosa.yield %1 : tensor + } else { + %1 = tosa.sub %arg0, %arg1 : (tensor, tensor) -> tensor + tosa.yield %1 : tensor + } return } @@ -1102,39 +1099,38 @@ // CHECK-LABEL: @while_test func.func @while_test(%arg0 : tensor) -> (tensor<*xi32>) { - // CHECK: 
"tosa.add" + // CHECK: tosa.add // CHECK-SAME: (tensor, tensor) -> tensor - %0 = "tosa.add"(%arg0, %arg0) : (tensor, tensor) -> tensor<*xi32> + %0 = tosa.add %arg0, %arg0 : (tensor, tensor) -> tensor<*xi32> - // CHECK: "tosa.while_loop" - %1 = "tosa.while_loop"(%0) ({ + // CHECK: tosa.while_loop + // CHECK-SAME: (tensor) -> tensor + %1 = tosa.while_loop (%arg1 = %0) : (tensor<*xi32>) -> tensor<*xi32> { + %2 = "tosa.const"() <{value = dense<3> : tensor}> : () -> tensor - // CHECK: ^bb0 - // CHECK-SAME: tensor - ^bb0(%arg2: tensor<*xi32>): - %2 = "tosa.const"() {value = dense<3> : tensor} : () -> tensor - // CHECK: "tosa.greater_equal" + // CHECK: tosa.greater_equal // CHECK-SAME: (tensor, tensor) -> tensor - %3 = "tosa.greater_equal"(%2, %arg2) : (tensor, tensor<*xi32>) -> tensor<*xi1> - // CHECK: "tosa.yield" + %3 = tosa.greater_equal %2, %arg1 : (tensor, tensor<*xi32>) -> tensor<*xi1> + + // CHECK: tosa.yield // CHECK-SAME: tensor - "tosa.yield"(%3) : (tensor<*xi1>) -> () - }, { + tosa.yield %3 : tensor<*xi1> + + } do { + // CHECK: ^bb0 // CHECK-SAME: tensor - ^bb0(%arg2: tensor<*xi32>): - %2 = "tosa.const"() {value = dense<1> : tensor} : () -> tensor + ^bb0(%arg1: tensor<*xi32>): + %2 = "tosa.const"() <{value = dense<1> : tensor}> : () -> tensor - // CHECK: "tosa.add" + // CHECK: tosa.add // CHECK-SAME: (tensor, tensor) -> tensor - %3 = "tosa.add"(%arg2, %2) : (tensor<*xi32>, tensor) -> tensor<*xi32> + %3 = tosa.add %arg1, %2 : (tensor<*xi32>, tensor) -> tensor<*xi32> - // CHECK: "tosa.yield" + // CHECK: tosa.yield // CHECK-SAME: tensor - "tosa.yield"(%3) : (tensor<*xi32>) -> () - - // CHECK: (tensor) -> tensor - }) : (tensor<*xi32>) -> (tensor<*xi32>) + tosa.yield %3 : tensor<*xi32> + } // CHECK: tensor.cast return %1 : tensor<*xi32> @@ -1144,42 +1140,38 @@ // CHECK-LABEL: @while_test func.func @while_test(%arg0 : tensor, %arg1 : tensor<1xi32>) -> () { - // CHECK: "tosa.while_loop" - %1:2 = "tosa.while_loop"(%arg0, %arg1) ({ - - // CHECK: ^bb0 - // CHECK-SAME: 
tensor - // CHECK-SAME: tensor - ^bb0(%arg2: tensor<*xi32>, %arg3: tensor<*xi32>): - %2 = "tosa.const"() {value = dense<3> : tensor} : () -> tensor - - // CHECK: "tosa.greater_equal" + // CHECK: tosa.while_loop + // CHECK-SAME: (tensor, tensor<1xi32>) -> (tensor, tensor) + %0:2 = tosa.while_loop (%arg2 = %arg0, %arg3 = %arg1) : (tensor, tensor<1xi32>) -> (tensor, tensor) { + %1 = "tosa.const"() <{value = dense<3> : tensor}> : () -> tensor + // CHECK: tosa.greater_equal // CHECK-SAME: (tensor, tensor) -> tensor - %3 = "tosa.greater_equal"(%2, %arg2) : (tensor, tensor<*xi32>) -> tensor<*xi1> - "tosa.yield"(%3) : (tensor<*xi1>) -> () - }, { + %2 = tosa.greater_equal %1, %arg2 : (tensor, tensor) -> tensor + + // CHECK: tosa.yield + // CHECK-SAME: tensor + tosa.yield %2 : tensor + } do { // CHECK: ^bb0 // CHECK-SAME: tensor // CHECK-SAME: tensor - ^bb0(%arg2: tensor<*xi32>, %arg3: tensor<*xi32>): - %2 = "tosa.const"() {value = dense<1> : tensor} : () -> tensor + ^bb0(%arg2: tensor, %arg3: tensor): + %1 = "tosa.const"() <{value = dense<1> : tensor}> : () -> tensor - // CHECK: "tosa.add" + // CHECK: tosa.add // CHECK-SAME: (tensor, tensor) -> tensor - %3 = "tosa.add"(%arg2, %2) : (tensor<*xi32>, tensor) -> tensor<*xi32> + %2 = tosa.add %arg2, %1 : (tensor, tensor) -> tensor - // CHECK: "tosa.concat" + // CHECK: tosa.concat // CHECK-SAME: (tensor, tensor) -> tensor - %4 = "tosa.concat"(%arg3, %arg3) { axis = 0 : i64 } : (tensor<*xi32>, tensor<*xi32>) -> (tensor<*xi32>) + %3 = tosa.concat %arg3, %arg3 {axis = 0 : i64} : (tensor, tensor) -> tensor - // CHECK: "tosa.yield" + // CHECK: tosa.yield // CHECK-SAME: tensor // CHECK-SAME: tensor - "tosa.yield"(%3, %4) : (tensor<*xi32>, tensor<*xi32>) -> () - - // CHECK: (tensor, tensor<1xi32>) -> (tensor, tensor) - }) : (tensor, tensor<1xi32>) -> (tensor<*xi32>, tensor<*xi32>) + tosa.yield %2, %3 : tensor, tensor + } return } @@ -1188,7 +1180,7 @@ // CHECK-LABEL: @test_static_rfft2d func.func @test_static_rfft2d(%arg0: 
tensor<5x2x8xf32>) -> () { // CHECK: -> (tensor<5x2x5xf32>, tensor<5x2x5xf32>) - %output_real, %output_imag = "tosa.rfft2d"(%arg0) {} : (tensor<5x2x8xf32>) -> (tensor, tensor) + %output_real, %output_imag = tosa.rfft2d %arg0 : (tensor<5x2x8xf32>) -> (tensor, tensor) return } @@ -1197,7 +1189,7 @@ // CHECK-LABEL: @test_dynamic_batch_rfft2d func.func @test_dynamic_batch_rfft2d(%arg0 : tensor) -> () { // CHECK: -> (tensor, tensor) - %output_real, %output_imag = "tosa.rfft2d"(%arg0) {} : (tensor) -> (tensor, tensor) + %output_real, %output_imag = tosa.rfft2d %arg0 : (tensor) -> (tensor, tensor) return } @@ -1206,7 +1198,7 @@ // CHECK-LABEL: @test_dynamic_width_rfft2d func.func @test_dynamic_width_rfft2d(%arg0 : tensor<5x2x?xf32>) -> () { // CHECK: -> (tensor<5x2x?xf32>, tensor<5x2x?xf32>) - %output_real, %output_imag = "tosa.rfft2d"(%arg0) {} : (tensor<5x2x?xf32>) -> (tensor, tensor) + %output_real, %output_imag = tosa.rfft2d %arg0 : (tensor<5x2x?xf32>) -> (tensor, tensor) return } @@ -1215,7 +1207,7 @@ // CHECK-LABEL: @test_static_fft2d func.func @test_static_fft2d(%arg0: tensor<1x4x8xf32>, %arg1: tensor<1x4x8xf32>) -> (tensor<1x4x8xf32>, tensor<1x4x8xf32>) { // CHECK: -> (tensor<1x4x8xf32>, tensor<1x4x8xf32>) - %output_real, %output_imag = "tosa.fft2d"(%arg0, %arg1) {inverse = false} : (tensor<1x4x8xf32>, tensor<1x4x8xf32>) -> (tensor<1x4x8xf32>, tensor<1x4x8xf32>) + %output_real, %output_imag = tosa.fft2d %arg0, %arg1 {inverse = false} : (tensor<1x4x8xf32>, tensor<1x4x8xf32>) -> (tensor<1x4x8xf32>, tensor<1x4x8xf32>) return %output_real, %output_imag : tensor<1x4x8xf32>, tensor<1x4x8xf32> } @@ -1224,7 +1216,7 @@ // CHECK-LABEL: @test_dynamic_batch_fft2d func.func @test_dynamic_batch_fft2d(%arg0: tensor, %arg1: tensor) -> (tensor, tensor) { // CHECK: -> (tensor, tensor) - %output_real, %output_imag = "tosa.fft2d"(%arg0, %arg1) {inverse = false} : (tensor, tensor) -> (tensor, tensor) + %output_real, %output_imag = tosa.fft2d %arg0, %arg1 {inverse = false} : (tensor, 
tensor) -> (tensor, tensor) return %output_real, %output_imag : tensor, tensor } @@ -1232,8 +1224,8 @@ // CHECK-LABEL: @test_unranked_equal func.func @test_unranked_equal(%arg0 : tensor<*xf32>, %arg1 : tensor) -> () { - // CHECK: "tosa.equal"(%arg0, %arg1) : (tensor<*xf32>, tensor) -> tensor<*xi1> - %0 = "tosa.equal"(%arg0, %arg1) : (tensor<*xf32>, tensor) -> tensor<*xi1> + // CHECK: tosa.equal %arg0, %arg1 : (tensor<*xf32>, tensor) -> tensor<*xi1> + %0 = tosa.equal %arg0, %arg1 : (tensor<*xf32>, tensor) -> tensor<*xi1> return } @@ -1242,8 +1234,8 @@ // CHECK-LABEL: test_non_tosa_consumer_shape func.func @test_non_tosa_consumer_shape(%arg0: tensor<4x4xf32>) -> !shape.shape { - // CHECK: "tosa.log"(%arg0) : (tensor<4x4xf32>) -> tensor<4x4xf32> - %0 = "tosa.log"(%arg0) : (tensor<4x4xf32>) -> tensor<*xf32> + // CHECK: tosa.log %arg0 : (tensor<4x4xf32>) -> tensor<4x4xf32> + %0 = tosa.log %arg0 : (tensor<4x4xf32>) -> tensor<*xf32> %1 = shape.shape_of %0 : tensor<*xf32> -> !shape.shape return %1 : !shape.shape } @@ -1252,8 +1244,8 @@ // CHECK-LABEL: test_non_tosa_consumer_shape2 func.func @test_non_tosa_consumer_shape2(%arg0: tensor<4x4xf32>) -> tensor { - // CHECK: "tosa.log"(%arg0) : (tensor<4x4xf32>) -> tensor<4x4xf32> - %0 = "tosa.log"(%arg0) : (tensor<4x4xf32>) -> tensor<*xf32> + // CHECK: tosa.log %arg0 : (tensor<4x4xf32>) -> tensor<4x4xf32> + %0 = tosa.log %arg0 : (tensor<4x4xf32>) -> tensor<*xf32> %1 = shape.shape_of %0 : tensor<*xf32> -> tensor return %1 : tensor } @@ -1262,8 +1254,8 @@ // CHECK-LABEL: test_non_tosa_consumer_extract func.func @test_non_tosa_consumer_extract(%arg0: tensor<4x4xf32>, %arg1: index) -> f32 { - // CHECK: "tosa.log"(%arg0) : (tensor<4x4xf32>) -> tensor<4x4xf32> - %0 = "tosa.log"(%arg0) : (tensor<4x4xf32>) -> tensor + // CHECK: tosa.log %arg0 : (tensor<4x4xf32>) -> tensor<4x4xf32> + %0 = tosa.log %arg0 : (tensor<4x4xf32>) -> tensor %1 = tensor.extract %0[%arg1, %arg1] : tensor return %1 : f32 } diff --git 
a/mlir/test/Dialect/Tosa/transpose-fold.mlir b/mlir/test/Dialect/Tosa/transpose-fold.mlir --- a/mlir/test/Dialect/Tosa/transpose-fold.mlir +++ b/mlir/test/Dialect/Tosa/transpose-fold.mlir @@ -7,9 +7,9 @@ func.func @test_cancel_transpose_transpose(%arg0: tensor<1x2x3xi32>) -> (tensor<1x2x3xi32>) { %0 = arith.constant dense<[1, 2, 0]> : tensor<3xi32> - %1 = "tosa.transpose"(%arg0, %0) : (tensor<1x2x3xi32>, tensor<3xi32>) -> (tensor<2x3x1xi32>) + %1 = tosa.transpose %arg0, %0 : (tensor<1x2x3xi32>, tensor<3xi32>) -> tensor<2x3x1xi32> %2 = arith.constant dense<[2, 0, 1]> : tensor<3xi32> - %3 = "tosa.transpose"(%1, %2) : (tensor<2x3x1xi32>, tensor<3xi32>) -> tensor<1x2x3xi32> + %3 = tosa.transpose %1, %2 : (tensor<2x3x1xi32>, tensor<3xi32>) -> tensor<1x2x3xi32> return %3 : tensor<1x2x3xi32> } @@ -22,7 +22,7 @@ func.func @test_remove_identity_transpose(%arg0: tensor<1x2x3xi32>) -> (tensor<1x2x3xi32>) { %0 = arith.constant dense<[0, 1, 2]> : tensor<3xi32> - %1 = "tosa.transpose"(%arg0, %0) : (tensor<1x2x3xi32>, tensor<3xi32>) -> (tensor<1x2x3xi32>) + %1 = tosa.transpose %arg0, %0 : (tensor<1x2x3xi32>, tensor<3xi32>) -> tensor<1x2x3xi32> return %1 : tensor<1x2x3xi32> } @@ -31,15 +31,15 @@ // CHECK-LABEL: func.func @test_do_not_cancel_different_transpose( // CHECK-SAME: %[[VAL_0:.*]]: tensor<2x3x4x5xi32>) -> tensor<5x4x3x2xi32> { // CHECK: %[[VAL_1:.*]] = arith.constant dense<[3, 2, 1, 0]> : tensor<4xi32> -// CHECK: %[[VAL_2:.*]] = "tosa.transpose"(%[[VAL_0]], %[[VAL_1]]) : (tensor<2x3x4x5xi32>, tensor<4xi32>) -> tensor<5x4x3x2xi32> +// CHECK: %[[VAL_2:.*]] = tosa.transpose %[[VAL_0]], %[[VAL_1]] : (tensor<2x3x4x5xi32>, tensor<4xi32>) -> tensor<5x4x3x2xi32> // CHECK: return %[[VAL_2]] : tensor<5x4x3x2xi32> // CHECK: } func.func @test_do_not_cancel_different_transpose(%arg0: tensor<2x3x4x5xi32>) -> (tensor<5x4x3x2xi32>) { %0 = arith.constant dense<[1, 2, 0, 3]> : tensor<4xi32> - %1 = "tosa.transpose"(%arg0, %0) : (tensor<2x3x4x5xi32>, tensor<4xi32>) -> (tensor<3x4x2x5xi32>) + 
%1 = tosa.transpose %arg0, %0 : (tensor<2x3x4x5xi32>, tensor<4xi32>) -> tensor<3x4x2x5xi32> %2 = arith.constant dense<[3, 1, 0, 2]> : tensor<4xi32> - %3 = "tosa.transpose"(%1, %2) : (tensor<3x4x2x5xi32>, tensor<4xi32>) -> tensor<5x4x3x2xi32> + %3 = tosa.transpose %1, %2 : (tensor<3x4x2x5xi32>, tensor<4xi32>) -> tensor<5x4x3x2xi32> return %3 : tensor<5x4x3x2xi32> } @@ -48,14 +48,14 @@ // CHECK-LABEL: func.func @test_prefer_compose_transpose( // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x2x3x4xi32>) -> tensor<4x3x2x1xi32> { // CHECK: %[[VAL_1:.*]] = arith.constant dense<[3, 2, 1, 0]> : tensor<4xi32> -// CHECK: %[[VAL_2:.*]] = "tosa.transpose"(%[[VAL_0]], %[[VAL_1]]) : (tensor<1x2x3x4xi32>, tensor<4xi32>) -> tensor<4x3x2x1xi32> +// CHECK: %[[VAL_2:.*]] = tosa.transpose %[[VAL_0]], %[[VAL_1]] : (tensor<1x2x3x4xi32>, tensor<4xi32>) -> tensor<4x3x2x1xi32> // CHECK: return %[[VAL_2]] : tensor<4x3x2x1xi32> // CHECK: } func.func @test_prefer_compose_transpose(%arg0: tensor<1x2x3x4xi32>) -> (tensor<4x3x2x1xi32>) { %0 = arith.constant dense<[1, 2, 0, 3]> : tensor<4xi32> - %1 = "tosa.transpose"(%arg0, %0) : (tensor<1x2x3x4xi32>, tensor<4xi32>) -> (tensor<2x3x1x4xi32>) + %1 = tosa.transpose %arg0, %0 : (tensor<1x2x3x4xi32>, tensor<4xi32>) -> tensor<2x3x1x4xi32> %2 = arith.constant dense<[3, 1, 0, 2]> : tensor<4xi32> - %3 = "tosa.transpose"(%1, %2) : (tensor<2x3x1x4xi32>, tensor<4xi32>) -> tensor<4x3x2x1xi32> + %3 = tosa.transpose %1, %2 : (tensor<2x3x1x4xi32>, tensor<4xi32>) -> tensor<4x3x2x1xi32> return %3 : tensor<4x3x2x1xi32> }