diff --git a/mlir/include/mlir/Dialect/Tosa/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/Tosa/IR/CMakeLists.txt
--- a/mlir/include/mlir/Dialect/Tosa/IR/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/Tosa/IR/CMakeLists.txt
@@ -3,6 +3,6 @@ add_mlir_interface(TosaInterfaces)
 
 set(LLVM_TARGET_DEFINITIONS TosaOps.td)
-mlir_tablegen(TosaAttributes.h.inc -gen-attrdef-decls)
-mlir_tablegen(TosaAttributes.cpp.inc -gen-attrdef-defs)
+mlir_tablegen(TosaAttributes.h.inc -gen-attrdef-decls -attrdefs-dialect=tosa)
+mlir_tablegen(TosaAttributes.cpp.inc -gen-attrdef-defs -attrdefs-dialect=tosa)
 add_public_tablegen_target(MLIRTosaAttributesIncGen)
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
@@ -193,18 +193,19 @@
 // own OptionalAttr quantization_attr parameter to scale the padding values
 // correctly.
 def Tosa_PadOpQuantInfoBuilder : OpBuilder<
-  (ins "Type":$outputType, "Value":$input, "Value":$paddings),
+  (ins "Type":$outputType, "Value":$input, "DenseI32ArrayAttr":$padding),
   [{
     buildPadOpWithQuantInfo($_builder, $_state, outputType,
-                            input, paddings);
+                            input, padding);
   }]>;
 
 def Tosa_ExplicitValuePadOpQuantInfoBuilder : OpBuilder<
-  (ins "Type":$outputType, "Value":$input, "Value":$paddings,
-       "Value":$pad_value),
+  (ins "Type":$outputType, "Value":$input,
+       "DenseI32ArrayAttr":$padding,
+       "DenseIntOrFPElementsAttr":$pad_const),
   [{
     buildExplicitValuePadOpWithQuantInfo($_builder, $_state, outputType,
-                                         input, paddings, pad_value);
+                                         input, padding, pad_const);
   }]>;
 
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
@@ -15,6 +15,7 @@
 #define TOSA_OPS
 
 include "mlir/IR/OpBase.td"
+include "mlir/IR/BuiltinAttributes.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
 include "mlir/Interfaces/InferTypeOpInterface.td"
@@ -1426,8 +1427,8 @@
   let arguments = (ins
     Tosa_RankedTensor:$input1,
-    Tosa_Int32Or64Tensor:$padding,
-    Optional<Tosa_ScalarTensor>:$pad_const,
+    DenseI32ArrayAttr:$padding,
+    OptionalAttr<Builtin_DenseIntOrFPElementsAttr>:$pad_const,
     OptionalAttr<Tosa_PadOpQuantizationAttr>:$quantization_info
   );
diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -67,19 +67,25 @@
                                 PatternRewriter &rewriter) const final {
     auto loc = padOp.getLoc();
     auto input = padOp.getInput1();
-    auto padding = padOp.getPadding();
 
     ShapedType inputTy = input.getType().cast<ShapedType>();
     Type elementTy = inputTy.getElementType();
+    int64_t rank = inputTy.getRank();
+
+    SmallVector<int32_t> padding_array(padOp.getPadding().begin(),
+                                       padOp.getPadding().end());
+    RankedTensorType padding_shape =
+        RankedTensorType::get({rank, 2}, rewriter.getI32Type());
+    DenseIntElementsAttr padding_attr =
+        DenseIntElementsAttr::get(padding_shape, padding_array);
+    Value padding =
+        rewriter.createOrFold<arith::ConstantOp>(loc, padding_attr);
 
     // Setup the default constantAttr.
     Value padConstant;
     if (padOp.getPadConst()) {
-      padConstant = rewriter.createOrFold<tensor::ExtractOp>(
-          loc, padOp.getPadConst(), ValueRange({}));
+      Value pad_const = rewriter.createOrFold<arith::ConstantOp>(
+          loc, padOp.getPadConstAttr());
+      padConstant = rewriter.createOrFold<tensor::ExtractOp>(loc, pad_const,
+                                                             ValueRange({}));
     } else {
       Attribute constantAttr;
       if (elementTy.isa<FloatType>()) {
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
--- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
@@ -341,35 +341,30 @@
       return failure();
 
     auto input = op.getInput1();
-    auto padding = op.getPadding();
-
     ShapedType inputTy = input.getType().cast<ShapedType>();
     Type elementTy = inputTy.getElementType();
+    Attribute constantAttr = rewriter.getZeroAttr(elementTy);
 
-    Attribute constantAttr;
-    if (elementTy.isa<FloatType>()) {
-      constantAttr = rewriter.getFloatAttr(elementTy, 0.0);
-    } else if (elementTy.isa<IntegerType>() && !op.getQuantizationInfo()) {
-      constantAttr = rewriter.getIntegerAttr(elementTy, 0);
-    } else if (elementTy.isa<IntegerType>() && op.getQuantizationInfo()) {
-      auto value = op.getQuantizationInfo()->getInputZp();
-      constantAttr = rewriter.getIntegerAttr(elementTy, value);
-    }
+    if (auto quantizationInfo = op.getQuantizationInfo()) {
+      int64_t iZp = quantizationInfo->getInputZp();
 
-    if (!constantAttr) {
-      return rewriter.notifyMatchFailure(
-          op,
-          "tosa.pad to linalg lowering encountered an unknown element type");
+      if (!validIntegerRange(elementTy.cast<IntegerType>(), iZp))
+        return rewriter.notifyMatchFailure(
+            op, "tosa.pad op quantization has zp outside of input range");
+
+      constantAttr = rewriter.getIntegerAttr(elementTy, iZp);
     }
 
-    auto denseAttr = DenseElementsAttr::get(
-        RankedTensorType::get({}, elementTy), constantAttr);
-    auto constantVal = rewriter.create<tosa::ConstOp>(
-        op.getLoc(), denseAttr.getType(), denseAttr);
+    auto padTy = RankedTensorType::get({}, elementTy);
+    DenseIntOrFPElementsAttr constantVal;
+    if (elementTy.isa<IntegerType>()) {
+      constantVal = DenseIntElementsAttr::get(padTy, constantAttr);
+    } else {
+      constantVal = DenseFPElementsAttr::get(padTy, constantAttr);
+    }
 
-    rewriter.replaceOpWithNewOp<tosa::PadOp>(
-        op, op.getType(), ValueRange{input, padding, constantVal},
-        op->getAttrs());
+    rewriter.replaceOpWithNewOp<tosa::PadOp>(op, op.getType(), input,
+                                             op.getPaddingAttr(), constantVal);
     return success();
   }
 };
@@ -918,15 +913,14 @@
 }
 
 OpFoldResult PadOp::fold(FoldAdaptor adaptor) {
+  DenseI32ArrayAttr padding = adaptor.getPaddingAttr();
   // If the pad is all zeros we can fold this operation away.
-  if (adaptor.getPadding()) {
-    auto densePad = adaptor.getPadding().cast<DenseElementsAttr>();
-    if (densePad.isSplat() && densePad.getSplatValue<APInt>().isZero()) {
-      return getInput1();
+  for (int i = 0; i < padding.size(); i++) {
+    if (padding[i] != 0) {
+      return {};
     }
   }
-
-  return {};
+  return getInput1();
 }
 
 // Fold away cases where a tosa.resize operation returns a copy
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -298,8 +298,22 @@
 /// correctly. No pad_const is interpreted as zero-padding.
 static void buildPadOpWithQuantInfo(OpBuilder &builder, OperationState &result,
                                     Type outputType, Value input,
-                                    Value paddings) {
-  result.addOperands({input, paddings});
+                                    DenseI32ArrayAttr padding) {
+  result.addOperands({input});
+  result.addAttribute("padding", padding);
+
+  Type outputETy = outputType.cast<ShapedType>().getElementType();
+  Attribute zeroAttr = builder.getZeroAttr(outputETy);
+  DenseIntOrFPElementsAttr pad_const;
+  auto padTy = RankedTensorType::get({}, outputETy);
+
+  if (outputETy.isa<IntegerType>()) {
+    pad_const = DenseIntElementsAttr::get(padTy, zeroAttr);
+  } else {
+    pad_const = DenseFPElementsAttr::get(padTy, zeroAttr);
+  }
+  result.addAttribute("pad_const", pad_const);
+
   auto quantAttr = buildPadOpQuantizationAttr(builder, input);
   if (quantAttr)
     result.addAttribute("quantization_info", quantAttr);
@@ -308,12 +322,12 @@
 
 /// This builder is called on TOSA pad operator when an explicit pad_const
 /// value is passed in. It also optionally constructs quantization_attr.
-static void buildExplicitValuePadOpWithQuantInfo(OpBuilder &builder,
-                                                 OperationState &result,
-                                                 Type outputType, Value input,
-                                                 Value paddings,
-                                                 Value padConst) {
-  result.addOperands({input, paddings, padConst});
+static void buildExplicitValuePadOpWithQuantInfo(
+    OpBuilder &builder, OperationState &result, Type outputType, Value input,
+    DenseI32ArrayAttr padding, DenseIntOrFPElementsAttr pad_const) {
+  result.addOperands({input});
+  result.addAttribute("padding", padding);
+  result.addAttribute("pad_const", pad_const);
   auto quantAttr = buildPadOpQuantizationAttr(builder, input);
   if (quantAttr)
     result.addAttribute("quantization_info", quantAttr);
@@ -552,42 +566,18 @@
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ShapeAdaptor inputShape = operands.getShape(0);
-  ShapeAdaptor paddingShape = operands.getShape(1);
+  PadOpAdaptor adaptor(operands, attributes);
   SmallVector<int64_t> outputShape;
-
-  // If both inputs have unknown shape, we cannot determine the shape of the
-  // output.
-  if (!inputShape.hasRank() && !paddingShape.hasRank()) {
-    inferredReturnShapes.push_back(ShapedTypeComponents());
-    return success();
-  }
+  DenseI32ArrayAttr padding = adaptor.getPaddingAttr();
 
-  // If the input rank is unknown we can info the output rank using the padding
-  // shape's first dim.
+  // If the input rank is unknown we can infer the output rank using the
+  // padding size.
   if (!inputShape.hasRank()) {
-    if (paddingShape.isDynamicDim(0)) {
-      inferredReturnShapes.push_back(ShapedTypeComponents());
-      return success();
-    }
-
-    outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamic);
+    outputShape.resize(padding.size() / 2, ShapedType::kDynamic);
     inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
     return success();
   }
 
-  DenseIntElementsAttr paddings;
-  // If the paddings value is not a constant, all dimensions must be dynamic.
-  if (!matchPattern(operands[1], m_Constant(&paddings))) {
-    outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
-    inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
-    return success();
-  }
-
-  SmallVector<int64_t> paddingValues;
-  for (auto val : paddings) {
-    paddingValues.push_back(val.getSExtValue());
-  }
-
   outputShape.reserve(inputShape.getRank());
   for (int i = 0, s = inputShape.getRank(); i < s; i++) {
     if (inputShape.isDynamicDim(i)) {
@@ -595,8 +585,8 @@
       continue;
     }
 
-    outputShape.push_back(inputShape.getDimSize(i) + paddingValues[i * 2] +
-                          paddingValues[i * 2 + 1]);
+    outputShape.push_back(inputShape.getDimSize(i) + padding[i * 2] +
+                          padding[i * 2 + 1]);
   }
 
   inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
@@ -55,9 +55,9 @@
     return failure();
 
   llvm::ArrayRef<int64_t> padAttr = op.getPad();
-  llvm::SmallVector<int64_t> pad(8, 0);
+  llvm::SmallVector<int32_t> pad(8, 0);
   for (const auto &it : llvm::enumerate(padAttr))
-    pad[it.index() + 2] = it.value();
+    pad[it.index() + 2] = static_cast<int32_t>(it.value());
 
   if (llvm::any_of(pad, [](int64_t p) { return p != 0; })) {
     Type inputETy = inputType.getElementType();
@@ -81,16 +81,15 @@
     }
   }
 
-    auto padSizeTy = RankedTensorType::get({4, 2}, rewriter.getI64Type());
-    auto padSize =
-        DenseIntElementsAttr::get(padSizeTy, ArrayRef<int64_t>(pad));
-    Value padSizeVal =
-        rewriter.create<tosa::ConstOp>(op->getLoc(), padSizeTy, padSize);
-
     auto padTy = RankedTensorType::get({}, inputETy);
-    auto padAttr = DenseElementsAttr::get(padTy, zeroAttr);
-    Value padVal =
-        rewriter.create<tosa::ConstOp>(op->getLoc(), padTy, padAttr);
+    DenseIntOrFPElementsAttr padVal;
+    if (inputETy.isa<IntegerType>()) {
+      padVal = DenseIntElementsAttr::get(padTy, zeroAttr);
+    } else {
+      padVal = DenseFPElementsAttr::get(padTy, zeroAttr);
+    }
+
+    DenseI32ArrayAttr padSizeVal = rewriter.getDenseI32ArrayAttr(pad);
 
     inputType = RankedTensorType::get(newShape, inputETy);
     input = rewriter.create<tosa::PadOp>(op->getLoc(), inputType, input,
                                          padSizeVal, padVal);
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
@@ -90,9 +90,9 @@
   }
 
   ArrayRef<int64_t> padAttr = op.getPad();
-  llvm::SmallVector<int64_t> pad(10, 0);
+  llvm::SmallVector<int32_t> pad(10, 0);
   for (const auto &it : llvm::enumerate(padAttr))
-    pad[it.index() + 2] = it.value();
+    pad[it.index() + 2] = static_cast<int32_t>(it.value());
 
   if (llvm::any_of(pad, [](int64_t p) { return p != 0; })) {
     Type inputETy = inputType.getElementType();
@@ -105,16 +105,16 @@
     }
   }
 
-  auto padSizeTy = RankedTensorType::get({5, 2}, rewriter.getI64Type());
-  auto padSize =
-      DenseIntElementsAttr::get(padSizeTy, ArrayRef<int64_t>(pad));
-  Value padSizeVal =
-      rewriter.create<tosa::ConstOp>(op->getLoc(), padSizeTy, padSize);
-
   auto padTy = RankedTensorType::get({}, inputETy);
-  auto padAttr = DenseElementsAttr::get(padTy, zeroAttr);
-  Value padVal =
-      rewriter.create<tosa::ConstOp>(op->getLoc(), padTy, padAttr);
+  DenseIntOrFPElementsAttr padVal;
+  if (inputETy.isa<IntegerType>()) {
+    padVal = DenseIntElementsAttr::get(padTy, zeroAttr);
+  } else {
+    padVal = DenseFPElementsAttr::get(padTy, zeroAttr);
+  }
+
+  DenseI32ArrayAttr padSizeVal =
+      rewriter.getDenseI32ArrayAttr(ArrayRef<int32_t>(pad));
 
   inputType = RankedTensorType::get(newShape, inputETy);
   input = rewriter.create<tosa::PadOp>(op->getLoc(), inputType, input,
                                        padSizeVal, padVal);
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -183,22 +183,27 @@
       weightHeight % stride[0] ? stride[0] - weightHeight % stride[0] : 0;
   weightPadding[5] =
       weightWidth % stride[1] ? stride[1] - weightWidth % stride[1] : 0;
-  DenseElementsAttr weightPaddingAttr = DenseIntElementsAttr::get(
-      RankedTensorType::get({4, 2}, rewriter.getI32Type()), weightPadding);
-  Value weightPaddingVal = createOpAndInfer<tosa::ConstOp>(
-      rewriter, loc, weightPaddingAttr.getType(), weightPaddingAttr);
+
+  Attribute zeroAttr = rewriter.getZeroAttr(inputETy);
+  auto padTy = RankedTensorType::get({}, inputETy);
+  DenseI32ArrayAttr weightPaddingVal =
+      rewriter.getDenseI32ArrayAttr(ArrayRef<int32_t>(weightPadding));
+  DenseIntOrFPElementsAttr pad_const_attr;
 
   if (op.getQuantizationInfo().has_value()) {
     auto quantInfo = op.getQuantizationInfo().value();
+    pad_const_attr = DenseIntElementsAttr::get(padTy, zeroAttr);
+
     weight = createOpAndInfer<tosa::PadOp>(
         rewriter, loc, UnrankedTensorType::get(weightETy), weight,
-        weightPaddingVal, nullptr,
+        weightPaddingVal, pad_const_attr,
        rewriter.getAttr<PadOpQuantizationAttr>(quantInfo.getWeightZp()));
   } else {
-    weight = createOpAndInfer<tosa::PadOp>(rewriter, loc,
-                                           UnrankedTensorType::get(weightETy),
-                                           weight, weightPaddingVal);
+    pad_const_attr = DenseFPElementsAttr::get(padTy, zeroAttr);
+    weight = createOpAndInfer<tosa::PadOp>(
+        rewriter, loc, UnrankedTensorType::get(weightETy), weight,
+        weightPaddingVal, pad_const_attr);
   }
 
   weightTy = weight.getType().cast<ShapedType>();
@@ -246,22 +251,19 @@
   inputPadding[4] += restridedWeightTy.getDimSize(2) - 1;
   inputPadding[5] += restridedWeightTy.getDimSize(2) - 1;
 
-  DenseElementsAttr inputPaddingAttr = DenseIntElementsAttr::get(
-      RankedTensorType::get({4, 2}, rewriter.getI32Type()), inputPadding);
-
-  Value inputPaddingVal = createOpAndInfer<tosa::ConstOp>(
-      rewriter, loc, inputPaddingAttr.getType(), inputPaddingAttr);
+  DenseI32ArrayAttr inputPaddingVal =
+      rewriter.getDenseI32ArrayAttr(inputPadding);
 
   if (op.getQuantizationInfo().has_value()) {
     auto quantInfo = op.getQuantizationInfo().value();
     input = createOpAndInfer<tosa::PadOp>(
        rewriter, loc, UnrankedTensorType::get(inputETy), input,
-        inputPaddingVal, nullptr,
+        inputPaddingVal, pad_const_attr,
        rewriter.getAttr<PadOpQuantizationAttr>(quantInfo.getInputZp()));
   } else {
-    input = createOpAndInfer<tosa::PadOp>(rewriter, loc,
-                                          UnrankedTensorType::get(inputETy),
-                                          input, inputPaddingVal);
+    input = createOpAndInfer<tosa::PadOp>(
+        rewriter, loc, UnrankedTensorType::get(inputETy), input,
+        inputPaddingVal, pad_const_attr);
   }
 
   // We use a zero bias as we need to broadcast the bias.
@@ -358,15 +360,12 @@
   resultPadding[4] = resultPadLeft;
   resultPadding[5] = resultTy.getDimSize(2) - resultPadLeft - sliceSize[2];
 
-  DenseElementsAttr resultPaddingAttr = DenseIntElementsAttr::get(
-      RankedTensorType::get({4, 2}, rewriter.getI32Type()), resultPadding);
-
-  Value resultPaddingVal = createOpAndInfer<tosa::ConstOp>(
-      rewriter, loc, resultPaddingAttr.getType(), resultPaddingAttr);
+  DenseI32ArrayAttr resultPaddingVal =
+      rewriter.getDenseI32ArrayAttr(resultPadding);
 
   auto resultPad = createOpAndInfer<tosa::PadOp>(
       rewriter, loc, UnrankedTensorType::get(resultETy), slice,
-      resultPaddingVal);
+      resultPaddingVal, pad_const_attr);
 
   rewriter.replaceOpWithNewOp<tosa::AddOp>(op, op.getType(), resultPad, bias);
   return success();
diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
--- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
+++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
@@ -25,7 +25,6 @@
 // CHECK-LABEL: @pad_float
 // CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]:
 func.func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
-  %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
   // TODO: Output contains multiple "arith.constant 1 : index".
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
@@ -35,32 +34,29 @@
   // CHECK: tensor.pad %[[ARG0]] low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
   // CHECK:   tensor.yield [[CST]]
   // CHECK: } : tensor<1x2xf32> to tensor<4x9xf32>
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<4x9xf32>)
-  return %1 : tensor<4x9xf32>
+  %0 = "tosa.pad"(%arg0) {padding = array<i32: 1, 2, 3, 4>} : (tensor<1x2xf32>) -> (tensor<4x9xf32>)
+  return %0 : tensor<4x9xf32>
 }
 
 func.func @pad_int(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
-  %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
   // CHECK: [[CST:%.+]] = arith.constant 0 : i32
   // CHECK: tensor.pad
   // CHECK:   tensor.yield [[CST]]
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xi32>, tensor<2x2xi32>) -> (tensor<4x9xi32>)
-  return %1 : tensor<4x9xi32>
+  %0 = "tosa.pad"(%arg0) {padding = array<i32: 1, 2, 3, 4>} : (tensor<1x2xi32>) -> (tensor<4x9xi32>)
+  return %0 : tensor<4x9xi32>
 }
 
 func.func @pad_quant(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
-  %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
   // CHECK: [[CST:%.+]] = arith.constant 42 : i32
   // CHECK: tensor.pad
   // CHECK:   tensor.yield [[CST]]
-  %1 = "tosa.pad"(%arg0, %0) {quantization_info = #tosa.pad_quant<input_zp = 42>} : (tensor<1x2xi32>, tensor<2x2xi32>) -> (tensor<4x9xi32>)
-  return %1 : tensor<4x9xi32>
+  %0 = "tosa.pad"(%arg0) {padding = array<i32: 1, 2, 3, 4>, quantization_info = #tosa.pad_quant<input_zp = 42>} : (tensor<1x2xi32>) -> (tensor<4x9xi32>)
+  return %0 : tensor<4x9xi32>
 }
 
 // -----
 
 func.func @pad_float_explicit(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
-  %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
   // TODO: Output contains multiple "arith.constant 1 : index".
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
@@ -70,15 +66,13 @@
   // CHECK: tensor.pad %[[ARG0]] low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
   // CHECK:   tensor.yield [[CST]]
   // CHECK: } : tensor<1x2xf32> to tensor<4x9xf32>
-  %1 = arith.constant dense<42.0> : tensor<f32>
-  %2 = "tosa.pad"(%arg0, %0, %1) : (tensor<1x2xf32>, tensor<2x2xi32>, tensor<f32>) -> (tensor<4x9xf32>)
-  return %2 : tensor<4x9xf32>
+  %0 = "tosa.pad"(%arg0) {pad_const = dense<42.0> : tensor<f32>, padding = array<i32: 1, 2, 3, 4>} : (tensor<1x2xf32>) -> tensor<4x9xf32>
+  return %0 : tensor<4x9xf32>
 }
 
 // -----
 
 func.func @pad_dyn_input(%arg0 : tensor<?x2xf32>) -> (tensor<?x9xf32>) {
-  %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
   // TODO: Output contains multiple "arith.constant 1 : index".
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
@@ -88,12 +82,11 @@
   // CHECK: tensor.pad %[[ARG0]] low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
   // CHECK:   tensor.yield [[CST]]
   // CHECK: } : tensor<?x2xf32> to tensor<?x9xf32>
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<?x2xf32>, tensor<2x2xi32>) -> (tensor<?x9xf32>)
-  return %1 : tensor<?x9xf32>
+  %0 = "tosa.pad"(%arg0) {padding = array<i32: 1, 2, 3, 4>} : (tensor<?x2xf32>) -> tensor<?x9xf32>
+  return %0 : tensor<?x9xf32>
 }
 
 func.func @pad_dyn_padding(%arg0 : tensor<1x2xf32>) -> (tensor<?x9xf32>) {
-  %0 = arith.constant dense<[[-1, 2], [3, 4]]> : tensor<2x2xi32>
   // TODO: Output contains multiple "arith.constant 1 : index".
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
@@ -103,6 +96,6 @@
   // CHECK: tensor.pad %[[ARG0]] low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
   // CHECK:   tensor.yield [[CST]]
   // CHECK: } : tensor<1x2xf32> to tensor<?x9xf32>
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<?x9xf32>)
-  return %1 : tensor<?x9xf32>
+  %0 = "tosa.pad"(%arg0) {padding = array<i32: -1, 2, 3, 4>} : (tensor<1x2xf32>) -> tensor<?x9xf32>
+  return %0 : tensor<?x9xf32>
 }
diff --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir
--- a/mlir/test/Dialect/Tosa/canonicalize.mlir
+++ b/mlir/test/Dialect/Tosa/canonicalize.mlir
@@ -144,36 +144,29 @@
 // CHECK-LABEL: @pad_noop
 func.func @pad_noop(%arg0: tensor<?x2xf32>) -> tensor<?x2xf32> {
   // CHECK: return %arg0
-  %0 = "tosa.const"() { value = dense<0> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<?x2xf32>, tensor<2x2xi32>) -> tensor<?x2xf32>
-  return %1 : tensor<?x2xf32>
+  %0 = "tosa.pad"(%arg0) {pad_const = dense<0.0> : tensor<f32>, padding = array<i32: 0, 0, 0, 0>} : (tensor<?x2xf32>) -> tensor<?x2xf32>
+  return %0 : tensor<?x2xf32>
 }
 
 // CHECK-LABEL: @pad_determine_val_i32
-func.func @pad_determine_val_i32(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xi32> {
-  // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<0> : tensor<i32>}
-  // CHECK: "tosa.pad"(%arg0, %arg1, %[[ZERO]])
-  %0 = "tosa.const"() { value = dense<[[1, 0], [0, 1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
-  %1 = "tosa.pad"(%arg0, %arg1) : (tensor<?x?xi32>, tensor<2x2xi32>) -> tensor<?x?xi32>
-  return %1 : tensor<?x?xi32>
+func.func @pad_determine_val_i32(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32> {
+  // CHECK: "tosa.pad"(%arg0) {pad_const = dense<11> : tensor<i32>, padding = array<i32: 1, 0, 0, 1>}
+  %0 = "tosa.pad"(%arg0) {pad_const = dense<11> : tensor<i32>, padding = array<i32: 1, 0, 0, 1>} : (tensor<?x?xi32>) -> tensor<?x?xi32>
+  return %0 : tensor<?x?xi32>
 }
 
 // CHECK-LABEL: @pad_determine_val_f32
-func.func @pad_determine_val_f32(%arg0: tensor<?x?xf32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xf32> {
-  // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<0.000000e+00> : tensor<f32>}
-  // CHECK: "tosa.pad"(%arg0, %arg1, %[[ZERO]])
-  %0 = "tosa.const"() { value = dense<[[1, 0], [0, 1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
-  %1 = "tosa.pad"(%arg0, %arg1) : (tensor<?x?xf32>, tensor<2x2xi32>) -> tensor<?x?xf32>
-  return %1 : tensor<?x?xf32>
+func.func @pad_determine_val_f32(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
+  // CHECK: "tosa.pad"(%arg0) {pad_const = dense<1.230000e+00> : tensor<f32>, padding = array<i32: 1, 0, 0, 1>}
+  %0 = "tosa.pad"(%arg0) {pad_const = dense<1.23> : tensor<f32>, padding = array<i32: 1, 0, 0, 1>} : (tensor<?x?xf32>) -> (tensor<?x?xf32>)
+  return %0 : tensor<?x?xf32>
 }
 
 // CHECK-LABEL: @pad_determine_val_quant
 func.func @pad_determine_val_quant(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xi32> {
-  // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<42> : tensor<i32>}
-  // CHECK: "tosa.pad"(%arg0, %arg1, %[[ZERO]])
-  %0 = "tosa.const"() { value = dense<[[1, 0], [0, 1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
-  %1 = "tosa.pad"(%arg0, %arg1) {quantization_info = #tosa.pad_quant<input_zp = 42>} : (tensor<?x?xi32>, tensor<2x2xi32>) -> tensor<?x?xi32>
-  return %1 : tensor<?x?xi32>
+  // CHECK: "tosa.pad"(%arg0) {pad_const = dense<42> : tensor<i32>, padding = array<i32: 1, 0, 0, 1>}
+  %0 = "tosa.pad"(%arg0) {padding = array<i32: 1, 0, 0, 1>, quantization_info = #tosa.pad_quant<input_zp = 42>} : (tensor<?x?xi32>) -> tensor<?x?xi32>
+  return %0 : tensor<?x?xi32>
 }
 
 // CHECK-LABEL: @mul_one_different_shape
diff --git a/mlir/test/Dialect/Tosa/ops.mlir b/mlir/test/Dialect/Tosa/ops.mlir
--- a/mlir/test/Dialect/Tosa/ops.mlir
+++ b/mlir/test/Dialect/Tosa/ops.mlir
@@ -389,17 +389,9 @@
 
 // -----
 // CHECK-LABEL: pad
-func.func @test_pad(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3x2xi32>) -> tensor<13x21x3xf32> {
-  %0 = "tosa.pad"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<13x21x3xf32>
-  return %0 : tensor<13x21x3xf32>
-}
-
-// -----
-// CHECK-LABEL: pad_explicit_value
-func.func @test_pad_explicit_value(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3x2xi32>) -> tensor<13x21x3xf32> {
-  %0 = "tosa.const"() {value = dense<3.14> : tensor<f32>} : () -> tensor<f32>
-  %1 = "tosa.pad"(%arg0, %arg1, %0) : (tensor<13x21x3xf32>, tensor<3x2xi32>, tensor<f32>) -> tensor<13x21x3xf32>
-  return %1 : tensor<13x21x3xf32>
+func.func @test_pad(%arg0: tensor<5x3x5x3xf32>) -> tensor<5x4x6x3xf32> {
+  %0 = "tosa.pad"(%arg0) {pad_const = dense<3.14> : tensor<f32>, padding = array<i32: 0, 0, 0, 1, 0, 1, 0, 0>} : (tensor<5x3x5x3xf32>) -> tensor<5x4x6x3xf32>
+  return %0 : tensor<5x4x6x3xf32>
 }
 
 // -----
diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
--- a/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
@@ -58,9 +58,7 @@
 // CHECK-LABEL: @conv2d_as_fully_connected_padded
 func.func @conv2d_as_fully_connected_padded(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<3x1x1x2xi8>, %arg2: tensor<3xi32>) -> tensor<4x12x12x3xi32> {
-  // CHECK-DAG: %[[PAD_SHAPE:.+]] = "tosa.const"() {value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi64>}
-  // CHECK-DAG: %[[PAD_VAL:.+]] = "tosa.const"() {value = dense<42> : tensor<i8>}
-  // CHECK-DAG: %[[PAD:.+]] = "tosa.pad"(%arg0, %[[PAD_SHAPE]], %[[PAD_VAL]]) : (tensor<4x10x10x2xi8>, tensor<4x2xi64>, tensor<i8>) -> tensor<4x12x12x2xi8>
+  // CHECK-DAG: %[[PAD:.+]] = "tosa.pad"(%arg0) {pad_const = dense<42> : tensor<i8>, padding = array<i32: 0, 0, 1, 1, 1, 1, 0, 0>} : (tensor<4x10x10x2xi8>) -> tensor<4x12x12x2xi8>
   // CHECK-DAG: %[[RESHAPE_INPUT:.+]] = "tosa.reshape"(%[[PAD]]) {new_shape = array<i64: 576, 2>}
   // CHECK-DAG: %[[RESHAPE_FILTER:.+]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 3, 2>}
   // CHECK-DAG: %[[FULLY:.+]] = "tosa.fully_connected"(%[[RESHAPE_INPUT]], %[[RESHAPE_FILTER]], %arg2) {quantization_info = #tosa.conv_quant<input_zp = 42, weight_zp = 24>}
@@ -68,3 +66,16 @@
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = array<i64: 1, 1, 1, 1>, stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>, quantization_info = #tosa.conv_quant<input_zp = 42, weight_zp = 24>} : (tensor<4x10x10x2xi8>, tensor<3x1x1x2xi8>, tensor<3xi32>) -> tensor<4x12x12x3xi32>
   return %0 : tensor<4x12x12x3xi32>
 }
+
+// -----
+
+// CHECK-LABEL: @conv2d_as_fully_connected_padded_fp
+func.func @conv2d_as_fully_connected_padded_fp(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<3x1x1x2xf32>, %arg2: tensor<3xf32>) -> tensor<4x13x13x3xf32> {
+  // CHECK-DAG: %[[PAD:.+]] = "tosa.pad"(%arg0) {pad_const = dense<0.000000e+00> : tensor<f32>, padding = array<i32: 0, 0, 1, 2, 1, 2, 0, 0>} : (tensor<4x10x10x2xf32>) -> tensor<4x13x13x2xf32>
+  // CHECK-DAG: %[[RESHAPE_INPUT:.+]] = "tosa.reshape"(%[[PAD]]) {new_shape = array<i64: 676, 2>}
+  // CHECK-DAG: %[[RESHAPE_FILTER:.+]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 3, 2>}
+  // CHECK-DAG: %[[FULLY:.+]] = "tosa.fully_connected"(%[[RESHAPE_INPUT]], %[[RESHAPE_FILTER]], %arg2)
+  // CHECK: %[[RESHAPE:.+]] = "tosa.reshape"(%[[FULLY]]) {new_shape = array<i64: 4, 13, 13, 3>}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = array<i64: 1, 2, 1, 2>, stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x13x13x3xf32>
+  return %0 : tensor<4x13x13x3xf32>
+}
diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir
--- a/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir
@@ -36,15 +36,14 @@
   return %0 : tensor<4x10x10x6xi32>
 }
 
+
 // -----
 
 // CHECK-LABEL: @depthwise_conv2d_as_mul_padded
 func.func @depthwise_conv2d_as_mul_padded(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1x1x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x12x12x6xf32> {
-  // CHECK: %[[pad:.+]] = "tosa.const"() {value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0], [0, 0]]> : tensor<5x2xi64>}
-  // CHECK: %[[zero:.+]] = "tosa.const"() {value = dense<0.000000e+00> : tensor<f32>}
   // CHECK: %[[reIn:.+]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 4, 10, 10, 2, 1>}
-  // CHECK: %[[padded:.+]] = "tosa.pad"(%[[reIn]], %[[pad]], %[[zero]]) : (tensor<4x10x10x2x1xf32>, tensor<5x2xi64>, tensor<f32>) -> tensor<4x12x12x2x1xf32>
-  // CHECK: %[[mul:.+]] = "tosa.mul"(%3, %arg1) {shift = 0 : i32}
+  // CHECK: %[[padded:.+]] = "tosa.pad"(%[[reIn]]) {pad_const = dense<0.000000e+00> : tensor<f32>, padding = array<i32: 0, 0, 1, 1, 1, 1, 0, 0, 0, 0>} : (tensor<4x10x10x2x1xf32>) -> tensor<4x12x12x2x1xf32>
+  // CHECK: %[[mul:.+]] = "tosa.mul"(%[[padded]], %arg1) {shift = 0 : i32}
   // CHECK: %[[reOut:.+]] = "tosa.reshape"(%[[mul]]) {new_shape = array<i64: 4, 12, 12, 6>}
   // CHECK: %[[add:.+]] = "tosa.add"(%[[reOut]], %arg2)
   %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = array<i64: 1, 1, 1, 1>, stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x12x12x6xf32>
diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
--- a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
@@ -44,9 +44,8 @@
 // CHECK-LABEL: @transpose_conv2d_strided
 func.func @transpose_conv2d_strided(%arg0: tensor<2x17x15x3xf32>, %arg1: tensor<5x3x5x3xf32>, %arg2: tensor<5xf32>) -> tensor<2x?x?x5xf32> {
   // Manipulate the weight matrix to handle striding.
-  // CHECK-DAG: %[[PADV:.+]] = "tosa.const"() {value = dense<{{\[\[}}0, 0], [0, 1], [0, 1], [0, 0]]> : tensor<4x2xi32>}
   // CHECK-DAG: %[[TRANSV:.+]] = "tosa.const"() {value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>}
-  // CHECK-DAG: %[[PADW:.+]] = "tosa.pad"(%arg1, %[[PADV]])
+  // CHECK-DAG: %[[PADW:.+]] = "tosa.pad"(%arg1) {pad_const = dense<0.000000e+00> : tensor<f32>, padding = array<i32: 0, 0, 0, 1, 0, 1, 0, 0>} : (tensor<5x3x5x3xf32>) -> tensor<5x4x6x3xf32>
   // CHECK-DAG: %[[RESW1:.+]] = "tosa.reshape"(%[[PADW]]) {new_shape = array<i64: 5, 2, 2, 2, 3, 3>}
   // CHECK-DAG: %[[TRANS:.+]] = "tosa.transpose"(%[[RESW1]], %[[TRANSV]])
   // CHECK-DAG: %[[RESW2:.+]] = "tosa.reshape"(%[[TRANS]]) {new_shape = array<i64: 30, 2, 2, 3>}
@@ -54,9 +53,8 @@
   // CHECK-DAG: %[[NEWWEIGHT:.+]] = "tosa.reverse"(%[[REV1]]) {axis = 2 : i64}
 
   // Pad out the input matrix to handle the transpose conv.
-  // CHECK-DAG: %[[PAD:.+]] = "tosa.const"() {value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi32>}
   // CHECK-DAG: %[[TRANS2:.+]] = "tosa.const"() {value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>}
-  // CHECK-DAG: %[[NEWINPUT:.+]] = "tosa.pad"(%arg0, %[[PAD]])
+  // CHECK-DAG: %[[NEWINPUT:.+]] = "tosa.pad"(%arg0) {pad_const = dense<0.000000e+00> : tensor<f32>, padding = array<i32: 0, 0, 1, 1, 1, 1, 0, 0>} : (tensor<2x17x15x3xf32>) -> tensor<2x19x17x3xf32>
 
   // Manipulate the final shape.
   // CHECK-DAG: %[[BIAS:.+]] = "tosa.const"() {value = dense<0.000000e+00> : tensor<30xf32>}
@@ -76,9 +74,8 @@
 // CHECK-LABEL: @transpose_conv2d_strided_quantized
 func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1: tensor<5x3x5x3xi8>, %arg2: tensor<5xi32>) -> (tensor<2x35x47x5xi32>) {
   // Manipulate the weight matrix to handle striding.
-  // CHECK-DAG: %[[PADV:.+]] = "tosa.const"() {value = dense<{{\[\[}}0, 0], [0, 1], [0, 1], [0, 0]]> : tensor<4x2xi32>}
   // CHECK-DAG: %[[TRANSV:.+]] = "tosa.const"() {value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>}
-  // CHECK-DAG: %[[PADW:.+]] = "tosa.pad"(%arg1, %[[PADV]]) {quantization_info = #tosa.pad_quant<input_zp = 42>}
+  // CHECK-DAG: %[[PADW:.+]] = "tosa.pad"(%arg1) {pad_const = dense<0> : tensor<i8>, padding = array<i32: 0, 0, 0, 1, 0, 1, 0, 0>, quantization_info = #tosa.pad_quant<input_zp = 42>} : (tensor<5x3x5x3xi8>) -> tensor<5x4x6x3xi8>
   // CHECK-DAG: %[[RESW1:.+]] = "tosa.reshape"(%[[PADW]]) {new_shape = array<i64: 5, 2, 2, 2, 3, 3>}
   // CHECK-DAG: %[[TRANS:.+]] = "tosa.transpose"(%[[RESW1]], %[[TRANSV]])
   // CHECK-DAG: %[[RESW2:.+]] = "tosa.reshape"(%[[TRANS]]) {new_shape = array<i64: 30, 2, 2, 3>}
@@ -86,9 +83,8 @@
   // CHECK-DAG: %[[NEWWEIGHT:.+]] = "tosa.reverse"(%[[REV1]]) {axis = 2 : i64}
 
   // Pad out the input matrix to handle the transpose conv.
-  // CHECK-DAG: %[[PAD:.+]] = "tosa.const"() {value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi32>}
   // CHECK-DAG: %[[TRANS2:.+]] = "tosa.const"() {value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>}
-  // CHECK-DAG: %[[NEWINPUT:.+]] = "tosa.pad"(%arg0, %[[PAD]]) {quantization_info = #tosa.pad_quant<input_zp = -22>}
+  // CHECK-DAG: %[[NEWINPUT:.+]] = "tosa.pad"(%arg0) {pad_const = dense<0> : tensor<i8>, padding = array<i32: 0, 0, 1, 1, 1, 1, 0, 0>, quantization_info = #tosa.pad_quant<input_zp = -22>} : (tensor<2x17x15x3xi8>) -> tensor<2x19x17x3xi8>
 
   // Manipulate the final shape.
   // CHECK-DAG: %[[BIAS:.+]] = "tosa.const"() {value = dense<0> : tensor<30xi32>}
@@ -106,27 +102,21 @@
 // CHECK-LABEL: @transpose_conv2d_strided_overpad
 func.func @transpose_conv2d_strided_overpad(%arg0 : tensor<1x16x1x1xi8>, %arg1 : tensor<1x2x1x1xi8>, %arg2 : tensor<1xi32>) -> (tensor<1x19x2x1xi32>) {
-  // CHECK: %[[WEIGHT_PAD:.+]] = "tosa.const"()
-  // CHECK-SAME{literal}: value = dense<[[0, 0], [0, 0], [0, 1], [0, 0]]> : tensor<4x2xi32>
   // CHECK: %[[WEIGHT_PERMS:.+]] = "tosa.const"() {value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>} : () -> tensor<6xi32>
-  // CHECK: %[[INPUT_PAD:.+]] = "tosa.const"()
-  // CHECK-SAME{literal}: value = dense<[[0, 0], [1, 1], [0, 0], [0, 0]]> : tensor<4x2xi32>}
   // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<0> : tensor<2xi32>} : () -> tensor<2xi32>
   // CHECK: %[[RESULT_PERMS:.+]] = "tosa.const"() {value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>}
-  // CHECK: %[[RESULT_PAD:.+]] = "tosa.const"()
-  // CHECK-SAME{literal}: value = dense<[[0, 0], [2, 0], [0, 0], [0, 0]]> : tensor<4x2xi32>}
-  // CHECK: %[[PAD_WEIGHT:.+]] = "tosa.pad"(%arg1, %[[WEIGHT_PAD]]) {quantization_info = #tosa.pad_quant<input_zp = 93>}
+  // CHECK: %[[PAD_WEIGHT:.+]] = "tosa.pad"(%arg1) {pad_const = dense<0> : tensor<i8>, padding = array<i32: 0, 0, 0, 0, 0, 1, 0, 0>, quantization_info = #tosa.pad_quant<input_zp = 93>} : (tensor<1x2x1x1xi8>) -> tensor<1x2x2x1xi8>
   // CHECK: %[[RESHAPE_WEIGHT_0:.+]] = "tosa.reshape"(%[[PAD_WEIGHT]]) {new_shape = array<i64: 1, 2, 1, 1, 2, 1>}
   // CHECK: %[[TRANSPOSE_WEIGHT:.+]] = "tosa.transpose"(%[[RESHAPE_WEIGHT_0]], %[[WEIGHT_PERMS]])
   // CHECK: %[[RESHAPE_WEIGHT_1:.+]] = "tosa.reshape"(%[[TRANSPOSE_WEIGHT]]) {new_shape = array<i64: 2, 2, 1, 1>}
   // CHECK: %[[REVERSE:.+]] = "tosa.reverse"(%[[RESHAPE_WEIGHT_1]]) {axis = 1 : i64}
-  // CHECK: %[[PAD_INPUT:.+]] = "tosa.pad"(%arg0, %[[INPUT_PAD]]) {quantization_info = #tosa.pad_quant<input_zp = -103>}
+  // CHECK: %[[PAD_INPUT:.+]] = "tosa.pad"(%arg0) {pad_const = dense<0> : tensor<i8>, padding = array<i32: 0, 0, 1, 1, 0, 0, 0, 0>, quantization_info = #tosa.pad_quant<input_zp = -103>} : (tensor<1x16x1x1xi8>) -> tensor<1x18x1x1xi8>
   // CHECK: %[[CONV:.+]] = "tosa.conv2d"(%[[PAD_INPUT]], %[[REVERSE]], %[[ZERO]])
   // CHECK-SAME{literal}: dilation = [1, 1], pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant<input_zp = -103, weight_zp = 93>, stride = [1, 1]}
   // CHECK: %[[RESHAPE_RESULT_0:.+]] = "tosa.reshape"(%[[CONV]]) {new_shape = array<i64: 1, 17, 1, 1, 2, 1>}
   // CHECK: %[[TRANSPOSE_RESULT:.+]] = "tosa.transpose"(%[[RESHAPE_RESULT_0]], %[[RESULT_PERMS]])
   // CHECK: %[[RESHAPE_RESULT_1:.+]] = "tosa.reshape"(%[[TRANSPOSE_RESULT]]) {new_shape = array<i64: 1, 17, 2, 1>}
-  // CHECK: %[[PAD_RESULT:.+]] = "tosa.pad"(%[[RESHAPE_RESULT_1]], %[[RESULT_PAD]])
+  // CHECK: %[[PAD_RESULT:.+]] = "tosa.pad"(%[[RESHAPE_RESULT_1]]) {pad_const = dense<0> : tensor<i8>, padding = array<i32: 0, 0, 2, 0, 0, 0, 0, 0>} : (tensor<1x17x2x1xi32>) -> tensor<1x19x2x1xi32>
   // CHECK: %[[ADD:.+]] = "tosa.add"(%[[PAD_RESULT]], %arg2)
   %2 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {
     out_pad = array<i64: 2, 0, 0, 1>,
diff --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
--- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
@@ -502,9 +502,9 @@
 // -----
 
 // CHECK-LABEL: @test_padding_no_const
-func.func @test_padding_no_const(%arg0 : tensor<1x2xf32>, %arg1 : tensor<2x2xi32>) -> () {
-  // CHECK: "tosa.pad"(%arg0, %arg1) : (tensor<1x2xf32>, tensor<2x2xi32>) -> tensor<?x?xf32>
-  %0 = "tosa.pad"(%arg0, %arg1) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<?x?xf32>)
+func.func @test_padding_no_const(%arg0 : tensor<1x2xf32>) -> () {
+  // CHECK: "tosa.pad"(%arg0) {padding = array<i32: 0, 0, 0, 1>} : (tensor<1x2xf32>) -> tensor<1x3xf32>
+  %0 = "tosa.pad"(%arg0) {padding = array<i32: 0, 0, 0, 1>} : (tensor<1x2xf32>) -> (tensor<?x?xf32>)
   return
 }
 
@@ -512,9 +512,8 @@
 
 // CHECK-LABEL: @test_padding_dynamic_input
 func.func @test_padding_dynamic_input(%arg0 : tensor<1x?xf32>) -> () {
-  %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
-  // CHECK: "tosa.pad"(%arg0, %cst) : (tensor<1x?xf32>, tensor<2x2xi32>) -> tensor<4x?xf32>
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x?xf32>, tensor<2x2xi32>) -> (tensor<?x?xf32>)
+  // CHECK: "tosa.pad"(%arg0) {pad_const = dense<1.000000e+00> : tensor<f32>, padding = array<i32: 1, 0, 0, 0>} : (tensor<1x?xf32>) -> tensor<2x?xf32>
+  %0 = "tosa.pad"(%arg0) {pad_const = dense<1.0> : tensor<f32>, padding = array<i32: 1, 0, 0, 0>} : (tensor<1x?xf32>) -> (tensor<?x?xf32>)
   return
 }
 
@@ -522,9 +521,8 @@
 
 // CHECK-LABEL: @test_padding_simple
 func.func @test_padding_simple(%arg0 : tensor<1x2xf32>) -> () {
-  %0 = arith.constant dense<[[1, 2], [3, 4]]> : tensor<2x2xi32>
-  // CHECK: "tosa.pad"(%arg0, %cst) : (tensor<1x2xf32>, tensor<2x2xi32>) -> tensor<4x9xf32>
-  %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<?x?xf32>)
+  // CHECK: "tosa.pad"(%arg0) {pad_const = dense<0> : tensor<i32>, padding = array<i32: 1, 0, 0, 2>} : (tensor<1x2xf32>) -> tensor<2x4xf32>
+  %0 = "tosa.pad"(%arg0) {pad_const = dense<0> : tensor<i32>, padding = array<i32: 1, 0, 0, 2>} : (tensor<1x2xf32>) -> tensor<?x?xf32>
   return
 }
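
Note for reviewers: a minimal before/after sketch of the op form this patch produces. The function name, shapes, and values below are illustrative only and are not taken from the patch or its tests:

  // Before this change: padding and pad_const were tensor operands.
  %padding = "tosa.const"() {value = dense<[[0, 1], [1, 0]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
  %pad_const = "tosa.const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
  %0 = "tosa.pad"(%arg0, %padding, %pad_const) : (tensor<2x2xf32>, tensor<2x2xi32>, tensor<f32>) -> tensor<3x3xf32>

  // After this change: both are attributes. The i32 padding array is the
  // row-major flattening of the old [rank, 2] padding tensor, i.e.
  // [dim0_low, dim0_high, dim1_low, dim1_high, ...].
  %0 = "tosa.pad"(%arg0) {pad_const = dense<1.0> : tensor<f32>, padding = array<i32: 0, 1, 1, 0>} : (tensor<2x2xf32>) -> tensor<3x3xf32>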