diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
@@ -115,7 +115,8 @@
 // bitwidth of the output given the bit width of the input & weight content.
 def Tosa_ConvOpQuantInfoBuilder : OpBuilder<
   (ins "Type":$outputType, "Value":$input, "Value":$weight, "Value":$bias,
-       "ArrayAttr":$pad, "ArrayAttr":$stride, "ArrayAttr":$dilation),
+       "DimensionListAttr":$pad, "DimensionListAttr":$stride,
+       "DimensionListAttr":$dilation),
   [{
     buildConvOpWithQuantInfo($_builder, $_state, outputType,
                              input, weight, bias,
@@ -125,8 +126,8 @@
 // Handles tosa.transpose_conv2d which has an outpad and output shape attribute.
 def Tosa_TransConvOpQuantInfoBuilder : OpBuilder<
   (ins "Type":$outputType, "Value":$input, "Value":$weight, "Value":$bias,
-       "ArrayAttr":$outpad, "ArrayAttr":$stride, "ArrayAttr":$dilation,
-       "ArrayAttr":$outputShape),
+       "DimensionListAttr":$outpad, "DimensionListAttr":$stride,
+       "DimensionListAttr":$dilation, "DimensionListAttr":$outputShape),
   [{
     buildTransConvOpWithQuantInfo($_builder, $_state, outputType,
                                   input, weight, bias,
@@ -158,8 +159,8 @@
 // UnaryOpQuantizationAttr but the avg_pool operator has its own builder as it
 // has additional parameters not part of the unary ops.
 def Tosa_AvgPool2dOpQuantInfoBuilder : OpBuilder<
-  (ins "Type":$outputType, "Value":$input, "ArrayAttr":$kernel,
-       "ArrayAttr":$stride, "ArrayAttr":$pad),
+  (ins "Type":$outputType, "Value":$input, "DimensionListAttr":$kernel,
+       "DimensionListAttr":$stride, "DimensionListAttr":$pad),
   [{
     buildAvgPool2dOpWithQuantInfo($_builder, $_state, outputType,
                                   input, kernel, stride, pad);
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
@@ -14,6 +14,8 @@
 #define TOSA_TYPES_BASE
 
 include "mlir/IR/OpBase.td"
+include "mlir/IR/BuiltinAttributes.td"
+
 //===----------------------------------------------------------------------===//
 // Tosa Type Definitions.
@@ -162,9 +164,6 @@
 //===----------------------------------------------------------------------===//
 // Attribute predicates and classes.
 //===----------------------------------------------------------------------===//
-class ArrayMaxCt<int n> : AttrConstraint<
-    CPred<"$_self.cast<::mlir::ArrayAttr>().size() <= " # n>,
-    "with at least " # n # " elements">;
 
 def Tosa_Fp32ArrayAttr2 : Confined<F32ArrayAttr, [ArrayCount<2>]>;
 def Tosa_Fp32ArrayAttr3 : Confined<F32ArrayAttr, [ArrayCount<3>]>;
@@ -172,15 +171,15 @@
 def Tosa_Fp32ArrayAttr5 : Confined<F32ArrayAttr, [ArrayCount<5>]>;
 def Tosa_Fp32ArrayAttr6 : Confined<F32ArrayAttr, [ArrayCount<6>]>;
 
-def Tosa_IntArrayAttr2 : Confined<I64ArrayAttr, [ArrayCount<2>]>;
-def Tosa_IntArrayAttr3 : Confined<I64ArrayAttr, [ArrayCount<3>]>;
-def Tosa_IntArrayAttr4 : Confined<I64ArrayAttr, [ArrayCount<4>]>;
-def Tosa_IntArrayAttr5 : Confined<I64ArrayAttr, [ArrayCount<5>]>;
-def Tosa_IntArrayAttr6 : Confined<I64ArrayAttr, [ArrayCount<6>]>;
+def Tosa_IntArrayAttr2 : DimensionListAttrOfSize<2>;
+def Tosa_IntArrayAttr3 : DimensionListAttrOfSize<3>;
+def Tosa_IntArrayAttr4 : DimensionListAttrOfSize<4>;
+def Tosa_IntArrayAttr5 : DimensionListAttrOfSize<5>;
+def Tosa_IntArrayAttr6 : DimensionListAttrOfSize<6>;
 
-def Tosa_IntArrayAttrUpto2 : Confined<I64ArrayAttr, [ArrayMaxCt<2>]>;
-def Tosa_IntArrayAttrUpto4 : Confined<I64ArrayAttr, [ArrayMaxCt<4>]>;
-def Tosa_IntArrayAttrUpto5 : Confined<I64ArrayAttr, [ArrayMaxCt<5>]>;
+def Tosa_IntArrayAttrUpto2 : DimensionListAttrOfMaxSize<2>;
+def Tosa_IntArrayAttrUpto4 : DimensionListAttrOfMaxSize<4>;
+def Tosa_IntArrayAttrUpto5 : DimensionListAttrOfMaxSize<5>;
 
 //===----------------------------------------------------------------------===//
 // Iterable attributes.
diff --git a/mlir/include/mlir/IR/AttributeSupport.h b/mlir/include/mlir/IR/AttributeSupport.h
--- a/mlir/include/mlir/IR/AttributeSupport.h
+++ b/mlir/include/mlir/IR/AttributeSupport.h
@@ -254,17 +254,17 @@
 namespace AttributeTrait {
 
 /// This trait provides an "ArrayRef-like" behavior by defining all the methods
-/// from ArrayRef. This expects the attribute to define an `arr()` accessor for
-/// the underlying ArrayRef.
+/// from ArrayRef. This expects the attribute to define a `getArray()` accessor
+/// for the underlying ArrayRef.
 template <typename T> struct ArrayRefAttr {
   template <typename ConcreteType> class Impl {
-    ConcreteType *crtp() { return static_cast<ConcreteType *>(this)->arr(); }
+    ArrayRef<T> crtp() const {
+      return static_cast<const ConcreteType *>(this)->getArray();
+    }
 
   public:
-    // using T = typename
-    //     decltype(std::declval<ConcreteType>().arr())::value_type;
     using value_type = T;
     using pointer = value_type *;
     using const_pointer = const value_type *;
@@ -279,41 +279,41 @@
 
     operator ArrayRef<T>() { return crtp(); }
 
-    iterator begin() const { return crtp()->begin(); }
-    iterator end() const { return crtp()->end(); }
-    reverse_iterator rbegin() const { return crtp()->rbegin(); }
-    reverse_iterator rend() const { return crtp()->rend(); }
-    bool empty() const { return crtp()->empty(); }
-    const T *data() const { return crtp()->data(); }
-    size_t size() const { return crtp()->size(); }
-    const T &front() const { return crtp()->front(); }
-    const T &back() const { return crtp()->back(); }
+    iterator begin() const { return crtp().begin(); }
+    iterator end() const { return crtp().end(); }
+    reverse_iterator rbegin() const { return crtp().rbegin(); }
+    reverse_iterator rend() const { return crtp().rend(); }
+    bool empty() const { return crtp().empty(); }
+    const T *data() const { return crtp().data(); }
+    size_t size() const { return crtp().size(); }
+    const T &front() const { return crtp().front(); }
+    const T &back() const { return crtp().back(); }
 
     template <typename Allocator> ArrayRef<T> copy(Allocator &A) {
-      return crtp()->copy(A);
+      return crtp().copy(A);
     }
 
-    bool equals(ArrayRefAttr RHS) const { return crtp()->equals(RHS->crtp()); }
-    ArrayRef<T> slice(size_t N, size_t M) const { return crtp()->slice(N, M); }
-    ArrayRef<T> slice(size_t N) const { return crtp()->slice(N); }
-    ArrayRef<T> drop_front(size_t N = 1) const { return crtp()->drop_front(N); }
-    ArrayRef<T> drop_back(size_t N = 1) const { return crtp()->drop_back(N); }
+    bool equals(ArrayRefAttr RHS) const { return crtp().equals(RHS->crtp()); }
+    ArrayRef<T> slice(size_t N, size_t M) const { return crtp().slice(N, M); }
+    ArrayRef<T> slice(size_t N) const { return crtp().slice(N); }
+    ArrayRef<T> drop_front(size_t N = 1) const { return crtp().drop_front(N); }
+    ArrayRef<T> drop_back(size_t N = 1) const { return crtp().drop_back(N); }
     template <typename PredicateT> ArrayRef<T> drop_while(PredicateT Pred) const {
-      return crtp()->drop_while(Pred);
+      return crtp().drop_while(Pred);
    }
     template <typename PredicateT> ArrayRef<T> drop_until(PredicateT Pred) const {
-      return crtp()->drop_until(Pred);
+      return crtp().drop_until(Pred);
    }
-    ArrayRef<T> take_front(size_t N = 1) const { return crtp()->take_front(N); }
-    ArrayRef<T> take_back(size_t N = 1) const { return crtp()->take_back(N); }
+    ArrayRef<T> take_front(size_t N = 1) const { return crtp().take_front(N); }
+    ArrayRef<T> take_back(size_t N = 1) const { return crtp().take_back(N); }
     template <typename PredicateT> ArrayRef<T> take_while(PredicateT Pred) const {
-      return crtp()->take_while(Pred);
+      return crtp().take_while(Pred);
    }
     template <typename PredicateT> ArrayRef<T> take_until(PredicateT Pred) const {
-      return crtp()->take_until(Pred);
+      return crtp().take_until(Pred);
    }
 
     const T &operator[](size_t Index) const { return (*crtp())[Index]; }
   };
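A minimal sketch of how an attribute would opt into the trait above, assuming only the `getArray()` convention it documents; the attribute name and its `DimListAttrStorage` are hypothetical, not part of this patch:

  // Hypothetical attribute exposing its storage through getArray() so that
  // AttributeTrait::ArrayRefAttr<int64_t>::Impl can forward the ArrayRef API.
  class DimListAttr
      : public Attribute::AttrBase<DimListAttr, Attribute, DimListAttrStorage,
                                   AttributeTrait::ArrayRefAttr<int64_t>::Impl> {
  public:
    using Base::Base;
    // The accessor the trait's crtp() calls into.
    ArrayRef<int64_t> getArray() const { return getImpl()->dims; }
  };

  // The trait then makes the attribute usable as a plain range:
  int64_t sumDims(DimListAttr attr) {
    int64_t sum = 0;
    for (int64_t d : attr) // begin()/end() come from ArrayRefAttr.
      sum += d;
    return sum;
  }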
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -1466,9 +1466,8 @@
       yOffset = rewriter.create<arith::ConstantOp>(loc, op.offset_fp()[0]);
       xOffset = rewriter.create<arith::ConstantOp>(loc, op.offset_fp()[1]);
     } else {
-      SmallVector<int32_t> stride, offset;
-      getValuesFromIntArrayAttribute(op.stride(), stride);
-      getValuesFromIntArrayAttribute(op.offset(), offset);
+      ArrayRef<int64_t> stride = op.stride();
+      ArrayRef<int64_t> offset = op.offset();
 
       yStride = rewriter.create<arith::ConstantOp>(
           loc, rewriter.getI32IntegerAttr(stride[0]));
@@ -1649,28 +1648,27 @@
         dy = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, dy);
       }
 
-        auto unitVal = rewriter.create<arith::ConstantOp>(
-            loc, rewriter.getIntegerAttr(resultElementTy, 1 << shift));
-        Value rightPart = dx;
-        Value leftPart = rewriter.create<arith::SubIOp>(loc, unitVal, dx);
+      auto unitVal = rewriter.create<arith::ConstantOp>(
+          loc, rewriter.getIntegerAttr(resultElementTy, 1 << shift));
+      Value rightPart = dx;
+      Value leftPart = rewriter.create<arith::SubIOp>(loc, unitVal, dx);
 
-        y0x0 = rewriter.create<arith::MulIOp>(loc, y0x0, leftPart);
-        y0x1 = rewriter.create<arith::MulIOp>(loc, y0x1, rightPart);
-        Value topAcc = rewriter.create<arith::AddIOp>(loc, y0x0, y0x1);
+      y0x0 = rewriter.create<arith::MulIOp>(loc, y0x0, leftPart);
+      y0x1 = rewriter.create<arith::MulIOp>(loc, y0x1, rightPart);
+      Value topAcc = rewriter.create<arith::AddIOp>(loc, y0x0, y0x1);
 
-        y1x0 = rewriter.create<arith::MulIOp>(loc, y1x0, leftPart);
-        y1x1 = rewriter.create<arith::MulIOp>(loc, y1x1, rightPart);
-        Value bottomAcc = rewriter.create<arith::AddIOp>(loc, y1x0, y1x1);
+      y1x0 = rewriter.create<arith::MulIOp>(loc, y1x0, leftPart);
+      y1x1 = rewriter.create<arith::MulIOp>(loc, y1x1, rightPart);
+      Value bottomAcc = rewriter.create<arith::AddIOp>(loc, y1x0, y1x1);
 
-        Value bottomPart = dy;
-        Value topPart = rewriter.create<arith::SubIOp>(loc, unitVal, dy);
-        topAcc = rewriter.create<arith::MulIOp>(loc, topAcc, topPart);
-        bottomAcc =
-            rewriter.create<arith::MulIOp>(loc, bottomAcc, bottomPart);
-        Value result = rewriter.create<arith::AddIOp>(loc, topAcc, bottomAcc);
+      Value bottomPart = dy;
+      Value topPart = rewriter.create<arith::SubIOp>(loc, unitVal, dy);
+      topAcc = rewriter.create<arith::MulIOp>(loc, topAcc, topPart);
+      bottomAcc = rewriter.create<arith::MulIOp>(loc, bottomAcc, bottomPart);
+      Value result = rewriter.create<arith::AddIOp>(loc, topAcc, bottomAcc);
 
-        rewriter.create<linalg::YieldOp>(loc, result);
-        return success();
+      rewriter.create<linalg::YieldOp>(loc, result);
+      return success();
     }
 
     return failure();
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
@@ -111,11 +111,6 @@
   Type inputETy = inputTy.getElementType();
   Type resultETy = resultTy.getElementType();
 
-  auto padAttr = op->getAttr("pad").cast<ArrayAttr>();
-  auto strideTosaAttr = op->getAttr("stride").cast<ArrayAttr>();
-  auto dilationTosaAttr = op->getAttr("dilation").cast<ArrayAttr>();
-  bool isQuantized = op->hasAttr("quantization_info");
-
   if (!inputTy.hasStaticShape() || !weightTy.hasStaticShape() ||
       !biasTy.hasStaticShape() || !resultTy.hasStaticShape())
     return rewriter.notifyMatchFailure(op,
@@ -129,6 +124,7 @@
 
   // Apply padding as necessary.
   Attribute zeroAttr = rewriter.getZeroAttr(inputETy);
+  bool isQuantized = op->hasAttr("quantization_info");
   if (isQuantized) {
     auto quantizationInfo =
         op->getAttr("quantization_info").cast<tosa::ConvOpQuantizationAttr>();
@@ -147,10 +143,10 @@
     zeroAttr = rewriter.getIntegerAttr(inputETy, iZp);
   }
 
   llvm::SmallVector<int64_t> pad;
   pad.resize(2, 0);
-  getValuesFromIntArrayAttribute(padAttr, pad);
+  ArrayRef<int64_t> originalPad = op.pad();
+  pad.append(originalPad.begin(), originalPad.end());
   pad.resize(pad.size() + 2, 0);
   input = applyPad(loc, input, pad, zeroAttr, rewriter);
@@ -178,9 +174,8 @@
       rewriter.create<linalg::FillOp>(loc, zero, initTensor).getResult(0);
 
   // Extract the attributes for convolution.
-  llvm::SmallVector<int64_t> stride, dilation;
-  getValuesFromIntArrayAttribute(strideTosaAttr, stride);
-  getValuesFromIntArrayAttribute(dilationTosaAttr, dilation);
+  ArrayRef<int64_t> stride = op.stride();
+  ArrayRef<int64_t> dilation = op.dilation();
 
   // Create the convolution op.
   auto strideAttr = DenseIntElementsAttr::get(
@@ -276,10 +271,6 @@
   Type inputETy = inputTy.getElementType();
   Type resultETy = resultTy.getElementType();
 
-  auto padAttr = op->getAttr("pad").cast<ArrayAttr>();
-  auto strideTosaAttr = op->getAttr("stride").cast<ArrayAttr>();
-  auto dilationTosaAttr = op->getAttr("dilation").cast<ArrayAttr>();
-
   bool isQuantized = op->hasAttr("quantization_info");
   IntegerAttr iZp;
   IntegerAttr kZp;
@@ -324,15 +315,15 @@
 
   llvm::SmallVector<int64_t> pad;
   pad.resize(2, 0);
-  getValuesFromIntArrayAttribute(padAttr, pad);
+  ArrayRef<int64_t> originalPad = op.pad();
+  pad.append(originalPad.begin(), originalPad.end());
   pad.resize(pad.size() + 2, 0);
 
   input = applyPad(loc, input, pad, zeroAttr, rewriter);
 
   // Extract the attributes for convolution.
-  llvm::SmallVector<int64_t> stride, dilation;
-  getValuesFromIntArrayAttribute(strideTosaAttr, stride);
-  getValuesFromIntArrayAttribute(dilationTosaAttr, dilation);
+  ArrayRef<int64_t> stride = op.stride();
+  ArrayRef<int64_t> dilation = op.dilation();
 
   // Create the convolution op.
   auto strideAttr = DenseIntElementsAttr::get(
@@ -634,15 +625,15 @@
   // Apply padding as necessary.
   llvm::SmallVector<int64_t> pad;
   pad.resize(2, 0);
-  getValuesFromIntArrayAttribute(op.pad(), pad);
+  ArrayRef<int64_t> originalPad = op.pad();
+  pad.append(originalPad.begin(), originalPad.end());
   pad.resize(pad.size() + 2, 0);
   Value paddedInput = applyPad(loc, input, pad, initialAttr, rewriter);
 
   Value initialValue = rewriter.create<arith::ConstantOp>(loc, initialAttr);
 
-  SmallVector<int64_t> kernel, stride;
-  getValuesFromIntArrayAttribute(op.kernel(), kernel);
-  getValuesFromIntArrayAttribute(op.stride(), stride);
+  ArrayRef<int64_t> kernel = op.kernel();
+  ArrayRef<int64_t> stride = op.stride();
 
   Attribute strideAttr = rewriter.getI64VectorAttr(stride);
   Attribute dilationAttr = rewriter.getI64VectorAttr({1, 1});
@@ -688,7 +679,8 @@
   // Apply padding as necessary.
   llvm::SmallVector<int64_t> pad;
   pad.resize(2, 0);
-  getValuesFromIntArrayAttribute(op.pad(), pad);
+  ArrayRef<int64_t> originalPad = op.pad();
+  pad.append(originalPad.begin(), originalPad.end());
   pad.resize(pad.size() + 2, 0);
   Attribute padAttr = rewriter.getZeroAttr(inElementTy);
   Value paddedInput = applyPad(loc, input, pad, padAttr, rewriter);
@@ -696,9 +688,8 @@
   Attribute initialAttr = rewriter.getZeroAttr(accETy);
   Value initialValue = rewriter.create<arith::ConstantOp>(loc, initialAttr);
 
-  SmallVector<int64_t> kernel, stride;
-  getValuesFromIntArrayAttribute(op.kernel(), kernel);
-  getValuesFromIntArrayAttribute(op.stride(), stride);
+  ArrayRef<int64_t> kernel = op.kernel();
+  ArrayRef<int64_t> stride = op.stride();
 
   Attribute strideAttr = rewriter.getI64VectorAttr(stride);
   Attribute dilationAttr = rewriter.getI64VectorAttr({1, 1});
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -627,8 +627,9 @@
 /// bitwidth of the output given the bit width of the input & weight content.
 static void buildConvOpWithQuantInfo(OpBuilder &builder, OperationState &result,
                                      Type outputType, Value input, Value weight,
-                                     Value bias, ArrayAttr pad,
-                                     ArrayAttr stride, ArrayAttr dilation) {
+                                     Value bias, DimensionListAttr pad,
+                                     DimensionListAttr stride,
+                                     DimensionListAttr dilation) {
 
   result.addOperands({input, weight, bias});
   result.addAttribute("pad", pad);
@@ -646,11 +647,11 @@
 }
 
 /// Handles tosa.transpose_conv2d which has outpad and output shape attributes.
-static void
-buildTransConvOpWithQuantInfo(OpBuilder &builder, OperationState &result,
-                              Type outputType, Value input, Value weight,
-                              Value bias, ArrayAttr outpad, ArrayAttr stride,
-                              ArrayAttr dilation, ArrayAttr outputShape) {
+static void buildTransConvOpWithQuantInfo(
+    OpBuilder &builder, OperationState &result, Type outputType, Value input,
+    Value weight, Value bias, DimensionListAttr outpad,
+    DimensionListAttr stride, DimensionListAttr dilation,
+    DimensionListAttr outputShape) {
   result.addOperands({input, weight, bias});
   result.addAttribute("out_pad", outpad);
   result.addAttribute("stride", stride);
@@ -724,11 +725,9 @@
 /// Both the tosa.avg_pool2d and unary ops use the same UnaryOpQuantizationAttr
 /// but avg_pool operator has its own builder as it has additional parameters
 /// not part of the unary ops.
-static void buildAvgPool2dOpWithQuantInfo(OpBuilder &builder,
-                                          OperationState &result,
-                                          Type outputType, Value input,
-                                          ArrayAttr kernel, ArrayAttr stride,
-                                          ArrayAttr pad) {
+static void buildAvgPool2dOpWithQuantInfo(
+    OpBuilder &builder, OperationState &result, Type outputType, Value input,
+    DimensionListAttr kernel, DimensionListAttr stride, DimensionListAttr pad) {
   result.addOperands(input);
   result.addAttribute("kernel", kernel);
   result.addAttribute("stride", stride);
@@ -1207,18 +1206,15 @@
   }
 
   int32_t shift = adaptor.shift().getValue().getSExtValue();
-  llvm::SmallVector<int64_t> newShape;
-  getI64Values(adaptor.output_size(), newShape);
-  outputShape[1] = newShape[0];
-  outputShape[2] = newShape[1];
+  ArrayRef<int64_t> outputSize = adaptor.output_size();
+  outputShape[1] = outputSize[0];
+  outputShape[2] = outputSize[1];
 
-  llvm::SmallVector<int64_t> strideInt;
-  llvm::SmallVector<int64_t> offsetInt;
+  ArrayRef<int64_t> strideInt = adaptor.stride();
+  ArrayRef<int64_t> offsetInt = adaptor.offset();
   llvm::SmallVector<double> strideFp;
   llvm::SmallVector<double> offsetFp;
-  getI64Values(adaptor.offset(), offsetInt);
   getF64Values(adaptor.offset_fp(), offsetFp);
-  getI64Values(adaptor.stride(), strideInt);
   getF64Values(adaptor.stride_fp(), strideFp);
 
   // If we have a 0 zero in integers we know that the resize indexing needs to
@@ -1442,13 +1438,9 @@
   int32_t height = inputShape.getDimSize(1);
   int32_t width = inputShape.getDimSize(2);
 
-  llvm::SmallVector<int64_t> kernel;
-  llvm::SmallVector<int64_t> stride;
-  llvm::SmallVector<int64_t> pad;
-
-  getI64Values(attributes.get("kernel").cast<ArrayAttr>(), kernel);
-  getI64Values(attributes.get("stride").cast<ArrayAttr>(), stride);
-  getI64Values(attributes.get("pad").cast<ArrayAttr>(), pad);
+  ArrayRef<int64_t> kernel = attributes.get("kernel").cast<DimensionListAttr>();
+  ArrayRef<int64_t> stride = attributes.get("stride").cast<DimensionListAttr>();
+  ArrayRef<int64_t> pad = attributes.get("pad").cast<DimensionListAttr>();
 
   if (height != -1) {
     int32_t padded = height + pad[0] + pad[1] - kernel[0];
@@ -1501,13 +1493,9 @@
                         : outputShape[3];
   }
 
-  llvm::SmallVector<int64_t> dilation;
-  llvm::SmallVector<int64_t> padding;
-  llvm::SmallVector<int64_t> stride;
-
-  getI64Values(adaptor.dilation(), dilation);
-  getI64Values(adaptor.pad(), padding);
-  getI64Values(adaptor.stride(), stride);
+  ArrayRef<int64_t> dilation = adaptor.dilation();
+  ArrayRef<int64_t> padding = adaptor.pad();
+  ArrayRef<int64_t> stride = adaptor.stride();
 
   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
@@ -1569,13 +1557,9 @@
         (outputShape[4] == -1) ? biasShape.getDimSize(0) : outputShape[4];
   }
 
-  llvm::SmallVector<int64_t> dilation;
-  llvm::SmallVector<int64_t> padding;
-  llvm::SmallVector<int64_t> stride;
-
-  getI64Values(adaptor.dilation(), dilation);
-  getI64Values(adaptor.pad(), padding);
-  getI64Values(adaptor.stride(), stride);
+  ArrayRef<int64_t> dilation = adaptor.dilation();
+  ArrayRef<int64_t> padding = adaptor.pad();
+  ArrayRef<int64_t> stride = adaptor.stride();
 
   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
@@ -1669,13 +1653,9 @@
                         : outputShape[3];
   }
 
-  llvm::SmallVector<int64_t> dilation;
-  llvm::SmallVector<int64_t> padding;
-  llvm::SmallVector<int64_t> stride;
-
-  getI64Values(adaptor.dilation(), dilation);
-  getI64Values(adaptor.pad(), padding);
-  getI64Values(adaptor.stride(), stride);
+  ArrayRef<int64_t> dilation = adaptor.dilation();
+  ArrayRef<int64_t> padding = adaptor.pad();
+  ArrayRef<int64_t> stride = adaptor.stride();
 
   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
@@ -1702,8 +1682,7 @@
     ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   TransposeConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
-  llvm::SmallVector<int64_t> outputShape;
-  getI64Values(adaptor.out_shape(), outputShape);
+  SmallVector<int64_t> outputShape = llvm::to_vector(adaptor.out_shape());
 
   int32_t inputWidth = ShapedType::kDynamicSize;
   int32_t inputHeight = ShapedType::kDynamicSize;
@@ -1738,13 +1717,9 @@
                         : outputShape[3];
   }
 
-  llvm::SmallVector<int64_t> dilation;
-  llvm::SmallVector<int64_t> padding;
-  llvm::SmallVector<int64_t> stride;
-
-  getI64Values(adaptor.dilation(), dilation);
-  getI64Values(adaptor.out_pad(), padding);
-  getI64Values(adaptor.stride(), stride);
+  ArrayRef<int64_t> dilation = adaptor.dilation();
+  ArrayRef<int64_t> padding = adaptor.out_pad();
+  ArrayRef<int64_t> stride = adaptor.stride();
 
   if (!ShapedType::isDynamic(inputHeight) &&
       !ShapedType::isDynamic(weightHeight)) {
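All of the shape-inference hunks above share one piece of sliding-window arithmetic (pad the input extent, subtract the effective kernel, divide by stride); a minimal standalone sketch with the dilation term written out, where the function name is illustrative and not from the patch:

  // Output extent of one spatial dimension; pooling is the dilation == 1 case.
  int64_t inferSpatialDim(int64_t inputDim, int64_t padBefore, int64_t padAfter,
                          int64_t kernelDim, int64_t dilation, int64_t stride) {
    int64_t padded =
        inputDim + padBefore + padAfter - dilation * (kernelDim - 1) - 1;
    return padded / stride + 1; // e.g. 6 + 0 + 0 - 1*(3-1) - 1 = 3; 3/1 + 1 = 4
  }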
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -24,14 +24,6 @@
 
 namespace {
 
-template <typename T>
-static void getValuesFromIntArrayAttribute(ArrayAttr attr,
-                                           SmallVector<T> &arrayValues) {
-  for (Attribute val : attr.getValue()) {
-    arrayValues.push_back(val.cast<IntegerAttr>().getValue().getSExtValue());
-  }
-}
-
 template <typename TosaOp, typename... Args>
 TosaOp createOpAndInfer(PatternRewriter &rewriter, Location loc, Type resultTy,
                         Args &&...args) {
@@ -94,13 +86,9 @@
   ShapedType biasTy = bias.getType().cast<ShapedType>();
   ShapedType resultTy = op->getResult(0).getType().cast<ShapedType>();
 
-  llvm::SmallVector<int64_t> pad;
-  llvm::SmallVector<int64_t> stride;
-  llvm::SmallVector<int64_t> dilation;
-
-  getValuesFromIntArrayAttribute(op.out_pad().cast<ArrayAttr>(), pad);
-  getValuesFromIntArrayAttribute(op.stride().cast<ArrayAttr>(), stride);
-  getValuesFromIntArrayAttribute(op.dilation().cast<ArrayAttr>(), dilation);
+  ArrayRef<int64_t> pad = op.out_pad();
+  ArrayRef<int64_t> stride = op.stride();
+  ArrayRef<int64_t> dilation = op.dilation();
 
   // If striding is all 1 we can modify padding and reverse the kernel along
   // the x/y direction to make it a regular convolution. This is much simpler
@@ -132,14 +120,13 @@
     if (op.quantization_info().hasValue()) {
       conv2d = rewriter.create<tosa::Conv2DOp>(
           loc, resultTy, input, reverse2, bias,
-          rewriter.getI64ArrayAttr(convPad), rewriter.getI64ArrayAttr(stride),
-          rewriter.getI64ArrayAttr(dilation),
-          op.quantization_info().getValue());
+          rewriter.getDimListAttr(convPad), rewriter.getDimListAttr(stride),
+          rewriter.getDimListAttr(dilation), op.quantization_info().getValue());
     } else {
       conv2d = rewriter.create<tosa::Conv2DOp>(
           loc, resultTy, input, reverse2, bias,
-          rewriter.getI64ArrayAttr(convPad), rewriter.getI64ArrayAttr(stride),
-          rewriter.getI64ArrayAttr(dilation));
+          rewriter.getDimListAttr(convPad), rewriter.getDimListAttr(stride),
+          rewriter.getDimListAttr(dilation));
     }
 
     rewriter.replaceOp(op, conv2d);
@@ -168,13 +155,9 @@
   Type biasETy = biasTy.getElementType();
   Type resultETy = resultTy.getElementType();
 
-  llvm::SmallVector<int64_t> pad;
-  llvm::SmallVector<int64_t> stride;
-  llvm::SmallVector<int64_t> dilation;
-
-  getValuesFromIntArrayAttribute(op.out_pad().cast<ArrayAttr>(), pad);
-  getValuesFromIntArrayAttribute(op.stride().cast<ArrayAttr>(), stride);
-  getValuesFromIntArrayAttribute(op.dilation().cast<ArrayAttr>(), dilation);
+  ArrayRef<int64_t> pad = op.out_pad();
+  ArrayRef<int64_t> stride = op.stride();
+  ArrayRef<int64_t> dilation = op.dilation();
 
   // If striding is all 1 we can modify padding and reverse the kernel along
   // the x/y direction to make it a regular convolution. This is much simpler
@@ -302,18 +285,18 @@
       conv2d = createOpAndInfer<tosa::Conv2DOp>(
                    rewriter, loc, UnrankedTensorType::get(resultETy), input,
                    weight, zeroBias,
-                   /*pad=*/rewriter.getI64ArrayAttr({0, 0, 0, 0}),
-                   /*stride=*/rewriter.getI64ArrayAttr({1, 1}),
-                   /*dilation=*/rewriter.getI64ArrayAttr({1, 1}),
+                   /*pad=*/rewriter.getDimListAttr({0, 0, 0, 0}),
+                   /*stride=*/rewriter.getDimListAttr({1, 1}),
+                   /*dilation=*/rewriter.getDimListAttr({1, 1}),
                    op.quantization_info().getValue())
                    .getResult();
     } else {
       conv2d = createOpAndInfer<tosa::Conv2DOp>(
                    rewriter, loc, UnrankedTensorType::get(resultETy), input,
                    weight, zeroBias,
-                   /*pad=*/rewriter.getI64ArrayAttr({0, 0, 0, 0}),
-                   /*stride=*/rewriter.getI64ArrayAttr({1, 1}),
-                   /*dilation=*/rewriter.getI64ArrayAttr({1, 1}))
+                   /*pad=*/rewriter.getDimListAttr({0, 0, 0, 0}),
+                   /*stride=*/rewriter.getDimListAttr({1, 1}),
+                   /*dilation=*/rewriter.getDimListAttr({1, 1}))
                    .getResult();
     }
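Because the dims attributes now convert implicitly to ArrayRef<int64_t>, predicate-style checks such as the unit-stride test in TosaOptimization.cpp below reduce to standard llvm range helpers instead of per-element IntegerAttr casts; a minimal sketch, assuming any TOSA conv-like op type with a stride() accessor:

  // True iff every stride entry is 1; usable for conv2d, depthwise_conv2d, ...
  template <typename ConvOpT>
  bool hasUnitStride(ConvOpT op) {
    ArrayRef<int64_t> stride = op.stride(); // via the ArrayRefAttr trait
    return llvm::all_of(stride, [](int64_t dim) { return dim == 1; });
  }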
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaOptimization.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaOptimization.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaOptimization.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaOptimization.cpp
@@ -51,11 +51,8 @@
   }
 
   // Stride must be 1 for this optimization.
-  for (Attribute stride : op.stride().getValue()) {
-    if (!stride.cast<IntegerAttr>().getValue().isOne()) {
-      return failure();
-    }
-  }
+  if (!llvm::all_of(op.stride(), [](int64_t dim) { return dim == 1; }))
+    return failure();
 
   // Only works for a 1x1 kernel.
   ArrayRef<int64_t> weightShape = weightType.getShape();
@@ -145,11 +142,8 @@
   }
 
   // Stride must be 1 for this optimization.
-  for (Attribute stride : op.stride().getValue()) {
-    if (!stride.cast<IntegerAttr>().getValue().isOne()) {
-      return failure();
-    }
-  }
+  if (!llvm::all_of(op.stride(), [](int64_t dim) { return dim == 1; }))
+    return failure();
 
   // Only works for a 1x1 kernel.
   ArrayRef<int64_t> weightShape = weightType.getShape();
diff --git a/mlir/lib/IR/AsmPrinter.cpp b/mlir/lib/IR/AsmPrinter.cpp
--- a/mlir/lib/IR/AsmPrinter.cpp
+++ b/mlir/lib/IR/AsmPrinter.cpp
@@ -1907,7 +1907,7 @@
 }
 
 void AsmPrinter::Impl::printDimensionListAttr(DimensionListAttr attr) {
-  llvm::interleaveComma(attr.getDims(), os);
+  llvm::interleaveComma(attr, os);
 }
 
 void AsmPrinter::Impl::printDenseStringElementsAttr(
diff --git a/mlir/lib/IR/Builders.cpp b/mlir/lib/IR/Builders.cpp
--- a/mlir/lib/IR/Builders.cpp
+++ b/mlir/lib/IR/Builders.cpp
@@ -227,6 +227,10 @@
   return getArrayAttr(attrs);
 }
 
+DimensionListAttr Builder::getDimListAttr(ArrayRef<int64_t> values) {
+  return DimensionListAttr::get(getContext(), values);
+}
+
 ArrayAttr Builder::getIndexArrayAttr(ArrayRef<int64_t> values) {
   auto attrs = llvm::to_vector<8>(
       llvm::map_range(values, [this](int64_t v) -> Attribute {
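A minimal round-trip sketch for the new builder hook, assuming the getDimListAttr signature added above and the dims<...> printed syntax exercised by the tests below:

  // Build a pad attribute from plain integers, then read it back as a range.
  void buildAndReadPad(OpBuilder &builder) {
    DimensionListAttr pad = builder.getDimListAttr({0, 0, 1, 1}); // dims<0, 0, 1, 1>
    ArrayRef<int64_t> dims = pad; // no ArrayAttr/IntegerAttr unwrapping
    assert(dims.size() == 4 && dims[3] == 1);
  }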
diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
@@ -146,7 +146,7 @@
   // CHECK-DAG: [[FILL:%.+]] = linalg.fill([[CONST]], [[INIT]])
   // CHECK-DAG: [[KERNEL:%.+]] = linalg.init_tensor [3, 3]
   // CHECK: linalg.pooling_nhwc_max {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg0, [[KERNEL]] : tensor<1x6x34x62xf32>, tensor<3x3xf32>) outs([[FILL]] : tensor<1x4x32x62xf32>)
-  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, 1]} : (tensor<1x6x34x62xf32>) -> (tensor<1x4x32x62xf32>)
+  %0 = "tosa.max_pool2d"(%arg0) {pad = dims<0, 0, 0, 0>, kernel = dims<3, 3>, stride = dims<1, 1>} : (tensor<1x6x34x62xf32>) -> (tensor<1x4x32x62xf32>)
   return
 }
 
@@ -160,7 +160,7 @@
   // CHECK-DAG: [[FILL:%.+]] = linalg.fill([[INITVAL]], [[INIT]])
   // CHECK-DAG: [[KERNEL:%.+]] = linalg.init_tensor [3, 3]
   // CHECK: linalg.pooling_nhwc_max {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins([[PAD]], [[KERNEL]] : tensor<1x6x35x62xf32>, tensor<3x3xf32>) outs([[FILL]] : tensor<1x4x33x62xf32>)
-  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 1], kernel = [3, 3], stride = [1, 1]} : (tensor<1x6x34x62xf32>) -> (tensor<1x4x33x62xf32>)
+  %0 = "tosa.max_pool2d"(%arg0) {pad = dims<0, 0, 0, 1>, kernel = dims<3, 3>, stride = dims<1, 1>} : (tensor<1x6x34x62xf32>) -> (tensor<1x4x33x62xf32>)
   return
 }
 
@@ -168,7 +168,7 @@
 func @max_pool_i8(%arg0: tensor<1x6x34x62xi8>) -> () {
   // CHECK: arith.constant -128
   // CHECK: linalg.pooling_nhwc_max
-  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, 1]} : (tensor<1x6x34x62xi8>) -> (tensor<1x4x32x62xi8>)
+  %0 = "tosa.max_pool2d"(%arg0) {pad = dims<0, 0, 0, 0>, kernel = dims<3, 3>, stride = dims<1, 1>} : (tensor<1x6x34x62xi8>) -> (tensor<1x4x32x62xi8>)
   return
 }
 
@@ -176,7 +176,7 @@
 func @max_pool_i16(%arg0: tensor<1x6x34x62xi16>) -> () {
   // CHECK: arith.constant -32768
   // CHECK: linalg.pooling_nhwc_max
-  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, 1]} : (tensor<1x6x34x62xi16>) -> (tensor<1x4x32x62xi16>)
+  %0 = "tosa.max_pool2d"(%arg0) {pad = dims<0, 0, 0, 0>, kernel = dims<3, 3>, stride = dims<1, 1>} : (tensor<1x6x34x62xi16>) -> (tensor<1x4x32x62xi16>)
   return
 }
 
@@ -184,7 +184,7 @@
 func @max_pool_i32(%arg0: tensor<1x6x34x62xi32>) -> () {
   // CHECK: arith.constant -2147483648
   // CHECK: linalg.pooling_nhwc_max
-  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, 1]} : (tensor<1x6x34x62xi32>) -> (tensor<1x4x32x62xi32>)
+  %0 = "tosa.max_pool2d"(%arg0) {pad = dims<0, 0, 0, 0>, kernel = dims<3, 3>, stride = dims<1, 1>} : (tensor<1x6x34x62xi32>) -> (tensor<1x4x32x62xi32>)
   return
 }
 // -----
@@ -244,7 +244,7 @@
   // CHECK: [[CF:%.+]] = arith.sitofp [[CI]]
   // CHECK: [[RESULT:%.+]] = arith.divf %arg1, [[CF]]
   // CHECK: linalg.yield [[RESULT]]
-  %0 = "tosa.avg_pool2d"(%arg0) {pad = [1, 1, 1, 1], kernel = [4, 4], stride = [1, 1]} : (tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>)
+  %0 = "tosa.avg_pool2d"(%arg0) {pad = dims<1, 1, 1, 1>, kernel = dims<4, 4>, stride = dims<1, 1>} : (tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>)
   return %0 : tensor<1x5x33x62xf32>
 }
 
@@ -273,7 +273,7 @@
   // CHECK: %[[CLMP_MAX:.+]] = select %[[CMP_MAX]], %[[MAX]], %[[CLMP_MIN]]
   // CHECK: %[[TRUNC:.+]] = arith.trunci %[[CLMP_MAX]]
   // CHECK: linalg.yield %[[TRUNC]]
-  %0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 4], pad = [0, 0, 0, 0], quantization_info = {input_zp = -128 : i32, output_zp = -128 : i32}, stride = [4, 4]} : (tensor<1x128x128x2xi8>) -> tensor<1x32x32x2xi8>
+  %0 = "tosa.avg_pool2d"(%arg0) {kernel = dims<4, 4>, pad = dims<0, 0, 0, 0>, quantization_info = {input_zp = -128 : i32, output_zp = -128 : i32}, stride = dims<4, 4>} : (tensor<1x128x128x2xi8>) -> tensor<1x32x32x2xi8>
   return
 }
 
@@ -302,7 +302,7 @@
   // CHECK: %[[CLMP_MAX:.+]] = select %[[CMP_MAX]], %[[MAX]], %[[CLMP_MIN]]
   // CHECK: %[[TRUNC:.+]] = arith.trunci %[[CLMP_MAX]]
   // CHECK: linalg.yield %[[TRUNC]]
-  %0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 4], pad = [0, 0, 0, 0], quantization_info = {input_zp = -128 : i32, output_zp = -128 : i32}, stride = [4, 4]} : (tensor<1x128x128x2xi16>) -> tensor<1x32x32x2xi16>
+  %0 = "tosa.avg_pool2d"(%arg0) {kernel = dims<4, 4>, pad = dims<0, 0, 0, 0>, quantization_info = {input_zp = -128 : i32, output_zp = -128 : i32}, stride = dims<4, 4>} : (tensor<1x128x128x2xi16>) -> tensor<1x32x32x2xi16>
   return
 }
 
@@ -323,7 +323,7 @@
   // CHECK: %[[B:.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP2]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, %[[CONV]] : tensor<28xf32>, tensor<1x45x40x28xf32>) outs(%[[B_IN]] : tensor<1x45x40x28xf32>)
   // CHECK: arith.addf
   // CHECK: linalg.yield
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [2, 1]} : (tensor<1x49x42x27xf32>, tensor<28x3x3x27xf32>, tensor<28xf32>) -> (tensor<1x45x40x28xf32>)
+  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<2, 1>} : (tensor<1x49x42x27xf32>, tensor<28x3x3x27xf32>, tensor<28xf32>) -> (tensor<1x45x40x28xf32>)
   return
 }
 
@@ -335,7 +335,7 @@
   // CHECK: linalg.pad_tensor %arg0 low[0, 1, 1, 0] high[0, 1, 1, 0]
   // CHECK: linalg.yield %[[C0]]
   // CHECK: linalg.conv_2d_nhwc_hwcf
-  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [1, 1, 1, 1], stride = [1, 1], dilation = [2, 1]} : (tensor<1x47x40x28xf32>, tensor<28x3x3x28xf32>, tensor<28xf32>) -> (tensor<1x45x40x28xf32>)
+  %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = dims<1, 1, 1, 1>, stride = dims<1, 1>, dilation = dims<2, 1>} : (tensor<1x47x40x28xf32>, tensor<28x3x3x28xf32>, tensor<28xf32>) -> (tensor<1x45x40x28xf32>)
   return
 }
@@ -347,7 +347,7 @@
   // CHECK: linalg.pad_tensor %arg0 low[0, 1, 1, 0] high[0, 1, 1, 0]
   // CHECK: linalg.yield %[[C22]]
   // CHECK: linalg.conv_2d_nhwc_hwcf_q
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [1, 1, 1, 1], quantization_info = {input_zp = -22 : i32, weight_zp = 42 : i32}, stride = [1, 1]} : (tensor<1x12x12x1xi8>, tensor<1024x3x3x1xi8>, tensor<1024xi32>) -> tensor<1x12x12x1024xi32>
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<1, 1, 1, 1>, quantization_info = {input_zp = -22 : i32, weight_zp = 42 : i32}, stride = dims<1, 1>} : (tensor<1x12x12x1xi8>, tensor<1024x3x3x1xi8>, tensor<1024xi32>) -> tensor<1x12x12x1024xi32>
   return
 }
 
@@ -369,7 +369,7 @@
   // CHECK: [[ADD:%.+]] = arith.addf %arg3, %arg4 : f32
   // CHECK: linalg.yield [[ADD]] : f32
   // CHECK: } -> tensor<1x5x5x33xf32>
-  %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1] } : (tensor<1x7x5x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>) -> (tensor<1x5x5x33xf32>)
+  %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<1, 1> } : (tensor<1x7x5x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>) -> (tensor<1x5x5x33xf32>)
   return
 }
 
@@ -391,7 +391,7 @@
   // CHECK: [[ADD:%.+]] = arith.addf %arg3, %arg4 : f32
   // CHECK: linalg.yield [[ADD]] : f32
   // CHECK: } -> tensor<1x5x5x33xf32>
-  %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = [0, 0, 0, 0], stride = [2, 2], dilation = [1, 1] } : (tensor<1x11x9x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>) -> (tensor<1x5x5x33xf32>)
+  %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) { pad = dims<0, 0, 0, 0>, stride = dims<2, 2>, dilation = dims<1, 1> } : (tensor<1x11x9x3xf32>, tensor<3x1x3x11xf32>, tensor<33xf32>) -> (tensor<1x5x5x33xf32>)
   return
 }
 
@@ -419,7 +419,7 @@
   // CHECK: [[ADD:%.+]] = arith.addi %arg3, %arg4 : i32
   // CHECK: linalg.yield [[ADD]] : i32
   // CHECK: } -> tensor<1x12x12x512xi32>
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [1, 1, 1, 1], quantization_info = {input_zp = -128 : i32, weight_zp = 42 : i32}, stride = [1, 1], dilation = [1, 1] } : (tensor<1x12x12x4xi8>, tensor<3x3x4x128xi8>, tensor<512xi32>) -> tensor<1x12x12x512xi32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = dims<1, 1, 1, 1>, quantization_info = {input_zp = -128 : i32, weight_zp = 42 : i32}, stride = dims<1, 1>, dilation = dims<1, 1> } : (tensor<1x12x12x4xi8>, tensor<3x3x4x128xi8>, tensor<512xi32>) -> tensor<1x12x12x512xi32>
   return
 }
 
@@ -443,6 +443,6 @@
   // CHECK: [[ADD:%.+]] = arith.addi %arg3, %arg4 : i32
   // CHECK: linalg.yield [[ADD]] : i32
   // CHECK: } -> tensor<1x10x10x512xi32>
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], quantization_info = {input_zp = -128 : i32, weight_zp = 42 : i32}, stride = [1, 1], dilation = [2, 2] } : (tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, tensor<512xi32>) -> tensor<1x10x10x512xi32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = dims<0, 0, 0, 0>, quantization_info = {input_zp = -128 : i32, weight_zp = 42 : i32}, stride = dims<1, 1>, dilation = dims<2, 2> } : (tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, tensor<512xi32>) -> tensor<1x10x10x512xi32>
   return
 }
diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
@@ -1309,7 +1309,7 @@
   // CHECK-DAG: %[[IDX:.+]] = arith.index_cast %[[VAL29]]
   // CHECK-DAG: %[[EXTRACT:.+]] = tensor.extract %arg0[%[[IDX0]], %[[IDY]], %[[IDX]], %[[IDX3]]]
   // CHECK: linalg.yield %[[EXTRACT]]
-  %output = "tosa.resize"(%input) { output_size = [4, 4], stride = [0, 0], offset = [0, 0], stride_fp = [0.5 : f32, 0.5 : f32], offset_fp = [0.1 : f32, 0.2 : f32], shift = 0 : i32, mode = "NEAREST_NEIGHBOR" } : (tensor<1x2x2x1xf32>) -> (tensor<1x4x4x1xf32>)
+  %output = "tosa.resize"(%input) { output_size = dims<4, 4>, stride = dims<0, 0>, offset = dims<0, 0>, stride_fp = [0.5 : f32, 0.5 : f32], offset_fp = [0.1 : f32, 0.2 : f32], shift = 0 : i32, mode = "NEAREST_NEIGHBOR" } : (tensor<1x2x2x1xf32>) -> (tensor<1x4x4x1xf32>)
   return
 }
@@ -1392,7 +1392,7 @@
   // CHECK: %[[WHI:.+]] = arith.mulf %[[HI]], %[[DY]]
   // CHECK: %[[RESULT:.+]] = arith.addf %[[WLO]], %[[WHI]]
   // CHECK: linalg.yield %[[RESULT]]
-  %output = "tosa.resize"(%input) { output_size = [4, 4], stride = [0, 0], offset = [0, 0], stride_fp = [0.5 : f32, 0.5 : f32], offset_fp = [0.1 : f32, 0.2 : f32], shift = 0 : i32, mode = "BILINEAR" } : (tensor<1x2x2x1xf32>) -> (tensor<1x4x4x1xf32>)
+  %output = "tosa.resize"(%input) { output_size = dims<4, 4>, stride = dims<0, 0>, offset = dims<0, 0>, stride_fp = [0.5 : f32, 0.5 : f32], offset_fp = [0.1 : f32, 0.2 : f32], shift = 0 : i32, mode = "BILINEAR" } : (tensor<1x2x2x1xf32>) -> (tensor<1x4x4x1xf32>)
   return
 }
 
@@ -1460,7 +1460,7 @@
   // CHECK-DAG: %[[IDX:.+]] = arith.index_cast %[[VAL29]]
   // CHECK: %[[EXTRACT:.+]] = tensor.extract %arg0[%[[IDX0]], %[[IDY]], %[[IDX]], %[[IDX3]]]
   // CHECK: linalg.yield %[[EXTRACT]]
-  %output = "tosa.resize"(%input) { output_size = [4, 4], stride = [128, 128], offset = [1, 2], stride_fp = [0. : f32, 0. : f32], offset_fp = [0. : f32, 0. : f32], shift = 8 : i32, mode = "NEAREST_NEIGHBOR" } : (tensor<1x2x2x1xi32>) -> (tensor<1x4x4x1xi32>)
+  %output = "tosa.resize"(%input) { output_size = dims<4, 4>, stride = dims<128, 128>, offset = dims<1, 2>, stride_fp = [0. : f32, 0. : f32], offset_fp = [0. : f32, 0. : f32], shift = 8 : i32, mode = "NEAREST_NEIGHBOR" } : (tensor<1x2x2x1xi32>) -> (tensor<1x4x4x1xi32>)
   return
 }
 
@@ -1545,6 +1545,6 @@
   // CHECK: %[[WHI:.+]] = arith.muli %[[HI]], %[[DY]]
   // CHECK: %[[RESULT:.+]] = arith.addi %[[WLO]], %[[WHI]]
   // CHECK: linalg.yield %[[RESULT]]
-  %output = "tosa.resize"(%input) { output_size = [4, 4], stride = [128, 128], offset = [1, 2], stride_fp = [0. : f32, 0. : f32], offset_fp = [0. : f32, 0. : f32], shift = 8 : i32, mode = "BILINEAR" } : (tensor<1x2x2x1xi8>) -> (tensor<1x4x4x1xi32>)
+  %output = "tosa.resize"(%input) { output_size = dims<4, 4>, stride = dims<128, 128>, offset = dims<1, 2>, stride_fp = [0. : f32, 0. : f32], offset_fp = [0. : f32, 0. : f32], shift = 8 : i32, mode = "BILINEAR" } : (tensor<1x2x2x1xi8>) -> (tensor<1x4x4x1xi32>)
   return
 }
diff --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir
--- a/mlir/test/Dialect/Tosa/canonicalize.mlir
+++ b/mlir/test/Dialect/Tosa/canonicalize.mlir
@@ -73,7 +73,7 @@
   // CHECK: "tosa.conv2d"
   %weight = "tosa.const"() {value = dense<[[[[1.0, 1.0]]], [[[1.0, 1.0]]], [[[1.0, 1.0]]]]> : tensor<3x1x1x2xf32>} : ()-> tensor<3x1x1x2xf32>
   %bias = "tosa.const"() {value = dense<0.0> : tensor<3xf32>} : ()-> tensor<3xf32>
-  %0 = "tosa.conv2d"(%arg0, %weight, %bias) {pad = [0, 0, 0, 0], stride = [2, 2], dilation = [1, 1]} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32>
+  %0 = "tosa.conv2d"(%arg0, %weight, %bias) {pad = dims<0, 0, 0, 0>, stride = dims<2, 2>, dilation = dims<1, 1>} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32>
   return %0 : tensor<4x10x10x3xf32>
 }
 
@@ -84,7 +84,7 @@
   // CHECK: "tosa.conv2d"
   %weight = "tosa.const"() {value = dense<[[[[1.0], [1.0]], [[1.0], [1.0]]]]> : tensor<1x2x2x1xf32>} : ()-> tensor<1x2x2x1xf32>
   %bias = "tosa.const"() {value = dense<0.0> : tensor<1xf32>} : ()-> tensor<1xf32>
-  %0 = "tosa.conv2d"(%arg0, %weight, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor<4x10x10x1xf32>, tensor<1x2x2x1xf32>, tensor<1xf32>) -> tensor<4x10x10x1xf32>
+  %0 = "tosa.conv2d"(%arg0, %weight, %bias) {pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<1, 1>} : (tensor<4x10x10x1xf32>, tensor<1x2x2x1xf32>, tensor<1xf32>) -> tensor<4x10x10x1xf32>
   return %0 : tensor<4x10x10x1xf32>
 }
 
@@ -93,7 +93,7 @@
 // CHECK-LABEL: @depthwise_conv2d_stride_2
 func @depthwise_conv2d_stride_2(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1x1x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x10x10x6xf32> {
   // CHECK: "tosa.depthwise_conv2d"
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], stride = [2, 2], dilation = [1, 1]} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = dims<0, 0, 0, 0>, stride = dims<2, 2>, dilation = dims<1, 1>} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32>
   return %0 : tensor<4x10x10x6xf32>
 }
 
@@ -102,7 +102,7 @@
 // CHECK-LABEL: @depthwise_conv2d_weight_2x2
 func @depthwise_conv2d_weight_2x2(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<2x2x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x10x10x6xf32> {
   // CHECK: "tosa.depthwise_conv2d"
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor<4x10x10x2xf32>, tensor<2x2x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<1, 1>} : (tensor<4x10x10x2xf32>, tensor<2x2x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32>
   return %0 : tensor<4x10x10x6xf32>
 }
 
@@ -112,7 +112,7 @@
 func @max_pool2d_is_noop(%arg0: tensor<10x1x1x3xf32>) -> tensor<10x1x1x3xf32> {
   // CHECK-NOT: "tosa.max_pool2d"
   // CHECK: return %arg0
-  %0 = "tosa.max_pool2d"(%arg0) {kernel = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor<10x1x1x3xf32>) -> tensor<10x1x1x3xf32>
+  %0 = "tosa.max_pool2d"(%arg0) {kernel = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<1, 1>} : (tensor<10x1x1x3xf32>) -> tensor<10x1x1x3xf32>
   return %0 : tensor<10x1x1x3xf32>
 }
diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir
--- a/mlir/test/Dialect/Tosa/invalid.mlir
+++ b/mlir/test/Dialect/Tosa/invalid.mlir
@@ -3,7 +3,7 @@
 func @test_conv2d(%arg0: tensor<1x29x29x4xf32>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
   // expected-error@+1 {{expect both input and weight to be float or not together, got 'f32' and 'i8'}}
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>}
            : (tensor<1x29x29x4xf32>, tensor<16x3x3x4xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8>
   return %0 : tensor<1x27x27x16xi8>
 }
 
@@ -12,7 +12,7 @@
 func @test_conv2d(%arg0: tensor<*xi8>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
   // expected-error@+1 {{expect a ranked tensor for input, got <block argument> of type 'tensor<*xi8>' at index: 0}}
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>}
            : (tensor<*xi8>, tensor<16x3x3x4xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8>
   return %0 : tensor<1x27x27x16xi8>
 }
 
@@ -21,7 +21,7 @@
 func @test_conv2d(%arg0: tensor<1x29x29x4xi8>, %arg1: tensor<*xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
   // expected-error@+1 {{expect a ranked tensor for weight, got <block argument> of type 'tensor<*xi8>' at index: 1}}
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>}
            : (tensor<1x29x29x4xi8>, tensor<*xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8>
   return %0 : tensor<1x27x27x16xi8>
 }
 
@@ -31,7 +31,7 @@
 func @test_conv2d(%arg0: tensor<1x29x29x4xi8>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
   // expected-error@+1 {{'tosa.conv2d' op quantizationattr is required for quantized type, and not allowed for float type}}
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>}
            : (tensor<1x29x29x4xi8>, tensor<16x3x3x4xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8>
   return %0 : tensor<1x27x27x16xi8>
 }
diff --git a/mlir/test/Dialect/Tosa/operation_optimization.mlir b/mlir/test/Dialect/Tosa/operation_optimization.mlir
--- a/mlir/test/Dialect/Tosa/operation_optimization.mlir
+++ b/mlir/test/Dialect/Tosa/operation_optimization.mlir
@@ -14,7 +14,7 @@
   // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) {new_shape = [4, 10, 10, 3]}
   // CHECK-SAME: -> tensor<4x10x10x3xf32>
   // CHECK: return %[[VAR3]]
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32>
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<1, 1>} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32>
   return %0 : tensor<4x10x10x3xf32>
 }
@@ -33,7 +33,7 @@
   // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) {new_shape = [4, 10, 10, 3]}
   // CHECK-SAME: -> tensor<4x10x10x3xi32>
   // CHECK: return %[[VAR3]]
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1], quantization_info = {input_zp = 42 : i32, weight_zp = 24 : i32}} : (tensor<4x10x10x2xi8>, tensor<3x1x1x2xi8>, tensor<3xi32>) -> tensor<4x10x10x3xi32>
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<1, 1>, quantization_info = {input_zp = 42 : i32, weight_zp = 24 : i32}} : (tensor<4x10x10x2xi8>, tensor<3x1x1x2xi8>, tensor<3xi32>) -> tensor<4x10x10x3xi32>
   return %0 : tensor<4x10x10x3xi32>
 }
 
@@ -53,7 +53,7 @@
   // CHECK: %[[VAR4:.*]] = "tosa.add"(%[[VAR3]], %arg2)
   // CHECK-SAME: -> tensor<4x10x10x6xf32>
   // CHECK: return %[[VAR4]]
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<1, 1>} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x10x10x6xf32>
   return %0 : tensor<4x10x10x6xf32>
 }
 
@@ -62,7 +62,7 @@
 // CHECK-LABEL: @depthwise_conv2d_as_mul_q
 func @depthwise_conv2d_as_mul_q(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<1x1x2x3xi8>, %arg2: tensor<6xi32>) -> tensor<4x10x10x6xi32> {
   // CHECK: "tosa.depthwise_conv2d"
-  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1], quantization_info = {input_zp = 0 : i32, weight_zp = 0 : i32}} : (tensor<4x10x10x2xi8>, tensor<1x1x2x3xi8>, tensor<6xi32>) -> tensor<4x10x10x6xi32>
+  %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<1, 1>, quantization_info = {input_zp = 0 : i32, weight_zp = 0 : i32}} : (tensor<4x10x10x2xi8>, tensor<1x1x2x3xi8>, tensor<6xi32>) -> tensor<4x10x10x6xi32>
   return %0 : tensor<4x10x10x6xi32>
 }
diff --git a/mlir/test/Dialect/Tosa/ops.mlir b/mlir/test/Dialect/Tosa/ops.mlir
--- a/mlir/test/Dialect/Tosa/ops.mlir
+++ b/mlir/test/Dialect/Tosa/ops.mlir
@@ -12,42 +12,42 @@
 // -----
 // CHECK-LABEL: avg_pool2d_f32
 func @test_avg_pool2d_f32(%arg0: tensor<1x7x7x9xf32>) -> tensor<1x7x7x9xf32> {
-  %0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, 1]} : (tensor<1x7x7x9xf32>) -> tensor<1x7x7x9xf32>
+  %0 = "tosa.avg_pool2d"(%arg0) {kernel = dims<2, 2>, pad = dims<0, 1, 0, 1>, stride = dims<1, 1>} : (tensor<1x7x7x9xf32>) -> tensor<1x7x7x9xf32>
   return %0 : tensor<1x7x7x9xf32>
 }
 
 // -----
 // CHECK-LABEL: avg_pool2d_i8
 func @test_avg_pool2d_i8(%arg0: tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi8> {
-  %0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, 1]} : (tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi8>
+  %0 = "tosa.avg_pool2d"(%arg0) {kernel = dims<2, 2>, pad = dims<0, 1, 0, 1>, stride = dims<1, 1>} : (tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi8>
   return %0 : tensor<1x7x7x9xi8>
 }
 
 // -----
 // CHECK-LABEL: avg_pool2d_i16
 func @test_avg_pool2d_i16(%arg0: tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi16> {
-  %0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, 1]} : (tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi16>
+  %0 = "tosa.avg_pool2d"(%arg0) {kernel = dims<2, 2>, pad = dims<0, 1, 0, 1>, stride = dims<1, 1>} : (tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi16>
   return %0 : tensor<1x7x7x9xi16>
 }
 
 // -----
 // CHECK-LABEL: avg_pool2d_q8
 func @test_avg_pool2d_q8(%arg0: tensor<1x7x7x9x!quant.uniform>) -> tensor<1x7x7x9x!quant.uniform> {
-  %0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, 1]} : (tensor<1x7x7x9x!quant.uniform>) ->
tensor<1x7x7x9x!quant.uniform>
+  %0 = "tosa.avg_pool2d"(%arg0) {kernel = dims<2, 2>, pad = dims<0, 1, 0, 1>, stride = dims<1, 1>} : (tensor<1x7x7x9x!quant.uniform>) -> tensor<1x7x7x9x!quant.uniform>
   return %0 : tensor<1x7x7x9x!quant.uniform>
 }
 
 // -----
 // CHECK-LABEL: conv2d
 func @test_conv2d(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %arg2: tensor<8xf32>) -> tensor<1x4x4x8xf32> {
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32>
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>} : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32>
   return %0 : tensor<1x4x4x8xf32>
 }
 
 // -----
 // CHECK-LABEL: depthwise_conv2d
 func @test_depthwise_conv2d(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<1x1x4x2xf32>, %arg2: tensor<8xf32>) -> tensor<1x4x4x8xf32> {
-  %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<1x4x4x4xf32>, tensor<1x1x4x2xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32>
+  %2 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>} : (tensor<1x4x4x4xf32>, tensor<1x1x4x2xf32>, tensor<8xf32>) -> tensor<1x4x4x8xf32>
   return %2 : tensor<1x4x4x8xf32>
 }
 
@@ -68,14 +68,14 @@
 // -----
 // CHECK-LABEL: max_pool2d
 func @test_max_pool2d(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
-  %0 = "tosa.max_pool2d"(%arg0) {kernel = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>
+  %0 = "tosa.max_pool2d"(%arg0) {kernel = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>} : (tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>
   return %0 : tensor<1x32x32x8xf32>
 }
 
 // -----
 /// CHECK-LABEL: transpose_conv2d
 func @test_transpose_conv2d(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> {
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], out_pad = [0, 0], out_shape = [1, 32, 32, 16], stride = [1, 1]} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, out_pad = dims<0, 0>, out_shape = dims<1, 32, 32, 16>, stride = dims<1, 1>} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>
   return %0 : tensor<1x32x32x16xf32>
 }
 
@@ -456,7 +456,7 @@
 // -----
 // CHECK-LABEL: resize
 func @test_resize(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32> {
-  %1 = "tosa.resize"(%arg0) {output_size = [64, 64], stride = [1024, 1024], offset = [0, 0], shift = 10 : i32, stride_fp = [0.0 : f32, 0.0 : f32], offset_fp = [0.0 : f32, 0.0 : f32], mode = "BILINEAR"} : (tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32>
+  %1 = "tosa.resize"(%arg0) {output_size = dims<64, 64>, stride = dims<1024, 1024>, offset = dims<0, 0>, shift = 10 : i32, stride_fp = [0.0 : f32, 0.0 : f32], offset_fp = [0.0 : f32, 0.0 : f32], mode = "BILINEAR"} : (tensor<1x32x32x8xf32>) -> tensor<1x64x64x8xf32>
   return %1 : tensor<1x64x64x8xf32>
 }
diff --git a/mlir/test/Dialect/Tosa/quant-test.mlir b/mlir/test/Dialect/Tosa/quant-test.mlir
--- a/mlir/test/Dialect/Tosa/quant-test.mlir
+++ b/mlir/test/Dialect/Tosa/quant-test.mlir
@@ -12,7 +12,7 @@
 // CHECK-LABEL: test_build_mult_and_shift
 func @test_build_mult_and_shift(%arg0: tensor<1x32x32x8x!quant.uniform>, %arg1 : tensor<16x1x1x8x!quant.uniform:f32, 0.015680249780416489>>, %arg2 : tensor<16xi32>) -> tensor<1x32x32x16x!quant.uniform> {
   // CHECK: tosa.conv2d
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = [1, 1, 2, 2], dilation = [2, 1], stride = [1, 1], quantization_info = {input_zp = -1 : i32, weight_zp = 0 : i32}} : (tensor<1x32x32x8x!quant.uniform>, tensor<16x1x1x8x!quant.uniform:f32, 0.015680249780416489>>, tensor<16xi32>) -> tensor<1x32x32x16x!quant.uniform>
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = dims<1, 1, 2, 2>, dilation = dims<2, 1>, stride = dims<1, 1>, quantization_info = {input_zp = -1 : i32, weight_zp = 0 : i32}} : (tensor<1x32x32x8x!quant.uniform>, tensor<16x1x1x8x!quant.uniform:f32, 0.015680249780416489>>, tensor<16xi32>) -> tensor<1x32x32x16x!quant.uniform>
   return %0 : tensor<1x32x32x16x!quant.uniform>
 }
diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
--- a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
@@ -4,8 +4,8 @@
 func @transpose_conv2d(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) -> tensor<2x?x?x5xf32> {
   // CHECK: %[[REV1:.+]] = "tosa.reverse"(%arg1) {axis = 1 : i64}
   // CHECK: %[[REV2:.+]] = "tosa.reverse"(%[[REV1]]) {axis = 2 : i64}
-  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = [1, 1], pad = [2, 2, 5, 5], stride = [1, 1]}
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], out_pad = [0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x18x19x5xf32>
+  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = dims<1, 1>, pad = dims<2, 2, 5, 5>, stride = dims<1, 1>}
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, out_pad = dims<0, 0>, out_shape = dims<-1, -1, -1, -1>, stride = dims<1, 1>} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x18x19x5xf32>
   %1 = tensor.cast %0 : tensor<2x18x19x5xf32> to tensor<2x?x?x5xf32>
   return %1 : tensor<2x?x?x5xf32>
 }
 
@@ -16,24 +16,24 @@
 func @transpose_conv2d_quantized(%arg0: tensor<2x16x14x3xi8>, %arg1: tensor<5x3x6x3xi8>, %arg2: tensor<5xi32>) -> (tensor<2x18x19x5xi32>) {
   // CHECK: %[[REV1:.+]] = "tosa.reverse"(%arg1) {axis = 1 : i64}
   // CHECK: %[[REV2:.+]] = "tosa.reverse"(%[[REV1]]) {axis = 2 : i64}
-  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = [1, 1], pad = [2, 2, 5, 5], quantization_info = {input_zp = -22 : i32, weight_zp = 42 : i32}, stride = [1, 1]}
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], out_pad = [0, 0], quantization_info = {input_zp = -22 : i32, weight_zp = 42 : i32}, out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x16x14x3xi8>, tensor<5x3x6x3xi8>, tensor<5xi32>) -> tensor<2x18x19x5xi32>
+  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = dims<1, 1>, pad = dims<2, 2, 5, 5>, quantization_info = {input_zp = -22 : i32, weight_zp = 42 : i32}, stride = dims<1, 1>}
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, out_pad = dims<0, 0>, quantization_info = {input_zp = -22 : i32, weight_zp = 42 : i32}, out_shape = dims<-1, -1, -1, -1>, stride = dims<1, 1>} : (tensor<2x16x14x3xi8>, tensor<5x3x6x3xi8>, tensor<5xi32>) -> tensor<2x18x19x5xi32>
   return %0 : tensor<2x18x19x5xi32>
 }
 
-// ----
+// -----
 
 // CHECK-LABEL: @transpose_conv2d_dilated
 func @transpose_conv2d_dilated(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) -> tensor<2x?x?x5xf32> {
   // CHECK: %[[REV1:.+]] = "tosa.reverse"(%arg1) {axis = 1 : i64}
   // CHECK: %[[REV2:.+]] = "tosa.reverse"(%[[REV1]]) {axis = 2 : i64}
-  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = [2, 3], pad = [4, 4, 15, 15], stride = [1, 1]}
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [2, 3], out_pad = [0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x20x29x5xf32>
+  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = dims<2, 3>, pad = dims<4, 4, 15, 15>, stride = dims<1, 1>}
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<2, 3>, out_pad = dims<0, 0>, out_shape = dims<-1, -1, -1, -1>, stride = dims<1, 1>} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x20x29x5xf32>
   %1 = tensor.cast %0 : tensor<2x20x29x5xf32> to tensor<2x?x?x5xf32>
   return %1 : tensor<2x?x?x5xf32>
 }
 
-// ----
+// -----
 
 // CHECK-LABEL: @transpose_conv2d_strided
 func @transpose_conv2d_strided(%arg0: tensor<2x17x15x3xf32>, %arg1: tensor<5x3x5x3xf32>, %arg2: tensor<5xf32>) -> tensor<2x?x?x5xf32> {
@@ -54,18 +54,18 @@
 
   // Manipulate the final shape.
// CHECK-DAG: %[[BIAS:.+]] = "tosa.const"() {value = dense<0> : tensor<30xi32>} - // CHECK-DAG: %[[CONV:.+]] = "tosa.conv2d"(%[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]]) {dilation = [1, 1], pad = [0, 0, 0, 0], quantization_info = {input_zp = -22 : i32, weight_zp = 42 : i32}, stride = [1, 1]} + // CHECK-DAG: %[[CONV:.+]] = "tosa.conv2d"(%[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]]) {dilation = dims<1, 1>, pad = dims<0, 0, 0, 0>, quantization_info = {input_zp = -22 : i32, weight_zp = 42 : i32}, stride = dims<1, 1>} // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = "tosa.reshape"(%[[CONV]]) {new_shape = [2, 18, 16, 2, 3, 5]} // CHECK-DAG: %[[TRANS_OUT:.+]] = "tosa.transpose"(%[[RESHAPE_OUT_1]], %[[TRANS2]]) // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = "tosa.reshape"(%[[TRANS_OUT]]) {new_shape = [2, 36, 48, 5]} // CHECK-DAG: %[[SLICE:.+]] = "tosa.slice"(%[[RESHAPE_OUT_2]]) {size = [2, 35, 47, 5], start = [0, 0, 0, 0]} // CHECK: %[[ADD:.+]] = "tosa.add"(%[[SLICE]], %arg2) - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], out_pad = [0, 0], quantization_info = {input_zp = -22 : i32, weight_zp = 42 : i32}, out_shape = [-1, -1, -1, -1], stride = [2, 3]} : (tensor<2x17x15x3xi8>, tensor<5x3x5x3xi8>, tensor<5xi32>) -> tensor<2x35x47x5xi32> + %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, out_pad = dims<0, 0>, quantization_info = {input_zp = -22 : i32, weight_zp = 42 : i32}, out_shape = dims<-1, -1, -1, -1>, stride = dims<2, 3>} : (tensor<2x17x15x3xi8>, tensor<5x3x5x3xi8>, tensor<5xi32>) -> tensor<2x35x47x5xi32> return %0 : tensor<2x35x47x5xi32> } diff --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir --- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir +++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir @@ -675,10 +675,10 @@ // CHECK-LABEL: @test_pool_static func @test_pool_static(%arg0: tensor<3x5x6x7xf32>) { // CHECK: -> tensor<3x2x4x7xf32> - %0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 3], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<3x5x6x7xf32>) -> tensor + %0 = "tosa.avg_pool2d"(%arg0) {kernel = dims<4, 3>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>} : (tensor<3x5x6x7xf32>) -> tensor // CHECK: -> tensor<3x2x4x7xf32> - %1 = "tosa.max_pool2d"(%arg0) {kernel = [4, 3], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<3x5x6x7xf32>) -> tensor + %1 = "tosa.max_pool2d"(%arg0) {kernel = dims<4, 3>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>} : (tensor<3x5x6x7xf32>) -> tensor return } @@ -687,7 +687,7 @@ // CHECK-LABEL: @conv2d_static func @conv2d_static(%input: tensor<2x8x9x3xf32>, %weights: tensor<5x3x6x3xf32>, %bias: tensor<5xf32>) -> () { // CHECK: -> tensor<2x6x4x5xf32> - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> (tensor) + %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<1, 1>} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> (tensor) return } @@ -696,7 +696,7 @@ // CHECK-LABEL: @conv2d_dynamic_input func @conv2d_dynamic_input(%input: tensor, %weights: tensor<5x3x6x3xf32>, %bias: tensor<5xf32>) -> () { // CHECK: -> tensor - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor, tensor<5x3x6x3xf32>, tensor<5xf32>) -> (tensor) + %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<1, 1>} : (tensor, tensor<5x3x6x3xf32>, 
tensor<5xf32>) -> (tensor) return } @@ -705,10 +705,10 @@ // CHECK-LABEL: @test_pool_dynamic_input func @test_pool_dynamic_input(%arg0: tensor) { // CHECK: -> tensor - %0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 3], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor) -> tensor + %0 = "tosa.avg_pool2d"(%arg0) {kernel = dims<4, 3>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>} : (tensor) -> tensor // CHECK: -> tensor - %1 = "tosa.max_pool2d"(%arg0) {kernel = [4, 3], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor) -> tensor + %1 = "tosa.max_pool2d"(%arg0) {kernel = dims<4, 3>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>} : (tensor) -> tensor return } @@ -717,10 +717,10 @@ // CHECK-LABEL: @test_pool_padded func @test_pool_padded(%arg0: tensor<3x5x6x7xf32>) { // CHECK: -> tensor<3x5x11x7xf32> - %0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 3], pad = [1, 2, 3, 4], stride = [1, 1]} : (tensor<3x5x6x7xf32>) -> tensor + %0 = "tosa.avg_pool2d"(%arg0) {kernel = dims<4, 3>, pad = dims<1, 2, 3, 4>, stride = dims<1, 1>} : (tensor<3x5x6x7xf32>) -> tensor // CHECK: -> tensor<3x5x11x7xf32> - %1 = "tosa.max_pool2d"(%arg0) {kernel = [4, 3], pad = [1, 2, 3, 4], stride = [1, 1]} : (tensor<3x5x6x7xf32>) -> tensor + %1 = "tosa.max_pool2d"(%arg0) {kernel = dims<4, 3>, pad = dims<1, 2, 3, 4>, stride = dims<1, 1>} : (tensor<3x5x6x7xf32>) -> tensor return } @@ -729,7 +729,7 @@ // CHECK-LABEL: @conv2d_dynamic_weight func @conv2d_dynamic_weight(%input: tensor<2x8x9x3xf32>, %weights: tensor, %bias: tensor<5xf32>) -> () { // CHECK: -> tensor<2x?x?x5xf32> - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor<2x8x9x3xf32>, tensor, tensor<5xf32>) -> (tensor) + %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<1, 1>} : (tensor<2x8x9x3xf32>, tensor, tensor<5xf32>) -> (tensor) return } @@ -738,7 +738,7 @@ // CHECK-LABEL: @conv2d_dynamic_bias func @conv2d_dynamic_bias(%input: tensor<2x8x9x3xf32>, %weights: tensor<5x3x6x3xf32>, %bias: tensor) -> () { // CHECK: -> tensor<2x6x4x5xf32> - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor) -> (tensor) + %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<1, 1>} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor) -> (tensor) return } @@ -747,10 +747,10 @@ // CHECK-LABEL: @test_pool_stride func @test_pool_stride(%arg0: tensor<3x11x12x7xf32>) { // CHECK: -> tensor<3x4x4x7xf32> - %0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 3], pad = [0, 0, 0, 0], stride = [2, 3]} : (tensor<3x11x12x7xf32>) -> tensor + %0 = "tosa.avg_pool2d"(%arg0) {kernel = dims<4, 3>, pad = dims<0, 0, 0, 0>, stride = dims<2, 3>} : (tensor<3x11x12x7xf32>) -> tensor // CHECK: -> tensor<3x4x4x7xf32> - %1 = "tosa.max_pool2d"(%arg0) {kernel = [4, 3], pad = [0, 0, 0, 0], stride = [2, 3]} : (tensor<3x11x12x7xf32>) -> tensor + %1 = "tosa.max_pool2d"(%arg0) {kernel = dims<4, 3>, pad = dims<0, 0, 0, 0>, stride = dims<2, 3>} : (tensor<3x11x12x7xf32>) -> tensor return } @@ -759,7 +759,7 @@ // CHECK-LABEL: @conv2d_padded func @conv2d_padded(%input: tensor<2x8x9x3xf32>, %weights: tensor<5x3x6x3xf32>, %bias: tensor<5xf32>) -> () { // CHECK: -> tensor<2x9x11x5xf32> - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [1, 2, 3, 4], stride = [1, 1], dilation = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> (tensor) + %0 = "tosa.conv2d"(%input, %weights, 
%bias) {pad = dims<1, 2, 3, 4>, stride = dims<1, 1>, dilation = dims<1, 1>} : (tensor<2x8x9x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> (tensor) return } @@ -768,7 +768,7 @@ // CHECK-LABEL: @conv2d_dilated func @conv2d_dilated(%input: tensor<2x12x14x3xf32>, %weights: tensor<5x3x6x3xf32>, %bias: tensor<5xf32>) -> () { // CHECK: -> tensor<2x6x4x5xf32> - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [1, 1], dilation = [3, 2]} : (tensor<2x12x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> (tensor) + %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = dims<0, 0, 0, 0>, stride = dims<1, 1>, dilation = dims<3, 2>} : (tensor<2x12x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> (tensor) return } @@ -777,7 +777,7 @@ // CHECK-LABEL: @conv2d_strided func @conv2d_strided(%input: tensor<1x13x14x1xf32>, %weights: tensor<1x1x1x1xf32>, %bias: tensor<1xf32>) -> () { // CHECK: -> tensor<1x5x7x1xf32> - %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = [0, 0, 0, 0], stride = [3, 2], dilation = [1, 1]} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> (tensor) + %0 = "tosa.conv2d"(%input, %weights, %bias) {pad = dims<0, 0, 0, 0>, stride = dims<3, 2>, dilation = dims<1, 1>} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> (tensor) return } @@ -786,7 +786,7 @@ // CHECK-LABEL: @conv3d_static func @conv3d_static(%input: tensor<2x8x9x10x3xf32>, %weights: tensor<5x3x6x4x3xf32>, %bias: tensor<5xf32>) -> () { // CHECK: -> tensor<2x6x4x7x5xf32> - %0 = "tosa.conv3d"(%input, %weights, %bias) {dilation = [1, 1, 1], pad = [0, 0, 0, 0, 0, 0], stride = [1, 1, 1]} : (tensor<2x8x9x10x3xf32>, tensor<5x3x6x4x3xf32>, tensor<5xf32>) -> (tensor) + %0 = "tosa.conv3d"(%input, %weights, %bias) {dilation = dims<1, 1, 1>, pad = dims<0, 0, 0, 0, 0, 0>, stride = dims<1, 1, 1>} : (tensor<2x8x9x10x3xf32>, tensor<5x3x6x4x3xf32>, tensor<5xf32>) -> (tensor) return } @@ -795,7 +795,7 @@ // CHECK-LABEL: @conv3d_dynamic_input func @conv3d_dynamic_input(%arg0: tensor, %arg1: tensor<5x3x6x4x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor - %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = [1, 1, 1], pad = [0, 0, 0, 0, 0, 0], stride = [1, 1, 1]} : (tensor, tensor<5x3x6x4x3xf32>, tensor<5xf32>) -> tensor + %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1, 1>, pad = dims<0, 0, 0, 0, 0, 0>, stride = dims<1, 1, 1>} : (tensor, tensor<5x3x6x4x3xf32>, tensor<5xf32>) -> tensor return } @@ -804,7 +804,7 @@ // CHECK-LABEL: @conv3d_dynamic_weight func @conv3d_dynamic_weight(%arg0: tensor<2x8x9x10x3xf32>, %arg1: tensor, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x?x?x?x5xf32> - %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = [1, 1, 1], pad = [0, 0, 0, 0, 0, 0], stride = [1, 1, 1]} : (tensor<2x8x9x10x3xf32>, tensor, tensor<5xf32>) -> tensor + %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1, 1>, pad = dims<0, 0, 0, 0, 0, 0>, stride = dims<1, 1, 1>} : (tensor<2x8x9x10x3xf32>, tensor, tensor<5xf32>) -> tensor return } @@ -813,7 +813,7 @@ // CHECK-LABEL: @conv3d_dynamic_bias func @conv3d_dynamic_bias(%arg0: tensor<2x8x9x10x3xf32>, %arg1: tensor<5x3x6x4x3xf32>, %arg2: tensor) { // CHECK: -> tensor<2x6x4x7x5xf32> - %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = [1, 1, 1], pad = [0, 0, 0, 0, 0, 0], stride = [1, 1, 1]} : (tensor<2x8x9x10x3xf32>, tensor<5x3x6x4x3xf32>, tensor) -> tensor + %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1, 1>, pad = dims<0, 0, 0, 0, 0, 0>, stride = dims<1, 1, 1>} : (tensor<2x8x9x10x3xf32>, tensor<5x3x6x4x3xf32>, tensor) 
-> tensor return } @@ -822,7 +822,7 @@ // CHECK-LABEL: @conv3d_padded func @conv3d_padded(%arg0: tensor<2x8x9x10x3xf32>, %arg1: tensor<5x3x6x4x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x9x11x18x5xf32> - %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = [1, 1, 1], pad = [1, 2, 3, 4, 5, 6], stride = [1, 1, 1]} : (tensor<2x8x9x10x3xf32>, tensor<5x3x6x4x3xf32>, tensor<5xf32>) -> tensor + %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1, 1>, pad = dims<1, 2, 3, 4, 5, 6>, stride = dims<1, 1, 1>} : (tensor<2x8x9x10x3xf32>, tensor<5x3x6x4x3xf32>, tensor<5xf32>) -> tensor return } @@ -831,7 +831,7 @@ // CHECK-LABEL: @conv3d_dilated func @conv3d_dilated(%arg0: tensor<2x12x14x16x3xf32>, %arg1: tensor<5x3x6x2x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x6x4x12x5xf32> - %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = [3, 2, 4], pad = [0, 0, 0, 0, 0, 0], stride = [1, 1, 1]} : (tensor<2x12x14x16x3xf32>, tensor<5x3x6x2x3xf32>, tensor<5xf32>) -> tensor + %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = dims<3, 2, 4>, pad = dims<0, 0, 0, 0, 0, 0>, stride = dims<1, 1, 1>} : (tensor<2x12x14x16x3xf32>, tensor<5x3x6x2x3xf32>, tensor<5xf32>) -> tensor return } @@ -840,7 +840,7 @@ // CHECK-LABEL: @conv3d_strided func @conv3d_strided(%arg0: tensor<1x13x14x15x1xf32>, %arg1: tensor<1x1x1x1x1xf32>, %arg2: tensor<1xf32>) { // CHECK: -> tensor<1x5x7x4x1xf32> - %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = [1, 1, 1], pad = [0, 0, 0, 0, 0, 0], stride = [3, 2, 4]} : (tensor<1x13x14x15x1xf32>, tensor<1x1x1x1x1xf32>, tensor<1xf32>) -> tensor + %0 = "tosa.conv3d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1, 1>, pad = dims<0, 0, 0, 0, 0, 0>, stride = dims<3, 2, 4>} : (tensor<1x13x14x15x1xf32>, tensor<1x1x1x1x1xf32>, tensor<1xf32>) -> tensor return } @@ -849,7 +849,7 @@ // CHECK-LABEL: @depthwise_conv2d_static func @depthwise_conv2d_static(%arg0: tensor<2x8x9x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) { // CHECK: -> tensor<2x6x4x15xf32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x6x4x15xf32> + %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x6x4x15xf32> return } @@ -858,7 +858,7 @@ // CHECK-LABEL: @depthwise_conv2d_dynamic_input func @depthwise_conv2d_dynamic_input(%arg0: tensor, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) { // CHECK: -> tensor - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor + %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>} : (tensor, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor return } @@ -867,7 +867,7 @@ // CHECK-LABEL: @depthwise_conv2d_dynamic_weight func @depthwise_conv2d_dynamic_weight(%arg0: tensor<2x8x9x3xf32>, %arg1: tensor, %arg2: tensor<15xf32>) { // CHECK: -> tensor<2x?x?x15xf32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<2x8x9x3xf32>, tensor, tensor<15xf32>) -> tensor<2x?x?x15xf32> + %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>} : (tensor<2x8x9x3xf32>, tensor, tensor<15xf32>) -> tensor<2x?x?x15xf32> return } @@ -876,7 
+876,7 @@ // CHECK-LABEL: @depthwise_conv2d_dynamic_bias func @depthwise_conv2d_dynamic_bias(%arg0: tensor<2x8x9x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor) { // CHECK: -> tensor<2x6x4x15xf32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor) -> tensor<2x6x4x15xf32> + %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor) -> tensor<2x6x4x15xf32> return } @@ -885,7 +885,7 @@ // CHECK-LABEL: @depthwise_conv2d_padded func @depthwise_conv2d_padded(%arg0: tensor<2x8x9x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) { // CHECK: -> tensor<2x9x11x15xf32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [1, 2, 3, 4], stride = [1, 1]} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x9x11x15xf32> + %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<1, 2, 3, 4>, stride = dims<1, 1>} : (tensor<2x8x9x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x9x11x15xf32> return } @@ -894,7 +894,7 @@ // CHECK-LABEL: @depthwise_conv2d_dilated func @depthwise_conv2d_dilated(%arg0: tensor<2x12x14x3xf32>, %arg1: tensor<3x6x3x5xf32>, %arg2: tensor<15xf32>) { // CHECK: -> tensor<2x6x4x15xf32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [3, 2], pad = [0, 0, 0, 0], stride = [1, 1]} : (tensor<2x12x14x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x6x4x15xf32> + %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<3, 2>, pad = dims<0, 0, 0, 0>, stride = dims<1, 1>} : (tensor<2x12x14x3xf32>, tensor<3x6x3x5xf32>, tensor<15xf32>) -> tensor<2x6x4x15xf32> return } @@ -903,7 +903,7 @@ // CHECK-LABEL: @depthwise_conv2d_strided func @depthwise_conv2d_strided(%arg0: tensor<1x13x14x1xf32>, %arg1: tensor<1x1x1x1xf32>, %arg2: tensor<1xf32>) { // CHECK: -> tensor<1x5x7x1xf32> - %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [3, 2]} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x5x7x1xf32> + %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, pad = dims<0, 0, 0, 0>, stride = dims<3, 2>} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x5x7x1xf32> return } @@ -912,7 +912,7 @@ // CHECK-LABEL: @transpose_conv2d_out_shape func @transpose_conv2d_out_shape(%arg0: tensor<2x?x?x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x8x9x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], out_pad = [0, 0], out_shape = [-1, 8, 9, -1], stride = [1, 1]} : (tensor<2x?x?x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x8x9x5xf32> + %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, out_pad = dims<0, 0>, out_shape = dims<-1, 8, 9, -1>, stride = dims<1, 1>} : (tensor<2x?x?x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x8x9x5xf32> return } @@ -921,7 +921,7 @@ // CHECK-LABEL: @transpose_conv2d_static func @transpose_conv2d_static(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x18x19x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], out_pad = [0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32> + 
%0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, out_pad = dims<0, 0>, out_shape = dims<-1, -1, -1, -1>, stride = dims<1, 1>} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32> return } @@ -930,7 +930,7 @@ // CHECK-LABEL: @transpose_conv2d_static_dilated func @transpose_conv2d_static_dilated(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x20x29x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [2, 3], out_pad = [0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32> + %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<2, 3>, out_pad = dims<0, 0>, out_shape = dims<-1, -1, -1, -1>, stride = dims<1, 1>} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32> return } @@ -939,7 +939,7 @@ // CHECK-LABEL: @transpose_conv2d_static_strided func @transpose_conv2d_static_strided(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x33x45x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], out_pad = [0, 0], out_shape = [-1, -1, -1, -1], stride = [2, 3]} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32> + %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, out_pad = dims<0, 0>, out_shape = dims<-1, -1, -1, -1>, stride = dims<2, 3>} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32> return } @@ -948,7 +948,7 @@ // CHECK-LABEL: @transpose_conv2d_dynamic_input func @transpose_conv2d_dynamic_input(%arg0: tensor, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], out_pad = [0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor + %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, out_pad = dims<0, 0>, out_shape = dims<-1, -1, -1, -1>, stride = dims<1, 1>} : (tensor, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor return } @@ -957,7 +957,7 @@ // CHECK-LABEL: @transpose_conv2d_dynamic_weights func @transpose_conv2d_dynamic_weights(%arg0: tensor<2x6x4x3xf32>, %arg1: tensor, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x?x?x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], out_pad = [0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x6x4x3xf32>, tensor, tensor<5xf32>) -> tensor<2x?x?x5xf32> + %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, out_pad = dims<0, 0>, out_shape = dims<-1, -1, -1, -1>, stride = dims<1, 1>} : (tensor<2x6x4x3xf32>, tensor, tensor<5xf32>) -> tensor<2x?x?x5xf32> return } @@ -966,7 +966,7 @@ // CHECK-LABEL: @transpose_conv2d_dynamic_bias func @transpose_conv2d_dynamic_bias(%arg0: tensor<2x6x4x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor) { // CHECK: -> tensor<2x8x9x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], out_pad = [0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x6x4x3xf32>, tensor<5x3x6x3xf32>, tensor) -> tensor<2x8x9x5xf32> + %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, out_pad = dims<0, 0>, out_shape = dims<-1, -1, -1, -1>, stride = dims<1, 1>} : (tensor<2x6x4x3xf32>, tensor<5x3x6x3xf32>, tensor) -> tensor<2x8x9x5xf32> return } @@ -975,7 
+975,7 @@ // CHECK-LABEL: @transpose_conv2d_padded func @transpose_conv2d_padded(%arg0: tensor<2x9x11x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x10x13x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], out_pad = [1, 3], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x9x11x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x10x13x5xf32> + %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, out_pad = dims<1, 3>, out_shape = dims<-1, -1, -1, -1>, stride = dims<1, 1>} : (tensor<2x9x11x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x10x13x5xf32> return } @@ -984,7 +984,7 @@ // CHECK-LABEL: @transpose_conv2d_dilated func @transpose_conv2d_dilated(%arg0: tensor<2x6x4x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) { // CHECK: -> tensor<2x12x14x5xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [3, 2], out_pad = [0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x6x4x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x12x14x5xf32> + %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<3, 2>, out_pad = dims<0, 0>, out_shape = dims<-1, -1, -1, -1>, stride = dims<1, 1>} : (tensor<2x6x4x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x12x14x5xf32> return } @@ -993,7 +993,7 @@ // CHECK-LABEL: @transpose_conv2d_strided func @transpose_conv2d_strided(%arg0: tensor<1x5x7x1xf32>, %arg1: tensor<1x1x1x1xf32>, %arg2: tensor<1xf32>) { // CHECK: -> tensor<1x13x13x1xf32> - %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], out_pad = [0, 0], out_shape = [-1, -1, -1, -1], stride = [3, 2]} : (tensor<1x5x7x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x13x13x1xf32> + %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {dilation = dims<1, 1>, out_pad = dims<0, 0>, out_shape = dims<-1, -1, -1, -1>, stride = dims<3, 2>} : (tensor<1x5x7x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x13x13x1xf32> return } @@ -1002,7 +1002,7 @@ // CHECK-LABEL: @resize_output_size func @resize_output_size(%arg0: tensor<2x?x?x3xi32>) { // CHECK: -> tensor<2x4x5x3xi32> - %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = [0, 1], offset_fp = [0.000000e+00 : f32, 0.000000e+00 : f32], output_size = [4, 5], shift = 8 : i32, stride = [1, 1], stride_fp = [0.000000e+00 : f32, 0.000000e+00 : f32]} : (tensor<2x?x?x3xi32>) -> tensor + %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = dims<0, 1>, offset_fp = [0.000000e+00 : f32, 0.000000e+00 : f32], output_size = dims<4, 5>, shift = 8 : i32, stride = dims<1, 1>, stride_fp = [0.000000e+00 : f32, 0.000000e+00 : f32]} : (tensor<2x?x?x3xi32>) -> tensor return } @@ -1011,7 +1011,7 @@ // CHECK-LABEL: @resize_int_horizontal func @resize_int_horizontal(%arg0: tensor<1x2x4x1xi32>) { // CHECK: -> tensor<1x2x7x1xi32> - %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = [0, 0], offset_fp = [0.000000e+00 : f32, 0.000000e+00 : f32], output_size = [-1, -1], shift = 8 : i32, stride = [256, 128], stride_fp = [0.000000e+00 : f32, 0.000000e+00 : f32]} : (tensor<1x2x4x1xi32>) -> tensor + %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = dims<0, 0>, offset_fp = [0.000000e+00 : f32, 0.000000e+00 : f32], output_size = dims<-1, -1>, shift = 8 : i32, stride = dims<256, 128>, stride_fp = [0.000000e+00 : f32, 0.000000e+00 : f32]} : (tensor<1x2x4x1xi32>) -> tensor return } @@ -1020,7 +1020,7 @@ // CHECK-LABEL: @resize_int_vertical func @resize_int_vertical(%arg0: 
tensor<1x2x4x1xi32>) { // CHECK: -> tensor<1x3x4x1xi32> - %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = [0, 0], offset_fp = [0.000000e+00 : f32, 0.000000e+00 : f32], output_size = [-1, -1], shift = 8 : i32, stride = [128, 256], stride_fp = [0.000000e+00 : f32, 0.000000e+00 : f32]} : (tensor<1x2x4x1xi32>) -> tensor + %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = dims<0, 0>, offset_fp = [0.000000e+00 : f32, 0.000000e+00 : f32], output_size = dims<-1, -1>, shift = 8 : i32, stride = dims<128, 256>, stride_fp = [0.000000e+00 : f32, 0.000000e+00 : f32]} : (tensor<1x2x4x1xi32>) -> tensor return } @@ -1029,7 +1029,7 @@ // CHECK-LABEL: @resize_int_offsetted func @resize_int_offsetted(%arg0: tensor<1x2x4x1xi32>) { // CHECK: -> tensor<1x4x6x1xi32> - %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = [64, 64], offset_fp = [0.000000e+00 : f32, 0.000000e+00 : f32], output_size = [-1, -1], shift = 8 : i32, stride = [64, 128], stride_fp = [0.000000e+00 : f32, 0.000000e+00 : f32]} : (tensor<1x2x4x1xi32>) -> tensor + %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = dims<64, 64>, offset_fp = [0.000000e+00 : f32, 0.000000e+00 : f32], output_size = dims<-1, -1>, shift = 8 : i32, stride = dims<64, 128>, stride_fp = [0.000000e+00 : f32, 0.000000e+00 : f32]} : (tensor<1x2x4x1xi32>) -> tensor return } @@ -1038,7 +1038,7 @@ // CHECK-LABEL: @resize_fp_horizontal func @resize_fp_horizontal(%arg0: tensor<1x2x4x1xi32>) { // CHECK: -> tensor<1x2x7x1xi32> - %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = [0, 0], offset_fp = [0.000000e+00 : f32, 0.000000e+00 : f32], output_size = [-1, -1], shift = 0 : i32, stride = [0, 0], stride_fp = [1.000000e+00 : f32, 5.000000e-01 : f32]} : (tensor<1x2x4x1xi32>) -> tensor + %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = dims<0, 0>, offset_fp = [0.000000e+00 : f32, 0.000000e+00 : f32], output_size = dims<-1, -1>, shift = 0 : i32, stride = dims<0, 0>, stride_fp = [1.000000e+00 : f32, 5.000000e-01 : f32]} : (tensor<1x2x4x1xi32>) -> tensor return } @@ -1047,13 +1047,13 @@ // CHECK-LABEL: @resize_fp_vertical func @resize_fp_vertical(%arg0: tensor<1x2x4x1xi32>) { // CHECK: -> tensor<1x3x4x1xi32> - %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = [0, 0], offset_fp = [0.000000e+00 : f32, 0.000000e+00 : f32], output_size = [-1, -1], shift = 0 : i32, stride = [0, 0], stride_fp = [5.000000e-01 : f32, 1.000000e+00 : f32]} : (tensor<1x2x4x1xi32>) -> tensor + %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = dims<0, 0>, offset_fp = [0.000000e+00 : f32, 0.000000e+00 : f32], output_size = dims<-1, -1>, shift = 0 : i32, stride = dims<0, 0>, stride_fp = [5.000000e-01 : f32, 1.000000e+00 : f32]} : (tensor<1x2x4x1xi32>) -> tensor return } // CHECK-LABEL: @resize_fp_offsetted func @resize_fp_offsetted(%arg0: tensor<1x2x4x1xi32>) { // CHECK: -> tensor<1x4x6x1xi32> - %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = [0, 0], offset_fp = [2.500000e-01 : f32, 2.500000e-01 : f32], output_size = [-1, -1], shift = 0 : i32, stride = [0, 0], stride_fp = [2.500000e-01 : f32, 5.000000e-01 : f32]} : (tensor<1x2x4x1xi32>) -> tensor + %0 = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", offset = dims<0, 0>, offset_fp = [2.500000e-01 : f32, 2.500000e-01 : f32], output_size = dims<-1, -1>, shift = 0 : i32, stride = dims<0, 0>, stride_fp = [2.500000e-01 : f32, 5.000000e-01 : f32]} : (tensor<1x2x4x1xi32>) -> tensor return } diff --git a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp 
b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp --- a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp +++ b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp @@ -148,8 +148,8 @@ auto newTosaConv2DOp = rewriter.create<tosa::Conv2DOp>( op->getLoc(), newTosaConv2DOpType, tosaConv2DOp.input(), - tosaConv2DOp.weight(), tosaConv2DOp.bias(), tosaConv2DOp.pad(), - tosaConv2DOp.stride(), tosaConv2DOp.dilation()); + tosaConv2DOp.weight(), tosaConv2DOp.bias(), tosaConv2DOp.padAttr(), + tosaConv2DOp.strideAttr(), tosaConv2DOp.dilationAttr()); // Create rescale to quantized type double inputScale = inputQType.getScale(); diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel --- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel @@ -6902,6 +6902,7 @@ name = "TosaDialectTdFiles", srcs = glob(["include/mlir/Dialect/Tosa/IR/*.td"]), deps = [ + ":BuiltinDialectTdFiles", ":InferTypeOpInterfaceTdFiles", ":LoopLikeInterfaceTdFiles", ":OpBaseTdFiles", @@ -6930,11 +6931,17 @@ "include/mlir/Dialect/Tosa/IR/TosaStructs.cpp.inc", ), ( - ["-gen-dialect-decls"], + [ "-gen-dialect-decls", "-dialect=tosa", ], "include/mlir/Dialect/Tosa/IR/TosaOpsDialect.h.inc", ), ( - ["-gen-dialect-defs"], + [ "-gen-dialect-defs", "-dialect=tosa", ], "include/mlir/Dialect/Tosa/IR/TosaOpsDialect.cpp.inc", ), (
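
Note on consuming the migrated attributes (editorial addition, not part of the patch): the integer shape attributes above now come back as DimensionListAttr rather than ArrayAttr, so downstream C++ that walked the old ArrayAttr form needs a small adjustment. The sketch below is illustrative only; it assumes DimensionListAttr is iterable like ArrayRef<int64_t> through its ArrayRef-like attribute trait, and totalPad is a hypothetical helper, not an API introduced by this change.

// Minimal sketch, assuming DimensionListAttr exposes ArrayRef-like
// iteration over int64_t. totalPad is a hypothetical helper.
#include "mlir/Dialect/Tosa/IR/TosaOps.h"

static int64_t totalPad(mlir::tosa::Conv2DOp op) {
  int64_t total = 0;
  // pad = dims<top, bottom, left, right> on tosa.conv2d.
  for (int64_t p : op.padAttr())
    total += p;
  return total;
}

This is also why the TosaTestPasses.cpp hunk above switches to tosaConv2DOp.padAttr(), strideAttr(), and dilationAttr(): passing the attribute accessors keeps the builder call well-typed after the attribute migration.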