diff --git a/flang/include/flang/Optimizer/Dialect/FIRTypes.td b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
--- a/flang/include/flang/Optimizer/Dialect/FIRTypes.td
+++ b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
@@ -128,7 +128,7 @@
     static constexpr LenType singleton() { return 1; }
     /// Character has a LEN value which is not a compile-time known constant.
-    static constexpr LenType unknownLen() { return -1; }
+    static constexpr LenType unknownLen() { return mlir::ShapedType::kDynamicSize; }
     /// Character LEN is a runtime value.
     bool hasDynamicLen() { return getLen() == unknownLen(); }
diff --git a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
--- a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
@@ -410,7 +410,8 @@
   auto affineApply = rewriter.create<mlir::AffineApplyOp>(acoOp.getLoc(),
                                                           affineMap, indexArgs);
   auto arrayElementType = coordinateArrayElement(acoOp);
-  auto newType = mlir::MemRefType::get({-1}, arrayElementType);
+  auto newType =
+      mlir::MemRefType::get({mlir::ShapedType::kDynamicSize}, arrayElementType);
   auto arrayConvert = rewriter.create<fir::ConvertOp>(acoOp.getLoc(), newType,
                                                       acoOp.getMemref());
   return std::make_pair(affineApply, arrayConvert);
diff --git a/mlir/lib/AsmParser/TypeParser.cpp b/mlir/lib/AsmParser/TypeParser.cpp
--- a/mlir/lib/AsmParser/TypeParser.cpp
+++ b/mlir/lib/AsmParser/TypeParser.cpp
@@ -510,7 +510,7 @@
     if (consumeIf(Token::question)) {
       if (!allowDynamic)
         return emitError(loc, "expected static shape");
-      dimensions.push_back(-1);
+      dimensions.push_back(ShapedType::kDynamicSize);
     } else {
       int64_t value;
       if (failed(parseIntegerInDimensionList(value)))
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -843,7 +843,7 @@
                               bool isDynamic) {
  if (isDynamic) {
    // TODO (natashaknk): Make dynamic intermediate shape not always be rank-1
-    intermediateShape = {-1};
+    intermediateShape = {ShapedType::kDynamicSize};
    return true;
  }
@@ -1778,7 +1778,8 @@
    // Broadcast the newly added dimensions to their appropriate multiple.
    SmallVector<int64_t> genericShape;
    for (int i = 0; i < rank; i++) {
-      genericShape.push_back(multiples[i]);
+      int64_t dim = multiples[i];
+      genericShape.push_back(dim == -1 ? ShapedType::kDynamicSize : dim);
      genericShape.push_back(inputShape[i]);
    }
diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -30,16 +30,16 @@
                                 PatternRewriter &rewriter) const final {
    Location loc = sliceOp.getLoc();
    Value input = sliceOp.getInput();
-    SmallVector<int64_t> strides;
+    SmallVector<int64_t> strides, sizes;
    auto starts = sliceOp.getStart();
-    auto sizes = sliceOp.getSize();
    strides.resize(sliceOp.getType().template cast<ShapedType>().getRank(), 1);
    SmallVector<Value> dynSizes;
-    for (const auto &i : llvm::enumerate(sizes)) {
+    for (const auto &i : llvm::enumerate(sliceOp.getSize())) {
      int64_t size = i.value().cast<IntegerAttr>().getInt();
      size_t index = i.index();
-      if (size != ShapedType::kDynamicSize)
+      sizes.push_back(size == -1 ? ShapedType::kDynamicSize : size);
+      if (!ShapedType::isDynamic(sizes.back()))
        continue;

      auto dim = rewriter.create<tensor::DimOp>(loc, input, index);
@@ -51,7 +51,8 @@
    auto newSliceOp = rewriter.create<tensor::ExtractSliceOp>(
        sliceOp.getLoc(), sliceOp.getType(), input, ValueRange({}), dynSizes,
-        ValueRange({}), starts, sizes, rewriter.getI64ArrayAttr(strides));
+        ValueRange({}), starts, rewriter.getI64ArrayAttr(sizes),
+        rewriter.getI64ArrayAttr(strides));

    rewriter.replaceOp(sliceOp, newSliceOp.getResult());
    return success();
diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -1796,7 +1796,7 @@
    bool isDynDim =
        isNormalizedMemRefDynamicDim(d, layoutMap, memrefTypeDynDims, context);
    if (isDynDim) {
-      newShape[d] = -1;
+      newShape[d] = ShapedType::kDynamicSize;
    } else {
      // The lower bound for the shape is always zero.
      Optional<int64_t> ubConst =
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
@@ -266,11 +266,12 @@
  // We parsed a generic dimension list, but vectors only support two forms:
  //  - single non-dynamic entry in the list (fixed vector);
-  //  - two elements, the first dynamic (indicated by -1) and the second
+  //  - two elements, the first dynamic (indicated by ShapedType::kDynamicSize)
+  //    and the second
  //    non-dynamic (scalable vector).
  if (dims.empty() || dims.size() > 2 ||
-      ((dims.size() == 2) ^ (dims[0] == -1)) ||
-      (dims.size() == 2 && dims[1] == -1)) {
+      ((dims.size() == 2) ^ (dims[0] == ShapedType::kDynamicSize)) ||
+      (dims.size() == 2 && dims[1] == ShapedType::kDynamicSize)) {
    parser.emitError(dimPos)
        << "expected '? x <integer> x <type>' or '<integer> x <type>'";
    return Type();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -64,7 +64,8 @@
  }

  // Fallback dynamic buffer.
-  auto dynamicBufferType = MemRefType::get(-1, b.getIntegerType(8));
+  auto dynamicBufferType =
+      MemRefType::get(ShapedType::kDynamicSize, b.getIntegerType(8));
  Value mul = b.createOrFold<arith::MulIOp>(
      b.create<arith::ConstantIndexOp>(width), allocSize);
  if (options.useAlloca)
@@ -242,7 +243,7 @@
    partialSizes.push_back(
        b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
  }
-  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), -1);
+  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamicSize);
  // If a callback is not specified, then use the default implementation for
  // allocating the promoted buffer.
  Optional<Value> fullLocalView = allocationFn(b, subView, fullSizes, layout);
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -185,7 +185,7 @@
    for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
      int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already static dimension, keep it.
-      if (dimSize != -1) {
+      if (dimSize != ShapedType::kDynamicSize) {
        newShapeConstants.push_back(dimSize);
        continue;
      }
@@ -197,7 +197,7 @@
        newShapeConstants.push_back(constantIndexOp.value());
      } else {
        // Dynamic shape dimension not folded; copy dynamicSize from old memref.
-        newShapeConstants.push_back(-1);
+        newShapeConstants.push_back(ShapedType::kDynamicSize);
        dynamicSizes.push_back(dynamicSize);
      }
      dynamicDimPos++;
@@ -666,7 +666,8 @@
  for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
    int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
-    if (aDim != -1 && bDim != -1 && aDim != bDim)
+    if (aDim != ShapedType::kDynamicSize &&
+        bDim != ShapedType::kDynamicSize && aDim != bDim)
      return false;
  }
  return true;
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -512,7 +512,7 @@
            "sum of all the concatenation dimensions of the input tensors.");
      }
    } else {
-      int prev = dstDim;
+      int64_t prev = dstDim;
      for (auto src : getInputs()) {
        auto d = src.getType().cast<RankedTensorType>().getShape()[i];
        if (prev != ShapedType::kDynamicSize && d != prev)
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -434,7 +434,7 @@
  }

  // Determine the dimension size along the concatenation axis.
-  int concatDimSize = 0;
+  int64_t concatDimSize = 0;
  for (auto operand : operands) {
    ShapeAdaptor operandShape = operands.getShape(operand);
@@ -645,7 +645,7 @@
  // Any non dynamic dimension can be multiplied to a known size.
  outputShape.reserve(multiples.size());
  for (int i = 0, s = inputShape.getRank(); i < s; i++) {
-    int dim = inputShape.getDimSize(i);
+    int64_t dim = inputShape.getDimSize(i);
    if (dim != ShapedType::kDynamicSize)
      dim *= multipleValues[i];
    outputShape.push_back(dim);
@@ -655,6 +655,12 @@
  return success();
}

+static SmallVector<int64_t> ConvertToMlirShape(ArrayRef<int64_t> shape) {
+  return to_vector(llvm::map_range(shape, [](int64_t dim) {
+    return dim == -1 ? ShapedType::kDynamicSize : dim;
+  }));
+}
+
LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
    MLIRContext *context, ::llvm::Optional<Location> location,
    ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
@@ -665,6 +671,7 @@
  ArrayAttr newShape = adaptor.getNewShape();
  llvm::SmallVector<int64_t> newShapeValue;
  getI64Values(newShape, newShapeValue);
+  newShapeValue = ConvertToMlirShape(newShapeValue);

  // We cannot infer from the total number of elements so we must take the
  // shape attribute as exact.
@@ -679,14 +686,14 @@
  int64_t numElements = inputShape.getNumElements();
  int64_t staticMul = 1;
  for (auto val : newShapeValue) {
-    if (val != ShapedType::kDynamicSize) {
+    if (!ShapedType::isDynamic(val)) {
      staticMul *= val;
    }
  }

  // Determine the length of the dynamic dimension.
  for (auto &val : newShapeValue) {
-    if (val == ShapedType::kDynamicSize)
+    if (ShapedType::isDynamic(val))
      val = numElements / staticMul;
  }
@@ -800,8 +807,8 @@
  outputShape[0] = inputShape.getDimSize(0);
  outputShape[3] = inputShape.getDimSize(3);

-  int32_t inputHeight = inputShape.getDimSize(1);
-  int32_t inputWidth = inputShape.getDimSize(2);
+  int64_t inputHeight = inputShape.getDimSize(1);
+  int64_t inputWidth = inputShape.getDimSize(2);

  if ((inputHeight == ShapedType::kDynamicSize) ||
      (inputWidth == ShapedType::kDynamicSize))
@@ -961,7 +968,7 @@
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  ShapeAdaptor inputShape = operands.getShape(0);
  llvm::SmallVector<int64_t> outputShape;
-  outputShape.resize(4, -1);
+  outputShape.resize(4, ShapedType::kDynamicSize);

  // We only know the rank if the input type is unranked.
  if (!inputShape) {
@@ -973,8 +980,8 @@
  outputShape[0] = inputShape.getDimSize(0);
  outputShape[3] = inputShape.getDimSize(3);

-  int32_t height = inputShape.getDimSize(1);
-  int32_t width = inputShape.getDimSize(2);
+  int64_t height = inputShape.getDimSize(1);
+  int64_t width = inputShape.getDimSize(2);

  llvm::SmallVector<int64_t> kernel;
  llvm::SmallVector<int64_t> stride;
@@ -984,13 +991,13 @@
  getI64Values(attributes.get("stride").cast<ArrayAttr>(), stride);
  getI64Values(attributes.get("pad").cast<ArrayAttr>(), pad);

-  if (height != -1) {
-    int32_t padded = height + pad[0] + pad[1] - kernel[0];
+  if (!ShapedType::isDynamic(height)) {
+    int64_t padded = height + pad[0] + pad[1] - kernel[0];
    outputShape[1] = padded / stride[0] + 1;
  }

-  if (width != -1) {
-    int32_t padded = width + pad[2] + pad[3] - kernel[1];
+  if (!ShapedType::isDynamic(width)) {
+    int64_t padded = width + pad[2] + pad[3] - kernel[1];
    outputShape[2] = padded / stride[1] + 1;
  }
@@ -1005,10 +1012,10 @@
  llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamicSize);
  Conv2DOp::Adaptor adaptor(operands.getValues(), attributes);

-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;

  // Input shape describes input width/height and batch.
@@ -1045,17 +1052,17 @@
  if (!ShapedType::isDynamic(inputHeight) &&
      !ShapedType::isDynamic(weightHeight)) {
-    int32_t inputSize = inputHeight + padding[0] + padding[1];
-    int32_t filterSize = (weightHeight - 1) * dilation[0] + 1;
-    int32_t unstridedResult = inputSize - filterSize + 1;
+    int64_t inputSize = inputHeight + padding[0] + padding[1];
+    int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
+    int64_t unstridedResult = inputSize - filterSize + 1;
    outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
  }

  if (!ShapedType::isDynamic(inputWidth) &&
      !ShapedType::isDynamic(weightWidth)) {
-    int32_t inputSize = inputWidth + padding[2] + padding[3];
-    int32_t filterSize = (weightWidth - 1) * dilation[1] + 1;
-    int32_t unstridedResult = inputSize - filterSize + 1;
+    int64_t inputSize = inputWidth + padding[2] + padding[3];
+    int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
+    int64_t unstridedResult = inputSize - filterSize + 1;
    outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
  }
@@ -1072,13 +1079,13 @@
  llvm::SmallVector<int64_t> outputShape(5, ShapedType::kDynamicSize);
  Conv3DOp::Adaptor adaptor(operands.getValues(), attributes);

-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t inputDepth = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t inputDepth = ShapedType::kDynamicSize;

-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
-  int32_t weightDepth = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;
+  int64_t weightDepth = ShapedType::kDynamicSize;

  // Input shape describes input width/height and batch.
  ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1163,13 +1170,13 @@
  llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamicSize);
  DepthwiseConv2DOp::Adaptor adaptor(operands.getValues(), attributes);

-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t inputChannels = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t inputChannels = ShapedType::kDynamicSize;

-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
-  int32_t depthChannels = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;
+  int64_t depthChannels = ShapedType::kDynamicSize;

  // Input shape describes input width/height and batch.
  ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1216,17 +1223,17 @@
  if (!ShapedType::isDynamic(inputHeight) &&
      !ShapedType::isDynamic(weightHeight)) {
-    int32_t inputSize = inputHeight + padding[0] + padding[1];
-    int32_t filterSize = (weightHeight - 1) * dilation[0] + 1;
-    int32_t unstridedResult = inputSize - filterSize + 1;
+    int64_t inputSize = inputHeight + padding[0] + padding[1];
+    int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
+    int64_t unstridedResult = inputSize - filterSize + 1;
    outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
  }

  if (!ShapedType::isDynamic(inputWidth) &&
      !ShapedType::isDynamic(weightWidth)) {
-    int32_t inputSize = inputWidth + padding[2] + padding[3];
-    int32_t filterSize = (weightWidth - 1) * dilation[1] + 1;
-    int32_t unstridedResult = inputSize - filterSize + 1;
+    int64_t inputSize = inputWidth + padding[2] + padding[3];
+    int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
+    int64_t unstridedResult = inputSize - filterSize + 1;
    outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
  }
@@ -1243,11 +1250,12 @@
  TransposeConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
  llvm::SmallVector<int64_t> outputShape;
  getI64Values(adaptor.getOutShape(), outputShape);
+  outputShape = ConvertToMlirShape(outputShape);

-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;

  // Input shape describes input width/height and batch.
  ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1285,16 +1293,18 @@
  if (!ShapedType::isDynamic(inputHeight) &&
      !ShapedType::isDynamic(weightHeight)) {
-    int32_t calculateSize =
+    int64_t calculateSize =
        (inputHeight - 1) * stride[0] - padding[0] - padding[1] + weightHeight;
-    outputShape[1] = outputShape[1] == -1 ? calculateSize : outputShape[1];
+    outputShape[1] =
+        ShapedType::isDynamic(outputShape[1]) ? calculateSize : outputShape[1];
  }

  if (!ShapedType::isDynamic(inputWidth) &&
      !ShapedType::isDynamic(weightWidth)) {
-    int32_t calculateSize =
+    int64_t calculateSize =
        (inputWidth - 1) * stride[1] - padding[2] - padding[3] + weightWidth;
-    outputShape[2] = outputShape[2] == -1 ? calculateSize : outputShape[2];
+    outputShape[2] =
+        ShapedType::isDynamic(outputShape[2]) ? calculateSize : outputShape[2];
  }

  inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
@@ -19,6 +19,12 @@

namespace {

+SmallVector<int64_t> ConvertFromMlirShape(ArrayRef<int64_t> shape) {
+  return to_vector(llvm::map_range(shape, [](int64_t dim) {
+    return ShapedType::isDynamic(dim) ? -1 : dim;
+  }));
+}
+
struct Conv2DIsFullyConnected : public OpRewritePattern<tosa::Conv2DOp> {
  explicit Conv2DIsFullyConnected(MLIRContext *context)
      : OpRewritePattern<tosa::Conv2DOp>(context) {}
@@ -52,16 +58,17 @@
    // Reshape input to [N,IH,IW,IC] -> [N * IH * IW, IC].
    ArrayRef<int64_t> inputShape = inputType.getShape();
-    int64_t combined = inputShape[0] * inputShape[1] * inputShape[2];
-    if (combined < 0)
-      combined = ShapedType::kDynamicSize;
+    int64_t combined = ShapedType::kDynamicSize;
+    if (numDynamic == 0)
+      combined = inputShape[0] * inputShape[1] * inputShape[2];
    llvm::SmallVector<int64_t> revisedInputShape{combined, inputShape[3]};
    auto revisedInputShapeType =
        RankedTensorType::get(revisedInputShape, inputType.getElementType());
    auto reshapedInput = rewriter
                             .create<tosa::ReshapeOp>(
                                 op.getLoc(), revisedInputShapeType, input,
-                                 rewriter.getI64ArrayAttr(revisedInputShape))
+                                 rewriter.getI64ArrayAttr(
+                                     ConvertFromMlirShape(revisedInputShape)))
                             .getResult();

    // Reshape kernel to [OC,KH,KW,IC] -> [OC, IC].
@@ -73,7 +80,8 @@
    auto reshapedWeight = rewriter
                              .create<tosa::ReshapeOp>(
                                  op.getLoc(), revisedWeightShapeType, weight,
-                                  rewriter.getI64ArrayAttr(revisedWeightShape))
+                                  rewriter.getI64ArrayAttr(
+                                      ConvertFromMlirShape(revisedWeightShape)))
                              .getResult();

    // Perform a fully connected network over the reshaped input and weight.
@@ -102,7 +110,7 @@
                                            inputShape[2], weightShape[0]};
    rewriter.replaceOpWithNewOp<tosa::ReshapeOp>(
        op, resultType, fullyConnectedValue,
-        rewriter.getI64ArrayAttr(outputShape));
+        rewriter.getI64ArrayAttr(ConvertFromMlirShape(outputShape)));

    return success();
  }
};
diff --git a/mlir/lib/Dialect/Traits.cpp b/mlir/lib/Dialect/Traits.cpp
--- a/mlir/lib/Dialect/Traits.cpp
+++ b/mlir/lib/Dialect/Traits.cpp
@@ -80,7 +80,7 @@
  // Check each dimension is consistent.
  for (; i1 != e1 && i2 != e2; ++i1, ++i2, ++iR) {
-    if (*i1 == -1 || *i2 == -1) {
+    if (*i1 == ShapedType::kDynamicSize || *i2 == ShapedType::kDynamicSize) {
      // One or both dimensions is unknown. Follow TensorFlow behavior:
      // - If either dimension is greater than 1, we assume that the program is
      //   correct, and the other dimension will be broadcast to match it.
@@ -94,7 +94,7 @@
      } else if (*i2 == 1) {
        *iR = *i1;
      } else {
-        *iR = -1;
+        *iR = ShapedType::kDynamicSize;
      }
    } else {
      if (*i1 == *i2 || *i2 == 1) {
@@ -199,7 +199,8 @@
    // then it is compatible, else if the inferred dim is 1 then it is also
    // compatible. But if the existing dim is 1 and the inferred is greater than
    // 1 then flag.
-    return dim1 == dim2 || dim1 == -1 || dim2 == -1 || dim1 == 1;
+    return dim1 == dim2 || dim1 == ShapedType::kDynamicSize ||
+           dim2 == ShapedType::kDynamicSize || dim1 == 1;
  };
  if (inferred.size() != existing.size())
    return false;
diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -335,7 +335,7 @@
                             ArrayRef<int64_t> shape, Type elementType,
                             Attribute encoding) {
  for (int64_t s : shape)
-    if (s < -1)
+    if (s < 0 && s != ShapedType::kDynamicSize)
      return emitError() << "invalid tensor dimension size";
  if (auto v = encoding.dyn_cast_or_null<VerifiableTensorEncoding>())
    if (failed(v.verifyEncoding(shape, elementType, emitError)))
@@ -656,9 +656,9 @@
  if (!BaseMemRefType::isValidElementType(elementType))
    return emitError() << "invalid memref element type";

-  // Negative sizes are not allowed except for `-1` that means dynamic size.
+  // Negative sizes are not allowed except for `kDynamicSize`.
  for (int64_t s : shape)
-    if (s < -1)
+    if (s < 0 && s != ShapedType::kDynamicSize)
      return emitError() << "invalid memref size";

  assert(layout && "missing layout specification");
diff --git a/mlir/python/mlir/dialects/_tensor_ops_ext.py b/mlir/python/mlir/dialects/_tensor_ops_ext.py
--- a/mlir/python/mlir/dialects/_tensor_ops_ext.py
+++ b/mlir/python/mlir/dialects/_tensor_ops_ext.py
@@ -30,7 +30,7 @@
      if isinstance(s, int):
        static_sizes.append(s)
      else:
-        static_sizes.append(-1)
+        static_sizes.append(ShapedType.get_dynamic_size())
        dynamic_sizes.append(s)
    result_type = RankedTensorType.get(static_sizes, element_type)
    op = self.build_generic(
diff --git a/mlir/test/python/dialects/linalg/ops.py b/mlir/test/python/dialects/linalg/ops.py
--- a/mlir/test/python/dialects/linalg/ops.py
+++ b/mlir/test/python/dialects/linalg/ops.py
@@ -23,7 +23,8 @@
      # CHECK-NEXT: %[[CST:.*]] = arith.constant 0.0{{.*}} : f32
      # CHECK-NEXT: %[[RES:.*]] = linalg.fill ins(%[[CST]] : f32) outs(%[[OUT]] : tensor<12x?xf32>) -> tensor<12x?xf32>
      # CHECK-NEXT: return %[[RES]] : tensor<12x?xf32>
-      @func.FuncOp.from_py_func(RankedTensorType.get((12, -1), f32))
+      @func.FuncOp.from_py_func(
+          RankedTensorType.get((12, ShapedType.get_dynamic_size()), f32))
      def fill_tensor(out):
        zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result
        return linalg.fill(zero, outs=[out])
@@ -33,7 +34,8 @@
      # CHECK-NEXT: %[[CST:.*]] = arith.constant 0.0{{.*}} : f32
      # CHECK-NEXT: linalg.fill ins(%[[CST]] : f32) outs(%[[OUT]] : memref<12x?xf32>)
      # CHECK-NEXT: return
-      @func.FuncOp.from_py_func(MemRefType.get((12, -1), f32))
+      @func.FuncOp.from_py_func(
+          MemRefType.get((12, ShapedType.get_dynamic_size()), f32))
      def fill_buffer(out):
        zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result
        linalg.fill(zero, outs=[out])
diff --git a/mlir/test/python/dialects/shape.py b/mlir/test/python/dialects/shape.py
--- a/mlir/test/python/dialects/shape.py
+++ b/mlir/test/python/dialects/shape.py
@@ -20,7 +20,7 @@
    f32 = F32Type.get()
    with InsertionPoint(module.body):
      @func.FuncOp.from_py_func(
-          RankedTensorType.get((12, -1), f32))
+          RankedTensorType.get((12, ShapedType.get_dynamic_size()), f32))
      def const_shape_tensor(arg):
        return shape.ConstShapeOp(
            DenseElementsAttr.get(np.array([10, 20], dtype=np.int64), type=IndexType.get()))
diff --git a/mlir/test/python/dialects/tensor.py b/mlir/test/python/dialects/tensor.py
--- a/mlir/test/python/dialects/tensor.py
+++ b/mlir/test/python/dialects/tensor.py
@@ -21,7 +21,10 @@
    indexType = IndexType.get()
    with InsertionPoint(module.body):
-      @func.FuncOp.from_py_func(RankedTensorType.get((-1, -1), f32Type))
+      @func.FuncOp.from_py_func(
+          RankedTensorType.get(
+              (ShapedType.get_dynamic_size(), ShapedType.get_dynamic_size()),
+              f32Type))
      # CHECK: func @tensor_static_dim
      # CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>
      # CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
diff --git a/mlir/test/python/dialects/vector.py b/mlir/test/python/dialects/vector.py
--- a/mlir/test/python/dialects/vector.py
+++ b/mlir/test/python/dialects/vector.py
@@ -35,7 +35,9 @@
  module = Module.create()
  with InsertionPoint(module.body):
    vector_type = VectorType.get([2, 3], F32Type.get())
-    memref_type = MemRefType.get([-1, -1], F32Type.get())
+    memref_type = MemRefType.get(
+        [ShapedType.get_dynamic_size(),
+         ShapedType.get_dynamic_size()], F32Type.get())
    index_type = IndexType.get()
    mask_type = VectorType.get(vector_type.shape, IntegerType.get_signless(1))
    identity_map = AffineMap.get_identity(vector_type.rank)
diff --git a/mlir/unittests/Dialect/BroadcastShapeTest.cpp b/mlir/unittests/Dialect/BroadcastShapeTest.cpp
--- a/mlir/unittests/Dialect/BroadcastShapeTest.cpp
+++ b/mlir/unittests/Dialect/BroadcastShapeTest.cpp
@@ -47,9 +47,10 @@
TEST(BroadcastShapeTest, InterleavingUnknowns) {
  SmallVector<int64_t> result;
-  ASSERT_TRUE(
-      getBroadcastedShape({1, 2, -1, -1, -1}, {-1, -1, -1, 4, 1}, result));
-  EXPECT_THAT(result, ElementsAre(-1, 2, -1, 4, -1));
+  int64_t dyn = mlir::ShapedType::kDynamicSize;
+  ASSERT_TRUE(getBroadcastedShape({1, 2, dyn, dyn, dyn}, {dyn, dyn, dyn, 4, 1},
+                                  result));
+  EXPECT_THAT(result, ElementsAre(dyn, 2, dyn, 4, dyn));
}

TEST(BroadcastShapeTest, IncompatibleLowDim) {
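Not part of the patch: the snippet below is a minimal, standalone sketch of the idiom this change standardizes on, kept here only as an illustration for reviewers. The file name is hypothetical, and it assumes an MLIR tree from the same era, where the sentinel is still spelled ShapedType::kDynamicSize and the Python bindings expose it as ShapedType.get_dynamic_size().

// illustrative_dynamic_dims.cpp (hypothetical example, not in the patch)
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

int main() {
  mlir::MLIRContext context;

  // Build tensor<4x?xf32>: the second dimension is dynamic. The dynamic
  // dimension is written through the ShapedType constant, not a literal -1.
  auto f32 = mlir::FloatType::getF32(&context);
  auto type = mlir::RankedTensorType::get(
      {4, mlir::ShapedType::kDynamicSize}, f32);

  // Query dimensions through the helper instead of comparing against -1.
  for (int64_t dim : type.getShape())
    llvm::outs() << (mlir::ShapedType::isDynamic(dim) ? std::string("?")
                                                      : std::to_string(dim))
                 << " ";
  llvm::outs() << "\n";
  return 0;
}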