diff --git a/flang/include/flang/Optimizer/Dialect/FIRTypes.td b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
--- a/flang/include/flang/Optimizer/Dialect/FIRTypes.td
+++ b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
@@ -128,7 +128,7 @@
   static constexpr LenType singleton() { return 1; }
   /// Character has a LEN value which is not a compile-time known constant.
-  static constexpr LenType unknownLen() { return -1; }
+  static constexpr LenType unknownLen() { return mlir::ShapedType::kDynamicSize; }
   /// Character LEN is a runtime value.
   bool hasDynamicLen() { return getLen() == unknownLen(); }
diff --git a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
--- a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
@@ -410,7 +410,8 @@
   auto affineApply = rewriter.create<mlir::AffineApplyOp>(acoOp.getLoc(), affineMap, indexArgs);
   auto arrayElementType = coordinateArrayElement(acoOp);
-  auto newType = mlir::MemRefType::get({-1}, arrayElementType);
+  auto newType =
+      mlir::MemRefType::get({mlir::ShapedType::kDynamicSize}, arrayElementType);
   auto arrayConvert = rewriter.create<fir::ConvertOp>(acoOp.getLoc(), newType, acoOp.getMemref());
   return std::make_pair(affineApply, arrayConvert);
diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
--- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
+++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
@@ -87,7 +87,8 @@
     // TODO: merge these two special values in a single one used everywhere.
    // Unfortunately, uses of `-1` have crept deep into the codebase now and are
    // hard to track.
-    static constexpr int64_t kDynamicSize = -1;
+    static constexpr int64_t kDynamicSize =
+        std::numeric_limits<int64_t>::min();
    static constexpr int64_t kDynamicStrideOrOffset =
        std::numeric_limits<int64_t>::min();
diff --git a/mlir/lib/AsmParser/TypeParser.cpp b/mlir/lib/AsmParser/TypeParser.cpp
--- a/mlir/lib/AsmParser/TypeParser.cpp
+++ b/mlir/lib/AsmParser/TypeParser.cpp
@@ -510,7 +510,7 @@
     if (consumeIf(Token::question)) {
       if (!allowDynamic)
         return emitError(loc, "expected static shape");
-      dimensions.push_back(-1);
+      dimensions.push_back(ShapedType::kDynamicSize);
     } else {
       int64_t value;
       if (failed(parseIntegerInDimensionList(value)))
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -843,7 +843,7 @@
                               bool isDynamic) {
   if (isDynamic) {
     // TODO (natashaknk): Make dynamic intermediate shape not always be rank-1
-    intermediateShape = {-1};
+    intermediateShape = {ShapedType::kDynamicSize};
     return true;
   }
@@ -1784,7 +1784,7 @@
     SmallVector<Value> dynDims;
     for (int i = 0; i < inputTy.getRank(); i++) {
-      if (inputTy.isDynamicDim(i) || multiples[i] == -1) {
+      if (inputTy.isDynamicDim(i) || multiples[i] == ShapedType::kDynamicSize) {
        dynDims.push_back(rewriter.create<tensor::DimOp>(loc, input, i));
      }
    }
diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -1796,7 +1796,7 @@
     bool isDynDim =
         isNormalizedMemRefDynamicDim(d, layoutMap, memrefTypeDynDims, context);
     if (isDynDim) {
-      newShape[d] = -1;
+      newShape[d] = ShapedType::kDynamicSize;
     } else {
       // The lower bound for the shape is always zero.
      Optional<int64_t> ubConst =
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
@@ -266,11 +266,12 @@
   // We parsed a generic dimension list, but vectors only support two forms:
   //  - single non-dynamic entry in the list (fixed vector);
-  //  - two elements, the first dynamic (indicated by -1) and the second
+  //  - two elements, the first dynamic (indicated by ShapedType::kDynamicSize)
+  //    and the second
   //    non-dynamic (scalable vector).
   if (dims.empty() || dims.size() > 2 ||
-      ((dims.size() == 2) ^ (dims[0] == -1)) ||
-      (dims.size() == 2 && dims[1] == -1)) {
+      ((dims.size() == 2) ^ (dims[0] == ShapedType::kDynamicSize)) ||
+      (dims.size() == 2 && dims[1] == ShapedType::kDynamicSize)) {
     parser.emitError(dimPos)
         << "expected '? x <integer> x <type>' or '<integer> x <type>'";
     return Type();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -64,7 +64,8 @@
   }
   // Fallback dynamic buffer.
-  auto dynamicBufferType = MemRefType::get(-1, b.getIntegerType(8));
+  auto dynamicBufferType =
+      MemRefType::get(ShapedType::kDynamicSize, b.getIntegerType(8));
   Value mul = b.createOrFold<arith::MulIOp>(
       b.create<arith::ConstantIndexOp>(width), allocSize);
   if (options.useAlloca)
@@ -242,7 +243,7 @@
     partialSizes.push_back(
         b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
   }
-  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), -1);
+  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamicSize);
   // If a callback is not specified, then use the default implementation for
   // allocating the promoted buffer.
   Optional<Value> fullLocalView = allocationFn(b, subView, fullSizes, layout);
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -185,7 +185,7 @@
     for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
       int64_t dimSize = memrefType.getDimSize(dim);
       // If this is already static dimension, keep it.
-      if (dimSize != -1) {
+      if (dimSize != ShapedType::kDynamicSize) {
         newShapeConstants.push_back(dimSize);
         continue;
       }
@@ -197,7 +197,7 @@
         newShapeConstants.push_back(constantIndexOp.value());
       } else {
         // Dynamic shape dimension not folded; copy dynamicSize from old memref.
-        newShapeConstants.push_back(-1);
+        newShapeConstants.push_back(ShapedType::kDynamicSize);
         dynamicSizes.push_back(dynamicSize);
       }
       dynamicDimPos++;
@@ -666,7 +666,8 @@
   for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
     int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
-    if (aDim != -1 && bDim != -1 && aDim != bDim)
+    if (aDim != ShapedType::kDynamicSize &&
+        bDim != ShapedType::kDynamicSize && aDim != bDim)
       return false;
   }
   return true;
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -600,7 +600,7 @@
              "sum of all the concatenation dimensions of the input tensors.");
       }
     } else {
-      int prev = dstDim;
+      int64_t prev = dstDim;
       for (auto src : getInputs()) {
         auto d = src.getType().cast<RankedTensorType>().getShape()[i];
         if (prev != ShapedType::kDynamicSize && d != prev)
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -434,7 +434,7 @@
   }
   // Determine the dimension size along the concatenation axis.
-  int concatDimSize = 0;
+  int64_t concatDimSize = 0;
   for (auto operand : operands) {
     ShapeAdaptor operandShape = operands.getShape(operand);
@@ -645,7 +645,7 @@
   // Any non dynamic dimension can be multiplied to a known size.
   outputShape.reserve(multiples.size());
   for (int i = 0, s = inputShape.getRank(); i < s; i++) {
-    int dim = inputShape.getDimSize(i);
+    int64_t dim = inputShape.getDimSize(i);
     if (dim != ShapedType::kDynamicSize)
       dim *= multipleValues[i];
     outputShape.push_back(dim);
@@ -800,8 +800,8 @@
   outputShape[0] = inputShape.getDimSize(0);
   outputShape[3] = inputShape.getDimSize(3);
-  int32_t inputHeight = inputShape.getDimSize(1);
-  int32_t inputWidth = inputShape.getDimSize(2);
+  int64_t inputHeight = inputShape.getDimSize(1);
+  int64_t inputWidth = inputShape.getDimSize(2);
   if ((inputHeight == ShapedType::kDynamicSize) ||
       (inputWidth == ShapedType::kDynamicSize))
@@ -961,7 +961,7 @@
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   ShapeAdaptor inputShape = operands.getShape(0);
   llvm::SmallVector<int64_t> outputShape;
-  outputShape.resize(4, -1);
+  outputShape.resize(4, ShapedType::kDynamicSize);
   // We only know the rank if the input type is unranked.
   if (!inputShape) {
@@ -973,8 +973,8 @@
   outputShape[0] = inputShape.getDimSize(0);
   outputShape[3] = inputShape.getDimSize(3);
-  int32_t height = inputShape.getDimSize(1);
-  int32_t width = inputShape.getDimSize(2);
+  int64_t height = inputShape.getDimSize(1);
+  int64_t width = inputShape.getDimSize(2);
   llvm::SmallVector<int64_t> kernel;
   llvm::SmallVector<int64_t> stride;
@@ -984,12 +984,12 @@
   getI64Values(attributes.get("stride").cast<ArrayAttr>(), stride);
   getI64Values(attributes.get("pad").cast<ArrayAttr>(), pad);
-  if (height != -1) {
+  if (height != ShapedType::kDynamicSize) {
     int32_t padded = height + pad[0] + pad[1] - kernel[0];
     outputShape[1] = padded / stride[0] + 1;
   }
-  if (width != -1) {
+  if (width != ShapedType::kDynamicSize) {
     int32_t padded = width + pad[2] + pad[3] - kernel[1];
     outputShape[2] = padded / stride[1] + 1;
   }
@@ -1005,10 +1005,10 @@
   llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamicSize);
   Conv2DOp::Adaptor adaptor(operands.getValues(), attributes);
-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;
   // Input shape describes input width/height and batch.
@@ -1072,13 +1072,13 @@
   llvm::SmallVector<int64_t> outputShape(5, ShapedType::kDynamicSize);
   Conv3DOp::Adaptor adaptor(operands.getValues(), attributes);
-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t inputDepth = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t inputDepth = ShapedType::kDynamicSize;
-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
-  int32_t weightDepth = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;
+  int64_t weightDepth = ShapedType::kDynamicSize;
   // Input shape describes input width/height and batch.
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1163,13 +1163,13 @@
   llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamicSize);
   DepthwiseConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t inputChannels = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t inputChannels = ShapedType::kDynamicSize;
-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
-  int32_t depthChannels = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;
+  int64_t depthChannels = ShapedType::kDynamicSize;
   // Input shape describes input width/height and batch.
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1244,10 +1244,10 @@
   llvm::SmallVector<int64_t> outputShape;
   getI64Values(adaptor.getOutShape(), outputShape);
-  int32_t inputWidth = ShapedType::kDynamicSize;
-  int32_t inputHeight = ShapedType::kDynamicSize;
-  int32_t weightWidth = ShapedType::kDynamicSize;
-  int32_t weightHeight = ShapedType::kDynamicSize;
+  int64_t inputWidth = ShapedType::kDynamicSize;
+  int64_t inputHeight = ShapedType::kDynamicSize;
+  int64_t weightWidth = ShapedType::kDynamicSize;
+  int64_t weightHeight = ShapedType::kDynamicSize;
   // Input shape describes input width/height and batch.
   ShapeAdaptor inputShape = operands.getShape(adaptor.getInput());
@@ -1287,14 +1287,18 @@
       !ShapedType::isDynamic(weightHeight)) {
     int32_t calculateSize =
         (inputHeight - 1) * stride[0] - padding[0] - padding[1] + weightHeight;
-    outputShape[1] = outputShape[1] == -1 ? calculateSize : outputShape[1];
+    outputShape[1] = outputShape[1] == ShapedType::kDynamicSize
+                         ? calculateSize
+                         : outputShape[1];
   }
   if (!ShapedType::isDynamic(inputWidth) &&
       !ShapedType::isDynamic(weightWidth)) {
     int32_t calculateSize =
         (inputWidth - 1) * stride[1] - padding[2] - padding[3] + weightWidth;
-    outputShape[2] = outputShape[2] == -1 ? calculateSize : outputShape[2];
+    outputShape[2] = outputShape[2] == ShapedType::kDynamicSize
+                         ? calculateSize
+                         : outputShape[2];
   }
   inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
@@ -52,9 +52,10 @@
   // Reshape input to [N,IH,IW,IC] -> [N * IH * IW, IC].
   ArrayRef<int64_t> inputShape = inputType.getShape();
-  int64_t combined = inputShape[0] * inputShape[1] * inputShape[2];
-  if (combined < 0)
-    combined = ShapedType::kDynamicSize;
+  int64_t combined = ShapedType::kDynamicSize;
+  if (numDynamic == 0)
+    combined = inputShape[0] * inputShape[1] * inputShape[2];
+
   llvm::SmallVector<int64_t, 2> revisedInputShape{combined, inputShape[3]};
   auto revisedInputShapeType =
       RankedTensorType::get(revisedInputShape, inputType.getElementType());
diff --git a/mlir/lib/Dialect/Traits.cpp b/mlir/lib/Dialect/Traits.cpp
--- a/mlir/lib/Dialect/Traits.cpp
+++ b/mlir/lib/Dialect/Traits.cpp
@@ -80,7 +80,7 @@
   // Check each dimension is consistent.
   for (; i1 != e1 && i2 != e2; ++i1, ++i2, ++iR) {
-    if (*i1 == -1 || *i2 == -1) {
+    if (*i1 == ShapedType::kDynamicSize || *i2 == ShapedType::kDynamicSize) {
       // One or both dimensions is unknown. Follow TensorFlow behavior:
       // - If either dimension is greater than 1, we assume that the program is
       //   correct, and the other dimension will be broadcast to match it.
@@ -94,7 +94,7 @@
       } else if (*i2 == 1) {
         *iR = *i1;
       } else {
-        *iR = -1;
+        *iR = ShapedType::kDynamicSize;
       }
     } else {
       if (*i1 == *i2 || *i2 == 1) {
@@ -199,7 +199,8 @@
    // then it is compatible, else if the inferred dim is 1 then it is also
    // compatible. But if the existing dim is 1 and the inferred is greater than
    // 1 then flag.
-    return dim1 == dim2 || dim1 == -1 || dim2 == -1 || dim1 == 1;
+    return dim1 == dim2 || dim1 == ShapedType::kDynamicSize ||
+           dim2 == ShapedType::kDynamicSize || dim1 == 1;
   };
   if (inferred.size() != existing.size())
     return false;
diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -335,7 +335,7 @@
                            ArrayRef<int64_t> shape, Type elementType,
                            Attribute encoding) {
   for (int64_t s : shape)
-    if (s < -1)
+    if (s < 0 && s != ShapedType::kDynamicSize)
      return emitError() << "invalid tensor dimension size";
   if (auto v = encoding.dyn_cast_or_null<VerifiableTensorEncoding>())
     if (failed(v.verifyEncoding(shape, elementType, emitError)))
@@ -656,9 +656,9 @@
   if (!BaseMemRefType::isValidElementType(elementType))
     return emitError() << "invalid memref element type";
-  // Negative sizes are not allowed except for `-1` that means dynamic size.
+  // Negative sizes are not allowed except for `kDynamicSize`.
   for (int64_t s : shape)
-    if (s < -1)
+    if (s < 0 && s != ShapedType::kDynamicSize)
      return emitError() << "invalid memref size";
   assert(layout && "missing layout specification");
diff --git a/mlir/python/mlir/dialects/_tensor_ops_ext.py b/mlir/python/mlir/dialects/_tensor_ops_ext.py
--- a/mlir/python/mlir/dialects/_tensor_ops_ext.py
+++ b/mlir/python/mlir/dialects/_tensor_ops_ext.py
@@ -4,6 +4,7 @@
 try:
   from ..ir import *
+  import numpy
 except ImportError as e:
   raise RuntimeError("Error loading imports from extension module") from e
@@ -30,7 +31,7 @@
       if isinstance(s, int):
         static_sizes.append(s)
       else:
-        static_sizes.append(-1)
+        static_sizes.append(ShapedType.get_dynamic_size())
         dynamic_sizes.append(s)
     result_type = RankedTensorType.get(static_sizes, element_type)
     op = self.build_generic(
diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
@@ -559,7 +559,7 @@
 // CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]
 func.func @test_reshape_downrank_dyn(%arg0: tensor<2x?xf32>) -> tensor<?xf32> {
   // CHECK: [[RESHAPE:%.+]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1]]
-  %0 = "tosa.reshape"(%arg0) {new_shape = [-1]} : (tensor<2x?xf32>) -> tensor<?xf32>
+  %0 = "tosa.reshape"(%arg0) {new_shape = [-9223372036854775808]} : (tensor<2x?xf32>) -> tensor<?xf32>
   // CHECK: return [[RESHAPE]]
   return %0 : tensor<?xf32>
 }
@@ -581,7 +581,7 @@
 // CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]
 func.func @test_reshape_uprank_dyn(%arg0: tensor<?xf32>) -> tensor<2x?xf32> {
   // CHECK: [[RESHAPE:%.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1]]
-  %0 = "tosa.reshape"(%arg0) {new_shape = [2, -1]} : (tensor<?xf32>) -> tensor<2x?xf32>
+  %0 = "tosa.reshape"(%arg0) {new_shape = [2, -9223372036854775808]} : (tensor<?xf32>) -> tensor<2x?xf32>
   // CHECK: return [[RESHAPE]]
   return %0 : tensor<2x?xf32>
 }
@@ -605,7 +605,7 @@
 func.func @test_reshape_samerank_dyn(%arg0: tensor<?x2xf32>) -> tensor<2x?xf32> {
   // CHECK-NEXT: %[[RESHAPE1:.*]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1]]
   // CHECK-NEXT: %[[RESHAPE2:.*]] = tensor.expand_shape %[[RESHAPE1]] {{\[}}[0, 1]]
-  %0 = "tosa.reshape"(%arg0) {new_shape = [2, -1]} : (tensor<?x2xf32>) -> tensor<2x?xf32>
+  %0 = "tosa.reshape"(%arg0) {new_shape = [2, -9223372036854775808]} : (tensor<?x2xf32>) -> tensor<2x?xf32>
   // CHECK-NEXT: return %[[RESHAPE2]]
   return %0 : tensor<2x?xf32>
 }
@@ -627,7 +627,7 @@
 func.func @test_reshape_downrank_6D_dyn(%arg0: tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32> {
   // CHECK: tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1, 2, 3, 4, 5]]
  // CHECK: tensor.expand_shape %{{.*}} {{\[}}[0, 1, 2]]
-  %0 = "tosa.reshape"(%arg0) {new_shape = [-1, 5, 77]} : (tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32>
+  %0 = "tosa.reshape"(%arg0) {new_shape = [-9223372036854775808, 5, 77]} : (tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32>
   return %0 : tensor<?x5x77xf32>
 }
@@ -1302,7 +1302,7 @@
   // CHECK: linalg.yield %[[ARG1]] : i8
   // CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[GENERIC]] {{\[}}[0, 1, 2, 3]]
   // CHECK: tensor.expand_shape %[[COLLAPSED]] {{\[}}[0, 1]]
-  %0 = "tosa.tile"(%arg0) {multiples = [2, -1]} : (tensor<2x3xi8>) -> (tensor<2x?xi8>)
+  %0 = "tosa.tile"(%arg0) {multiples = [2, -9223372036854775808]} : (tensor<2x3xi8>) -> (tensor<2x?xi8>)
   return
 }
diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
--- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
+++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
@@ -16,6 +16,6 @@
   // CHECK: %[[C2:.+]] = arith.constant 2 : index
   // CHECK: %[[SUB:.+]] = arith.subi %[[DIM]], %[[C2]]
   // CHECK: tensor.extract_slice %arg0[2] [%[[SUB]]] [1]
-  %0 = "tosa.slice"(%arg0) {start = [2], size = [-1]} : (tensor<?xf32>) -> (tensor<?xf32>)
+  %0 = "tosa.slice"(%arg0) {start = [2], size = [-9223372036854775808]} : (tensor<?xf32>) -> (tensor<?xf32>)
   return %0 : tensor<?xf32>
 }
diff --git a/mlir/test/Dialect/Linalg/transform-op-split.mlir b/mlir/test/Dialect/Linalg/transform-op-split.mlir
--- a/mlir/test/Dialect/Linalg/transform-op-split.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-split.mlir
@@ -179,7 +179,7 @@
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !pdl.operation):
   // expected-error @below {{expects either a dynamic or a static split point to be provided}}
-  %0:2 = "transform.structured.split"(%arg1) { dimension = 1, static_split_point = -1 } : (!pdl.operation) -> (!pdl.operation, !pdl.operation)
+  %0:2 = "transform.structured.split"(%arg1) { dimension = 1, static_split_point = -9223372036854775808 } : (!pdl.operation) -> (!pdl.operation, !pdl.operation)
 }

// -----
diff --git a/mlir/test/Dialect/Shape/canonicalize.mlir b/mlir/test/Dialect/Shape/canonicalize.mlir
--- a/mlir/test/Dialect/Shape/canonicalize.mlir
+++ b/mlir/test/Dialect/Shape/canonicalize.mlir
@@ -815,7 +815,7 @@
 // CHECK-NEXT: consume.witness
 // CHECK-NEXT: return
  %cs0 = shape.const_shape [8, 1] : !shape.shape
-  %cs1 = shape.const_shape [1, -1] : !shape.shape
+  %cs1 = shape.const_shape [1, -9223372036854775808] : !shape.shape
  %0 = shape.cstr_broadcastable %cs0, %cs0, %cs1 : !shape.shape, !shape.shape, !shape.shape
  "consume.witness"(%0) : (!shape.witness) -> ()
  return
@@ -830,7 +830,7 @@
 // CHECK-NEXT: return
  %cs0 = shape.const_shape [8, 1] : !shape.shape
  %cs1 = shape.const_shape [1, 8] : !shape.shape
-  %cs2 = shape.const_shape [1, -1] : !shape.shape
+  %cs2 = shape.const_shape [1, -9223372036854775808] : !shape.shape
  %0 = shape.cstr_broadcastable %cs0, %cs1, %cs2 : !shape.shape, !shape.shape, !shape.shape
  "consume.witness"(%0) : (!shape.witness) -> ()
  return
@@ -844,8 +844,8 @@
 // CHECK-NEXT: consume.witness
 // CHECK-NEXT: return
  %cs0 = shape.const_shape [8, 1] : !shape.shape
-  %cs1 = shape.const_shape [1, -1] : !shape.shape
-  %cs2 = shape.const_shape [8, -1] : !shape.shape
+  %cs1 = shape.const_shape [1, -9223372036854775808] : !shape.shape
+  %cs2 = shape.const_shape [8, -9223372036854775808] : !shape.shape
  %0 = shape.cstr_broadcastable %cs0, %cs1, %cs2 : !shape.shape, !shape.shape, !shape.shape
  "consume.witness"(%0) : (!shape.witness) -> ()
  return
diff --git a/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
--- a/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
@@ -44,10 +44,10 @@
// CHECK-SAME: %[[VAL_1:.*]]: tensor<384x1x1x64xi8>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<384xi32>) -> tensor<?x14x14x384xi32> {
func.func @conv_with_dynamic_dim(%arg0: tensor<?x14x14x64xi8>, %arg1: tensor<384x1x1x64xi8>, %arg2: tensor<384xi32>) -> tensor<?x14x14x384xi32> {
-// CHECK: %[[VAL_3:.*]] = "tosa.reshape"(%[[VAL_0]]) {new_shape = [-1, 64]} : (tensor<?x14x14x64xi8>) -> tensor<?x64xi8>
+// CHECK: %[[VAL_3:.*]] = "tosa.reshape"(%[[VAL_0]]) {new_shape = [-9223372036854775808, 64]} : (tensor<?x14x14x64xi8>) -> tensor<?x64xi8>
// CHECK: %[[VAL_4:.*]] = "tosa.reshape"(%[[VAL_1]]) {new_shape = [384, 64]} : (tensor<384x1x1x64xi8>) -> tensor<384x64xi8>
// CHECK: %[[VAL_5:.*]] = "tosa.fully_connected"(%[[VAL_3]], %[[VAL_4]], %[[VAL_2]]) {quantization_info = #tosa.conv_quant} : (tensor<?x64xi8>, tensor<384x64xi8>, tensor<384xi32>) -> tensor<?x384xi32>
-// CHECK: %[[VAL_6:.*]] = "tosa.reshape"(%[[VAL_5]]) {new_shape = [-1, 14, 14, 384]} : (tensor<?x384xi32>) -> tensor<?x14x14x384xi32>
+// CHECK: %[[VAL_6:.*]] = "tosa.reshape"(%[[VAL_5]]) {new_shape = [-9223372036854775808, 14, 14, 384]} : (tensor<?x384xi32>) -> tensor<?x14x14x384xi32>
// CHECK: return %[[VAL_6]] : tensor<?x14x14x384xi32>
// CHECK: }
  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant, stride = [1, 1]} : (tensor<?x14x14x64xi8>, tensor<384x1x1x64xi8>, tensor<384xi32>) -> tensor<?x14x14x384xi32>
diff --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
--- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
@@ -375,11 +375,11 @@
  // CHECK: "tosa.reshape"(%arg0) {new_shape = [16]} : (tensor<4x4xi32>) -> tensor<16xi32>
  %0 = "tosa.reshape"(%arg0) {new_shape = [16]} : (tensor<4x4xi32>) -> tensor<?xi32>
-  // CHECK: "tosa.reshape"(%arg0) {new_shape = [-1]} : (tensor<4x4xi32>) -> tensor<16xi32>
-  %1 = "tosa.reshape"(%arg0) {new_shape = [-1]} : (tensor<4x4xi32>) -> tensor<?xi32>
+  // CHECK: "tosa.reshape"(%arg0) {new_shape = [-9223372036854775808]} : (tensor<4x4xi32>) -> tensor<16xi32>
+  %1 = "tosa.reshape"(%arg0) {new_shape = [-9223372036854775808]} : (tensor<4x4xi32>) -> tensor<?xi32>
-  // CHECK: "tosa.reshape"(%arg0) {new_shape = [2, -1]} : (tensor<4x4xi32>) -> tensor<2x8xi32>
-  %2 = "tosa.reshape"(%arg0) {new_shape = [2, -1]} : (tensor<4x4xi32>) -> tensor<?x?xi32>
+  // CHECK: "tosa.reshape"(%arg0) {new_shape = [2, -9223372036854775808]} : (tensor<4x4xi32>) -> tensor<2x8xi32>
+  %2 = "tosa.reshape"(%arg0) {new_shape = [2, -9223372036854775808]} : (tensor<4x4xi32>) -> tensor<?x?xi32>
  return
 }
@@ -390,11 +390,11 @@
  // CHECK: %0 = "tosa.reshape"(%arg0) {new_shape = [16]} : (tensor<4x?xi32>) -> tensor<16xi32>
  %0 = "tosa.reshape"(%arg0) {new_shape = [16]} : (tensor<4x?xi32>) -> tensor<?xi32>
-  // CHECK: %1 = "tosa.reshape"(%arg0) {new_shape = [-1]} : (tensor<4x?xi32>) -> tensor<?xi32>
-  %1 = "tosa.reshape"(%arg0) {new_shape = [-1]} : (tensor<4x?xi32>) -> tensor<?xi32>
+  // CHECK: %1 = "tosa.reshape"(%arg0) {new_shape = [-9223372036854775808]} : (tensor<4x?xi32>) -> tensor<?xi32>
+  %1 = "tosa.reshape"(%arg0) {new_shape = [-9223372036854775808]} : (tensor<4x?xi32>) -> tensor<?xi32>
-  // CHECK: %2 = "tosa.reshape"(%arg0) {new_shape = [2, -1]} : (tensor<4x?xi32>) -> tensor<2x?xi32>
-  %2 = "tosa.reshape"(%arg0) {new_shape = [2, -1]} : (tensor<4x?xi32>) -> tensor<?x?xi32>
+  // CHECK: %2 = "tosa.reshape"(%arg0) {new_shape = [2, -9223372036854775808]} : (tensor<4x?xi32>) -> tensor<2x?xi32>
+  %2 = "tosa.reshape"(%arg0) {new_shape = [2, -9223372036854775808]} : (tensor<4x?xi32>) -> tensor<?x?xi32>
  return
 }
@@ -897,7 +897,7 @@
// CHECK-LABEL: @transpose_conv2d_out_shape
func.func @transpose_conv2d_out_shape(%arg0: tensor<2x?x?x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) {
  // CHECK: -> tensor<2x8x9x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, 8, 9, -1], stride = [1, 1]} : (tensor<2x?x?x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x8x9x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-9223372036854775808, 8, 9, -9223372036854775808], stride = [1, 1]} : (tensor<2x?x?x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x8x9x5xf32>
  return
 }
@@ -906,7 +906,7 @@
// CHECK-LABEL: @transpose_conv2d_static
func.func @transpose_conv2d_static(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) {
  // CHECK: -> tensor<2x18x19x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-9223372036854775808, -9223372036854775808, -9223372036854775808, -9223372036854775808], stride = [1, 1]} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32>
  return
 }
@@ -915,7 +915,7 @@
// CHECK-LABEL: @transpose_conv2d_static_strided
func.func @transpose_conv2d_static_strided(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) {
  // CHECK: -> tensor<2x33x45x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [2, 3]} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-9223372036854775808, -9223372036854775808, -9223372036854775808, -9223372036854775808], stride = [2, 3]} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x?x?x5xf32>
  return
 }
@@ -924,7 +924,7 @@
// CHECK-LABEL: @transpose_conv2d_dynamic_input
func.func @transpose_conv2d_dynamic_input(%arg0: tensor, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) {
  // CHECK: -> tensor
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-9223372036854775808, -9223372036854775808, -9223372036854775808, -9223372036854775808], stride = [1, 1]} : (tensor, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor
  return
 }
@@ -933,7 +933,7 @@
// CHECK-LABEL: @transpose_conv2d_dynamic_weights
func.func @transpose_conv2d_dynamic_weights(%arg0: tensor<2x6x4x3xf32>, %arg1: tensor, %arg2: tensor<5xf32>) {
  // CHECK: -> tensor<2x?x?x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x6x4x3xf32>, tensor, tensor<5xf32>) -> tensor<2x?x?x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-9223372036854775808, -9223372036854775808, -9223372036854775808, -9223372036854775808], stride = [1, 1]} : (tensor<2x6x4x3xf32>, tensor, tensor<5xf32>) -> tensor<2x?x?x5xf32>
  return
 }
@@ -942,7 +942,7 @@
// CHECK-LABEL: @transpose_conv2d_dynamic_bias
func.func @transpose_conv2d_dynamic_bias(%arg0: tensor<2x6x4x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<?xf32>) {
  // CHECK: -> tensor<2x8x9x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x6x4x3xf32>, tensor<5x3x6x3xf32>, tensor<?xf32>) -> tensor<2x8x9x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-9223372036854775808, -9223372036854775808, -9223372036854775808, -9223372036854775808], stride = [1, 1]} : (tensor<2x6x4x3xf32>, tensor<5x3x6x3xf32>, tensor<?xf32>) -> tensor<2x8x9x5xf32>
  return
 }
@@ -951,14 +951,14 @@
// CHECK-LABEL: @transpose_conv2d_padded
func.func @transpose_conv2d_padded(%arg0: tensor<2x9x11x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) {
  // CHECK: -> tensor<2x10x13x5xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [1, 0, 3, 0], out_shape = [-1, -1, -1, -1], stride = [1, 1]} : (tensor<2x9x11x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x10x13x5xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [1, 0, 3, 0], out_shape = [-9223372036854775808, -9223372036854775808, -9223372036854775808, -9223372036854775808], stride = [1, 1]} : (tensor<2x9x11x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x10x13x5xf32>
  return
 }

// CHECK-LABEL: @transpose_conv2d_strided
func.func @transpose_conv2d_strided(%arg0: tensor<1x5x7x1xf32>, %arg1: tensor<1x1x1x1xf32>, %arg2: tensor<1xf32>) {
  // CHECK: -> tensor<1x13x13x1xf32>
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-1, -1, -1, -1], stride = [3, 2]} : (tensor<1x5x7x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x13x13x1xf32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [-9223372036854775808, -9223372036854775808, -9223372036854775808, -9223372036854775808], stride = [3, 2]} : (tensor<1x5x7x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<1x13x13x1xf32>
  return
 }
diff --git a/mlir/test/python/dialects/linalg/ops.py b/mlir/test/python/dialects/linalg/ops.py
--- a/mlir/test/python/dialects/linalg/ops.py
+++ b/mlir/test/python/dialects/linalg/ops.py
@@ -23,7 +23,8 @@
  # CHECK-NEXT: %[[CST:.*]] = arith.constant 0.0{{.*}} : f32
  # CHECK-NEXT: %[[RES:.*]] = linalg.fill ins(%[[CST]] : f32) outs(%[[OUT]] : tensor<12x?xf32>) -> tensor<12x?xf32>
  # CHECK-NEXT: return %[[RES]] : tensor<12x?xf32>
-  @func.FuncOp.from_py_func(RankedTensorType.get((12, -1), f32))
+  @func.FuncOp.from_py_func(
+      RankedTensorType.get((12, ShapedType.get_dynamic_size()), f32))
  def fill_tensor(out):
    zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result
    return linalg.fill(zero, outs=[out])
@@ -33,7 +34,8 @@
  # CHECK-NEXT: %[[CST:.*]] = arith.constant 0.0{{.*}} : f32
  # CHECK-NEXT: linalg.fill ins(%[[CST]] : f32) outs(%[[OUT]] : memref<12x?xf32>)
  # CHECK-NEXT: return
-  @func.FuncOp.from_py_func(MemRefType.get((12, -1), f32))
+  @func.FuncOp.from_py_func(
+      MemRefType.get((12, ShapedType.get_dynamic_size()), f32))
  def fill_buffer(out):
    zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result
    linalg.fill(zero, outs=[out])
diff --git a/mlir/test/python/dialects/shape.py b/mlir/test/python/dialects/shape.py
--- a/mlir/test/python/dialects/shape.py
+++ b/mlir/test/python/dialects/shape.py
@@ -20,7 +20,7 @@
  f32 = F32Type.get()
  with InsertionPoint(module.body):
    @func.FuncOp.from_py_func(
-        RankedTensorType.get((12, -1), f32))
+        RankedTensorType.get((12, ShapedType.get_dynamic_size()), f32))
    def const_shape_tensor(arg):
      return shape.ConstShapeOp(
          DenseElementsAttr.get(np.array([10, 20], dtype=np.int64), type=IndexType.get()))
diff --git a/mlir/test/python/dialects/tensor.py b/mlir/test/python/dialects/tensor.py
--- a/mlir/test/python/dialects/tensor.py
+++ b/mlir/test/python/dialects/tensor.py
@@ -21,7 +21,10 @@
  indexType = IndexType.get()
  with InsertionPoint(module.body):
-    @func.FuncOp.from_py_func(RankedTensorType.get((-1, -1), f32Type))
+    @func.FuncOp.from_py_func(
+        RankedTensorType.get(
+            (ShapedType.get_dynamic_size(), ShapedType.get_dynamic_size()),
+            f32Type))
    # CHECK: func @tensor_static_dim
    # CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>
    # CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
diff --git a/mlir/test/python/dialects/vector.py b/mlir/test/python/dialects/vector.py
--- a/mlir/test/python/dialects/vector.py
+++ b/mlir/test/python/dialects/vector.py
@@ -35,7 +35,9 @@
  module = Module.create()
  with InsertionPoint(module.body):
    vector_type = VectorType.get([2, 3], F32Type.get())
-    memref_type = MemRefType.get([-1, -1], F32Type.get())
+    memref_type = MemRefType.get(
+        [ShapedType.get_dynamic_size(),
+         ShapedType.get_dynamic_size()], F32Type.get())
    index_type = IndexType.get()
    mask_type = VectorType.get(vector_type.shape, IntegerType.get_signless(1))
    identity_map = AffineMap.get_identity(vector_type.rank)
diff --git a/mlir/unittests/Dialect/BroadcastShapeTest.cpp b/mlir/unittests/Dialect/BroadcastShapeTest.cpp
--- a/mlir/unittests/Dialect/BroadcastShapeTest.cpp
+++ b/mlir/unittests/Dialect/BroadcastShapeTest.cpp
@@ -47,9 +47,10 @@
 TEST(BroadcastShapeTest, InterleavingUnknowns) {
   SmallVector<int64_t> result;
-  ASSERT_TRUE(
-      getBroadcastedShape({1, 2, -1, -1, -1}, {-1, -1, -1, 4, 1}, result));
-  EXPECT_THAT(result, ElementsAre(-1, 2, -1, 4, -1));
+  int64_t dyn = mlir::ShapedType::kDynamicSize;
+  ASSERT_TRUE(getBroadcastedShape({1, 2, dyn, dyn, dyn}, {dyn, dyn, dyn, 4, 1},
+                                  result));
+  EXPECT_THAT(result, ElementsAre(dyn, 2, dyn, 4, dyn));
 }

 TEST(BroadcastShapeTest, IncompatibleLowDim) {
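Not part of the patch: a minimal C++ sketch of the caller-side pattern this change assumes, so code never compares against a literal sentinel. Only existing ShapedType/MemRefType APIs are used; the helper names (hasDynamicDims, makeRowDynamicMemRef) are illustrative, not from the source.

// Illustrative sketch only (not part of the patch). Go through the
// ShapedType helpers so the numeric value of the sentinel stays an
// implementation detail.
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"

using namespace mlir;

// Hypothetical helper: true if any dimension of a ranked type is dynamic.
static bool hasDynamicDims(ShapedType type) {
  if (!type.hasRank())
    return false;
  for (int64_t size : type.getShape())
    if (ShapedType::isDynamic(size)) // instead of `size == -1`
      return true;
  return false;
}

// Hypothetical helper: build memref<?x8xf32> using the named constant.
static MemRefType makeRowDynamicMemRef(Builder &b) {
  return MemRefType::get({ShapedType::kDynamicSize, 8}, b.getF32Type());
}

The .mlir tests, by contrast, have to spell the raw literal (-9223372036854775808) in attributes such as new_shape and out_shape, because attribute values are plain integers with no symbolic name in the textual IR.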