diff --git a/flang/include/flang/Optimizer/Dialect/FIRTypes.td b/flang/include/flang/Optimizer/Dialect/FIRTypes.td --- a/flang/include/flang/Optimizer/Dialect/FIRTypes.td +++ b/flang/include/flang/Optimizer/Dialect/FIRTypes.td @@ -128,7 +128,7 @@ static constexpr LenType singleton() { return 1; } /// Character has a LEN value which is not a compile-time known constant. - static constexpr LenType unknownLen() { return mlir::ShapedType::kDynamicSize; } + static constexpr LenType unknownLen() { return mlir::ShapedType::kDynamic; } /// Character LEN is a runtime value. bool hasDynamicLen() { return getLen() == unknownLen(); } @@ -482,9 +482,9 @@ // Does the sequence have unknown shape? (`array<* x T>`) bool hasUnknownShape() const { return getShape().empty(); } - // The value `kDynamicSize` represents an unknown extent for a dimension + // The value `kDynamic` represents an unknown extent for a dimension static constexpr Extent getUnknownExtent() { - return mlir::ShapedType::kDynamicSize; + return mlir::ShapedType::kDynamic; } }]; } diff --git a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp --- a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp +++ b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp @@ -411,7 +411,7 @@ affineMap, indexArgs); auto arrayElementType = coordinateArrayElement(acoOp); auto newType = - mlir::MemRefType::get({mlir::ShapedType::kDynamicSize}, arrayElementType); + mlir::MemRefType::get({mlir::ShapedType::kDynamic}, arrayElementType); auto arrayConvert = rewriter.create(acoOp.getLoc(), newType, acoOp.getMemref()); return std::make_pair(affineApply, arrayConvert); diff --git a/mlir/include/mlir/Dialect/Arith/Utils/Utils.h b/mlir/include/mlir/Dialect/Arith/Utils/Utils.h --- a/mlir/include/mlir/Dialect/Arith/Utils/Utils.h +++ b/mlir/include/mlir/Dialect/Arith/Utils/Utils.h @@ -54,9 +54,9 @@ SmallVector mixedOffsets(op.getMixedOffsets()); SmallVector mixedSizes(op.getMixedSizes()); SmallVector mixedStrides(op.getMixedStrides()); - canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset); + canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamic); canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic); - canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset); + canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamic); // Create the new op in canonical form. ResultTypeFunc resultTypeFunc; diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td @@ -658,7 +658,7 @@ /*desc=*/[{ Like `getShape`, but only returns statically-known information, without generating any new IR. For each shape dimension, returns >=0 if that - dimension is statically known, or ShapeType::kDynamicSize otherwise. + dimension is statically known, or ShapeType::kDynamic otherwise. }], /*retTy=*/"SmallVector", /*methodName=*/"getStaticShape", @@ -675,7 +675,7 @@ /*desc=*/[{ Returns the statically-known loop ranges. Composes `getShapesToLoopsMap()` with the result of `getStaticShape`. - Returns ShapeType::kDynamicSize for non-statically-known loop ranges. + Returns ShapeType::kDynamic for non-statically-known loop ranges. 
This is expected to be called by a valid Linalg op }], /*retTy=*/"SmallVector", diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td --- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td +++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td @@ -438,7 +438,7 @@ static split point attribute when it is known at transform IR construction time or as the handle to an operation producing a single index-typed value when it is computed by payload IR. In the latter case, the static split - point must be set to `ShapedType::kDynamicSize` and the dynamic size handle + point must be set to `ShapedType::kDynamic` and the dynamic size handle must point to as many value-producing operations as there are structured operations pointed to by the target handle. @@ -809,9 +809,9 @@ case the tile value must be computed by the payload IR and the handle to the operation computing it must be provided through `dynamic_sizes`. When the sizes are not known statically, the corresponding entry in the - `static_sizes` attribute must be set to `ShapedType::kDynamicSize`. Only + `static_sizes` attribute must be set to `ShapedType::kDynamic`. Only the dynamic sizes must be provided in `dynamic_sizes`, i.e., there should - be as many handles as `ShapedType::kDynamicSize` values in the + be as many handles as `ShapedType::kDynamic` values in the `static_sizes` attribute. A static size of `0` indicates that the dimension should not be tiled. No loop will be generated for such dimensions. If all tile sizes are `0`, this transform is effectively a no-op. @@ -950,10 +950,10 @@ $target oilist( `num_threads` custom($num_threads, $static_num_threads, - "ShapedType::kDynamicSize") | + "ShapedType::kDynamic") | `tile_sizes` custom($tile_sizes, $static_tile_sizes, - "ShapedType::kDynamicSize")) + "ShapedType::kDynamic")) (`(` `mapping` `=` $mapping^ `)`)? attr-dict }]; let hasVerifier = 1; @@ -981,9 +981,9 @@ case the tile value must be computed by the payload IR and the handle to the operation computing it must be provided through `dynamic_sizes`. When the sizes are not known statically, the corresponding entry in the - `static_sizes` attribute must be set to `ShapedType::kDynamicSize`. Only + `static_sizes` attribute must be set to `ShapedType::kDynamic`. Only the dynamic sizes must be provided in `dynamic_sizes`, i.e., there should - be as many handles as `ShapedType::kDynamicSize` values in the + be as many handles as `ShapedType::kDynamic` values in the `static_sizes` attribute. A static size of `0` indicates that the dimension should not be tiled. No loop will be generated for such dimensions. If all tile sizes are `0`, this transform is effectively a no-op. 
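With the two sentinels merged, a dynamic extent and a dynamic stride or offset are spelled identically from C++. A minimal sketch of what that looks like when building a type (illustrative only, not part of this patch; the helper name is invented):

    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/BuiltinTypes.h"

    // Builds memref<?x4xf32, strided<[?, 1], offset: ?>> using only kDynamic,
    // where kDynamicSize and kDynamicStrideOrOffset used to be needed.
    static mlir::MemRefType makeExampleType(mlir::MLIRContext *ctx) {
      auto f32 = mlir::Float32Type::get(ctx);
      auto layout = mlir::StridedLayoutAttr::get(
          ctx, /*offset=*/mlir::ShapedType::kDynamic,
          /*strides=*/{mlir::ShapedType::kDynamic, 1});
      return mlir::MemRefType::get({mlir::ShapedType::kDynamic, 4}, f32, layout);
    }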
diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td --- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td +++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td @@ -1268,13 +1268,13 @@ let assemblyFormat = [{ $source `to` `offset` `` `:` custom($offsets, $static_offsets, - "ShapedType::kDynamicStrideOrOffset") + "ShapedType::kDynamic") `` `,` `sizes` `` `:` custom($sizes, $static_sizes, - "ShapedType::kDynamicSize") + "ShapedType::kDynamic") `` `,` `strides` `` `:` custom($strides, $static_strides, - "ShapedType::kDynamicStrideOrOffset") + "ShapedType::kDynamic") attr-dict `:` type($source) `to` type($result) }]; @@ -1739,8 +1739,8 @@ The representation based on offsets, sizes and strides support a partially-static specification via attributes specified through the `static_offsets`, `static_sizes` and `static_strides` arguments. A special - sentinel value ShapedType::kDynamicSize and - ShapedType::kDynamicStrideOrOffset encodes that the corresponding entry has + sentinel value ShapedType::kDynamic, shared by offsets, sizes, and strides, + encodes that the corresponding entry has a dynamic value. A subview operation may additionally reduce the rank of the resulting view @@ -1866,11 +1866,11 @@ let assemblyFormat = [{ $source `` custom($offsets, $static_offsets, - "ShapedType::kDynamicStrideOrOffset") + "ShapedType::kDynamic") custom($sizes, $static_sizes, - "ShapedType::kDynamicSize") + "ShapedType::kDynamic") custom($strides, $static_strides, - "ShapedType::kDynamicStrideOrOffset") + "ShapedType::kDynamic") attr-dict `:` type($source) `to` type($result) }]; diff --git a/mlir/include/mlir/Dialect/Shape/IR/Shape.h b/mlir/include/mlir/Dialect/Shape/IR/Shape.h --- a/mlir/include/mlir/Dialect/Shape/IR/Shape.h +++ b/mlir/include/mlir/Dialect/Shape/IR/Shape.h @@ -35,7 +35,7 @@ /// Alias type for extent tensors. RankedTensorType getExtentTensorType(MLIRContext *ctx, - int64_t rank = ShapedType::kDynamicSize); + int64_t rank = ShapedType::kDynamic); // Check if a type is an extent tensor, e.g., tensor. bool isExtentTensorType(Type); diff --git a/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td b/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td --- a/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td +++ b/mlir/include/mlir/Dialect/Shape/IR/ShapeBase.td @@ -91,7 +91,7 @@ def Shape_ExtentTensorType : 1DTensorOf<[Index]>, - BuildableType<"::mlir::RankedTensorType::get({ShapedType::kDynamicSize}, " + BuildableType<"::mlir::RankedTensorType::get({ShapedType::kDynamic}, " "$_builder.getType<::mlir::IndexType>())"> { let description = [{ The extent tensor is a tensor of rank one with arbitrarily many index diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td --- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td +++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td @@ -278,8 +278,8 @@ The representation based on offsets, sizes and strides support a partially-static specification via attributes specified through the `static_offsets`, `static_sizes` and `static_strides` arguments. A special - sentinel value ShapedType::kDynamicSize and - ShapedType::kDynamicStrideOrOffset encodes that the corresponding entry has + sentinel value ShapedType::kDynamic, shared by offsets, sizes, and strides, + encodes that the corresponding entry has a dynamic value. 
After buffer allocation, the "extract_slice" op is expected to lower into a @@ -335,11 +335,11 @@ let assemblyFormat = [{ $source `` custom($offsets, $static_offsets, - "ShapedType::kDynamicStrideOrOffset") + "ShapedType::kDynamic") custom($sizes, $static_sizes, - "ShapedType::kDynamicSize") + "ShapedType::kDynamic") custom($strides, $static_strides, - "ShapedType::kDynamicStrideOrOffset") + "ShapedType::kDynamic") attr-dict `:` type($source) `to` type($result) }]; @@ -772,8 +772,8 @@ The representation based on offsets, sizes and strides support a partially-static specification via attributes specified through the `static_offsets`, `static_sizes` and `static_strides` arguments. A special - sentinel value ShapedType::kDynamicSize and - ShapedType::kDynamicStrideOrOffset encodes that the corresponding entry has + sentinel value ShapedType::kDynamic and + ShapedType::kDynamic encodes that the corresponding entry has a dynamic value. After buffer allocation, the "insert_slice" op is expected to lower into a @@ -819,11 +819,11 @@ let assemblyFormat = [{ $source `into` $dest `` custom($offsets, $static_offsets, - "ShapedType::kDynamicStrideOrOffset") + "ShapedType::kDynamic") custom($sizes, $static_sizes, - "ShapedType::kDynamicSize") + "ShapedType::kDynamic") custom($strides, $static_strides, - "ShapedType::kDynamicStrideOrOffset") + "ShapedType::kDynamic") attr-dict `:` type($source) `into` type($dest) }]; @@ -1222,9 +1222,9 @@ $source (`nofold` $nofold^)? `low` `` custom($low, $static_low, - "ShapedType::kDynamicSize") + "ShapedType::kDynamic") `high` `` custom($high, $static_high, - "ShapedType::kDynamicSize") + "ShapedType::kDynamic") $region attr-dict `:` type($source) `to` type($result) }]; @@ -1377,8 +1377,8 @@ The representation based on offsets, sizes and strides support a partially-static specification via attributes specified through the `static_offsets`, `static_sizes` and `static_strides` arguments. A special - sentinel value ShapedType::kDynamicSize and - ShapedType::kDynamicStrideOrOffset encodes that the corresponding entry has + sentinel value ShapedType::kDynamic and + ShapedType::kDynamic encodes that the corresponding entry has a dynamic value. 
After buffer allocation, the "parallel_insert_slice" op is expected to lower @@ -1412,11 +1412,11 @@ let assemblyFormat = [{ $source `into` $dest `` custom($offsets, $static_offsets, - "ShapedType::kDynamicStrideOrOffset") + "ShapedType::kDynamic") custom($sizes, $static_sizes, - "ShapedType::kDynamicSize") + "ShapedType::kDynamic") custom($strides, $static_strides, - "ShapedType::kDynamicStrideOrOffset") + "ShapedType::kDynamic") attr-dict `:` type($source) `into` type($dest) }]; diff --git a/mlir/include/mlir/Dialect/Tosa/Utils/ShapeUtils.h b/mlir/include/mlir/Dialect/Tosa/Utils/ShapeUtils.h --- a/mlir/include/mlir/Dialect/Tosa/Utils/ShapeUtils.h +++ b/mlir/include/mlir/Dialect/Tosa/Utils/ShapeUtils.h @@ -111,14 +111,14 @@ return result; result.hasRank = true; - result.sizes.resize(lhs.sizes.size(), ShapedType::kDynamicSize); + result.sizes.resize(lhs.sizes.size(), ShapedType::kDynamic); for (auto i : llvm::seq(0, result.sizes.size())) { int64_t lhsSize = lhs.sizes[i]; int64_t rhsSize = rhs.sizes[i]; int64_t &resultSize = result.sizes[i]; - if (lhsSize == ShapedType::kDynamicSize) { + if (lhsSize == ShapedType::kDynamic) { resultSize = rhsSize; - } else if (rhsSize == ShapedType::kDynamicSize) { + } else if (rhsSize == ShapedType::kDynamic) { resultSize = lhsSize; } else if (lhsSize == rhsSize) { resultSize = lhsSize; @@ -155,7 +155,7 @@ } result.hasRank = true; - result.sizes.resize(lhs.sizes.size(), ShapedType::kDynamicSize); + result.sizes.resize(lhs.sizes.size(), ShapedType::kDynamic); for (int i = 0, e = lhs.sizes.size(); i < e; i++) { if (lhs.sizes[i] == rhs.sizes[i]) { result.sizes[i] = lhs.sizes[i]; @@ -170,7 +170,7 @@ // Whether the value has known rank. bool hasRank; // If `hasRank`, the sizes along each rank. Unknown sizes are represented as - // `ShapedType::kDynamicSize`. + // `ShapedType::kDynamic`. llvm::SmallVector sizes; // The dtype of a tensor. // This is equal to nullptr if we don't know that it is a specific concrete diff --git a/mlir/include/mlir/IR/BuiltinAttributes.h b/mlir/include/mlir/IR/BuiltinAttributes.h --- a/mlir/include/mlir/IR/BuiltinAttributes.h +++ b/mlir/include/mlir/IR/BuiltinAttributes.h @@ -1034,7 +1034,7 @@ namespace mlir { -/// Given a list of strides (in which MemRefType::getDynamicStrideOrOffset() +/// Given a list of strides (in which ShapedType::kDynamic /// represents a dynamic value), return the single result AffineMap which /// represents the linearized strided layout map. Dimensions correspond to the /// offset followed by the strides in order. Symbols are inserted for each diff --git a/mlir/include/mlir/IR/BuiltinAttributes.td b/mlir/include/mlir/IR/BuiltinAttributes.td --- a/mlir/include/mlir/IR/BuiltinAttributes.td +++ b/mlir/include/mlir/IR/BuiltinAttributes.td @@ -1009,7 +1009,7 @@ Strides must be positive and the offset must be non-negative. Both the strides and the offset may be _dynamic_, i.e. their value may not be known at compile time. This is expressed as a `?` in the assembly syntax and as - `ShapedType::kDynamicStrideOrOffset` in the code. Stride and offset values + `ShapedType::kDynamic` in the code. Stride and offset values must satisfy the constraints above at runtime, the behavior is undefined otherwise. diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td --- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td +++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td @@ -54,7 +54,7 @@ A shape is a list of sizes corresponding to the dimensions of the container. 
If the number of dimensions in the shape is unknown, the shape is "unranked". If the number of dimensions is known, the shape "ranked". The sizes of the - dimensions of the shape must be positive, or kDynamicSize (in which case the + dimensions of the shape must be positive, or kDynamic (in which case the size of the dimension is dynamic, or not statically known). }]; let methods = [ @@ -84,20 +84,11 @@ ]; let extraClassDeclaration = [{ - // TODO: merge these two special values in a single one used everywhere. - // Unfortunately, uses of `-1` have crept deep into the codebase now and are - // hard to track. - static constexpr int64_t kDynamicSize = - std::numeric_limits::min(); - static constexpr int64_t kDynamicStrideOrOffset = + static constexpr int64_t kDynamic = std::numeric_limits::min(); - /// Whether the given dimension size indicates a dynamic dimension. - static constexpr bool isDynamic(int64_t dSize) { - return dSize == kDynamicSize; - } - static constexpr bool isDynamicStrideOrOffset(int64_t dStrideOrOffset) { - return dStrideOrOffset == kDynamicStrideOrOffset; + static constexpr bool isDynamic(int64_t dValue) { + return dValue == kDynamic; } /// Return the number of elements present in the given shape. diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h --- a/mlir/include/mlir/IR/BuiltinTypes.h +++ b/mlir/include/mlir/IR/BuiltinTypes.h @@ -429,7 +429,7 @@ /// symbols. /// /// A stride specification is a list of integer values that are either static -/// or dynamic (encoded with ShapedType::kDynamicStrideOrOffset). Strides encode +/// or dynamic (encoded with ShapedType::kDynamic). Strides encode /// the distance in the number of elements between successive entries along a /// particular dimension. LogicalResult getStridesAndOffset(MemRefType t, diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td --- a/mlir/include/mlir/IR/BuiltinTypes.td +++ b/mlir/include/mlir/IR/BuiltinTypes.td @@ -584,12 +584,6 @@ /// New `Attribute getMemorySpace()` method should be used instead. unsigned getMemorySpaceAsInt() const; - // TODO: merge these two special values in a single one used everywhere. - // Unfortunately, uses of `-1` have crept deep into the codebase now and are - // hard to track. - static int64_t getDynamicStrideOrOffset() { - return ShapedType::kDynamicStrideOrOffset; - } }]; let skipDefaultBuilders = 1; let genVerifyDecl = 1; diff --git a/mlir/include/mlir/Interfaces/InferTypeOpInterface.h b/mlir/include/mlir/Interfaces/InferTypeOpInterface.h --- a/mlir/include/mlir/Interfaces/InferTypeOpInterface.h +++ b/mlir/include/mlir/Interfaces/InferTypeOpInterface.h @@ -94,7 +94,7 @@ /// The components consist of /// - A ranked or unranked shape with the dimension specification match those /// of ShapeType's getShape() (e.g., dynamic dimension represented using -/// ShapedType::kDynamicSize) +/// ShapedType::kDynamic) /// - A element type, may be unset (nullptr) /// - A attribute, may be unset (nullptr) /// Used by ShapedType type inferences. diff --git a/mlir/include/mlir/Interfaces/ViewLikeInterface.td b/mlir/include/mlir/Interfaces/ViewLikeInterface.td --- a/mlir/include/mlir/Interfaces/ViewLikeInterface.td +++ b/mlir/include/mlir/Interfaces/ViewLikeInterface.td @@ -50,8 +50,8 @@ `getArrayAttrMaxRanks()`[0] (resp. [1], [2]). 3. if an entry of `static_offsets` (resp. `static_sizes`, `static_strides`) is equal to a special sentinel value, namely - `ShapedType::kDynamicStrideOrOffset` (resp. 
`ShapedType::kDynamicSize`, - `ShapedType::kDynamicStrideOrOffset`), then the corresponding entry is + `ShapedType::kDynamic` (resp. `ShapedType::kDynamic`, + `ShapedType::kDynamic`), then the corresponding entry is a dynamic offset (resp. size, stride). 4. a variadic `offset` (resp. `sizes`, `strides`) operand must be present for each dynamic offset (resp. size, stride). @@ -206,7 +206,7 @@ /*defaultImplementation=*/[{ ::llvm::APInt v = *(static_offsets() .template getAsValueRange<::mlir::IntegerAttr>().begin() + idx); - return ::mlir::ShapedType::isDynamicStrideOrOffset(v.getSExtValue()); + return ::mlir::ShapedType::isDynamic(v.getSExtValue()); }] >, InterfaceMethod< @@ -234,7 +234,7 @@ /*defaultImplementation=*/[{ ::llvm::APInt v = *(static_strides() .template getAsValueRange<::mlir::IntegerAttr>().begin() + idx); - return ::mlir::ShapedType::isDynamicStrideOrOffset(v.getSExtValue()); + return ::mlir::ShapedType::isDynamic(v.getSExtValue()); }] >, InterfaceMethod< @@ -296,7 +296,7 @@ assert($_op.isDynamicOffset(idx) && "expected dynamic offset"); auto numDynamic = getNumDynamicEntriesUpToIdx( static_offsets().template cast<::mlir::ArrayAttr>(), - ::mlir::ShapedType::isDynamicStrideOrOffset, + ::mlir::ShapedType::isDynamic, idx); return $_op.getOffsetSizeAndStrideStartOperandIndex() + numDynamic; }] @@ -331,7 +331,7 @@ assert($_op.isDynamicStride(idx) && "expected dynamic stride"); auto numDynamic = getNumDynamicEntriesUpToIdx( static_strides().template cast<::mlir::ArrayAttr>(), - ::mlir::ShapedType::isDynamicStrideOrOffset, + ::mlir::ShapedType::isDynamic, idx); return $_op.getOffsetSizeAndStrideStartOperandIndex() + offsets().size() + sizes().size() + numDynamic; diff --git a/mlir/lib/AsmParser/AttributeParser.cpp b/mlir/lib/AsmParser/AttributeParser.cpp --- a/mlir/lib/AsmParser/AttributeParser.cpp +++ b/mlir/lib/AsmParser/AttributeParser.cpp @@ -1170,7 +1170,7 @@ // fit into int64_t limits. 
auto parseStrideOrOffset = [&]() -> Optional { if (consumeIf(Token::question)) - return ShapedType::kDynamicStrideOrOffset; + return ShapedType::kDynamic; SMLoc loc = getToken().getLoc(); auto emitWrongTokenError = [&] { diff --git a/mlir/lib/AsmParser/TypeParser.cpp b/mlir/lib/AsmParser/TypeParser.cpp --- a/mlir/lib/AsmParser/TypeParser.cpp +++ b/mlir/lib/AsmParser/TypeParser.cpp @@ -514,7 +514,7 @@ if (consumeIf(Token::question)) { if (!allowDynamic) return emitError(loc, "expected static shape"); - dimensions.push_back(ShapedType::kDynamicSize); + dimensions.push_back(ShapedType::kDynamic); } else { int64_t value; if (failed(parseIntegerInDimensionList(value))) diff --git a/mlir/lib/CAPI/IR/BuiltinTypes.cpp b/mlir/lib/CAPI/IR/BuiltinTypes.cpp --- a/mlir/lib/CAPI/IR/BuiltinTypes.cpp +++ b/mlir/lib/CAPI/IR/BuiltinTypes.cpp @@ -165,18 +165,18 @@ return unwrap(type).cast().getDimSize(static_cast(dim)); } -int64_t mlirShapedTypeGetDynamicSize() { return ShapedType::kDynamicSize; } +int64_t mlirShapedTypeGetDynamicSize() { return ShapedType::kDynamic; } bool mlirShapedTypeIsDynamicSize(int64_t size) { return ShapedType::isDynamic(size); } bool mlirShapedTypeIsDynamicStrideOrOffset(int64_t val) { - return ShapedType::isDynamicStrideOrOffset(val); + return ShapedType::isDynamic(val); } int64_t mlirShapedTypeGetDynamicStrideOrOffset() { - return ShapedType::kDynamicStrideOrOffset; + return ShapedType::kDynamic; } //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp --- a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp +++ b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp @@ -203,7 +203,7 @@ size_t i = pair.index(); Value index = pair.value(); Value strideOp; - if (ShapedType::isDynamicStrideOrOffset(strides[i])) { + if (ShapedType::isDynamic(strides[i])) { strideOp = rewriter.create( loc, memrefDescriptor.stride(rewriter, loc, i), byteWidthConst); } else { @@ -226,7 +226,7 @@ Value sgprOffset = adaptor.getSgprOffset(); if (!sgprOffset) sgprOffset = createI32Constant(rewriter, loc, 0); - if (ShapedType::isDynamicStrideOrOffset(offset)) + if (ShapedType::isDynamic(offset)) sgprOffset = rewriter.create( loc, memrefDescriptor.offset(rewriter, loc), sgprOffset); else if (offset > 0) diff --git a/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp b/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp --- a/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp +++ b/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp @@ -57,9 +57,9 @@ auto result = getStridesAndOffset(type, strides, offset); (void)result; assert(succeeded(result) && "unexpected failure in stride computation"); - assert(!ShapedType::isDynamicStrideOrOffset(offset) && + assert(!ShapedType::isDynamic(offset) && "expected static offset"); - assert(!llvm::any_of(strides, ShapedType::isDynamicStrideOrOffset) && + assert(!llvm::any_of(strides, ShapedType::isDynamic) && "expected static strides"); auto convertedType = typeConverter.convertType(type); diff --git a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp --- a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp +++ b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp @@ -80,14 +80,14 @@ Value index; if (offset != 0) // Skip if offset is zero. - index = ShapedType::isDynamicStrideOrOffset(offset) + index = ShapedType::isDynamic(offset) ? 
memRefDescriptor.offset(rewriter, loc) : createIndexConstant(rewriter, loc, offset); for (int i = 0, e = indices.size(); i < e; ++i) { Value increment = indices[i]; if (strides[i] != 1) { // Skip if stride is 1. - Value stride = ShapedType::isDynamicStrideOrOffset(strides[i]) + Value stride = ShapedType::isDynamic(strides[i]) ? memRefDescriptor.stride(rewriter, loc, i) : createIndexConstant(rewriter, loc, strides[i]); increment = rewriter.create(loc, increment, stride); @@ -123,14 +123,14 @@ SmallVectorImpl &strides, Value &sizeBytes) const { assert(isConvertibleAndHasIdentityMaps(memRefType) && "layout maps must have been normalized away"); - assert(count(memRefType.getShape(), ShapedType::kDynamicSize) == + assert(count(memRefType.getShape(), ShapedType::kDynamic) == static_cast(dynamicSizes.size()) && "dynamicSizes size doesn't match dynamic sizes count in memref shape"); sizes.reserve(memRefType.getRank()); unsigned dynamicIndex = 0; for (int64_t size : memRefType.getShape()) { - sizes.push_back(size == ShapedType::kDynamicSize + sizes.push_back(size == ShapedType::kDynamic ? dynamicSizes[dynamicIndex++] : createIndexConstant(rewriter, loc, size)); } @@ -146,14 +146,14 @@ if (size == 0) continue; bool useSizeAsStride = stride == 1; - if (size == ShapedType::kDynamicSize) - stride = ShapedType::kDynamicSize; - if (stride != ShapedType::kDynamicSize) + if (size == ShapedType::kDynamic) + stride = ShapedType::kDynamic; + if (stride != ShapedType::kDynamic) stride *= size; if (useSizeAsStride) runningStride = sizes[i]; - else if (stride == ShapedType::kDynamicSize) + else if (stride == ShapedType::kDynamic) runningStride = rewriter.create(loc, runningStride, sizes[i]); else diff --git a/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp b/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp --- a/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp +++ b/mlir/lib/Conversion/LLVMCommon/TypeConverter.cpp @@ -386,10 +386,10 @@ return false; for (int64_t stride : strides) - if (ShapedType::isDynamicStrideOrOffset(stride)) + if (ShapedType::isDynamic(stride)) return false; - return !ShapedType::isDynamicStrideOrOffset(offset); + return !ShapedType::isDynamic(offset); } /// Convert a memref type to a bare pointer to the memref element type. 
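With strides and offsets now sharing the size predicate, layout checks like the one in the hunk above reduce to a single isDynamic test. A short self-contained sketch of that pattern (the helper name is assumed, not an API from this patch):

    #include "llvm/ADT/STLExtras.h"
    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/Support/LogicalResult.h"

    // True iff the memref's strided layout has a static offset and all-static
    // strides, i.e. no ShapedType::kDynamic entries.
    static bool hasFullyStaticLayout(mlir::MemRefType type) {
      int64_t offset;
      llvm::SmallVector<int64_t, 4> strides;
      if (mlir::failed(mlir::getStridesAndOffset(type, strides, offset)))
        return false;
      return !mlir::ShapedType::isDynamic(offset) &&
             !llvm::any_of(strides, mlir::ShapedType::isDynamic);
    }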
diff --git a/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp b/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp --- a/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp +++ b/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp @@ -27,9 +27,8 @@ static MemRefType makeStridedLayoutDynamic(MemRefType type) { return MemRefType::Builder(type).setLayout(StridedLayoutAttr::get( - type.getContext(), ShapedType::kDynamicStrideOrOffset, - SmallVector(type.getRank(), - ShapedType::kDynamicStrideOrOffset))); + type.getContext(), ShapedType::kDynamic, + SmallVector(type.getRank(), ShapedType::kDynamic))); } /// Helper function to extract the operand types that are passed to the diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp --- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp +++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp @@ -33,7 +33,7 @@ namespace { bool isStaticStrideOrOffset(int64_t strideOrOffset) { - return !ShapedType::isDynamicStrideOrOffset(strideOrOffset); + return !ShapedType::isDynamic(strideOrOffset); } LLVM::LLVMFuncOp getFreeFn(LLVMTypeConverter *typeConverter, ModuleOp module) { @@ -145,7 +145,7 @@ [&](MemRefType type, function_ref getDynamicSize) -> Value { // Compute number of elements. int64_t size = type.getShape()[0]; - Value numElements = ((size == ShapedType::kDynamicSize) + Value numElements = ((size == ShapedType::kDynamic) ? getDynamicSize() : createIndexConstant(rewriter, loc, size)); Type indexType = getIndexType(); @@ -1219,7 +1219,7 @@ Value stride = nullptr; int64_t targetRank = targetMemRefType.getRank(); for (auto i : llvm::reverse(llvm::seq(0, targetRank))) { - if (!ShapedType::isDynamicStrideOrOffset(strides[i])) { + if (!ShapedType::isDynamic(strides[i])) { // If the stride for this dimension is dynamic, then use the product // of the sizes of the inner dimensions. stride = createIndexConstant(rewriter, loc, strides[i]); @@ -1761,7 +1761,7 @@ // Offset. auto llvmIndexType = typeConverter->convertType(rewriter.getIndexType()); - if (!ShapedType::isDynamicStrideOrOffset(offset)) { + if (!ShapedType::isDynamic(offset)) { targetMemRef.setConstantOffset(rewriter, loc, offset); } else { Value baseOffset = sourceMemRef.offset(rewriter, loc); @@ -1806,7 +1806,7 @@ // constants. 
int64_t staticSize = subViewOp.getSource().getType().cast().getShape()[i]; - if (staticSize != ShapedType::kDynamicSize) { + if (staticSize != ShapedType::kDynamic) { size = rewriter.create( loc, llvmIndexType, rewriter.getI64IntegerAttr(staticSize)); } else { @@ -1828,7 +1828,7 @@ : rewriter.create( loc, llvmIndexType, rewriter.getI64IntegerAttr(subViewOp.getStaticSize(i))); - if (!ShapedType::isDynamicStrideOrOffset(strides[i])) { + if (!ShapedType::isDynamic(strides[i])) { stride = rewriter.create( loc, llvmIndexType, rewriter.getI64IntegerAttr(strides[i])); } else { @@ -1932,7 +1932,7 @@ ArrayRef strides, Value nextSize, Value runningStride, unsigned idx) const { assert(idx < strides.size()); - if (!ShapedType::isDynamicStrideOrOffset(strides[idx])) + if (!ShapedType::isDynamic(strides[idx])) return createIndexConstant(rewriter, loc, strides[idx]); if (nextSize) return runningStride diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp @@ -844,7 +844,7 @@ bool isDynamic) { if (isDynamic) { // TODO (natashaknk): Make dynamic intermediate shape not always be rank-1 - intermediateShape = {ShapedType::kDynamicSize}; + intermediateShape = {ShapedType::kDynamic}; return true; } @@ -1886,7 +1886,7 @@ SmallVector genericShape; for (int i = 0; i < rank; i++) { int64_t dim = multiples[i]; - genericShape.push_back(dim == -1 ? ShapedType::kDynamicSize : dim); + genericShape.push_back(dim == -1 ? ShapedType::kDynamic : dim); genericShape.push_back(inputShape[i]); } diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp --- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp +++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp @@ -38,7 +38,7 @@ for (const auto &i : llvm::enumerate(sliceOp.getSize())) { int64_t size = i.value().cast().getInt(); size_t index = i.index(); - sizes.push_back(size == -1 ? ShapedType::kDynamicSize : size); + sizes.push_back(size == -1 ? 
ShapedType::kDynamic : size); if (!ShapedType::isDynamic(sizes.back())) continue; diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp --- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp +++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp @@ -108,7 +108,7 @@ strides.back() != 1) return llvm::None; int64_t stride = strides[strides.size() - 2]; - if (stride == ShapedType::kDynamicStrideOrOffset) + if (stride == ShapedType::kDynamic) return llvm::None; return stride; } diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp --- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp +++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp @@ -17,7 +17,6 @@ #include "mlir/Dialect/Vector/Transforms/VectorTransforms.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/TypeUtilities.h" -#include "mlir/Support/MathExtras.h" #include "mlir/Target/LLVMIR/TypeToLLVM.h" #include "mlir/Transforms/DialectConversion.h" @@ -963,8 +962,8 @@ auto sizes = memRefType.getShape(); for (int index = 0, e = strides.size() - 1; index < e; ++index) { if (ShapedType::isDynamic(sizes[index + 1]) || - ShapedType::isDynamicStrideOrOffset(strides[index]) || - ShapedType::isDynamicStrideOrOffset(strides[index + 1])) + ShapedType::isDynamic(strides[index]) || + ShapedType::isDynamic(strides[index + 1])) return None; if (strides[index] != strides[index + 1] * sizes[index + 1]) return None; @@ -1009,7 +1008,7 @@ if (!targetStrides) return failure(); // Only support static strides for now, regardless of contiguity. - if (llvm::any_of(*targetStrides, ShapedType::isDynamicStrideOrOffset)) + if (llvm::any_of(*targetStrides, ShapedType::isDynamic)) return failure(); auto int64Ty = IntegerType::get(rewriter.getContext(), 64); diff --git a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp --- a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp +++ b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp @@ -380,7 +380,7 @@ // If no constant bound is found, then it can always be bound by the // memref's dim size if the latter has a constant size along this dim. auto dimSize = memRefType.getDimSize(d); - if (dimSize == ShapedType::kDynamicSize) + if (dimSize == ShapedType::kDynamic) return None; diffConstant = dimSize; // Lower bound becomes 0. diff --git a/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp --- a/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp +++ b/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp @@ -92,7 +92,7 @@ // Put together alloc operands for any dynamic dimensions of the memref. SmallVector allocOperands; for (const auto &dim : llvm::enumerate(oldMemRefType.getShape())) { - if (dim.value() == ShapedType::kDynamicSize) + if (dim.value() == ShapedType::kDynamic) allocOperands.push_back(bOuter.createOrFold( forOp.getLoc(), oldMemRef, dim.index())); } diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp --- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp +++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp @@ -1796,7 +1796,7 @@ bool isDynDim = isNormalizedMemRefDynamicDim(d, layoutMap, memrefTypeDynDims, context); if (isDynDim) { - newShape[d] = ShapedType::kDynamicSize; + newShape[d] = ShapedType::kDynamic; } else { // The lower bound for the shape is always zero. 
Optional ubConst = diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp --- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp @@ -767,9 +767,9 @@ auto memorySpaceAttr = IntegerAttr::get( IntegerType::get(tensorType.getContext(), 64), memorySpace); auto rankedTensorType = tensorType.cast(); - int64_t dynamicOffset = ShapedType::kDynamicStrideOrOffset; + int64_t dynamicOffset = ShapedType::kDynamic; SmallVector dynamicStrides(rankedTensorType.getRank(), - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(), dynamicOffset, dynamicStrides); return MemRefType::get(rankedTensorType.getShape(), diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp @@ -46,8 +46,7 @@ failed(getStridesAndOffset(target, targetStrides, targetOffset))) return false; auto dynamicToStatic = [](int64_t a, int64_t b) { - return a == MemRefType::getDynamicStrideOrOffset() && - b != MemRefType::getDynamicStrideOrOffset(); + return ShapedType::isDynamic(a) && !ShapedType::isDynamic(b); }; if (dynamicToStatic(sourceOffset, targetOffset)) return false; @@ -69,7 +68,7 @@ auto loc = value.getLoc(); SmallVector dynamicOperands; for (int i = 0; i < destType.getRank(); ++i) { - if (destType.getShape()[i] != ShapedType::kDynamicSize) + if (destType.getShape()[i] != ShapedType::kDynamic) continue; auto index = b.createOrFold(loc, i); Value size = b.create(loc, value, index); diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp --- a/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp @@ -28,9 +28,9 @@ SmallVector strides; if (failed(getStridesAndOffset(type, strides, offset))) return false; - if (!llvm::all_of(strides, ShapedType::isDynamicStrideOrOffset)) + if (!llvm::all_of(strides, ShapedType::isDynamic)) return false; - if (!ShapedType::isDynamicStrideOrOffset(offset)) + if (!ShapedType::isDynamic(offset)) return false; return true; } diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp @@ -147,7 +147,7 @@ // We parsed a generic dimension list, but vectors only support two forms: // - single non-dynamic entry in the list (fixed vector); - // - two elements, the first dynamic (indicated by ShapedType::kDynamicSize) + // - two elements, the first dynamic (indicated by ShapedType::kDynamic) // and the second // non-dynamic (scalable vector). 
if (dims.empty() || dims.size() > 2 || diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp --- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp +++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp @@ -1046,7 +1046,7 @@ } staticSplitPoint = - parser.getBuilder().getI64IntegerAttr(ShapedType::kDynamicSize); + parser.getBuilder().getI64IntegerAttr(ShapedType::kDynamic); } result.addAttribute( @@ -1062,7 +1062,7 @@ void SplitOp::print(OpAsmPrinter &printer) { printer << " " << getTarget() << " after "; int64_t staticSplitSize = static_cast(getStaticSplitPoint()); - if (staticSplitSize != ShapedType::kDynamicSize) + if (staticSplitSize != ShapedType::kDynamic) printer << staticSplitSize; else printer << getDynamicSplitPoint(); @@ -1073,7 +1073,7 @@ LogicalResult SplitOp::verify() { if ((static_cast(getStaticSplitPoint()) != - ShapedType::kDynamicSize) ^ + ShapedType::kDynamic) ^ (getDynamicSplitPoint() == nullptr)) { return emitOpError() << "expects either a dynamic or a static split " "point to be provided"; @@ -1304,7 +1304,7 @@ unsigned dynamicPos = 0; Builder builder(getContext()); for (int64_t size : tileSizes) { - if (size == ShapedType::kDynamicSize) { + if (size == ShapedType::kDynamic) { results.push_back(dynamic[dynamicPos++]); } else { results.push_back(builder.getIndexAttr(size)); @@ -1322,7 +1322,7 @@ if (parser.parseOperand(target) || parser.resolveOperand(target, pdlOperationType, result.operands) || parseDynamicIndexList(parser, dynamicSizes, staticSizes, - ShapedType::kDynamicSize) || + ShapedType::kDynamic) || parser.resolveOperands(dynamicSizes, pdlOperationType, result.operands) || parser.parseOptionalAttrDict(result.attributes)) return ParseResult::failure(); @@ -1337,7 +1337,7 @@ void TileOp::print(OpAsmPrinter &p) { p << ' ' << getTarget(); printDynamicIndexList(p, getOperation(), getDynamicSizes(), getStaticSizes(), - ShapedType::kDynamicSize); + ShapedType::kDynamic); p.printOptionalAttrDict((*this)->getAttrs(), {getStaticSizesAttrName()}); } @@ -1375,7 +1375,7 @@ SmallVector staticTileSizes; SmallVector dynamicTileSizes; dispatchIndexOpFoldResults(mixedTileSizes, dynamicTileSizes, staticTileSizes, - ShapedType::kDynamicSize); + ShapedType::kDynamic); // Call the default builder which sets up the proper operands segment sizes // attributes for multiple variadic operands. In the absence of this, horrible // bugs ensue. @@ -1410,7 +1410,7 @@ SmallVector staticNumThreads; SmallVector dynamicNumThreads; dispatchIndexOpFoldResults(mixedNumThreads, dynamicNumThreads, - staticNumThreads, ShapedType::kDynamicSize); + staticNumThreads, ShapedType::kDynamic); // Call the default builder which sets up the proper operands segment sizes // attributes for multiple variadic operands. In the absence of this, horrible // bugs ensue. 
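The hunks above keep using kDynamic as the placeholder that ties a `static_sizes`-style attribute back to its dynamic operands (the `for (int64_t size : tileSizes)` loops). A standalone sketch of that pattern (the helper name is invented, not an MLIR API):

    #include "mlir/IR/Builders.h"
    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/OpDefinition.h"

    // Recombine a static size list with its dynamic operands: every kDynamic
    // entry consumes the next dynamic value, everything else becomes an index
    // attribute.
    static llvm::SmallVector<mlir::OpFoldResult>
    resolveMixedSizes(llvm::ArrayRef<int64_t> staticSizes,
                      mlir::ValueRange dynamicSizes, mlir::Builder &b) {
      llvm::SmallVector<mlir::OpFoldResult> result;
      unsigned dynamicPos = 0;
      for (int64_t size : staticSizes) {
        if (size == mlir::ShapedType::kDynamic)
          result.push_back(dynamicSizes[dynamicPos++]);
        else
          result.push_back(b.getIndexAttr(size));
      }
      return result;
    }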
@@ -1663,7 +1663,7 @@ unsigned dynamicPos = 0; Builder builder(getContext()); for (int64_t size : tileSizes) { - if (size == ShapedType::kDynamicSize) { + if (size == ShapedType::kDynamic) { results.push_back(dynamic[dynamicPos++]); } else { results.push_back(builder.getIndexAttr(size)); @@ -1681,7 +1681,7 @@ if (parser.parseOperand(target) || parser.resolveOperand(target, pdlOperationType, result.operands) || parseDynamicIndexList(parser, dynamicSizes, staticSizes, - ShapedType::kDynamicSize) || + ShapedType::kDynamic) || parser.resolveOperands(dynamicSizes, pdlOperationType, result.operands) || parser.parseOptionalAttrDict(result.attributes)) return ParseResult::failure(); @@ -1696,7 +1696,7 @@ void TileToScfForOp::print(OpAsmPrinter &p) { p << ' ' << getTarget(); printDynamicIndexList(p, getOperation(), getDynamicSizes(), getStaticSizes(), - ShapedType::kDynamicSize); + ShapedType::kDynamic); p.printOptionalAttrDict((*this)->getAttrs(), {getStaticSizesAttrName()}); } diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp @@ -1706,7 +1706,7 @@ modifiedOutput = true; SmallVector dynamicDims; for (const auto &dim : llvm::enumerate(operandType.getShape())) { - if (dim.value() != ShapedType::kDynamicSize) + if (dim.value() != ShapedType::kDynamic) continue; dynamicDims.push_back(rewriter.createOrFold( loc, operandVal, dim.index())); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp @@ -156,10 +156,10 @@ continue; unsigned rank = tensorType.getRank(); SmallVector staticOffsetsVector( - rank, ShapedType::kDynamicStrideOrOffset); - SmallVector staticSizesVector(rank, ShapedType::kDynamicSize); + rank, ShapedType::kDynamic); + SmallVector staticSizesVector(rank, ShapedType::kDynamic); SmallVector staticStridesVector( - rank, ShapedType::kDynamicStrideOrOffset); + rank, ShapedType::kDynamic); resultTypes.push_back(tensor::ExtractSliceOp::inferResultType( tensorType, staticOffsetsVector, staticSizesVector, staticStridesVector)); diff --git a/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp b/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp @@ -425,7 +425,7 @@ // Create the packed tensor into which we amortize // padding. - SmallVector packedShape(nPackedLoops, ShapedType::kDynamicSize); + SmallVector packedShape(nPackedLoops, ShapedType::kDynamic); // TODO: go grab dims when necessary, for now tensor::PadOp returns a static // tensor. llvm::append_range(packedShape, transposedTensorType->getShape()); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp @@ -65,7 +65,7 @@ // Fallback dynamic buffer. 
auto dynamicBufferType = - MemRefType::get(ShapedType::kDynamicSize, b.getIntegerType(8)); + MemRefType::get(ShapedType::kDynamic, b.getIntegerType(8)); Value mul = b.createOrFold( b.create(width), allocSize); if (options.useAlloca) @@ -93,7 +93,7 @@ Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize, layout, alignment); SmallVector dynSizes(boundingSubViewSize.size(), - ShapedType::kDynamicSize); + ShapedType::kDynamic); Value view = b.createOrFold( MemRefType::get(dynSizes, viewType.getElementType()), buffer, zero, boundingSubViewSize); @@ -243,7 +243,7 @@ partialSizes.push_back( b.createOrFold(loc, subView, resultDimIdx++)); } - SmallVector dynSizes(fullSizes.size(), ShapedType::kDynamicSize); + SmallVector dynSizes(fullSizes.size(), ShapedType::kDynamic); // If a callback is not specified, then use the default implementation for // allocating the promoted buffer. Optional fullLocalView = allocationFn(b, subView, fullSizes, layout); diff --git a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/SplitReduction.cpp @@ -44,7 +44,7 @@ unsigned reductionDim = dims[0]; SmallVector loopRanges = op.getStaticLoopRanges(); int64_t reductionDimSize = loopRanges[reductionDim]; - if (reductionDimSize == ShapedType::kDynamicSize || + if (reductionDimSize == ShapedType::kDynamic || reductionDimSize % ratio != 0) return b.notifyMatchFailure( op, "Reduction dimension not divisible by split ratio"); @@ -253,7 +253,7 @@ unsigned reductionDimPos = dims[0]; SmallVector loopRanges = op.getStaticLoopRanges(); int64_t reductionDimSize = loopRanges[reductionDimPos]; - if (reductionDimSize == ShapedType::kDynamicSize || + if (reductionDimSize == ShapedType::kDynamic || reductionDimSize % splitFactor != 0 || insertSplitDimension >= loopRanges.size()) return b.notifyMatchFailure( diff --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp @@ -280,7 +280,7 @@ for (int64_t idx : llvm::seq(0, oldShape.size() + 1)) { if (idx == insertSplitDimension) { dispatchIndexOpFoldResults(sizes[idx], dynamicDims, newOutputShape, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); continue; } int64_t oldIdx = idx < insertSplitDimension ? idx : idx - 1; diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp --- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp +++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp @@ -218,7 +218,7 @@ SmallVector dynOperands; auto shapedType = val.getType().cast(); for (const auto &dim : llvm::enumerate(shapedType.getShape())) { - if (dim.value() == ShapedType::kDynamicSize) + if (dim.value() == ShapedType::kDynamic) dynOperands.push_back(createOrFoldDimOp(b, loc, val, dim.index())); } return dynOperands; diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -31,22 +31,22 @@ namespace saturated_arith { struct Wrapper { static Wrapper stride(int64_t v) { - return (ShapedType::isDynamicStrideOrOffset(v)) ? Wrapper{true, 0} + return (ShapedType::isDynamic(v)) ? 
Wrapper{true, 0} : Wrapper{false, v}; } static Wrapper offset(int64_t v) { - return (ShapedType::isDynamicStrideOrOffset(v)) ? Wrapper{true, 0} + return (ShapedType::isDynamic(v)) ? Wrapper{true, 0} : Wrapper{false, v}; } static Wrapper size(int64_t v) { return (ShapedType::isDynamic(v)) ? Wrapper{true, 0} : Wrapper{false, v}; } int64_t asOffset() { - return saturated ? ShapedType::kDynamicStrideOrOffset : v; + return saturated ? ShapedType::kDynamic : v; } - int64_t asSize() { return saturated ? ShapedType::kDynamicSize : v; } + int64_t asSize() { return saturated ? ShapedType::kDynamic : v; } int64_t asStride() { - return saturated ? ShapedType::kDynamicStrideOrOffset : v; + return saturated ? ShapedType::kDynamic : v; } bool operator==(Wrapper other) { return (saturated && other.saturated) || @@ -136,7 +136,7 @@ /// - `memRefTy == memref>` /// - `getAttributes == getConstantStrides` (i.e., a wrapper around /// `getStridesAndOffset`), and -/// - `isDynamic == isDynamicStrideOrOffset` +/// - `isDynamic == ShapedType::isDynamic` /// Will yield: `values == [2, 1]` static void constifyIndexValues( SmallVectorImpl &values, MemRefType memRefTy, @@ -296,7 +296,7 @@ newShapeConstants.push_back(constantIndexOp.value()); } else { // Dynamic shape dimension not folded; copy dynamicSize from old memref. - newShapeConstants.push_back(ShapedType::kDynamicSize); + newShapeConstants.push_back(ShapedType::kDynamic); dynamicSizes.push_back(dynamicSize); } dynamicDimPos++; @@ -705,16 +705,16 @@ // If cast is towards more static offset along any dimension, don't fold. if (sourceOffset != resultOffset) - if (ShapedType::isDynamicStrideOrOffset(sourceOffset) && - !ShapedType::isDynamicStrideOrOffset(resultOffset)) + if (ShapedType::isDynamic(sourceOffset) && + !ShapedType::isDynamic(resultOffset)) return false; // If cast is towards more static strides along any dimension, don't fold. for (auto it : llvm::zip(sourceStrides, resultStrides)) { auto ss = std::get<0>(it), st = std::get<1>(it); if (ss != st) - if (ShapedType::isDynamicStrideOrOffset(ss) && - !ShapedType::isDynamicStrideOrOffset(st)) + if (ShapedType::isDynamic(ss) && + !ShapedType::isDynamic(st)) return false; } @@ -747,8 +747,8 @@ // same. They are also compatible if either one is dynamic (see // description of MemRefCastOp for details). 
auto checkCompatible = [](int64_t a, int64_t b) { - return (a == MemRefType::getDynamicStrideOrOffset() || - b == MemRefType::getDynamicStrideOrOffset() || a == b); + return (ShapedType::isDynamic(a) || + ShapedType::isDynamic(b) || a == b); }; if (!checkCompatible(aOffset, bOffset)) return false; @@ -1445,7 +1445,7 @@ ExtractStridedMetadataOp::getConstifiedMixedStrides() { SmallVector values = getAsOpFoldResult(getStrides()); constifyIndexValues(values, getSource().getType(), getContext(), - getConstantStrides, ShapedType::isDynamicStrideOrOffset); + getConstantStrides, ShapedType::isDynamic); return values; } @@ -1453,7 +1453,7 @@ OpFoldResult offsetOfr = getAsOpFoldResult(getOffset()); SmallVector values(1, offsetOfr); constifyIndexValues(values, getSource().getType(), getContext(), - getConstantOffset, ShapedType::isDynamicStrideOrOffset); + getConstantOffset, ShapedType::isDynamic); return values[0]; } @@ -1772,11 +1772,11 @@ SmallVector staticOffsets, staticSizes, staticStrides; SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, - ShapedType::kDynamicSize); + ShapedType::kDynamic); dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); build(b, result, resultType, source, dynamicOffsets, dynamicSizes, dynamicStrides, b.getI64ArrayAttr(staticOffsets), b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides)); @@ -1847,8 +1847,8 @@ // Match offset in result memref type and in static_offsets attribute. int64_t expectedOffset = extractFromI64ArrayAttr(getStaticOffsets()).front(); - if (!ShapedType::isDynamicStrideOrOffset(resultOffset) && - !ShapedType::isDynamicStrideOrOffset(expectedOffset) && + if (!ShapedType::isDynamic(resultOffset) && + !ShapedType::isDynamic(expectedOffset) && resultOffset != expectedOffset) return emitError("expected result type with offset = ") << resultOffset << " instead of " << expectedOffset; @@ -1858,8 +1858,8 @@ resultStrides, extractFromI64ArrayAttr(getStaticStrides())))) { int64_t resultStride = std::get<0>(en.value()); int64_t expectedStride = std::get<1>(en.value()); - if (!ShapedType::isDynamicStrideOrOffset(resultStride) && - !ShapedType::isDynamicStrideOrOffset(expectedStride) && + if (!ShapedType::isDynamic(resultStride) && + !ShapedType::isDynamic(expectedStride) && resultStride != expectedStride) return emitError("expected result type with stride = ") << expectedStride << " instead of " << resultStride @@ -1909,7 +1909,7 @@ SmallVector ReinterpretCastOp::getConstifiedMixedStrides() { SmallVector values = getMixedStrides(); constifyIndexValues(values, getType(), getContext(), getConstantStrides, - ShapedType::isDynamicStrideOrOffset); + ShapedType::isDynamic); return values; } @@ -1918,7 +1918,7 @@ assert(values.size() == 1 && "reinterpret_cast must have one and only one offset"); constifyIndexValues(values, getType(), getContext(), getConstantOffset, - ShapedType::isDynamicStrideOrOffset); + ShapedType::isDynamic); return values[0]; } @@ -2284,7 +2284,7 @@ // the corresponding stride may have to be skipped. (See above comment.) // Therefore, the result stride cannot be statically determined and must // be dynamic. 
- resultStrides.push_back(ShapedType::kDynamicStrideOrOffset); + resultStrides.push_back(ShapedType::kDynamic); } } @@ -2481,7 +2481,7 @@ if (resultMemRefType) { if (!resultMemRefType.getLayout().isIdentity()) return emitOpError("result memref type should have identity affine map"); - if (shapeSize == ShapedType::kDynamicSize) + if (shapeSize == ShapedType::kDynamic) return emitOpError("cannot use shape operand with dynamic length to " "reshape to statically-ranked memref type"); if (shapeSize != resultMemRefType.getRank()) @@ -2575,11 +2575,11 @@ SmallVector staticOffsets, staticSizes, staticStrides; SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, - ShapedType::kDynamicSize); + ShapedType::kDynamic); dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); return SubViewOp::inferResultType(sourceMemRefType, staticOffsets, staticSizes, staticStrides); } @@ -2625,11 +2625,11 @@ SmallVector staticOffsets, staticSizes, staticStrides; SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, - ShapedType::kDynamicSize); + ShapedType::kDynamic); dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); return SubViewOp::inferRankReducedResultType( resultShape, sourceRankedTensorType, staticOffsets, staticSizes, staticStrides); @@ -2646,11 +2646,11 @@ SmallVector staticOffsets, staticSizes, staticStrides; SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, - ShapedType::kDynamicSize); + ShapedType::kDynamic); dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); auto sourceMemRefType = source.getType().cast(); // Structuring implementation this way avoids duplication between builders. if (!resultType) { diff --git a/mlir/lib/Dialect/MemRef/Transforms/SimplifyExtractStridedMetadata.cpp b/mlir/lib/Dialect/MemRef/Transforms/SimplifyExtractStridedMetadata.cpp --- a/mlir/lib/Dialect/MemRef/Transforms/SimplifyExtractStridedMetadata.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/SimplifyExtractStridedMetadata.cpp @@ -81,7 +81,7 @@ detail::bindSymbolsList(rewriter.getContext(), symbols); AffineExpr expr = symbols.front(); - values[0] = ShapedType::isDynamicStrideOrOffset(sourceOffset) + values[0] = ShapedType::isDynamic(sourceOffset) ? getAsOpFoldResult(newExtractStridedMetadata.getOffset()) : rewriter.getIndexAttr(sourceOffset); SmallVector subOffsets = subview.getMixedOffsets(); @@ -91,7 +91,7 @@ for (unsigned i = 0; i < sourceRank; ++i) { // Compute the stride. OpFoldResult origStride = - ShapedType::isDynamicStrideOrOffset(sourceStrides[i]) + ShapedType::isDynamic(sourceStrides[i]) ? 
origStrides[i] : OpFoldResult(rewriter.getIndexAttr(sourceStrides[i])); strides.push_back(makeComposedFoldedAffineApply( @@ -273,7 +273,7 @@ "getStridesAndOffset must work on valid expand_shape"); OpFoldResult origStride = - ShapedType::isDynamicStrideOrOffset(strides[groupId]) + ShapedType::isDynamic(strides[groupId]) ? origStrides[groupId] : builder.getIndexAttr(strides[groupId]); @@ -425,7 +425,7 @@ int64_t innerMostDimForGroup = reassocGroup.back(); int64_t innerMostStrideForGroup = strides[innerMostDimForGroup]; collapsedStride.push_back( - ShapedType::isDynamicStrideOrOffset(innerMostStrideForGroup) + ShapedType::isDynamic(innerMostStrideForGroup) ? origStrides[innerMostDimForGroup] : builder.getIndexAttr(innerMostStrideForGroup)); @@ -483,7 +483,7 @@ unsigned reshapeRank = reshapeType.getRank(); OpFoldResult offsetOfr = - ShapedType::isDynamicStrideOrOffset(offset) + ShapedType::isDynamic(offset) ? getAsOpFoldResult(newExtractStridedMetadata.getOffset()) : rewriter.getIndexAttr(offset); diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp --- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp +++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp @@ -171,9 +171,9 @@ return elementSize; auto dims = memRefType.getShape(); - if (llvm::is_contained(dims, ShapedType::kDynamicSize) || - offset == MemRefType::getDynamicStrideOrOffset() || - llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset())) + if (llvm::is_contained(dims, ShapedType::kDynamic) || + ShapedType::isDynamic(offset) || + llvm::is_contained(strides, ShapedType::kDynamic)) return llvm::None; int64_t memrefSize = -1; @@ -749,8 +749,8 @@ int64_t offset; SmallVector strides; if (failed(getStridesAndOffset(baseType, strides, offset)) || - llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset()) || - offset == MemRefType::getDynamicStrideOrOffset()) { + llvm::is_contained(strides, ShapedType::kDynamic) || + ShapedType::isDynamic(offset)) { return nullptr; } @@ -780,8 +780,8 @@ int64_t offset; SmallVector strides; if (failed(getStridesAndOffset(baseType, strides, offset)) || - llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset()) || - offset == MemRefType::getDynamicStrideOrOffset()) { + llvm::is_contained(strides, ShapedType::kDynamic) || + ShapedType::isDynamic(offset)) { return nullptr; } diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp --- a/mlir/lib/Dialect/Shape/IR/Shape.cpp +++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp @@ -1763,7 +1763,7 @@ else { auto shapedTy = operands[0].getType().cast(); int64_t rank = - shapedTy.hasRank() ? shapedTy.getRank() : ShapedType::kDynamicSize; + shapedTy.hasRank() ? shapedTy.getRank() : ShapedType::kDynamic; Type indexTy = IndexType::get(context); Type extentTensorTy = RankedTensorType::get({rank}, indexTy); inferredReturnTypes.assign({extentTensorTy}); diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp --- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp +++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp @@ -345,7 +345,7 @@ // (e.g. 10 vs. 10, 10 vs. ?, or ? vs. ?), but reject direct mismatches or // matches that would need a runtime assert (e.g. 10 vs. 20 or ? vs. 10). 
for (unsigned d = 0, rank = tp1.getRank(); d < rank; d++) - if (shape1[d] != shape2[d] && shape2[d] != ShapedType::kDynamicSize) + if (shape1[d] != shape2[d] && shape2[d] != ShapedType::kDynamic) return emitError("unexpected conversion mismatch in dimension ") << d; return success(); } @@ -499,7 +499,7 @@ for (auto type : getInputs().getTypes()) { auto shape = type.cast().getShape(); for (auto dim : shape) { - if (dim == ShapedType::kDynamicSize) + if (ShapedType::isDynamic(dim)) return emitError("Only statically-sized input tensors are supported."); } } @@ -522,7 +522,7 @@ for (unsigned i = 0; i < rank; i++) { auto dstDim = dstTp.getShape()[i]; if (i == concatDim) { - if (dstDim != ShapedType::kDynamicSize) { + if (!ShapedType::isDynamic(dstDim)) { unsigned sumDim = 0; for (auto src : getInputs()) { // If we reach here, all inputs should have static shapes. @@ -540,7 +540,7 @@ int64_t prev = dstDim; for (auto src : getInputs()) { auto d = src.getType().cast().getShape()[i]; - if (prev != ShapedType::kDynamicSize && d != prev) + if (!ShapedType::isDynamic(prev) && d != prev) return emitError("All dimensions (expect for the concatenating one) " "should be equal."); prev = d; @@ -701,7 +701,7 @@ int64_t dim = mtp.getShape()[0]; // We can't check the size of dynamic dimension at compile-time, but all // xs and ys should have a dimension not less than n at runtime. - if (n && dim != ShapedType::kDynamicSize && dim < n.value()) + if (n && !ShapedType::isDynamic(dim) && dim < n.value()) return emitError(llvm::formatv("xs and ys need to have a dimension >= n" ": {0} < {1}", dim, n.value())); @@ -744,7 +744,7 @@ auto checkDim = [&](Value v, uint64_t min, const char *message) { MemRefType tp = v.getType().cast(); int64_t dim = tp.getShape()[0]; - if (dim != ShapedType::kDynamicSize && dim < (int64_t)min) { + if (!ShapedType::isDynamic(dim) && dim < (int64_t)min) { emitError(llvm::formatv("{0} got {1} < {2}", message, dim, min)); } }; diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp @@ -143,7 +143,7 @@ auto rank = rtp.getRank(); auto shape = rtp.getShape(); auto enc = getSparseTensorEncoding(rtp); - auto dynShape = {ShapedType::kDynamicSize}; + auto dynShape = {ShapedType::kDynamic}; // Scan all dimensions of current tensor. for (int64_t d = 0; d < rank; d++) { // This should be called only once at beginning. @@ -194,7 +194,7 @@ } else { // Annotated sparse tensors. // We also need the value buffer for annotated all dense `sparse` tensor. - auto dynShape = {ShapedType::kDynamicSize}; + auto dynShape = {ShapedType::kDynamic}; auto sparseTp = MemRefType::get(dynShape, elementType); valBuffer[t] = builder.create(loc, sparseTp, tensor); } @@ -809,12 +809,12 @@ // expanded from the i-th dimension in srcShape. // For example, if srcDim = 8, then the expanded shape could be <2x?x2>, // but not <2x?x?>. - if (staticDstShape[j] == ShapedType::kDynamicSize) { + if (staticDstShape[j] == ShapedType::kDynamic) { // The expanded dimension has dynamic size. We compute the dimension // by dividing srcDim by the product of the static dimensions. 
int64_t product = 1; for (unsigned k = start; k < start + map.size(); k++) { - if (staticDstShape[k] != ShapedType::kDynamicSize) { + if (staticDstShape[k] != ShapedType::kDynamic) { product *= staticDstShape[k]; } } @@ -922,7 +922,7 @@ Value mlir::sparse_tensor::genAlloca(OpBuilder &builder, Location loc, Value sz, Type tp) { - auto memTp = MemRefType::get({ShapedType::kDynamicSize}, tp); + auto memTp = MemRefType::get({ShapedType::kDynamic}, tp); return builder.create(loc, memTp, ValueRange{sz}); } @@ -939,7 +939,7 @@ auto memTp = MemRefType::get(shape, elemTp); SmallVector dynamicSizes; for (unsigned i = 0, rank = tensorTp.getRank(); i < rank; i++) { - if (shape[i] == ShapedType::kDynamicSize) + if (shape[i] == ShapedType::kDynamic) dynamicSizes.push_back(sizes[i]); } Value mem = builder.create(loc, memTp, dynamicSizes); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp @@ -722,7 +722,7 @@ auto mtp = v.getType().cast(); if (!mtp.isDynamicDim(0)) { auto newMtp = - MemRefType::get({ShapedType::kDynamicSize}, mtp.getElementType()); + MemRefType::get({ShapedType::kDynamic}, mtp.getElementType()); v = rewriter.create(loc, newMtp, v); } operands.push_back(v); @@ -786,7 +786,7 @@ Value c2 = constantIndex(rewriter, loc, 2); auto bufferType = - MemRefType::get({ShapedType::kDynamicSize}, value.getType()); + MemRefType::get({ShapedType::kDynamic}, value.getType()); scf::IfOp ifOp = rewriter.create(loc, bufferType, cond, /*else=*/true); // True branch. diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp @@ -237,16 +237,16 @@ // order. Clients of this type know what field is what from the sparse // tensor type. if (isCompressedDim(rType, r)) { - fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, ptrType)); - fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, idxType)); + fields.push_back(MemRefType::get({ShapedType::kDynamic}, ptrType)); + fields.push_back(MemRefType::get({ShapedType::kDynamic}, idxType)); } else if (isSingletonDim(rType, r)) { - fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, idxType)); + fields.push_back(MemRefType::get({ShapedType::kDynamic}, idxType)); } else { assert(isDenseDim(rType, r)); // no fields } } // The values array. - fields.push_back(MemRefType::get({ShapedType::kDynamicSize}, eltType)); + fields.push_back(MemRefType::get({ShapedType::kDynamic}, eltType)); assert(fields.size() == lastField); return success(); } @@ -290,7 +290,7 @@ /// Creates allocation operation. static Value createAllocation(OpBuilder &builder, Location loc, Type type, Value sz, bool enableInit) { - auto memType = MemRefType::get({ShapedType::kDynamicSize}, type); + auto memType = MemRefType::get({ShapedType::kDynamic}, type); Value buffer = builder.create(loc, memType, sz); if (enableInit) { Value fillValue = @@ -794,7 +794,7 @@ assert(sz); // This for sure is a sparse tensor // Generate a memref for `sz` elements of type `t`. 
auto genAlloc = [&](Type t) { - auto memTp = MemRefType::get({ShapedType::kDynamicSize}, t); + auto memTp = MemRefType::get({ShapedType::kDynamic}, t); return rewriter.create(loc, memTp, ValueRange{*sz}); }; // Allocate temporary buffers for values/filled-switch and added. diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp @@ -85,7 +85,7 @@ SparseTensorEncodingAttr &enc, ShapedType stp, Value src, unsigned i) { auto shape = stp.getShape(); - if (shape[i] == ShapedType::kDynamicSize) + if (shape[i] == ShapedType::kDynamic) return genLvlSizeCall(builder, loc, enc, src, i); return constantIndex(builder, loc, shape[i]); } @@ -104,7 +104,7 @@ Location loc, ShapedType stp) { auto shape = stp.getShape(); for (unsigned i = 0, rank = stp.getRank(); i < rank; i++) { - uint64_t s = shape[i] == ShapedType::kDynamicSize ? 0 : shape[i]; + uint64_t s = shape[i] == ShapedType::kDynamic ? 0 : shape[i]; sizes.push_back(constantIndex(builder, loc, s)); } } @@ -129,7 +129,7 @@ sizesFromSrc(builder, sizes, loc, srcs[0]); // Sum up on the `dim` if the dimension is dynamic. - if (dstShape[dim] != ShapedType::kDynamicSize) { + if (dstShape[dim] != ShapedType::kDynamic) { // Faithfully take the static size. sizes[dim] = constantIndex(builder, loc, dstShape[dim]); } else { @@ -151,7 +151,7 @@ /// `memref<$sz x $tp>`). Unlike temporary buffers on the stack, /// this buffer must be explicitly deallocated by client. static Value genAlloc(RewriterBase &rewriter, Location loc, Value sz, Type tp) { - auto memTp = MemRefType::get({ShapedType::kDynamicSize}, tp); + auto memTp = MemRefType::get({ShapedType::kDynamic}, tp); return rewriter.create(loc, memTp, ValueRange{sz}); } @@ -1037,7 +1037,7 @@ Location loc = op.getLoc(); // Query values array size for the actually stored values size. 
Type eltType = op.getTensor().getType().cast().getElementType(); - auto resTp = MemRefType::get({ShapedType::kDynamicSize}, eltType); + auto resTp = MemRefType::get({ShapedType::kDynamic}, eltType); Value values = genValuesCall(rewriter, loc, resTp, adaptor.getOperands()); rewriter.replaceOpWithNewOp(op, values, constantIndex(rewriter, loc, 0)); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp @@ -117,7 +117,7 @@ Location loc, ShapedType stp, Value tensor) { for (const auto &d : enumerate(stp.getShape())) { Value dim; - if (d.value() == ShapedType::kDynamicSize) + if (d.value() == ShapedType::kDynamic) dim = builder.create(loc, tensor, d.index()); else dim = constantIndex(builder, loc, d.value()); @@ -165,7 +165,7 @@ const SmallVectorImpl &sizes, SmallVectorImpl &dynSizes) { for (const auto &d : enumerate(tp.getShape())) { - if (d.value() == ShapedType::kDynamicSize) + if (d.value() == ShapedType::kDynamic) dynSizes.push_back(sizes[d.index()]); } } @@ -375,7 +375,7 @@ genReshapeDstShape(loc, rewriter, dstSizes, srcSizes, dstShape, op.getReassociationIndices()); for (auto &d : llvm::enumerate(dstShape)) { - if (d.value() == ShapedType::kDynamicSize) + if (d.value() == ShapedType::kDynamic) dstDynSizes.push_back(dstSizes[d.index()]); } } @@ -465,7 +465,7 @@ if (!rtp.hasStaticShape()) { ArrayRef rShape = rtp.getShape(); for (const auto &d : llvm::enumerate(rShape)) { - if (d.value() == ShapedType::kDynamicSize) { + if (d.value() == ShapedType::kDynamic) { Value v = createOrFoldDimOp(rewriter, loc, op.getOperand(0), d.index()); rewriter.create(loc, op.getOperand(0), d.index()); @@ -705,7 +705,7 @@ // Sort the COO tensor so that its elements are ordered via increasing // indices for the storage ordering of the dst tensor. 
SparseTensorEncodingAttr encSrc = getSparseTensorEncoding(srcTp); - auto dynShape = {ShapedType::kDynamicSize}; + auto dynShape = {ShapedType::kDynamic}; auto indTp = MemRefType::get(dynShape, getIndexOverheadType(rewriter, encSrc)); uint64_t rank = dstTp.getRank(); @@ -888,7 +888,7 @@ .getResult(0); ArrayRef dstShape = dstTp.getShape(); for (auto &d : llvm::enumerate(dstShape)) { - if (d.value() == ShapedType::kDynamicSize) { + if (d.value() == ShapedType::kDynamic) { dynSizesArray.push_back(rewriter.create( loc, dimSizes, constantIndex(rewriter, loc, d.index()))); } diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp @@ -868,7 +868,7 @@ Value tensor = lhs->get(); Location loc = op.getLoc(); if (atStart) { - auto dynShape = {ShapedType::kDynamicSize}; + auto dynShape = {ShapedType::kDynamic}; Type etp = tensor.getType().cast().getElementType(); Type t1 = MemRefType::get(dynShape, etp); Type t2 = MemRefType::get(dynShape, builder.getI1Type()); diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp --- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp @@ -518,7 +518,7 @@ SmallVector staticShape; SmallVector dynamicSizes; dispatchIndexOpFoldResults(sizes, dynamicSizes, staticShape, - ShapedType::kDynamicSize); + ShapedType::kDynamic); build(builder, result, staticShape, elementType, dynamicSizes, encoding); } @@ -1159,7 +1159,7 @@ } APInt index; if (!matchPattern(*operandsIt, m_ConstantInt(&index))) { - newShape.push_back(ShapedType::kDynamicSize); + newShape.push_back(ShapedType::kDynamic); newOperands.push_back(*operandsIt++); continue; } @@ -1333,8 +1333,8 @@ unsigned dim = m.getNumResults(); auto band = shape.slice(currentDim, dim); int64_t size = 1; - if (llvm::is_contained(band, ShapedType::kDynamicSize)) - size = ShapedType::kDynamicSize; + if (llvm::is_contained(band, ShapedType::kDynamic)) + size = ShapedType::kDynamic; else for (unsigned d = 0; d < dim; ++d) size *= shape[currentDim + d]; @@ -1526,11 +1526,11 @@ SmallVector staticOffsets, staticSizes, staticStrides; SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, - ShapedType::kDynamicSize); + ShapedType::kDynamic); dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); return ExtractSliceOp::inferResultType(sourceShapedTensorType, staticOffsets, staticSizes, staticStrides); } @@ -1574,11 +1574,11 @@ SmallVector staticOffsets, staticSizes, staticStrides; SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, - ShapedType::kDynamicSize); + ShapedType::kDynamic); dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); return ExtractSliceOp::inferCanonicalRankReducedResultType( desiredResultRank, sourceRankedTensorType, staticOffsets, staticSizes, staticStrides); @@ -1595,11 +1595,11 @@ SmallVector 
staticOffsets, staticSizes, staticStrides; SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, - ShapedType::kDynamicSize); + ShapedType::kDynamic); dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); auto sourceRankedTensorType = source.getType().cast(); // Structuring implementation this way avoids duplication between builders. if (!resultType) { @@ -1846,13 +1846,13 @@ // Check if there are any dynamic parts, which are not supported. auto offsets = extractFromI64ArrayAttr(op.getStaticOffsets()); - if (llvm::is_contained(offsets, ShapedType::kDynamicStrideOrOffset)) + if (llvm::is_contained(offsets, ShapedType::kDynamic)) return failure(); auto sizes = extractFromI64ArrayAttr(op.getStaticSizes()); - if (llvm::is_contained(sizes, ShapedType::kDynamicSize)) + if (llvm::is_contained(sizes, ShapedType::kDynamic)) return failure(); auto strides = extractFromI64ArrayAttr(op.getStaticStrides()); - if (llvm::is_contained(strides, ShapedType::kDynamicStrideOrOffset)) + if (llvm::is_contained(strides, ShapedType::kDynamic)) return failure(); // Compute the stride for each dimension. @@ -2014,11 +2014,11 @@ SmallVector staticOffsets, staticSizes, staticStrides; SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, - ShapedType::kDynamicSize); + ShapedType::kDynamic); dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes, dynamicStrides, b.getI64ArrayAttr(staticOffsets), b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides)); @@ -2168,9 +2168,9 @@ SmallVector mixedOffsets(insertSliceOp.getMixedOffsets()); SmallVector mixedSizes(insertSliceOp.getMixedSizes()); SmallVector mixedStrides(insertSliceOp.getMixedStrides()); - canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset); + canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamic); canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic); - canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset); + canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamic); // Create the new op in canonical form. auto sourceType = ExtractSliceOp::inferCanonicalRankReducedResultType( @@ -2430,14 +2430,14 @@ SmallVector inferredShape; for (auto i : llvm::seq(0, rank)) { if (sourceType.isDynamicDim(i) || - staticLow[i] == ShapedType::kDynamicSize || - staticHigh[i] == ShapedType::kDynamicSize) { - inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamicSize + staticLow[i] == ShapedType::kDynamic || + staticHigh[i] == ShapedType::kDynamic) { + inferredShape.push_back(resultShape.empty() ? 
ShapedType::kDynamic : resultShape[i]); } else { int64_t size = sourceType.getDimSize(i) + staticLow[i] + staticHigh[i]; assert((resultShape.empty() || size == resultShape[i] || - resultShape[i] == ShapedType::kDynamicSize) && + resultShape[i] == ShapedType::kDynamic) && "mismatch between inferred shape and result shape"); inferredShape.push_back(size); } @@ -2462,7 +2462,7 @@ ArrayRef attrs) { auto sourceType = source.getType().cast(); unsigned rank = sourceType.getRank(); - SmallVector staticVector(rank, ShapedType::kDynamicSize); + SmallVector staticVector(rank, ShapedType::kDynamic); build(b, result, source, staticVector, staticVector, low, high, nofold, attrs); } @@ -2479,9 +2479,9 @@ // dynamic (ie not a constant), dynamicLow and dynamicHigh will grow with 1 // value as well. dispatchIndexOpFoldResults(low, dynamicLow, staticLow, - ShapedType::kDynamicSize); + ShapedType::kDynamic); dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh, - ShapedType::kDynamicSize); + ShapedType::kDynamic); if (!resultType) { resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh); } @@ -2830,11 +2830,11 @@ SmallVector staticOffsets, staticSizes, staticStrides; SmallVector dynamicOffsets, dynamicSizes, dynamicStrides; dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, - ShapedType::kDynamicSize); + ShapedType::kDynamic); dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, - ShapedType::kDynamicStrideOrOffset); + ShapedType::kDynamic); build(b, result, {}, source, dest, dynamicOffsets, dynamicSizes, dynamicStrides, b.getI64ArrayAttr(staticOffsets), b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides)); diff --git a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp --- a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp @@ -113,7 +113,7 @@ if (auto constInt = getConstantIntValue(val)) { staticIndices.push_back(*constInt); } else { - staticIndices.push_back(ShapedType::kDynamicSize); + staticIndices.push_back(ShapedType::kDynamic); dynIndices.push_back(val); } }; @@ -216,7 +216,7 @@ // The shape of the result can be obtained from the sizes passed in. SmallVector dynDims; SmallVector shape; - dispatchIndexOpFoldResults(sizes, dynDims, shape, ShapedType::kDynamicSize); + dispatchIndexOpFoldResults(sizes, dynDims, shape, ShapedType::kDynamic); RankedTensorType resultType = RankedTensorType::get(shape, padOp.getResultType().getElementType()); diff --git a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp --- a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp +++ b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp @@ -44,7 +44,7 @@ auto tensorTy = rankedTensor.getType().cast(); SmallVector dynamicDims; for (const auto &en : llvm::enumerate(tensorTy.getShape())) { - if (en.value() == ShapedType::kDynamicSize) + if (en.value() == ShapedType::kDynamic) dynamicDims.push_back( b.create(loc, rankedTensor, en.index())); } diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp --- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp @@ -413,13 +413,13 @@ // Copy the Operand's rank. 
if (!hasRankedInput) - outputShape.resize(operandShape.getRank(), ShapedType::kDynamicSize); + outputShape.resize(operandShape.getRank(), ShapedType::kDynamic); // Copy shapes until the dim is non-dynamic. for (int i = 0, s = operandShape.getRank(); i < s; i++) { if (i == axis || operandShape.isDynamicDim(i)) continue; - if (outputShape[i] == ShapedType::kDynamicSize) + if (outputShape[i] == ShapedType::kDynamic) outputShape[i] = operandShape.getDimSize(i); if (outputShape[i] != operandShape.getDimSize(i)) return failure(); @@ -441,7 +441,7 @@ // We need to know the length of the concatenation axis of all inputs to // determine the dimension size of the output shape. if (!operandShape.hasRank() || operandShape.isDynamicDim(axis)) { - concatDimSize = ShapedType::kDynamicSize; + concatDimSize = ShapedType::kDynamic; break; } @@ -485,7 +485,7 @@ // All shapes are dynamic. SmallVector outShape; - outShape.resize(2, ShapedType::kDynamicSize); + outShape.resize(2, ShapedType::kDynamic); if (inputShape.hasRank()) { outShape[0] = inputShape.getDimSize(0); @@ -496,7 +496,7 @@ } if (biasShape.hasRank()) { - outShape[1] = outShape[1] == ShapedType::kDynamicSize + outShape[1] = outShape[1] == ShapedType::kDynamic ? biasShape.getDimSize(0) : outShape[1]; } @@ -516,7 +516,7 @@ // All shapes are dynamic. SmallVector outShape; - outShape.resize(3, ShapedType::kDynamicSize); + outShape.resize(3, ShapedType::kDynamic); if (lhsShape.hasRank()) { outShape[0] = lhsShape.getDimSize(0); @@ -524,7 +524,7 @@ } if (rhsShape.hasRank()) { - outShape[0] = outShape[0] == ShapedType::kDynamicSize + outShape[0] = outShape[0] == ShapedType::kDynamic ? rhsShape.getDimSize(0) : outShape[0]; outShape[2] = rhsShape.getDimSize(2); @@ -557,7 +557,7 @@ return success(); } - outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamicSize); + outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamic); inferredReturnShapes.push_back(ShapedTypeComponents(outputShape)); return success(); } @@ -565,7 +565,7 @@ DenseIntElementsAttr paddings; // If the paddings value is not a constant, all dimensions must be dynamic. if (!matchPattern(operands[1], m_Constant(&paddings))) { - outputShape.resize(inputShape.getRank(), ShapedType::kDynamicSize); + outputShape.resize(inputShape.getRank(), ShapedType::kDynamic); inferredReturnShapes.push_back(ShapedTypeComponents(outputShape)); return success(); } @@ -578,7 +578,7 @@ outputShape.reserve(inputShape.getRank()); for (int i = 0, s = inputShape.getRank(); i < s; i++) { if (inputShape.isDynamicDim(i)) { - outputShape.push_back(ShapedType::kDynamicSize); + outputShape.push_back(ShapedType::kDynamic); continue; } @@ -630,7 +630,7 @@ ShapeAdaptor inputShape = operands.getShape(0); SmallVector outputShape; if (!inputShape.hasRank()) { - outputShape.resize(multiples.size(), ShapedType::kDynamicSize); + outputShape.resize(multiples.size(), ShapedType::kDynamic); inferredReturnShapes.push_back(ShapedTypeComponents(outputShape)); return success(); } @@ -646,7 +646,7 @@ outputShape.reserve(multiples.size()); for (int i = 0, s = inputShape.getRank(); i < s; i++) { int64_t dim = inputShape.getDimSize(i); - if (dim != ShapedType::kDynamicSize) + if (dim != ShapedType::kDynamic) dim *= multipleValues[i]; outputShape.push_back(dim); } @@ -657,7 +657,7 @@ static SmallVector convertToMlirShape(ArrayRef shape) { return to_vector(llvm::map_range(shape, [](int64_t dim) { - return dim == -1 ? ShapedType::kDynamicSize : dim; + return dim == -1 ? 
ShapedType::kDynamic : dim; })); } @@ -726,7 +726,7 @@ // can determine the output rank. SmallVector outputShape; if (!inputShape.hasRank()) { - outputShape.resize(permsShape.getDimSize(0), ShapedType::kDynamicSize); + outputShape.resize(permsShape.getDimSize(0), ShapedType::kDynamic); inferredReturnShapes.push_back(ShapedTypeComponents(outputShape)); return success(); } @@ -754,7 +754,7 @@ return success(); } - outputShape.resize(inputShape.getRank(), ShapedType::kDynamicSize); + outputShape.resize(inputShape.getRank(), ShapedType::kDynamic); // If the permuations are a constant we can directly determine the output // shape. if (ShapeAdaptor permShape = operands.getValueAsShape(1)) { @@ -773,7 +773,7 @@ ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outputShape; - outputShape.resize(3, ShapedType::kDynamicSize); + outputShape.resize(3, ShapedType::kDynamic); ShapeAdaptor valuesShape = operands.getShape(0); if (valuesShape.hasRank()) { @@ -783,9 +783,9 @@ ShapeAdaptor indicesShape = operands.getShape(1); if (indicesShape.hasRank()) { - if (outputShape[0] == ShapedType::kDynamicSize) + if (outputShape[0] == ShapedType::kDynamic) outputShape[0] = indicesShape.getDimSize(0); - if (outputShape[1] == ShapedType::kDynamicSize) + if (outputShape[1] == ShapedType::kDynamic) outputShape[1] = indicesShape.getDimSize(1); } @@ -799,7 +799,7 @@ SmallVectorImpl &inferredReturnShapes) { ResizeOpAdaptor adaptor(operands, attributes); llvm::SmallVector outputShape; - outputShape.resize(4, ShapedType::kDynamicSize); + outputShape.resize(4, ShapedType::kDynamic); ShapeAdaptor inputShape = operands.getShape(adaptor.getInput()); if (!inputShape.hasRank()) @@ -810,8 +810,8 @@ int64_t inputHeight = inputShape.getDimSize(1); int64_t inputWidth = inputShape.getDimSize(2); - if ((inputHeight == ShapedType::kDynamicSize) || - (inputWidth == ShapedType::kDynamicSize)) + if ((inputHeight == ShapedType::kDynamic) || + (inputWidth == ShapedType::kDynamic)) return failure(); llvm::SmallVector scaleInt; @@ -841,7 +841,7 @@ ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outputShape; - outputShape.resize(3, ShapedType::kDynamicSize); + outputShape.resize(3, ShapedType::kDynamic); ShapeAdaptor valuesInShape = operands.getShape(0); if (valuesInShape.hasRank()) { @@ -852,15 +852,15 @@ ShapeAdaptor indicesShape = operands.getShape(1); if (indicesShape.hasRank()) { - if (outputShape[0] == ShapedType::kDynamicSize) + if (outputShape[0] == ShapedType::kDynamic) outputShape[0] = indicesShape.getDimSize(0); } ShapeAdaptor inputShape = operands.getShape(2); if (inputShape.hasRank()) { - if (outputShape[0] == ShapedType::kDynamicSize) + if (outputShape[0] == ShapedType::kDynamic) outputShape[0] = inputShape.getDimSize(0); - if (outputShape[2] == ShapedType::kDynamicSize) + if (outputShape[2] == ShapedType::kDynamic) outputShape[2] = inputShape.getDimSize(2); } @@ -968,7 +968,7 @@ SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape = operands.getShape(0); llvm::SmallVector outputShape; - outputShape.resize(4, ShapedType::kDynamicSize); + outputShape.resize(4, ShapedType::kDynamic); // We only know the rank if the input type is unranked. 
if (!inputShape) { @@ -1009,13 +1009,13 @@ MLIRContext *context, ::llvm::Optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { - llvm::SmallVector outputShape(4, ShapedType::kDynamicSize); + llvm::SmallVector outputShape(4, ShapedType::kDynamic); Conv2DOp::Adaptor adaptor(operands.getValues(), attributes); - int64_t inputWidth = ShapedType::kDynamicSize; - int64_t inputHeight = ShapedType::kDynamicSize; - int64_t weightWidth = ShapedType::kDynamicSize; - int64_t weightHeight = ShapedType::kDynamicSize; + int64_t inputWidth = ShapedType::kDynamic; + int64_t inputHeight = ShapedType::kDynamic; + int64_t weightWidth = ShapedType::kDynamic; + int64_t weightHeight = ShapedType::kDynamic; // Input shape describes input width/height and batch. @@ -1076,16 +1076,16 @@ MLIRContext *context, ::llvm::Optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { - llvm::SmallVector outputShape(5, ShapedType::kDynamicSize); + llvm::SmallVector outputShape(5, ShapedType::kDynamic); Conv3DOp::Adaptor adaptor(operands.getValues(), attributes); - int64_t inputWidth = ShapedType::kDynamicSize; - int64_t inputHeight = ShapedType::kDynamicSize; - int64_t inputDepth = ShapedType::kDynamicSize; + int64_t inputWidth = ShapedType::kDynamic; + int64_t inputHeight = ShapedType::kDynamic; + int64_t inputDepth = ShapedType::kDynamic; - int64_t weightWidth = ShapedType::kDynamicSize; - int64_t weightHeight = ShapedType::kDynamicSize; - int64_t weightDepth = ShapedType::kDynamicSize; + int64_t weightWidth = ShapedType::kDynamic; + int64_t weightHeight = ShapedType::kDynamic; + int64_t weightDepth = ShapedType::kDynamic; // Input shape describes input width/height and batch. ShapeAdaptor inputShape = operands.getShape(adaptor.getInput()); @@ -1167,16 +1167,16 @@ MLIRContext *context, ::llvm::Optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { - llvm::SmallVector outputShape(4, ShapedType::kDynamicSize); + llvm::SmallVector outputShape(4, ShapedType::kDynamic); DepthwiseConv2DOp::Adaptor adaptor(operands.getValues(), attributes); - int64_t inputWidth = ShapedType::kDynamicSize; - int64_t inputHeight = ShapedType::kDynamicSize; - int64_t inputChannels = ShapedType::kDynamicSize; + int64_t inputWidth = ShapedType::kDynamic; + int64_t inputHeight = ShapedType::kDynamic; + int64_t inputChannels = ShapedType::kDynamic; - int64_t weightWidth = ShapedType::kDynamicSize; - int64_t weightHeight = ShapedType::kDynamicSize; - int64_t depthChannels = ShapedType::kDynamicSize; + int64_t weightWidth = ShapedType::kDynamic; + int64_t weightHeight = ShapedType::kDynamic; + int64_t depthChannels = ShapedType::kDynamic; // Input shape describes input width/height and batch. ShapeAdaptor inputShape = operands.getShape(adaptor.getInput()); @@ -1252,10 +1252,10 @@ getI64Values(adaptor.getOutShape(), outputShape); outputShape = convertToMlirShape(outputShape); - int64_t inputWidth = ShapedType::kDynamicSize; - int64_t inputHeight = ShapedType::kDynamicSize; - int64_t weightWidth = ShapedType::kDynamicSize; - int64_t weightHeight = ShapedType::kDynamicSize; + int64_t inputWidth = ShapedType::kDynamic; + int64_t inputHeight = ShapedType::kDynamic; + int64_t weightWidth = ShapedType::kDynamic; + int64_t weightHeight = ShapedType::kDynamic; // Input shape describes input width/height and batch. 
ShapeAdaptor inputShape = operands.getShape(adaptor.getInput()); diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp --- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp @@ -58,7 +58,7 @@ // Reshape input to [N,IH,IW,IC] -> [N * IH * IW, IC]. ArrayRef inputShape = inputType.getShape(); - int64_t combined = ShapedType::kDynamicSize; + int64_t combined = ShapedType::kDynamic; if (numDynamic == 0) combined = inputShape[0] * inputShape[1] * inputShape[2]; llvm::SmallVector revisedInputShape{combined, inputShape[3]}; diff --git a/mlir/lib/Dialect/Traits.cpp b/mlir/lib/Dialect/Traits.cpp --- a/mlir/lib/Dialect/Traits.cpp +++ b/mlir/lib/Dialect/Traits.cpp @@ -94,7 +94,7 @@ } else if (*i2 == 1) { *iR = *i1; } else { - *iR = ShapedType::kDynamicSize; + *iR = ShapedType::kDynamic; } } else { if (*i1 == *i2 || *i2 == 1) { diff --git a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp --- a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp +++ b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp @@ -46,7 +46,7 @@ break; int64_t currTargetShape = targetShape[targetDim]; - while (sourceShape[sourceDim] != ShapedType::kDynamicSize && + while (sourceShape[sourceDim] != ShapedType::kDynamic && prodOfCollapsedDims * sourceShape[sourceDim] < currTargetShape && sourceDim < sourceShape.size()) { prodOfCollapsedDims *= sourceShape[sourceDim]; @@ -56,15 +56,15 @@ // If the current expanded dimension is dynamic, then the collapsed // dimensions should also be dynamic and product of all previous unprocessed // dimensions of the expanded shape should be 1. - if (sourceShape[sourceDim] == ShapedType::kDynamicSize && - (currTargetShape != ShapedType::kDynamicSize || + if (sourceShape[sourceDim] == ShapedType::kDynamic && + (currTargetShape != ShapedType::kDynamic || prodOfCollapsedDims != 1)) return llvm::None; // If the collapsed dim is dynamic, the current expanded dim should also // be dynamic. - if (currTargetShape == ShapedType::kDynamicSize && - sourceShape[sourceDim] != ShapedType::kDynamicSize) + if (currTargetShape == ShapedType::kDynamic && + sourceShape[sourceDim] != ShapedType::kDynamic) return llvm::None; // For static shapes, if the product of dimensions of the expanded shape @@ -83,7 +83,7 @@ // Process any remaining entries in the source shape. They all need to be // 1 or dynamic. for (; sourceDim < sourceShape.size(); sourceDim++) { - if (sourceShape[sourceDim] != ShapedType::kDynamicSize && + if (sourceShape[sourceDim] != ShapedType::kDynamic && sourceShape[sourceDim] != 1) return llvm::None; // The map is empty when the target type is a scalar. diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp @@ -170,13 +170,13 @@ resStrides(bT.getRank(), 0); for (int64_t idx = 0, e = aT.getRank(); idx < e; ++idx) { resShape[idx] = - (aShape[idx] == bShape[idx]) ? aShape[idx] : ShapedType::kDynamicSize; + (aShape[idx] == bShape[idx]) ? aShape[idx] : ShapedType::kDynamic; resStrides[idx] = (aStrides[idx] == bStrides[idx]) ? aStrides[idx] - : ShapedType::kDynamicStrideOrOffset; + : ShapedType::kDynamic; } resOffset = - (aOffset == bOffset) ? 
aOffset : ShapedType::kDynamicStrideOrOffset; + (aOffset == bOffset) ? aOffset : ShapedType::kDynamic; return MemRefType::get( resShape, aT.getElementType(), StridedLayoutAttr::get(aT.getContext(), resOffset, resStrides)); diff --git a/mlir/lib/IR/BuiltinAttributes.cpp b/mlir/lib/IR/BuiltinAttributes.cpp --- a/mlir/lib/IR/BuiltinAttributes.cpp +++ b/mlir/lib/IR/BuiltinAttributes.cpp @@ -207,7 +207,7 @@ /// Prints a strided layout attribute. void StridedLayoutAttr::print(llvm::raw_ostream &os) const { auto printIntOrQuestion = [&](int64_t value) { - if (value == ShapedType::kDynamicStrideOrOffset) + if (ShapedType::isDynamic(value)) os << "?"; else os << value; @@ -1770,7 +1770,7 @@ // AffineExpr for offset. // Static case. - if (offset != MemRefType::getDynamicStrideOrOffset()) { + if (!ShapedType::isDynamic(offset)) { auto cst = getAffineConstantExpr(offset, context); expr = cst; } else { @@ -1787,7 +1787,7 @@ auto d = getAffineDimExpr(dim, context); AffineExpr mult; // Static case. - if (stride != MemRefType::getDynamicStrideOrOffset()) + if (!ShapedType::isDynamic(stride)) mult = getAffineConstantExpr(stride, context); else // Dynamic case, new symbol for each new stride. diff --git a/mlir/lib/IR/BuiltinTypeInterfaces.cpp b/mlir/lib/IR/BuiltinTypeInterfaces.cpp --- a/mlir/lib/IR/BuiltinTypeInterfaces.cpp +++ b/mlir/lib/IR/BuiltinTypeInterfaces.cpp @@ -23,8 +23,7 @@ // ShapedType //===----------------------------------------------------------------------===// -constexpr int64_t ShapedType::kDynamicSize; -constexpr int64_t ShapedType::kDynamicStrideOrOffset; +constexpr int64_t ShapedType::kDynamic; int64_t ShapedType::getNumElements(ArrayRef shape) { int64_t num = 1; diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp --- a/mlir/lib/IR/BuiltinTypes.cpp +++ b/mlir/lib/IR/BuiltinTypes.cpp @@ -608,7 +608,7 @@ if (!BaseMemRefType::isValidElementType(elementType)) return emitError() << "invalid memref element type"; - // Negative sizes are not allowed except for `kDynamicSize`. + // Negative sizes are not allowed except for `kDynamic`. for (int64_t s : shape) if (s < 0 && !ShapedType::isDynamic(s)) return emitError() << "invalid memref size"; @@ -705,7 +705,7 @@ } /// A stride specification is a list of integer values that are either static -/// or dynamic (encoded with ShapedType::kDynamicStrideOrOffset). Strides encode +/// or dynamic (encoded with ShapedType::kDynamic). Strides encode /// the distance in the number of elements between successive entries along a /// particular dimension.
/// @@ -794,12 +794,12 @@ if (auto cst = offsetExpr.dyn_cast()) offset = cst.getValue(); else - offset = ShapedType::kDynamicStrideOrOffset; + offset = ShapedType::kDynamic; for (auto e : strideExprs) { if (auto c = e.dyn_cast()) strides.push_back(c.getValue()); else - strides.push_back(ShapedType::kDynamicStrideOrOffset); + strides.push_back(ShapedType::kDynamic); } return success(); } diff --git a/mlir/lib/Interfaces/ViewLikeInterface.cpp b/mlir/lib/Interfaces/ViewLikeInterface.cpp --- a/mlir/lib/Interfaces/ViewLikeInterface.cpp +++ b/mlir/lib/Interfaces/ViewLikeInterface.cpp @@ -55,17 +55,17 @@ << op.getMixedSizes().size() << " vs " << op.getMixedStrides().size() << ") so the rank of the result type is well-formed."; - if (failed(verifyListOfOperandsOrIntegers( - op, "offset", maxRanks[0], op.static_offsets(), op.offsets(), - ShapedType::isDynamicStrideOrOffset))) + if (failed(verifyListOfOperandsOrIntegers(op, "offset", maxRanks[0], + op.static_offsets(), op.offsets(), + ShapedType::isDynamic))) return failure(); if (failed(verifyListOfOperandsOrIntegers(op, "size", maxRanks[1], op.static_sizes(), op.sizes(), ShapedType::isDynamic))) return failure(); - if (failed(verifyListOfOperandsOrIntegers( - op, "stride", maxRanks[2], op.static_strides(), op.strides(), - ShapedType::isDynamicStrideOrOffset))) + if (failed(verifyListOfOperandsOrIntegers(op, "stride", maxRanks[2], + op.static_strides(), op.strides(), + ShapedType::isDynamic))) return failure(); return success(); } @@ -166,13 +166,12 @@ SmallVector mlir::getMixedStridesOrOffsets(ArrayAttr staticValues, ValueRange dynamicValues) { - return getMixedValues(staticValues, dynamicValues, - ShapedType::kDynamicStrideOrOffset); + return getMixedValues(staticValues, dynamicValues, ShapedType::kDynamic); } SmallVector mlir::getMixedSizes(ArrayAttr staticValues, ValueRange dynamicValues) { - return getMixedValues(staticValues, dynamicValues, ShapedType::kDynamicSize); + return getMixedValues(staticValues, dynamicValues, ShapedType::kDynamic); } std::pair> @@ -194,12 +193,11 @@ std::pair> mlir::decomposeMixedStridesOrOffsets( OpBuilder &b, const SmallVectorImpl &mixedValues) { - return decomposeMixedValues(b, mixedValues, - ShapedType::kDynamicStrideOrOffset); + return decomposeMixedValues(b, mixedValues, ShapedType::kDynamic); } std::pair> mlir::decomposeMixedSizes(OpBuilder &b, const SmallVectorImpl &mixedValues) { - return decomposeMixedValues(b, mixedValues, ShapedType::kDynamicSize); + return decomposeMixedValues(b, mixedValues, ShapedType::kDynamic); } diff --git a/mlir/test/lib/Analysis/TestMemRefStrideCalculation.cpp b/mlir/test/lib/Analysis/TestMemRefStrideCalculation.cpp --- a/mlir/test/lib/Analysis/TestMemRefStrideCalculation.cpp +++ b/mlir/test/lib/Analysis/TestMemRefStrideCalculation.cpp @@ -41,13 +41,13 @@ return; } llvm::outs() << "MemRefType offset: "; - if (offset == MemRefType::getDynamicStrideOrOffset()) + if (ShapedType::isDynamic(offset)) llvm::outs() << "?"; else llvm::outs() << offset; llvm::outs() << " strides: "; llvm::interleaveComma(strides, llvm::outs(), [&](int64_t v) { - if (v == MemRefType::getDynamicStrideOrOffset()) + if (ShapedType::isDynamic(v)) llvm::outs() << "?"; else llvm::outs() << v; diff --git a/mlir/test/lib/Dialect/Test/TestDialect.cpp b/mlir/test/lib/Dialect/Test/TestDialect.cpp --- a/mlir/test/lib/Dialect/Test/TestDialect.cpp +++ b/mlir/test/lib/Dialect/Test/TestDialect.cpp @@ -1157,7 +1157,7 @@ return emitOptionalError(location, "only shaped type operands allowed"); } int64_t dim = - sval.hasRank() 
? sval.getShape().front() : ShapedType::kDynamicSize; + sval.hasRank() ? sval.getShape().front() : ShapedType::kDynamic; auto type = IntegerType::get(context, 17); inferredReturnShapes.push_back(ShapedTypeComponents({dim}, type)); return success(); diff --git a/mlir/unittests/Dialect/BroadcastShapeTest.cpp b/mlir/unittests/Dialect/BroadcastShapeTest.cpp --- a/mlir/unittests/Dialect/BroadcastShapeTest.cpp +++ b/mlir/unittests/Dialect/BroadcastShapeTest.cpp @@ -47,7 +47,7 @@ TEST(BroadcastShapeTest, InterleavingUnknowns) { SmallVector result; - int64_t dyn = mlir::ShapedType::kDynamicSize; + int64_t dyn = mlir::ShapedType::kDynamic; ASSERT_TRUE(getBroadcastedShape({1, 2, dyn, dyn, dyn}, {dyn, dyn, dyn, 4, 1}, result)); EXPECT_THAT(result, ElementsAre(dyn, 2, dyn, 4, dyn));
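
A minimal sketch (not part of the patch) of the call-site pattern this change standardizes on: a single ShapedType::kDynamic sentinel, queried through ShapedType::isDynamic, now encodes dynamic extents, strides, and offsets alike, replacing the kDynamicSize / kDynamicStrideOrOffset spellings and the isDynamicStrideOrOffset / getDynamicStrideOrOffset helpers. The function names buildExampleMemRef and hasDynamicLeadingDim and the standalone MLIRContext below are assumptions made purely for this example.

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"

// Build a ?x4xf32 memref type; the leading extent uses the unified
// kDynamic sentinel, the same value that now marks dynamic strides
// and offsets in strided layouts.
static mlir::MemRefType buildExampleMemRef(mlir::MLIRContext &ctx) {
  auto f32 = mlir::Float32Type::get(&ctx);
  return mlir::MemRefType::get({mlir::ShapedType::kDynamic, 4}, f32);
}

// Query dynamism with the single isDynamic predicate instead of the
// removed stride/offset-specific variant.
static bool hasDynamicLeadingDim(mlir::MemRefType type) {
  return mlir::ShapedType::isDynamic(type.getDimSize(0));
}

Collapsing the two sentinels into one means call sites no longer have to know whether a given int64_t came from a size position or a stride/offset position, which is exactly what the mechanical substitutions in the hunks above rely on.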