diff --git a/llvm/include/llvm/ADT/Hashing.h b/llvm/include/llvm/ADT/Hashing.h --- a/llvm/include/llvm/ADT/Hashing.h +++ b/llvm/include/llvm/ADT/Hashing.h @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include @@ -122,6 +123,10 @@ template hash_code hash_value(const std::basic_string &arg); +/// Compute a hash_code for std::optional. +template +hash_code hash_value(const std::optional &arg); + /// Override the execution seed with a fixed value. /// diff --git a/mlir/docs/Dialects/Linalg/_index.md b/mlir/docs/Dialects/Linalg/_index.md --- a/mlir/docs/Dialects/Linalg/_index.md +++ b/mlir/docs/Dialects/Linalg/_index.md @@ -631,14 +631,14 @@ When `mlir-linalg-ods-gen -gen-impl=1` is called, the following C++ is produced: ``` -llvm::Optional> batchmatmul::referenceIterators() { +std::optional> batchmatmul::referenceIterators() { return SmallVector{ getParallelIteratorTypeName(), getParallelIteratorTypeName(), getParallelIteratorTypeName(), getReductionIteratorTypeName() }; } -llvm::Optional> batchmatmul::referenceIndexingMaps() { +std::optional> batchmatmul::referenceIndexingMaps() { MLIRContext *context = getContext(); AffineExpr d0, d1, d2, d3; bindDims(context, d0, d1, d2, d3); diff --git a/mlir/docs/OpDefinitions.md b/mlir/docs/OpDefinitions.md --- a/mlir/docs/OpDefinitions.md +++ b/mlir/docs/OpDefinitions.md @@ -1341,9 +1341,9 @@ Case20 = 20, }; -llvm::Optional symbolizeMyIntEnum(uint32_t); +std::optional symbolizeMyIntEnum(uint32_t); llvm::StringRef ConvertToString(MyIntEnum); -llvm::Optional ConvertToEnum(llvm::StringRef); +std::optional ConvertToEnum(llvm::StringRef); inline constexpr unsigned getMaxEnumValForMyIntEnum() { return 20; } @@ -1387,13 +1387,13 @@ return ""; } -llvm::Optional ConvertToEnum(llvm::StringRef str) { - return llvm::StringSwitch>(str) +std::optional ConvertToEnum(llvm::StringRef str) { + return llvm::StringSwitch>(str) .Case("Case15", MyIntEnum::Case15) .Case("Case20", MyIntEnum::Case20) .Default(std::nullopt); } -llvm::Optional symbolizeMyIntEnum(uint32_t value) { +std::optional symbolizeMyIntEnum(uint32_t value) { switch (value) { case 15: return MyIntEnum::Case15; case 20: return MyIntEnum::Case20; @@ -1430,9 +1430,9 @@ Bit3 = 8, }; -llvm::Optional symbolizeMyBitEnum(uint32_t); +std::optional symbolizeMyBitEnum(uint32_t); std::string stringifyMyBitEnum(MyBitEnum); -llvm::Optional symbolizeMyBitEnum(llvm::StringRef); +std::optional symbolizeMyBitEnum(llvm::StringRef); inline constexpr MyBitEnum operator|(MyBitEnum a, MyBitEnum b) { return static_cast(static_cast(a) | static_cast(b)); @@ -1462,10 +1462,10 @@ } template -::llvm::Optional symbolizeEnum(::llvm::StringRef); +::std::optional symbolizeEnum(::llvm::StringRef); template <> -inline ::llvm::Optional symbolizeEnum(::llvm::StringRef str) { +inline ::std::optional symbolizeEnum(::llvm::StringRef str) { return symbolizeMyBitEnum(str); } @@ -1506,7 +1506,7 @@ return llvm::join(strs, "|"); } -llvm::Optional symbolizeMyBitEnum(llvm::StringRef str) { +std::optional symbolizeMyBitEnum(llvm::StringRef str) { // Special case for all bits unset. if (str == "None") return MyBitEnum::None; @@ -1515,7 +1515,7 @@ uint32_t val = 0; for (auto symbol : symbols) { - auto bit = llvm::StringSwitch>(symbol) + auto bit = llvm::StringSwitch>(symbol) .Case("tagged", 1) .Case("Bit1", 2) .Case("Bit2", 4) @@ -1526,7 +1526,7 @@ return static_cast(val); } -llvm::Optional symbolizeMyBitEnum(uint32_t value) { +std::optional symbolizeMyBitEnum(uint32_t value) { // Special case for all bits unset. 
if (value == 0) return MyBitEnum::None; diff --git a/mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h b/mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h --- a/mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h +++ b/mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h @@ -198,7 +198,7 @@ /// regions or the parent operation itself, and set either the argument or /// parent result lattices. void visitRegionSuccessors(ProgramPoint point, RegionBranchOpInterface branch, - Optional successorIndex, + std::optional successorIndex, ArrayRef lattices); }; diff --git a/mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h b/mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h --- a/mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h +++ b/mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h @@ -61,7 +61,7 @@ int64_t alignedAllocationGetAlignment(ConversionPatternRewriter &rewriter, Location loc, OpType op, const DataLayout *defaultLayout) const { - if (Optional alignment = op.getAlignment()) + if (auto alignment = op.getAlignment()) return *alignment; // Whenever we don't have alignment set, we will use an alignment diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td --- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td +++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td @@ -708,7 +708,7 @@ unsigned getNumDims(); /// Get ranges as constants, may fail in dynamic case. - Optional> getConstantRanges(); + std::optional> getConstantRanges(); Block *getBody(); OpBuilder getBodyBuilder(); diff --git a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td --- a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td +++ b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td @@ -261,7 +261,7 @@ let hasFolder = 1; let extraClassDeclaration = [{ - ::llvm::Optional<::llvm::SmallVector> getShapeForUnroll(); + ::std::optional<::llvm::SmallVector> getShapeForUnroll(); }]; } diff --git a/mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h b/mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h --- a/mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h +++ b/mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h @@ -43,7 +43,7 @@ DiagnosedSilenceableFailure mapNestedForeachToThreadsImpl( RewriterBase &rewriter, Operation *target, const SmallVectorImpl &blockDim, bool syncAfterDistribute, - llvm::Optional transformOp, + std::optional transformOp, const ArrayRef &threadMappingAttributes); /// Maps the top level `scf.foreach_thread` op to GPU Thread Blocks. 
Mapping is diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td --- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td @@ -990,8 +990,8 @@ let extraClassDeclaration = !strconcat([{ static llvm::Intrinsic::ID getIntrinsicID( int64_t m, int64_t n, uint64_t k, - llvm::Optional b1Op, - llvm::Optional sat, + std::optional b1Op, + std::optional sat, mlir::NVVM::MMALayout layoutAEnum, mlir::NVVM::MMALayout layoutBEnum, mlir::NVVM::MMATypes eltypeAEnum, mlir::NVVM::MMATypes eltypeBEnum, mlir::NVVM::MMATypes eltypeCEnum, mlir::NVVM::MMATypes eltypeDEnum) { @@ -1006,7 +1006,7 @@ return 0; } - static Optional inferOperandMMAType(Type operandElType, + static std::optional inferOperandMMAType(Type operandElType, bool isAccumulator); MMATypes accumPtxType(); @@ -1016,10 +1016,10 @@ let builders = [ OpBuilder<(ins "Type":$resultType, "ValueRange":$operandA, "ValueRange":$operandB, "ValueRange":$operandC, - "ArrayRef":$shape, "Optional":$b1Op, - "Optional":$intOverflow, - "Optional>":$multiplicandPtxTypes, - "Optional>":$multiplicandLayouts)> + "ArrayRef":$shape, "std::optional":$b1Op, + "std::optional":$intOverflow, + "std::optional>":$multiplicandPtxTypes, + "std::optional>":$multiplicandLayouts)> ]; string llvmBuilder = [{ diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td @@ -47,7 +47,7 @@ } void getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // Op has a region, but conceptually the control flow does not enter the // region. diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h --- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h +++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h @@ -47,7 +47,7 @@ RewriterBase &rewriter, transform::TransformState &state, TransformOpInterface transformOp, ArrayRef targets, ArrayRef mixedNumThreads, - ArrayRef mixedTileSizes, Optional mapping, + ArrayRef mixedTileSizes, std::optional mapping, SmallVector &tileOps, SmallVector &tiledOps); } // namespace transform diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h --- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h @@ -230,7 +230,7 @@ /// smallest constant value for the size of the buffer needed for each /// dimension. If that is not possible, contains the dynamic size of the /// subview. The call back should return the buffer to use. -using AllocBufferCallbackFn = std::function( +using AllocBufferCallbackFn = std::function( OpBuilder &b, memref::SubViewOp subView, ArrayRef boundingSubViewSize, DataLayout &layout)>; @@ -248,7 +248,7 @@ struct LinalgPromotionOptions { /// Indices of subViews to promote. If `None`, try to promote all operands. - Optional> operandsToPromote = None; + std::optional> operandsToPromote = std::nullopt; LinalgPromotionOptions &setOperandsToPromote(ArrayRef operands) { operandsToPromote = DenseSet(); operandsToPromote->insert(operands.begin(), operands.end()); @@ -259,7 +259,7 @@ /// Otherwise the partial view will be used. 
The decision is defaulted to /// `useFullTileBuffersDefault` when `useFullTileBuffers` is None and for /// operands missing from `useFullTileBuffers`. - Optional useFullTileBuffers = None; + std::optional useFullTileBuffers = std::nullopt; LinalgPromotionOptions &setUseFullTileBuffers(ArrayRef useFullTiles) { unsigned size = useFullTiles.size(); llvm::SmallBitVector tmp(size, false); @@ -276,7 +276,7 @@ return *this; } /// Alignment of promoted buffer. If `None` do not specify alignment. - Optional alignment = None; + std::optional alignment = std::nullopt; LinalgPromotionOptions &setAlignment(unsigned align) { alignment = align; return *this; @@ -290,8 +290,8 @@ /// Callback function to do the allocation of the promoted buffer. If None, /// then the default allocation scheme of allocating a memref buffer /// followed by a view operation is used. - Optional allocationFn = None; - Optional deallocationFn = None; + std::optional allocationFn = std::nullopt; + std::optional deallocationFn = std::nullopt; LinalgPromotionOptions & setAllocationDeallocationFns(AllocBufferCallbackFn const &allocFn, DeallocBufferCallbackFn const &deallocFn) { @@ -301,8 +301,8 @@ } /// Callback function to do the copy of data to and from the promoted /// subview. If None then a memref.copy is used. - Optional copyInFn = None; - Optional copyOutFn = None; + std::optional copyInFn = std::nullopt; + std::optional copyOutFn = std::nullopt; LinalgPromotionOptions &setCopyInOutFns(CopyCallbackFn const ©In, CopyCallbackFn const ©Out) { copyInFn = copyIn; @@ -445,14 +445,14 @@ FailureOr tileToForeachThreadOp(RewriterBase &builder, TilingInterface op, ArrayRef numThreads, - Optional mapping); + std::optional mapping); /// Same as `tileToForeachThreadOp`, but calculate the number of threads /// required using the given tileSizes. FailureOr tileToForeachThreadOpUsingTileSizes(RewriterBase &builder, TilingInterface op, ArrayRef tileSizes, - Optional mapping); + std::optional mapping); /// Transformation information returned after reduction tiling. struct ForeachThreadReductionTilingResult { @@ -493,7 +493,7 @@ FailureOr tileReductionUsingForeachThread(RewriterBase &b, PartialReductionOpInterface op, ArrayRef numThreads, - Optional mapping); + std::optional mapping); /// All indices returned by IndexOp should be invariant with respect to /// tiling. Therefore, if an operation is tiled, we have to transform the @@ -598,7 +598,7 @@ SmallVector tileInterchange; /// When specified, specifies distribution of generated tile loops to /// processors. - Optional tileDistribution = None; + std::optional tileDistribution = std::nullopt; LinalgTilingAndFusionOptions & setDistributionOptions(LinalgLoopDistributionOptions distributionOptions) { tileDistribution = std::move(distributionOptions); @@ -651,7 +651,7 @@ /// When specified, specifies distribution of generated tile loops to /// processors. - Optional distribution = None; + std::optional distribution = std::nullopt; LinalgTilingOptions & setDistributionOptions(LinalgLoopDistributionOptions distributionOptions) { @@ -781,7 +781,7 @@ }; /// Return vector::CombiningKind for the given op. -llvm::Optional getCombinerOpKind(Operation *combinerOp); +std::optional getCombinerOpKind(Operation *combinerOp); //===----------------------------------------------------------------------===// // Transformations exposed as rewrite patterns. 
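The LinalgPromotionOptions and tiling-options hunks above all apply one mechanical substitution: an llvm::Optional member defaulted to llvm::None becomes a std::optional member defaulted to std::nullopt, while call sites stay unchanged because llvm::Optional had already adopted the std::optional accessor names (has_value, operator*, value_or). The sketch below is illustrative only and is not part of the patch; the PromotionLikeOptions struct and getAlignmentOr helper are hypothetical stand-ins for the real MLIR options classes.

```c++
// Illustrative sketch only -- not part of the patch. A hypothetical options
// struct written the way the MLIR options structs above look after migration.
#include <optional>

struct PromotionLikeOptions {
  // Unset by default; std::nullopt takes the role previously played by
  // llvm::None.
  std::optional<unsigned> alignment = std::nullopt;

  PromotionLikeOptions &setAlignment(unsigned align) {
    alignment = align;
    return *this;
  }
};

// Callers read the field exactly as they did with llvm::Optional, since
// has_value(), operator*, and value_or() carry over unchanged.
unsigned getAlignmentOr(const PromotionLikeOptions &opts, unsigned fallback) {
  return opts.alignment.value_or(fallback);
}
```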
diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td --- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td +++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td @@ -586,7 +586,7 @@ let extraClassDeclaration = [{ /// Helper function to get the index as a simple integer if it is constant. - Optional getConstantIndex(); + std::optional getConstantIndex(); /// Interface method of ShapedDimOpInterface: Return the source memref. Value getShapedValue() { return getSource(); } diff --git a/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td b/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td --- a/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td +++ b/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td @@ -814,7 +814,7 @@ let assemblyFormat = "($index^)? `of` $inputOp `:` type($value) attr-dict"; let builders = [ OpBuilder<(ins "Type":$resultType, "Value":$inputOp, - "Optional":$index), [{ + "std::optional":$index), [{ build($_builder, $_state, resultType, inputOp, index ? $_builder.getI32IntegerAttr(*index) : IntegerAttr()); }]>, @@ -881,7 +881,7 @@ let assemblyFormat = "($index^)? `of` $inputOp `:` type($value) attr-dict"; let builders = [ OpBuilder<(ins "Type":$resultType, "Value":$inputOp, - "Optional":$index), [{ + "std::optional":$index), [{ build($_builder, $_state, resultType, inputOp, index ? $_builder.getI32IntegerAttr(*index) : IntegerAttr()); }]>, diff --git a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td --- a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td +++ b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td @@ -222,7 +222,7 @@ let skipDefaultBuilders = 1; let builders = [ OpBuilder<(ins "Value":$lowerBound, "Value":$upperBound, "Value":$step, - CArg<"ValueRange", "llvm::None">:$iterArgs, + CArg<"ValueRange", "std::nullopt">:$iterArgs, CArg<"function_ref", "nullptr">)> ]; @@ -274,7 +274,7 @@ } /// Get the iter arg number for an operand. If it isn't an iter arg /// operand return std::nullopt. - Optional getIterArgNumberForOpOperand(OpOperand &opOperand) { + std::optional getIterArgNumberForOpOperand(OpOperand &opOperand) { if (opOperand.getOwner() != getOperation()) return std::nullopt; unsigned operandNumber = opOperand.getOperandNumber(); @@ -331,10 +331,10 @@ /// correspond to the loop iterator operands, i.e., those excluding the /// induction variable. LoopOp only has one region, so 0 is the only valid /// value for `index`. - OperandRange getSuccessorEntryOperands(Optional index); + OperandRange getSuccessorEntryOperands(std::optional index); /// Returns the step as an `APInt` if it is constant. - Optional getConstantStep(); + std::optional getConstantStep(); /// Interface method for ConditionallySpeculatable. Speculation::Speculatability getSpeculatability(); @@ -496,7 +496,7 @@ let builders = [ // Bodyless builder, outputs must be specified. OpBuilder<(ins "ValueRange":$outputs, "ValueRange":$num_threads, - "Optional":$mapping)>, + "std::optional":$mapping)>, // Builder that takes a bodyBuilder lambda.
OpBuilder<(ins "ValueRange":$outputs, "ValueRange":$num_threads, "ArrayRef":$mapping, @@ -1003,7 +1003,7 @@ using BodyBuilderFn = function_ref; - OperandRange getSuccessorEntryOperands(Optional index); + OperandRange getSuccessorEntryOperands(std::optional index); ConditionOp getConditionOp(); YieldOp getYieldOp(); Block::BlockArgListType getBeforeArguments(); diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h b/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h --- a/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h +++ b/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h @@ -31,7 +31,7 @@ auto loc = parser.getCurrentLocation(); if (parser.parseKeyword(&keyword)) return failure(); - if (Optional attr = spirv::symbolizeEnum(keyword)) { + if (auto attr = spirv::symbolizeEnum(keyword)) { value = *attr; return success(); } diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td @@ -490,11 +490,11 @@ let regions = (region AnyRegion); let builders = [ - OpBuilder<(ins CArg<"Optional", "std::nullopt">:$name)>, + OpBuilder<(ins CArg<"std::optional", "std::nullopt">:$name)>, OpBuilder<(ins "spirv::AddressingModel":$addressing_model, "spirv::MemoryModel":$memory_model, - CArg<"Optional", "std::nullopt">:$vce_triple, - CArg<"Optional", "std::nullopt">:$name)> + CArg<"std::optional", "std::nullopt">:$vce_triple, + CArg<"std::optional", "std::nullopt">:$name)> ]; // We need to ensure the block inside the region is properly terminated; @@ -509,7 +509,7 @@ bool isOptionalSymbol() { return true; } - Optional getName() { return getSymName(); } + std::optional getName() { return getSymName(); } static StringRef getVCETripleAttrName() { return "vce_triple"; } }]; diff --git a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td --- a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td +++ b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td @@ -352,7 +352,7 @@ let extraClassDeclaration = [{ /// Get the `index` value as integer if it is constant. - Optional getConstantIndex(); + std::optional getConstantIndex(); /// Returns when two result types are compatible for this op; method used by /// InferTypeOpInterface @@ -383,7 +383,7 @@ let extraClassDeclaration = [{ /// Get the `dim` value as integer if it is constant. - Optional getConstantDim(); + std::optional getConstantDim(); /// Returns when two result types are compatible for this op; method used by /// InferTypeOpInterface static bool isCompatibleReturnTypes(TypeRange l, TypeRange r); diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td --- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td +++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td @@ -129,7 +129,7 @@ let extraClassDeclaration = [{ /// Helper function to get the index as a simple integer if it is constant. - Optional getConstantIndex(); + std::optional getConstantIndex(); /// Interface method of ShapedDimOpInterface: Return the source tensor. Value getShapedValue() { return getSource(); } @@ -380,7 +380,7 @@ /// Compute the rank-reduction mask that can be applied to map the source /// tensor type to the result tensor type by dropping unit dims. 
- llvm::Optional> + std::optional> computeRankReductionMask() { return ::mlir::computeRankReductionMask(getSourceType().getShape(), getType().getShape()); diff --git a/mlir/include/mlir/Dialect/Utils/StaticValueUtils.h b/mlir/include/mlir/Dialect/Utils/StaticValueUtils.h --- a/mlir/include/mlir/Dialect/Utils/StaticValueUtils.h +++ b/mlir/include/mlir/Dialect/Utils/StaticValueUtils.h @@ -69,7 +69,7 @@ SmallVector getAsOpFoldResult(ArrayAttr arrayAttr); /// If ofr is a constant integer or an IntegerAttr, return the integer. -Optional getConstantIntValue(OpFoldResult ofr); +std::optional getConstantIntValue(OpFoldResult ofr); /// Return true if `ofr` is constant integer equal to `value`. bool isConstantIntValue(OpFoldResult ofr, int64_t value); diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h @@ -118,7 +118,7 @@ /// VectorToSCF, which reduces the rank of vector transfer ops. void populateVectorTransferLoweringPatterns( RewritePatternSet &patterns, - llvm::Optional maxTransferRank = std::nullopt, + std::optional maxTransferRank = std::nullopt, PatternBenefit benefit = 1); /// These patterns materialize masks for various vector ops such as transfers. diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td @@ -537,7 +537,7 @@ Note that this instruction resembles vector.extract, but is restricted to 0-D and 1-D vectors and relaxed to dynamic indices. - If the vector is 0-D, the position must be llvm::None. + If the vector is 0-D, the position must be std::nullopt. It is meant to be closer to LLVM's version: @@ -1351,19 +1351,19 @@ "Value":$source, "ValueRange":$indices, "AffineMap":$permutationMap, - CArg<"Optional>", "::std::nullopt">:$inBounds)>, + CArg<"std::optional>", "::std::nullopt">:$inBounds)>, /// 3. Builder that sets permutation map to 'getMinorIdentityMap'. OpBuilder<(ins "VectorType":$vectorType, "Value":$source, "ValueRange":$indices, "Value":$padding, - CArg<"Optional>", "::std::nullopt">:$inBounds)>, + CArg<"std::optional>", "::std::nullopt">:$inBounds)>, /// 4. Builder that sets padding to zero and permutation map to /// 'getMinorIdentityMap'. OpBuilder<(ins "VectorType":$vectorType, "Value":$source, "ValueRange":$indices, - CArg<"Optional>", "::std::nullopt">:$inBounds)>, + CArg<"std::optional>", "::std::nullopt">:$inBounds)>, ]; let extraClassDeclaration = [{ @@ -1500,13 +1500,13 @@ "Value":$dest, "ValueRange":$indices, "AffineMap":$permutationMap, - CArg<"Optional>", "::std::nullopt">:$inBounds)>, + CArg<"std::optional>", "::std::nullopt">:$inBounds)>, /// 4. Builder with type inference that sets an empty mask and sets permutation /// map to 'getMinorIdentityMap'. OpBuilder<(ins "Value":$vector, "Value":$dest, "ValueRange":$indices, - CArg<"Optional>", "::std::nullopt">:$inBounds)>, + CArg<"std::optional>", "::std::nullopt">:$inBounds)>, ]; let extraClassDeclaration = [{ diff --git a/mlir/include/mlir/IR/Builders.h b/mlir/include/mlir/IR/Builders.h --- a/mlir/include/mlir/IR/Builders.h +++ b/mlir/include/mlir/IR/Builders.h @@ -439,8 +439,7 @@ /// Helper for sanity checking preconditions for create* methods below.
template RegisteredOperationName getCheckRegisteredInfo(MLIRContext *ctx) { - Optional opName = - RegisteredOperationName::lookup(OpT::getOperationName(), ctx); + auto opName = RegisteredOperationName::lookup(OpT::getOperationName(), ctx); if (LLVM_UNLIKELY(!opName)) { llvm::report_fatal_error( "Building op `" + OpT::getOperationName() + diff --git a/mlir/include/mlir/IR/BuiltinOps.td b/mlir/include/mlir/IR/BuiltinOps.td --- a/mlir/include/mlir/IR/BuiltinOps.td +++ b/mlir/include/mlir/IR/BuiltinOps.td @@ -58,13 +58,13 @@ let regions = (region SizedRegion<1>:$bodyRegion); let assemblyFormat = "($sym_name^)? attr-dict-with-keyword $bodyRegion"; - let builders = [OpBuilder<(ins CArg<"Optional", "{}">:$name)>]; + let builders = [OpBuilder<(ins CArg<"std::optional", "{}">:$name)>]; let extraClassDeclaration = [{ /// Construct a module from the given location with an optional name. - static ModuleOp create(Location loc, Optional name = std::nullopt); + static ModuleOp create(Location loc, std::optional name = std::nullopt); /// Return the name of this module if present. - Optional getName() { return getSymName(); } + std::optional getName() { return getSymName(); } //===------------------------------------------------------------------===// // SymbolOpInterface Methods diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td --- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td +++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td @@ -63,7 +63,7 @@ type. If a shape is not provided, the current shape of the type is used. }], "::mlir::ShapedType", "cloneWith", (ins - "::llvm::Optional<::llvm::ArrayRef>":$shape, + "::std::optional<::llvm::ArrayRef>":$shape, "::mlir::Type":$elementType )>, diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h --- a/mlir/include/mlir/IR/BuiltinTypes.h +++ b/mlir/include/mlir/IR/BuiltinTypes.h @@ -90,7 +90,7 @@ /// Clone this type with the given shape and element type. If the /// provided shape is `None`, the current shape of the type is used. - TensorType cloneWith(Optional> shape, + TensorType cloneWith(std::optional> shape, Type elementType) const; /// Return true if the specified element type is ok in a tensor. @@ -126,7 +126,7 @@ /// Clone this type with the given shape and element type. If the /// provided shape is `None`, the current shape of the type is used. - BaseMemRefType cloneWith(Optional> shape, + BaseMemRefType cloneWith(std::optional> shape, Type elementType) const; /// Return true if the specified element type is ok in a memref. @@ -337,7 +337,7 @@ /// which dimensions must be kept when e.g. compute MemRef strides under /// rank-reducing operations. Return None if reducedShape cannot be obtained /// by dropping only `1` entries in `originalShape`. -llvm::Optional> +std::optional> computeRankReductionMask(ArrayRef originalShape, ArrayRef reducedShape); diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td --- a/mlir/include/mlir/IR/BuiltinTypes.td +++ b/mlir/include/mlir/IR/BuiltinTypes.td @@ -1031,7 +1031,7 @@ /// Clone this vector type with the given shape and element type. If the /// provided shape is `None`, the current shape of the type is used. 
- VectorType cloneWith(Optional> shape, + VectorType cloneWith(std::optional> shape, Type elementType) const; }]; let skipDefaultBuilders = 1; diff --git a/mlir/include/mlir/IR/Diagnostics.h b/mlir/include/mlir/IR/Diagnostics.h --- a/mlir/include/mlir/IR/Diagnostics.h +++ b/mlir/include/mlir/IR/Diagnostics.h @@ -483,19 +483,19 @@ /// the diagnostic arguments directly instead of relying on the returned /// InFlightDiagnostic. template -LogicalResult emitOptionalError(Optional loc, Args &&...args) { +LogicalResult emitOptionalError(std::optional loc, Args &&...args) { if (loc) return emitError(*loc).append(std::forward(args)...); return failure(); } template -LogicalResult emitOptionalWarning(Optional loc, Args &&...args) { +LogicalResult emitOptionalWarning(std::optional loc, Args &&...args) { if (loc) return emitWarning(*loc).append(std::forward(args)...); return failure(); } template -LogicalResult emitOptionalRemark(Optional loc, Args &&...args) { +LogicalResult emitOptionalRemark(std::optional loc, Args &&...args) { if (loc) return emitRemark(*loc).append(std::forward(args)...); return failure(); diff --git a/mlir/include/mlir/IR/Dialect.h b/mlir/include/mlir/IR/Dialect.h --- a/mlir/include/mlir/IR/Dialect.h +++ b/mlir/include/mlir/IR/Dialect.h @@ -115,7 +115,8 @@ /// By default this will lookup for registered operations and return the /// `parse()` method registered on the RegisteredOperationName. Dialects can /// override this behavior and handle unregistered operations as well. - virtual Optional getParseOperationHook(StringRef opName) const; + virtual std::optional + getParseOperationHook(StringRef opName) const; /// Print an operation registered to this dialect. /// This hook is invoked for registered operation which don't override the diff --git a/mlir/include/mlir/IR/EnumAttr.td b/mlir/include/mlir/IR/EnumAttr.td --- a/mlir/include/mlir/IR/EnumAttr.td +++ b/mlir/include/mlir/IR/EnumAttr.td @@ -132,7 +132,7 @@ // type to the corresponding symbol. It will have the following signature: // // ```c++ - // llvm::Optional<> (); + // std::optional<> (); // ``` string underlyingToSymbolFnName = "symbolize" # name; @@ -140,7 +140,7 @@ // corresponding symbol. It will have the following signature: // // ```c++ - // llvm::Optional<> (llvm::StringRef); + // std::optional<> (llvm::StringRef); // ``` string stringToSymbolFnName = "symbolize" # name; diff --git a/mlir/include/mlir/IR/OpBase.td b/mlir/include/mlir/IR/OpBase.td --- a/mlir/include/mlir/IR/OpBase.td +++ b/mlir/include/mlir/IR/OpBase.td @@ -1033,7 +1033,7 @@ // Rewrite the attribute to be optional. // Note: this has to be kept up to date with Attr above. let storageType = attr.storageType; - let returnType = "::llvm::Optional<" # attr.returnType #">"; + let returnType = "::std::optional<" # attr.returnType #">"; let convertFromStorage = "$_self ? " # returnType # "(" # attr.convertFromStorage # ") : (::std::nullopt)"; let valueType = attr.valueType; diff --git a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.h b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.h --- a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.h +++ b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.h @@ -128,7 +128,7 @@ /// Return the `BlockArgument` corresponding to operand `operandIndex` in some /// successor if `operandIndex` is within the range of `operands`, or None if /// `operandIndex` isn't a successor operand index. 
-Optional +std::optional getBranchSuccessorArgument(const SuccessorOperands &operands, unsigned operandIndex, Block *successor); @@ -164,7 +164,7 @@ RegionSuccessor(Region *region, Block::BlockArgListType regionInputs = {}) : region(region), inputs(regionInputs) {} /// Initialize a successor that branches back to/out of the parent operation. - RegionSuccessor(Optional results = {}) + RegionSuccessor(std::optional results = {}) : inputs(results ? ValueRange(*results) : ValueRange()) {} /// Return the given region successor. Returns nullptr if the successor is the @@ -190,7 +190,8 @@ public: /// Create invocation bounds. The lower bound must be at least 0 and only the /// upper bound can be unknown. - InvocationBounds(unsigned lb, Optional ub) : lower(lb), upper(ub) { + InvocationBounds(unsigned lb, std::optional ub) + : lower(lb), upper(ub) { assert((!ub || ub >= lb) && "upper bound cannot be less than lower bound"); } @@ -198,7 +199,7 @@ unsigned getLowerBound() const { return lower; } /// Return the upper bound. - Optional getUpperBound() const { return upper; } + std::optional getUpperBound() const { return upper; } /// Returns the unknown invocation bounds, i.e., there is no information on /// how many times a region may be invoked. @@ -209,7 +210,7 @@ unsigned lower; /// The maximum number of times the successor region will be invoked or `None` /// if an upper bound is not known. - Optional upper; + std::optional upper; }; /// Return `true` if `a` and `b` are in mutually exclusive regions as per @@ -241,16 +242,16 @@ /// `OperandRange` represents all operands that are passed to the specified /// successor region. If `regionIndex` is `std::nullopt`, all operands that are /// passed to the parent operation will be returned. -Optional +std::optional getMutableRegionBranchSuccessorOperands(Operation *operation, - Optional regionIndex); + std::optional regionIndex); /// Returns the read only operands that are passed to the region with the given /// `regionIndex`. See `getMutableRegionBranchSuccessorOperands` for more /// information. -Optional +std::optional getRegionBranchSuccessorOperands(Operation *operation, - Optional regionIndex); + std::optional regionIndex); //===----------------------------------------------------------------------===// // ControlFlow Traits diff --git a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td --- a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td +++ b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td @@ -70,11 +70,11 @@ some successor, or None if `operandIndex` isn't a successor operand index. }], - "::llvm::Optional<::mlir::BlockArgument>", "getSuccessorBlockArgument", + "::std::optional<::mlir::BlockArgument>", "getSuccessorBlockArgument", (ins "unsigned":$operandIndex), [{ ::mlir::Operation *opaqueOp = $_op; for (unsigned i = 0, e = opaqueOp->getNumSuccessors(); i != e; ++i) { - if (::llvm::Optional<::mlir::BlockArgument> arg = ::mlir::detail::getBranchSuccessorArgument( + if (::std::optional<::mlir::BlockArgument> arg = ::mlir::detail::getBranchSuccessorArgument( $_op.getSuccessorOperands(i), operandIndex, opaqueOp->getSuccessor(i))) return arg; @@ -140,7 +140,7 @@ `getSuccessorRegions`. 
}], "::mlir::OperandRange", "getSuccessorEntryOperands", - (ins "::llvm::Optional":$index), [{}], + (ins "::std::optional":$index), [{}], /*defaultImplementation=*/[{ auto operandEnd = this->getOperation()->operand_end(); return ::mlir::OperandRange(operandEnd, operandEnd); @@ -161,7 +161,7 @@ successor region must be non-empty. }], "void", "getSuccessorRegions", - (ins "::llvm::Optional":$index, + (ins "::std::optional":$index, "::llvm::ArrayRef<::mlir::Attribute>":$operands, "::llvm::SmallVectorImpl<::mlir::RegionSuccessor> &":$regions) >, @@ -208,7 +208,7 @@ let extraClassDeclaration = [{ /// Convenience helper in case none of the operands is known. - void getSuccessorRegions(Optional index, + void getSuccessorRegions(std::optional index, SmallVectorImpl ®ions); /// Return `true` if control flow originating from the given region may @@ -239,7 +239,7 @@ the parent operation. }], "::mlir::MutableOperandRange", "getMutableSuccessorOperands", - (ins "::llvm::Optional":$index) + (ins "::std::optional":$index) >, InterfaceMethod<[{ Returns a range of operands that are semantically "returned" by passing @@ -248,7 +248,7 @@ operation. }], "::mlir::OperandRange", "getSuccessorOperands", - (ins "::llvm::Optional":$index), [{}], + (ins "::std::optional":$index), [{}], /*defaultImplementation=*/[{ return $_op.getMutableSuccessorOperands(index); }] diff --git a/mlir/include/mlir/Interfaces/InferTypeOpInterface.h b/mlir/include/mlir/Interfaces/InferTypeOpInterface.h --- a/mlir/include/mlir/Interfaces/InferTypeOpInterface.h +++ b/mlir/include/mlir/Interfaces/InferTypeOpInterface.h @@ -235,12 +235,13 @@ // TODO: Consider generating typedefs for trait member functions if this usage // becomes more common. LogicalResult inferReturnTensorTypes( - function_ref location, ValueShapeRange operands, - DictionaryAttr attributes, RegionRange regions, - SmallVectorImpl &retComponents)> + function_ref< + LogicalResult(MLIRContext *, std::optional location, + ValueShapeRange operands, DictionaryAttr attributes, + RegionRange regions, + SmallVectorImpl &retComponents)> componentTypeFn, - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes); @@ -272,7 +273,7 @@ class InferTensorType : public TraitBase { public: static LogicalResult - inferReturnTypes(MLIRContext *context, Optional location, + inferReturnTypes(MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { diff --git a/mlir/include/mlir/Interfaces/InferTypeOpInterface.td b/mlir/include/mlir/Interfaces/InferTypeOpInterface.td --- a/mlir/include/mlir/Interfaces/InferTypeOpInterface.td +++ b/mlir/include/mlir/Interfaces/InferTypeOpInterface.td @@ -41,7 +41,7 @@ /*retTy=*/"::mlir::LogicalResult", /*methodName=*/"inferReturnTypes", /*args=*/(ins "::mlir::MLIRContext *":$context, - "::llvm::Optional<::mlir::Location>":$location, + "::std::optional<::mlir::Location>":$location, "::mlir::ValueRange":$operands, "::mlir::DictionaryAttr":$attributes, "::mlir::RegionRange":$regions, @@ -72,7 +72,7 @@ /*retTy=*/"::mlir::LogicalResult", /*methodName=*/"refineReturnTypes", /*args=*/(ins "::mlir::MLIRContext *":$context, - "::llvm::Optional<::mlir::Location>":$location, + "::std::optional<::mlir::Location>":$location, "::mlir::ValueRange":$operands, "::mlir::DictionaryAttr":$attributes, 
"::mlir::RegionRange":$regions, @@ -144,7 +144,7 @@ /*retTy=*/"::mlir::LogicalResult", /*methodName=*/"inferReturnTypeComponents", /*args=*/(ins "::mlir::MLIRContext*":$context, - "::llvm::Optional<::mlir::Location>":$location, + "::std::optional<::mlir::Location>":$location, "::mlir::ValueShapeRange":$operands, "::mlir::DictionaryAttr":$attributes, "::mlir::RegionRange":$regions, diff --git a/mlir/include/mlir/Interfaces/LoopLikeInterface.td b/mlir/include/mlir/Interfaces/LoopLikeInterface.td --- a/mlir/include/mlir/Interfaces/LoopLikeInterface.td +++ b/mlir/include/mlir/Interfaces/LoopLikeInterface.td @@ -52,7 +52,7 @@ If there is a single induction variable return it, otherwise return std::nullopt. }], - /*retTy=*/"::llvm::Optional<::mlir::Value>", + /*retTy=*/"::std::optional<::mlir::Value>", /*methodName=*/"getSingleInductionVar", /*args=*/(ins), /*methodBody=*/"", @@ -64,7 +64,7 @@ Return the single lower bound value or attribute if it exists, otherwise return std::nullopt. }], - /*retTy=*/"::llvm::Optional<::mlir::OpFoldResult>", + /*retTy=*/"::std::optional<::mlir::OpFoldResult>", /*methodName=*/"getSingleLowerBound", /*args=*/(ins), /*methodBody=*/"", @@ -76,7 +76,7 @@ Return the single step value or attribute if it exists, otherwise return std::nullopt. }], - /*retTy=*/"::llvm::Optional<::mlir::OpFoldResult>", + /*retTy=*/"::std::optional<::mlir::OpFoldResult>", /*methodName=*/"getSingleStep", /*args=*/(ins), /*methodBody=*/"", @@ -88,7 +88,7 @@ Return the single upper bound value or attribute if it exists, otherwise return std::nullopt. }], - /*retTy=*/"::llvm::Optional<::mlir::OpFoldResult>", + /*retTy=*/"::std::optional<::mlir::OpFoldResult>", /*methodName=*/"getSingleUpperBound", /*args=*/(ins), /*methodBody=*/"", diff --git a/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td b/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td --- a/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td +++ b/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td @@ -106,7 +106,7 @@ /// Return the effect of the given type `Effect` that is applied to the /// given value, or None if no effect exists. template - ::llvm::Optional<::mlir::SideEffects::EffectInstance<}] # baseEffect # [{>> + ::std::optional<::mlir::SideEffects::EffectInstance<}] # baseEffect # [{>> getEffectOnValue(::mlir::Value value) { llvm::SmallVector<::mlir::SideEffects::EffectInstance< }] # baseEffect # [{>, 4> effects; diff --git a/mlir/include/mlir/Interfaces/VectorInterfaces.td b/mlir/include/mlir/Interfaces/VectorInterfaces.td --- a/mlir/include/mlir/Interfaces/VectorInterfaces.td +++ b/mlir/include/mlir/Interfaces/VectorInterfaces.td @@ -28,7 +28,7 @@ `targetShape`. Return `None` if the op cannot be unrolled to the target vector shape. }], - /*retTy=*/"::llvm::Optional<::llvm::SmallVector>", + /*retTy=*/"::std::optional<::llvm::SmallVector>", /*methodName=*/"getShapeForUnroll", /*args=*/(ins), /*methodBody=*/"", @@ -143,7 +143,7 @@ >, InterfaceMethod< /*desc=*/"Return the `in_bounds` boolean ArrayAttr.", - /*retTy=*/"::llvm::Optional<::mlir::ArrayAttr>", + /*retTy=*/"::std::optional<::mlir::ArrayAttr>", /*methodName=*/"in_bounds", /*args=*/(ins), /*methodBody=*/"return $_op.getInBounds();" diff --git a/mlir/include/mlir/TableGen/Attribute.h b/mlir/include/mlir/TableGen/Attribute.h --- a/mlir/include/mlir/TableGen/Attribute.h +++ b/mlir/include/mlir/TableGen/Attribute.h @@ -58,7 +58,7 @@ // Return the type constraint corresponding to the type of this attribute, or // None if this is not a TypedAttr. 
- llvm::Optional getValueType() const; + std::optional getValueType() const; // Returns the template getter method call which reads this attribute's // storage and returns the value as of the desired return type. diff --git a/mlir/include/mlir/TableGen/CodeGenHelpers.h b/mlir/include/mlir/TableGen/CodeGenHelpers.h --- a/mlir/include/mlir/TableGen/CodeGenHelpers.h +++ b/mlir/include/mlir/TableGen/CodeGenHelpers.h @@ -143,7 +143,8 @@ /// LogicalResult(PatternRewriter &rewriter, Operation *op, Attribute attr, /// StringRef failureStr); /// - Optional getAttrConstraintFn(const Constraint &constraint) const; + std::optional + getAttrConstraintFn(const Constraint &constraint) const; /// Get the name of the static function used for the given successor /// constraint. These functions are in the form: diff --git a/mlir/include/mlir/TableGen/Dialect.h b/mlir/include/mlir/TableGen/Dialect.h --- a/mlir/include/mlir/TableGen/Dialect.h +++ b/mlir/include/mlir/TableGen/Dialect.h @@ -50,7 +50,7 @@ ArrayRef getDependentDialects() const; // Returns the dialects extra class declaration code. - llvm::Optional getExtraClassDeclaration() const; + std::optional getExtraClassDeclaration() const; /// Returns true if this dialect has a canonicalizer. bool hasCanonicalizer() const; diff --git a/mlir/include/mlir/TableGen/Interfaces.h b/mlir/include/mlir/TableGen/Interfaces.h --- a/mlir/include/mlir/TableGen/Interfaces.h +++ b/mlir/include/mlir/TableGen/Interfaces.h @@ -43,13 +43,13 @@ bool isStatic() const; // Return the body for this method if it has one. - llvm::Optional getBody() const; + std::optional getBody() const; // Return the default implementation for this method if it has one. - llvm::Optional getDefaultImplementation() const; + std::optional getDefaultImplementation() const; // Return the description of this method if it has one. - llvm::Optional getDescription() const; + std::optional getDescription() const; // Arguments. ArrayRef getArguments() const; @@ -83,20 +83,20 @@ ArrayRef getMethods() const; // Return the description of this method if it has one. - llvm::Optional getDescription() const; + std::optional getDescription() const; // Return the interfaces extra class declaration code. - llvm::Optional getExtraClassDeclaration() const; + std::optional getExtraClassDeclaration() const; // Return the traits extra class declaration code. - llvm::Optional getExtraTraitClassDeclaration() const; + std::optional getExtraTraitClassDeclaration() const; // Return the extra class declaration code shared between the interface and // trait classes. - llvm::Optional getExtraSharedClassDeclaration() const; + std::optional getExtraSharedClassDeclaration() const; // Return the verify method body if it has one. - llvm::Optional getVerify() const; + std::optional getVerify() const; // If there's a verify method, return if it needs to access the ops in the // regions. diff --git a/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp b/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp --- a/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp +++ b/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp @@ -44,7 +44,7 @@ // this region predecessor that correspond to the input values of `region`. If // an index could not be found, None is returned instead. 
auto getOperandIndexIfPred = - [&](Optional predIndex) -> Optional { + [&](std::optional predIndex) -> std::optional { SmallVector successors; branch.getSuccessorRegions(predIndex, successors); for (RegionSuccessor &successor : successors) { @@ -74,13 +74,12 @@ }; // Check branches from the parent operation. - Optional regionIndex; + std::optional regionIndex; if (region) { // Determine the actual region number from the passed region. regionIndex = region->getRegionNumber(); } - if (Optional operandIndex = - getOperandIndexIfPred(/*predIndex=*/std::nullopt)) { + if (auto operandIndex = getOperandIndexIfPred(/*predIndex=*/std::nullopt)) { collectUnderlyingAddressValues( branch.getSuccessorEntryOperands(regionIndex)[*operandIndex], maxDepth, visited, output); @@ -88,7 +87,7 @@ // Check branches from each child region. Operation *op = branch.getOperation(); for (int i = 0, e = op->getNumRegions(); i != e; ++i) { - if (Optional operandIndex = getOperandIndexIfPred(i)) { + if (auto operandIndex = getOperandIndexIfPred(i)) { for (Block &block : op->getRegion(i)) { Operation *term = block.getTerminator(); // Try to determine possible region-branch successor operands for the @@ -210,7 +209,8 @@ /// non-null it specifies the parent operation that the allocation does not /// escape. If no scope is found, `allocScopeOp` is set to nullptr. static LogicalResult -getAllocEffectFor(Value value, Optional &effect, +getAllocEffectFor(Value value, + std::optional &effect, Operation *&allocScopeOp) { // Try to get a memory effect interface for the parent operation. Operation *op; @@ -248,7 +248,7 @@ if (lhs == rhs) return AliasResult::MustAlias; Operation *lhsAllocScope = nullptr, *rhsAllocScope = nullptr; - Optional lhsAlloc, rhsAlloc; + std::optional lhsAlloc, rhsAlloc; // Handle the case where lhs is a constant. Attribute lhsAttr, rhsAttr; @@ -330,7 +330,7 @@ return AliasResult::MayAlias; // Check the alias results against each of the underlying values. - Optional result; + std::optional result; for (Value lhsVal : lhsValues) { for (Value rhsVal : rhsValues) { AliasResult nextResult = aliasImpl(lhsVal, rhsVal); diff --git a/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp b/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp --- a/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp +++ b/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp @@ -37,7 +37,7 @@ // If the integer range can be narrowed to a constant, update the constant // value of the SSA value. - Optional constant = getValue().getValue().getConstantValue(); + auto constant = getValue().getValue().getConstantValue(); auto value = point.get(); auto *cv = solver->getOrCreateState>(value); if (!constant) @@ -165,7 +165,7 @@ /// Given the results of getConstant{Lower,Upper}Bound() or getConstantStep() /// on a LoopLikeInterface return the lower/upper bound for that result if /// possible. 
- auto getLoopBoundFromFold = [&](Optional loopBound, + auto getLoopBoundFromFold = [&](std::optional loopBound, Type boundType, bool getUpper) { unsigned int width = ConstantIntRanges::getStorageBitwidth(boundType); if (loopBound.has_value()) { @@ -190,14 +190,14 @@ // Infer bounds for loop arguments that have static bounds if (auto loop = dyn_cast(op)) { - Optional iv = loop.getSingleInductionVar(); + auto iv = loop.getSingleInductionVar(); if (!iv) { return SparseDataFlowAnalysis ::visitNonControlFlowArguments( op, successor, argLattices, firstIndex); } - Optional lowerBound = loop.getSingleLowerBound(); - Optional upperBound = loop.getSingleUpperBound(); - Optional step = loop.getSingleStep(); + auto lowerBound = loop.getSingleLowerBound(); + auto upperBound = loop.getSingleUpperBound(); + auto step = loop.getSingleStep(); APInt min = getLoopBoundFromFold(lowerBound, iv->getType(), /*getUpper=*/false); APInt max = getLoopBoundFromFold(upperBound, iv->getType(), diff --git a/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp b/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp --- a/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp +++ b/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp @@ -207,7 +207,7 @@ void AbstractSparseDataFlowAnalysis::visitRegionSuccessors( ProgramPoint point, RegionBranchOpInterface branch, - Optional successorIndex, + std::optional successorIndex, ArrayRef lattices) { const auto *predecessors = getOrCreateFor(point, point); assert(predecessors->allPredecessorsKnown() && @@ -215,7 +215,7 @@ for (Operation *op : predecessors->getKnownPredecessors()) { // Get the incoming successor operands. - Optional operands; + std::optional operands; // Check if the predecessor is the parent op. if (op == branch) { diff --git a/mlir/lib/AsmParser/Parser.cpp b/mlir/lib/AsmParser/Parser.cpp --- a/mlir/lib/AsmParser/Parser.cpp +++ b/mlir/lib/AsmParser/Parser.cpp @@ -1870,7 +1870,7 @@ if (iface && !iface->getDefaultDialect().empty()) defaultDialect = iface->getDefaultDialect(); } else { - Optional dialectHook; + std::optional dialectHook; Dialect *dialect = opNameInfo->getDialect(); if (!dialect) { InFlightDiagnostic diag = diff --git a/mlir/lib/CAPI/Interfaces/Interfaces.cpp b/mlir/lib/CAPI/Interfaces/Interfaces.cpp --- a/mlir/lib/CAPI/Interfaces/Interfaces.cpp +++ b/mlir/lib/CAPI/Interfaces/Interfaces.cpp @@ -46,7 +46,7 @@ if (!info) return mlirLogicalResultFailure(); - llvm::Optional maybeLocation; + std::optional maybeLocation; if (!mlirLocationIsNull(location)) maybeLocation = unwrap(location); SmallVector unwrappedOperands; diff --git a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp --- a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp +++ b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp @@ -223,9 +223,8 @@ // initialization of the result values. Attribute reduction = std::get<0>(pair); Type resultType = std::get<1>(pair); - Optional reductionOp = - arith::symbolizeAtomicRMWKind( - static_cast(reduction.cast().getInt())); + auto reductionOp = arith::symbolizeAtomicRMWKind( + static_cast(reduction.cast().getInt())); assert(reductionOp && "Reduction operation cannot be of None Type"); arith::AtomicRMWKind reductionOpValue = *reductionOp; identityVals.push_back( @@ -243,9 +242,8 @@ "Unequal number of reductions and operands."); for (unsigned i = 0, end = reductions.size(); i < end; i++) { // For each of the reduction operations get the respective mlir::Value. 
- Optional reductionOp = - arith::symbolizeAtomicRMWKind( - reductions[i].cast().getInt()); + auto reductionOp = arith::symbolizeAtomicRMWKind( + reductions[i].cast().getInt()); assert(reductionOp && "Reduction Operation cannot be of None Type"); arith::AtomicRMWKind reductionOpValue = *reductionOp; rewriter.setInsertionPoint(&parOp.getBody()->back()); diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp --- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp +++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp @@ -441,8 +441,8 @@ return rewriter.create(loc, sizePtr); } - Optional getConstantDimIndex(memref::DimOp dimOp) const { - if (Optional idx = dimOp.getConstantIndex()) + std::optional getConstantDimIndex(memref::DimOp dimOp) const { + if (auto idx = dimOp.getConstantIndex()) return idx; if (auto constantOp = dimOp.getIndex().getDefiningOp()) @@ -461,7 +461,7 @@ // Take advantage if index is constant. MemRefType memRefType = operandType.cast(); - if (Optional index = getConstantDimIndex(dimOp)) { + if (auto index = getConstantDimIndex(dimOp)) { int64_t i = *index; if (memRefType.isDynamicDim(i)) { // extract dynamic size from the memref descriptor. diff --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp --- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp +++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp @@ -298,14 +298,14 @@ FailureOr ptxTypeB = getNvvmMmaType(bType); if (failed(ptxTypeB)) return op->emitOpError("failed to deduce operand PTX types"); - Optional ptxTypeC = NVVM::MmaOp::inferOperandMMAType( - cType.getElementType(), /*isAccumulator=*/true); + auto ptxTypeC = NVVM::MmaOp::inferOperandMMAType(cType.getElementType(), + /*isAccumulator=*/true); if (!ptxTypeC) return op->emitError( "could not infer the PTX type for the accumulator/result"); // TODO: add an attribute to the op to customize this behavior. 
- Optional overflow(std::nullopt); + std::optional overflow(std::nullopt); if (aType.getElementType().isa()) overflow = NVVM::MMAIntOverflow::satfinite; @@ -322,7 +322,7 @@ Value intrinsicResult = rewriter.create( op.getLoc(), intrinsicResTy, matA, matB, matC, /*shape=*/gemmShape, - /*b1Op=*/llvm::None, + /*b1Op=*/std::nullopt, /*intOverflow=*/overflow, /*multiplicandPtxTypes=*/ std::array{*ptxTypeA, *ptxTypeB}, @@ -413,7 +413,7 @@ unsigned matBSize, unsigned matCSize, NVVM::MMATypes ptxTypeA, NVVM::MMATypes ptxTypeB, NVVM::MMATypes ptxTypeC, NVVM::MMATypes ptxTypeD, - Optional overflow) { + std::optional overflow) { auto ptxTypeStr = [](NVVM::MMATypes ptxType) { return NVVM::stringifyMMATypes(ptxType); }; @@ -449,7 +449,7 @@ static FailureOr emitMmaSparseSyncOpAsm( Location loc, NVVM::MMATypes ptxTypeA, NVVM::MMATypes ptxTypeB, NVVM::MMATypes ptxTypeC, NVVM::MMATypes ptxTypeD, - Optional overflow, ArrayRef unpackedAData, + std::optional overflow, ArrayRef unpackedAData, ArrayRef unpackedB, ArrayRef unpackedC, Value indexData, int64_t metadataSelector, const std::array &shape, Type intrinsicResultType, ConversionPatternRewriter &rewriter) { @@ -505,8 +505,8 @@ FailureOr ptxTypeB = getNvvmMmaType(bType); if (failed(ptxTypeB)) return op->emitOpError("failed to deduce operand PTX types"); - Optional ptxTypeC = NVVM::MmaOp::inferOperandMMAType( - cType.getElementType(), /*isAccumulator=*/true); + auto ptxTypeC = NVVM::MmaOp::inferOperandMMAType(cType.getElementType(), + /*isAccumulator=*/true); if (!ptxTypeC) return op->emitError( "could not infer the PTX type for the accumulator/result"); @@ -517,7 +517,7 @@ return failure(); // TODO: add an attribute to the op to customize this behavior. - Optional overflow(std::nullopt); + std::optional overflow(std::nullopt); if (aType.getElementType().isa()) overflow = NVVM::MMAIntOverflow::satfinite; diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp --- a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp +++ b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp @@ -595,7 +595,7 @@ // Grab the root kind if present. StringAttr rootKindAttr; if (pdl::OperationOp rootOp = root.getDefiningOp()) - if (Optional rootKind = rootOp.getOpName()) + if (auto rootKind = rootOp.getOpName()) rootKindAttr = builder.getStringAttr(*rootKind); builder.setInsertionPointToEnd(currentBlock); diff --git a/mlir/lib/Conversion/PDLToPDLInterp/Predicate.h b/mlir/lib/Conversion/PDLToPDLInterp/Predicate.h --- a/mlir/lib/Conversion/PDLToPDLInterp/Predicate.h +++ b/mlir/lib/Conversion/PDLToPDLInterp/Predicate.h @@ -222,7 +222,7 @@ struct OperandGroupPosition : public PredicateBase< OperandGroupPosition, Position, - std::tuple, bool>, + std::tuple, bool>, Predicates::OperandGroupPos> { explicit OperandGroupPosition(const KeyTy &key); @@ -233,7 +233,9 @@ /// Returns the group number of this position. If None, this group refers to /// all operands. - Optional getOperandGroupNumber() const { return std::get<1>(key); } + std::optional getOperandGroupNumber() const { + return std::get<1>(key); + } /// Returns if the operand group has unknown size. If false, the operand group /// has at max one element.
@@ -298,7 +300,7 @@ struct ResultGroupPosition : public PredicateBase< ResultGroupPosition, Position, - std::tuple, bool>, + std::tuple, bool>, Predicates::ResultGroupPos> { explicit ResultGroupPosition(const KeyTy &key) : Base(key) { parent = std::get<0>(key); @@ -311,7 +313,9 @@ /// Returns the group number of this position. If None, this group refers to /// all results. - Optional getResultGroupNumber() const { return std::get<1>(key); } + std::optional getResultGroupNumber() const { + return std::get<1>(key); + } /// Returns if the result group has unknown size. If false, the result group /// has at max one element. @@ -595,7 +599,7 @@ } /// Returns a position for a group of operands of the given operation. - Position *getOperandGroup(OperationPosition *p, Optional group, + Position *getOperandGroup(OperationPosition *p, std::optional group, bool isVariadic) { return OperandGroupPosition::get(uniquer, p, group, isVariadic); } @@ -609,7 +613,7 @@ } /// Returns a position for a group of results of the given operation. - Position *getResultGroup(OperationPosition *p, Optional group, + Position *getResultGroup(OperationPosition *p, std::optional group, bool isVariadic) { return ResultGroupPosition::get(uniquer, p, group, isVariadic); } diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp --- a/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp +++ b/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp @@ -81,7 +81,7 @@ builder.getType(pos)); }) .Case([&](auto op) { - Optional index = op.getIndex(); + std::optional index = op.getIndex(); // Prevent traversal into a null value if the result has a proper index. if (index) @@ -106,11 +106,11 @@ }); } -static void getTreePredicates(std::vector &predList, - Value val, PredicateBuilder &builder, - DenseMap &inputs, - OperationPosition *pos, - Optional ignoreOperand = std::nullopt) { +static void +getTreePredicates(std::vector &predList, Value val, + PredicateBuilder &builder, + DenseMap &inputs, OperationPosition *pos, + std::optional ignoreOperand = std::nullopt) { assert(val.getType().isa() && "expected operation"); pdl::OperationOp op = cast(val.getDefiningOp()); OperationPosition *opPos = cast(pos); @@ -120,7 +120,7 @@ predList.emplace_back(pos, builder.getIsNotNull()); // Check that this is the correct root operation. - if (Optional opName = op.getOpName()) + if (auto opName = op.getOpName()) predList.emplace_back(pos, builder.getOperationName(*opName)); // Check that the operation has the proper number of operands. If there are @@ -302,7 +302,7 @@ // Ensure that the result isn't null if the result has an index. auto *parentPos = cast(inputs.lookup(op.getParent())); bool isVariadic = op.getType().isa(); - Optional index = op.getIndex(); + auto index = op.getIndex(); resultPos = builder.getResultGroup(parentPos, index, isVariadic); if (index) predList.emplace_back(resultPos, builder.getIsNotNull()); @@ -356,7 +356,7 @@ /// An op accepting a value at an optional index. struct OpIndex { Value parent; - Optional index; + std::optional index; }; /// The parent and operand index of each operation for each root, stored @@ -408,12 +408,13 @@ // * the operand index of the value in its parent; // * the depth of the visited value. 
struct Entry { - Entry(Value value, Value parent, Optional index, unsigned depth) + Entry(Value value, Value parent, std::optional index, + unsigned depth) : value(value), parent(parent), index(index), depth(depth) {} Value value; Value parent; - Optional index; + std::optional index; unsigned depth; }; diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp --- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp +++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp @@ -335,7 +335,7 @@ // The dim op is also okay if its operand memref is a view/subview whose // corresponding size is a valid symbol. - Optional index = getConstantIntValue(dimOp.getDimension()); + auto index = getConstantIntValue(dimOp.getDimension()); assert(index.has_value() && "expect only `dim` operations with a constant index"); int64_t i = index.value(); @@ -918,7 +918,7 @@ static std::enable_if_t(), OpFoldResult> createOrFold(OpBuilder &b, Location loc, ValueRange operands, - Args &&... leadingArguments) { + Args &&...leadingArguments) { // Identify the constant operands and extract their values as attributes. // Note that we cannot use the original values directly because the list of // operands may have changed due to canonicalization and composition. @@ -2004,10 +2004,10 @@ namespace { /// Returns constant trip count in trivial cases. -static Optional getTrivialConstantTripCount(AffineForOp forOp) { +static std::optional getTrivialConstantTripCount(AffineForOp forOp) { int64_t step = forOp.getStep(); if (!forOp.hasConstantBounds() || step <= 0) - return None; + return std::nullopt; int64_t lb = forOp.getConstantLowerBound(); int64_t ub = forOp.getConstantUpperBound(); return ub - lb <= 0 ? 0 : (ub - lb + step - 1) / step; @@ -2025,7 +2025,7 @@ return failure(); if (forOp.getNumResults() == 0) return success(); - Optional tripCount = getTrivialConstantTripCount(forOp); + auto tripCount = getTrivialConstantTripCount(forOp); if (tripCount && *tripCount == 0) { // The initial values of the iteration arguments would be the op's // results. @@ -2077,7 +2077,8 @@ /// correspond to the loop iterator operands, i.e., those excluding the /// induction variable. AffineForOp only has one region, so zero is the only /// valid value for `index`. -OperandRange AffineForOp::getSuccessorEntryOperands(Optional index) { +OperandRange +AffineForOp::getSuccessorEntryOperands(std::optional index) { assert((!index || *index == 0) && "invalid region index"); // The initial operands map to the loop arguments after the induction @@ -2091,14 +2092,14 @@ /// correspond to a constant value for each operand, or null if that operand is /// not a constant. void AffineForOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { assert((!index.has_value() || index.value() == 0) && "expected loop region"); // The loop may typically branch back to its body or to the parent operation. // If the predecessor is the parent op and the trip count is known to be at // least one, branch into the body using the iterator arguments. And in cases // we know the trip count is zero, it can only branch back to its parent. 
- Optional tripCount = getTrivialConstantTripCount(*this); + auto tripCount = getTrivialConstantTripCount(*this); if (!index.has_value() && tripCount.has_value()) { if (tripCount.value() > 0) { regions.push_back(RegionSuccessor(&getLoopBody(), getRegionIterArgs())); @@ -2125,7 +2126,7 @@ /// Returns true if the affine.for has zero iterations in trivial cases. static bool hasTrivialZeroTripCount(AffineForOp op) { - Optional tripCount = getTrivialConstantTripCount(op); + auto tripCount = getTrivialConstantTripCount(op); return tripCount && *tripCount == 0; } @@ -2257,23 +2258,23 @@ Region &AffineForOp::getLoopBody() { return getRegion(); } -Optional AffineForOp::getSingleInductionVar() { +std::optional AffineForOp::getSingleInductionVar() { return getInductionVar(); } -Optional AffineForOp::getSingleLowerBound() { +std::optional AffineForOp::getSingleLowerBound() { if (!hasConstantLowerBound()) return std::nullopt; OpBuilder b(getContext()); return OpFoldResult(b.getI64IntegerAttr(getConstantLowerBound())); } -Optional AffineForOp::getSingleStep() { +std::optional AffineForOp::getSingleStep() { OpBuilder b(getContext()); return OpFoldResult(b.getI64IntegerAttr(getStep())); } -Optional AffineForOp::getSingleUpperBound() { +std::optional AffineForOp::getSingleUpperBound() { if (!hasConstantUpperBound()) return std::nullopt; OpBuilder b(getContext()); @@ -2365,8 +2366,8 @@ buildAffineLoopFromConstants(OpBuilder &builder, Location loc, int64_t lb, int64_t ub, int64_t step, AffineForOp::BodyBuilderFn bodyBuilderFn) { - return builder.create(loc, lb, ub, step, /*iterArgs=*/std::nullopt, - bodyBuilderFn); + return builder.create(loc, lb, ub, step, + /*iterArgs=*/std::nullopt, bodyBuilderFn); } /// Creates an affine loop from the bounds that may or may not be constants. @@ -2523,7 +2524,7 @@ /// AffineIfOp has two regions -- `then` and `else`. The flow of data should be /// as follows: AffineIfOp -> `then`/`else` -> AffineIfOp void AffineIfOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // If the predecessor is an AffineIfOp, then branching into both `then` and // `else` region is valid. 
@@ -3549,7 +3550,7 @@ return AffineValueMap(getUpperBoundsMap(), getUpperBoundsOperands()); } -Optional> AffineParallelOp::getConstantRanges() { +std::optional> AffineParallelOp::getConstantRanges() { if (hasMinMaxBounds()) return std::nullopt; @@ -3967,8 +3968,7 @@ if (parser.parseAttribute(attrVal, builder.getNoneType(), "reduce", attrStorage)) return failure(); - llvm::Optional reduction = - arith::symbolizeAtomicRMWKind(attrVal.getValue()); + auto reduction = arith::symbolizeAtomicRMWKind(attrVal.getValue()); if (!reduction) return parser.emitError(loc, "invalid reduction value: ") << attrVal; reductions.push_back( @@ -4213,7 +4213,7 @@ result.addOperands(linearIndex); SmallVector basisValues = llvm::to_vector(llvm::map_range(basis, [&](OpFoldResult ofr) -> Value { - Optional staticDim = getConstantIntValue(ofr); + auto staticDim = getConstantIntValue(ofr); if (staticDim.has_value()) return builder.create(result.location, *staticDim); diff --git a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp --- a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp +++ b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp @@ -222,10 +222,11 @@ // AddUICarryOp //===----------------------------------------------------------------------===// -Optional> arith::AddUICarryOp::getShapeForUnroll() { +std::optional> +arith::AddUICarryOp::getShapeForUnroll() { if (auto vt = getType(0).dyn_cast()) return llvm::to_vector<4>(vt.getShape()); - return None; + return std::nullopt; } // Returns the carry bit, assuming that `sum` is the result of addition of @@ -1502,7 +1503,7 @@ return DenseElementsAttr::get(shapedType, boolAttr); } -static Optional getIntegerWidth(Type t) { +static std::optional getIntegerWidth(Type t) { if (auto intType = t.dyn_cast()) { return intType.getWidth(); } @@ -1524,16 +1525,14 @@ if (matchPattern(getRhs(), m_Zero())) { if (auto extOp = getLhs().getDefiningOp()) { // extsi(%x : i1 -> iN) != 0 -> %x - Optional integerWidth = - getIntegerWidth(extOp.getOperand().getType()); + auto integerWidth = getIntegerWidth(extOp.getOperand().getType()); if (integerWidth && integerWidth.value() == 1 && getPredicate() == arith::CmpIPredicate::ne) return extOp.getOperand(); } if (auto extOp = getLhs().getDefiningOp()) { // extui(%x : i1 -> iN) != 0 -> %x - Optional integerWidth = - getIntegerWidth(extOp.getOperand().getType()); + auto integerWidth = getIntegerWidth(extOp.getOperand().getType()); if (integerWidth && integerWidth.value() == 1 && getPredicate() == arith::CmpIPredicate::ne) return extOp.getOperand(); diff --git a/mlir/lib/Dialect/Async/IR/Async.cpp b/mlir/lib/Dialect/Async/IR/Async.cpp --- a/mlir/lib/Dialect/Async/IR/Async.cpp +++ b/mlir/lib/Dialect/Async/IR/Async.cpp @@ -53,7 +53,7 @@ } MutableOperandRange -YieldOp::getMutableSuccessorOperands(Optional index) { +YieldOp::getMutableSuccessorOperands(std::optional index) { return operandsMutable(); } @@ -63,7 +63,8 @@ constexpr char kOperandSegmentSizesAttr[] = "operand_segment_sizes"; -OperandRange ExecuteOp::getSuccessorEntryOperands(Optional index) { +OperandRange +ExecuteOp::getSuccessorEntryOperands(std::optional index) { assert(index && *index == 0 && "invalid region index"); return getBodyOperands(); } @@ -77,7 +78,7 @@ return getValueOrTokenType(lhs) == getValueOrTokenType(rhs); } -void ExecuteOp::getSuccessorRegions(Optional index, +void ExecuteOp::getSuccessorRegions(std::optional index, ArrayRef, SmallVectorImpl ®ions) { // The `body` region branch back to the parent operation. 
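Every hunk in this patch applies the same mechanical substitution: `llvm::Optional<T>` becomes `std::optional<T>`, `llvm::None` becomes `std::nullopt`, and locals whose payload type is obvious from the initializer are shortened to `auto`. The sketch below is a minimal, self-contained illustration of that before/after shape; it is not code from this patch, and the `getConstantStep`-style helper in it is hypothetical.

```
// Minimal sketch of the migration pattern applied throughout this patch.
// The helper below is hypothetical; only the spelling of the optional
// wrapper and of its empty state changes, the payload type and the call
// sites' semantics stay the same.
#include <cstdint>
#include <optional>

// Before: llvm::Optional<int64_t> return type, with `return llvm::None;`
// on the failure path. After:
std::optional<int64_t> getConstantStep(int64_t step, bool isConstant) {
  if (!isConstant)
    return std::nullopt; // was: return llvm::None;
  return step;
}

int main() {
  // Call sites frequently switch to `auto`, as many hunks here do, since
  // spelling out std::optional<...> adds little information.
  if (auto step = getConstantStep(4, /*isConstant=*/true))
    return static_cast<int>(*step); // dereference and has_value() unchanged
  return 0;
}
```

The remaining hunks repeat this substitution file by file, reformatting signatures where the longer spelling no longer fits the line.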
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp @@ -348,7 +348,7 @@ LogicalResult matchAndRewrite(tensor::DimOp dimOp, PatternRewriter &rewriter) const override { - Optional maybeConstantIndex = dimOp.getConstantIndex(); + auto maybeConstantIndex = dimOp.getConstantIndex(); auto allocTensorOp = dimOp.getSource().getDefiningOp(); if (!allocTensorOp || !maybeConstantIndex) return failure(); diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp --- a/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp @@ -104,7 +104,7 @@ successorRegions); for (RegionSuccessor &successorRegion : successorRegions) { // Determine the current region index (if any). - Optional regionIndex; + std::optional regionIndex; Region *regionSuccessor = successorRegion.getSuccessor(); if (regionSuccessor) regionIndex = regionSuccessor->getRegionNumber(); diff --git a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp --- a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp @@ -239,7 +239,7 @@ } const FuncAnalysisState &funcState = getFuncAnalysisState(state); - Optional maybeEquiv = + auto maybeEquiv = getEquivalentFuncArgIdx(funcOp, funcState, opResult.getResultNumber()); if (maybeEquiv) { #ifndef NDEBUG diff --git a/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp b/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp --- a/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp +++ b/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp @@ -595,7 +595,7 @@ } Block *SwitchOp::getSuccessorForOperands(ArrayRef operands) { - Optional caseValues = getCaseValues(); + auto caseValues = getCaseValues(); if (!caseValues) return getDefaultDestination(); @@ -805,7 +805,7 @@ SuccessorRange predDests = predSwitch.getCaseDestinations(); auto it = llvm::find(predDests, currentBlock); if (it != predDests.end()) { - Optional predCaseValues = predSwitch.getCaseValues(); + auto predCaseValues = predSwitch.getCaseValues(); foldSwitch(op, rewriter, predCaseValues->getValues()[it - predDests.begin()]); } else { diff --git a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp --- a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp +++ b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp @@ -84,7 +84,7 @@ if (getCallee().empty()) return emitOpError("callee must not be empty"); - if (Optional argsAttr = getArgs()) { + if (auto argsAttr = getArgs()) { for (Attribute arg : *argsAttr) { auto intAttr = arg.dyn_cast(); if (intAttr && intAttr.getType().isa()) { @@ -102,7 +102,7 @@ } } - if (Optional templateArgsAttr = getTemplateArgs()) { + if (auto templateArgsAttr = getTemplateArgs()) { for (Attribute tArg : *templateArgsAttr) { if (!tArg.isa()) return emitOpError("template argument has invalid type"); diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp --- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp +++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp @@ -367,7 +367,7 @@ AllReduceOperationAttr &attr) { StringRef enumStr; if 
(!parser.parseOptionalKeyword(&enumStr)) { - Optional op = gpu::symbolizeAllReduceOperation(enumStr); + auto op = gpu::symbolizeAllReduceOperation(enumStr); if (!op) return parser.emitError(parser.getCurrentLocation(), "invalid op kind"); attr = AllReduceOperationAttr::get(parser.getContext(), *op); diff --git a/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp b/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp --- a/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp +++ b/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp @@ -37,8 +37,8 @@ /// Check if given mapping attributes are one of the desired attributes static DiagnosedSilenceableFailure checkAttributeType(ArrayRef threadMappingAttributes, - const Optional &foreachMapping, - Optional transformOp) { + const std::optional &foreachMapping, + std::optional transformOp) { if (!foreachMapping.has_value()) return transformOp->emitSilenceableError() << "mapping must be present"; @@ -63,11 +63,11 @@ /// Determines if the size of the kernel configuration is supported by the GPU /// architecture being used. It presently makes use of CUDA limitations, however /// that aspect may be enhanced for other GPUs. -static DiagnosedSilenceableFailure -checkGpuLimits(TransformOpInterface transformOp, Optional gridDimX, - Optional gridDimY, Optional gridDimZ, - Optional blockDimX, Optional blockDimY, - Optional blockDimZ) { +static DiagnosedSilenceableFailure checkGpuLimits( + TransformOpInterface transformOp, std::optional gridDimX, + std::optional gridDimY, std::optional gridDimZ, + std::optional blockDimX, std::optional blockDimY, + std::optional blockDimZ) { static constexpr int maxTotalBlockdim = 1024; static constexpr int maxBlockdimx = 1024; @@ -103,12 +103,12 @@ static DiagnosedSilenceableFailure createGpuLaunch(RewriterBase &rewriter, Location loc, TransformOpInterface transformOp, LaunchOp &launchOp, - Optional gridDimX = std::nullopt, - Optional gridDimY = std::nullopt, - Optional gridDimZ = std::nullopt, - Optional blockDimX = std::nullopt, - Optional blockDimY = std::nullopt, - Optional blockDimZ = std::nullopt) { + std::optional gridDimX = std::nullopt, + std::optional gridDimY = std::nullopt, + std::optional gridDimZ = std::nullopt, + std::optional blockDimX = std::nullopt, + std::optional blockDimY = std::nullopt, + std::optional blockDimZ = std::nullopt) { DiagnosedSilenceableFailure diag = checkGpuLimits(transformOp, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ); @@ -137,12 +137,12 @@ static DiagnosedSilenceableFailure alterGpuLaunch(SimpleRewriter &rewriter, LaunchOp gpuLaunch, TransformOpInterface transformOp, - Optional gridDimX = std::nullopt, - Optional gridDimY = std::nullopt, - Optional gridDimZ = std::nullopt, - Optional blockDimX = std::nullopt, - Optional blockDimY = std::nullopt, - Optional blockDimZ = std::nullopt) { + std::optional gridDimX = std::nullopt, + std::optional gridDimY = std::nullopt, + std::optional gridDimZ = std::nullopt, + std::optional blockDimX = std::nullopt, + std::optional blockDimY = std::nullopt, + std::optional blockDimZ = std::nullopt) { DiagnosedSilenceableFailure diag = checkGpuLimits(transformOp, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ); @@ -381,7 +381,7 @@ static DiagnosedSilenceableFailure rewriteOneForeachThreadToGpuThreads( RewriterBase &rewriter, scf::ForeachThreadOp foreachThreadOp, const SmallVectorImpl &globalBlockDims, bool syncAfterDistribute, - llvm::Optional transformOp, + std::optional transformOp, const ArrayRef 
&threadMappingAttributes) { // Step 0. Target-specific verifications. There is no good place to anchor // those right now: the ForeachThreadOp is target-independent and the @@ -513,7 +513,7 @@ DiagnosedSilenceableFailure mlir::transform::gpu::mapNestedForeachToThreadsImpl( RewriterBase &rewriter, Operation *target, const SmallVectorImpl &blockDim, bool syncAfterDistribute, - llvm::Optional transformOp, + std::optional transformOp, const ArrayRef &threadMappingAttributes) { DiagnosedSilenceableFailure diag = DiagnosedSilenceableFailure::success(); target->walk([&](scf::ForeachThreadOp foreachThreadOp) { @@ -546,7 +546,7 @@ blockDim.resize(/*size=*/3, /*value=*/1); DiagnosedSilenceableFailure diag = - checkGpuLimits(transformOp, std::nullopt, llvm::None, llvm::None, + checkGpuLimits(transformOp, std::nullopt, std::nullopt, std::nullopt, blockDim[0], blockDim[1], blockDim[2]); if (diag.isSilenceableFailure()) { results.assign({target}); @@ -568,9 +568,9 @@ threadMappingAttributes); if (diag.succeeded()) { - diag = - alterGpuLaunch(rewriter, gpuLaunch, transformOp, std::nullopt, llvm::None, - std::nullopt, blockDim[0], blockDim[1], blockDim[2]); + diag = alterGpuLaunch(rewriter, gpuLaunch, transformOp, std::nullopt, + std::nullopt, std::nullopt, blockDim[0], blockDim[1], + blockDim[2]); } results.assign({gpuLaunch}); diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -146,16 +146,14 @@ // Replace the string attribute `predicate` with an integer attribute. int64_t predicateValue = 0; if (std::is_same()) { - Optional predicate = - symbolizeICmpPredicate(predicateAttr.getValue()); + auto predicate = symbolizeICmpPredicate(predicateAttr.getValue()); if (!predicate) return parser.emitError(predicateLoc) << "'" << predicateAttr.getValue() << "' is an incorrect value of the 'predicate' attribute"; predicateValue = static_cast(*predicate); } else { - Optional predicate = - symbolizeFCmpPredicate(predicateAttr.getValue()); + auto predicate = symbolizeFCmpPredicate(predicateAttr.getValue()); if (!predicate) return parser.emitError(predicateLoc) << "'" << predicateAttr.getValue() @@ -216,8 +214,7 @@ parser.getCurrentLocation(&trailingTypeLoc) || parser.parseType(type)) return failure(); - Optional alignmentAttr = - result.attributes.getNamed("alignment"); + auto alignmentAttr = result.attributes.getNamed("alignment"); if (alignmentAttr.has_value()) { auto alignmentInt = alignmentAttr.value().getValue().dyn_cast(); @@ -252,7 +249,7 @@ /// Checks that the elemental type is present in either the pointer type or /// the attribute, but not both. 
static LogicalResult verifyOpaquePtr(Operation *op, LLVMPointerType ptrType, - Optional ptrElementType) { + std::optional ptrElementType) { if (ptrType.isOpaque() && !ptrElementType.has_value()) { return op->emitOpError() << "expected '" << kElemTypeAttrName << "' attribute if opaque pointer type is used"; @@ -660,7 +657,7 @@ } Type LLVM::GEPOp::getSourceElementType() { - if (Optional elemType = getElemType()) + if (auto elemType = getElemType()) return *elemType; return extractVectorElementType(getBase().getType()) @@ -797,8 +794,7 @@ parser.resolveOperand(addr, type, result.operands)) return failure(); - Optional elemTy = - getLoadStoreElementType(parser, type, trailingTypeLoc); + auto elemTy = getLoadStoreElementType(parser, type, trailingTypeLoc); if (!elemTy) return failure(); if (*elemTy) { @@ -866,7 +862,7 @@ if (parser.parseType(type)) return failure(); } else { - Optional maybeOperandType = + auto maybeOperandType = getLoadStoreElementType(parser, type, trailingTypeLoc); if (!maybeOperandType) return failure(); @@ -1848,7 +1844,7 @@ } } - Optional alignAttr = getAlignment(); + auto alignAttr = getAlignment(); if (alignAttr.has_value()) { uint64_t value = alignAttr.value(); if (!llvm::isPowerOf2_64(value)) diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp --- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp @@ -77,8 +77,8 @@ // Given the element type of an operand and whether or not it is an accumulator, // this function returns the PTX type (`NVVM::MMATypes`) that corresponds to the // operand's element type. -Optional MmaOp::inferOperandMMAType(Type operandElType, - bool isAccumulator) { +std::optional +MmaOp::inferOperandMMAType(Type operandElType, bool isAccumulator) { auto half2Type = LLVM::getFixedVectorType(Float16Type::get(operandElType.getContext()), 2); if (operandElType.isF64()) @@ -118,14 +118,14 @@ } MMATypes MmaOp::accumPtxType() { - Optional val = inferOperandMMAType( + std::optional val = inferOperandMMAType( getODSOperands(2).getTypes().front(), /*isAccum=*/true); assert(val.has_value() && "accumulator PTX type should always be inferrable"); return val.value(); } MMATypes MmaOp::resultPtxType() { - Optional val = + std::optional val = inferOperandMMAType(getResult().getType(), /*isAccum=*/true); assert(val.has_value() && "result PTX type should always be inferrable"); return val.value(); @@ -159,7 +159,7 @@ regTypes.push_back(this->getOperand(operandIdx).getType()); } } - Optional inferredType = + std::optional inferredType = inferOperandMMAType(regTypes.back(), /*isAccum=*/fragIdx >= 2); if (inferredType) ignoreAttrNames.push_back(frag.ptxTypeAttr); @@ -191,10 +191,10 @@ void MmaOp::build(OpBuilder &builder, OperationState &result, Type resultType, ValueRange operandA, ValueRange operandB, ValueRange operandC, - ArrayRef shape, Optional b1Op, - Optional intOverflow, - Optional> multiplicandPtxTypes, - Optional> multiplicandLayouts) { + ArrayRef shape, std::optional b1Op, + std::optional intOverflow, + std::optional> multiplicandPtxTypes, + std::optional> multiplicandLayouts) { assert(shape.size() == 3 && "expected shape to have size 3 (m, n, k)"); MLIRContext *ctx = builder.getContext(); @@ -247,7 +247,7 @@ // `->` type($res) ParseResult MmaOp::parse(OpAsmParser &parser, OperationState &result) { struct OperandFragment { - Optional elemtype; + std::optional elemtype; SmallVector regs; SmallVector regTypes; }; @@ -313,7 +313,7 @@ "multiplicandBPtxType"}; for (unsigned 
idx = 0; idx < names.size(); idx++) { const auto &frag = frags[idx]; - Optional attr = namedAttributes.getNamed(names[idx]); + auto attr = namedAttributes.getNamed(names[idx]); if (!frag.elemtype.has_value() && !attr.has_value()) { return parser.emitError( parser.getNameLoc(), diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp --- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp +++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp @@ -1062,8 +1062,7 @@ } LogicalResult SplitOp::verify() { - if ((static_cast(getStaticSplitPoint()) != - ShapedType::kDynamic) ^ + if ((static_cast(getStaticSplitPoint()) != ShapedType::kDynamic) ^ (getDynamicSplitPoint() == nullptr)) { return emitOpError() << "expects either a dynamic or a static split " "point to be provided"; @@ -1480,7 +1479,7 @@ RewriterBase &rewriter, transform::TransformState &state, TransformOpInterface transformOp, ArrayRef targets, ArrayRef mixedNumThreads, - ArrayRef mixedTileSizes, Optional mapping, + ArrayRef mixedTileSizes, std::optional mapping, SmallVector &tileOps, SmallVector &tiledOps) { if (targets.empty()) return DiagnosedSilenceableFailure(success()); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp @@ -44,7 +44,7 @@ static Value allocBuffer(ImplicitLocOpBuilder &b, const LinalgPromotionOptions &options, Type elementType, Value allocSize, DataLayout &layout, - Optional alignment = None) { + std::optional alignment = std::nullopt) { auto width = layout.getTypeSize(elementType); IntegerAttr alignmentAttr; @@ -77,11 +77,10 @@ /// no call back to do so is provided. The default is to allocate a /// memref<..xi8> and return a view to get a memref type of shape /// boundingSubViewSize. -static Optional -defaultAllocBufferCallBack(const LinalgPromotionOptions &options, - OpBuilder &builder, memref::SubViewOp subView, - ArrayRef boundingSubViewSize, - Optional alignment, DataLayout &layout) { +static std::optional defaultAllocBufferCallBack( + const LinalgPromotionOptions &options, OpBuilder &builder, + memref::SubViewOp subView, ArrayRef boundingSubViewSize, + std::optional alignment, DataLayout &layout) { ShapedType viewType = subView.getType(); ImplicitLocOpBuilder b(subView.getLoc(), builder); auto zero = b.createOrFold(0); @@ -136,7 +135,7 @@ CopyCallbackFn copyOutFn; /// Alignment of promoted buffer. - Optional alignment; + std::optional alignment; }; } // namespace @@ -166,7 +165,7 @@ } else { allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp, ArrayRef boundingSubViewSize, - DataLayout &layout) -> Optional { + DataLayout &layout) -> std::optional { return defaultAllocBufferCallBack(options, b, subViewOp, boundingSubViewSize, alignment, layout); }; @@ -246,7 +245,7 @@ SmallVector dynSizes(fullSizes.size(), ShapedType::kDynamic); // If a callback is not specified, then use the default implementation for // allocating the promoted buffer. 
- Optional fullLocalView = allocationFn(b, subView, fullSizes, layout); + auto fullLocalView = allocationFn(b, subView, fullSizes, layout); if (!fullLocalView) return failure(); SmallVector zeros(fullSizes.size(), b.getIndexAttr(0)); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp @@ -187,9 +187,9 @@ static bool canOmitTileOffsetInBoundsCheck(OpFoldResult tileSize, OpFoldResult numThreads, OpFoldResult iterationSize) { - Optional tileSizeConst = getConstantIntValue(tileSize); - Optional numThreadsConst = getConstantIntValue(numThreads); - Optional iterSizeConst = getConstantIntValue(iterationSize); + auto tileSizeConst = getConstantIntValue(tileSize); + auto numThreadsConst = getConstantIntValue(numThreads); + auto iterSizeConst = getConstantIntValue(iterationSize); if (!tileSizeConst || !numThreadsConst || !iterSizeConst) return false; return *tileSizeConst * (*numThreadsConst - 1) < *iterSizeConst; @@ -217,7 +217,7 @@ RewriterBase &b, Location loc, scf::ForeachThreadOp foreachThreadOp, ArrayRef numThreads, SmallVector loopRanges, bool omitTileOffsetBoundsCheck, - Optional> nominalTileSizes, + std::optional> nominalTileSizes, SmallVector &tiledOffsets, SmallVector &tiledSizes) { ValueRange threadIds = foreachThreadOp.getThreadIndices(); @@ -295,8 +295,8 @@ /// assume that `tileSize[i] * (numThread[i] -1) <= dimSize[i]` holds. static FailureOr tileToForeachThreadOpImpl( RewriterBase &b, TilingInterface op, ArrayRef numThreads, - Optional> nominalTileSizes, - Optional mapping, bool omitTileOffsetBoundsCheck) { + std::optional> nominalTileSizes, + std::optional mapping, bool omitTileOffsetBoundsCheck) { Location loc = op->getLoc(); OpBuilder::InsertionGuard g(b); SmallVector loopRanges = op.getIterationDomain(b); @@ -381,7 +381,7 @@ FailureOr linalg::tileToForeachThreadOp(RewriterBase &b, TilingInterface op, ArrayRef numThreads, - Optional mapping) { + std::optional mapping) { return tileToForeachThreadOpImpl(b, op, numThreads, /*nominalTileSizes=*/None, mapping, /*omitTileOffsetBoundsCheck=*/false); @@ -390,7 +390,7 @@ FailureOr linalg::tileToForeachThreadOpUsingTileSizes(RewriterBase &b, TilingInterface op, ArrayRef tileSizes, - Optional mapping) { + std::optional mapping) { SmallVector loopRanges = op.getIterationDomain(b); unsigned nLoops = loopRanges.size(); SmallVector numThreads; @@ -414,7 +414,7 @@ linalg::tileReductionUsingForeachThread(RewriterBase &b, PartialReductionOpInterface op, ArrayRef numThreads, - Optional mapping) { + std::optional mapping) { Location loc = op.getLoc(); OpBuilder::InsertionGuard g(b); // Ops implementing PartialReductionOpInterface are expected to implement diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -118,14 +118,13 @@ Operation *newOp; }; -llvm::Optional +std::optional mlir::linalg::getCombinerOpKind(Operation *combinerOp) { using ::mlir::vector::CombiningKind; if (!combinerOp) return std::nullopt; - return llvm::TypeSwitch>( - combinerOp) + return llvm::TypeSwitch>(combinerOp) .Case( [&](auto op) { return CombiningKind::ADD; }) .Case([&](auto op) { return CombiningKind::AND; }) @@ -1446,7 +1445,7 @@ Operation *reduceOp = matchLinalgReduction(linalgOp.getDpsInitOperand(0)); if (!reduceOp) return; - 
llvm::Optional maybeKind; + std::optional maybeKind; maybeKind = getCombinerOpKind(reduceOp); if (!maybeKind || *maybeKind != vector::CombiningKind::ADD) return; diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp --- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp +++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp @@ -814,7 +814,7 @@ // b. The subshape size is 1. According to the way the loops are set up, // tensors with "0" dimensions would never be constructed. int64_t shapeSize = shape[r]; - Optional sizeCst = getConstantIntValue(size); + auto sizeCst = getConstantIntValue(size); auto hasTileSizeOne = sizeCst && *sizeCst == 1; auto dividesEvenly = sizeCst && !ShapedType::isDynamic(shapeSize) && ((shapeSize % *sizeCst) == 0); diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -31,23 +31,17 @@ namespace saturated_arith { struct Wrapper { static Wrapper stride(int64_t v) { - return (ShapedType::isDynamic(v)) ? Wrapper{true, 0} - : Wrapper{false, v}; + return (ShapedType::isDynamic(v)) ? Wrapper{true, 0} : Wrapper{false, v}; } static Wrapper offset(int64_t v) { - return (ShapedType::isDynamic(v)) ? Wrapper{true, 0} - : Wrapper{false, v}; + return (ShapedType::isDynamic(v)) ? Wrapper{true, 0} : Wrapper{false, v}; } static Wrapper size(int64_t v) { return (ShapedType::isDynamic(v)) ? Wrapper{true, 0} : Wrapper{false, v}; } - int64_t asOffset() { - return saturated ? ShapedType::kDynamic : v; - } + int64_t asOffset() { return saturated ? ShapedType::kDynamic : v; } int64_t asSize() { return saturated ? ShapedType::kDynamic : v; } - int64_t asStride() { - return saturated ? ShapedType::kDynamic : v; - } + int64_t asStride() { return saturated ? ShapedType::kDynamic : v; } bool operator==(Wrapper other) { return (saturated && other.saturated) || (!saturated && !other.saturated && v == other.v); @@ -169,7 +163,7 @@ ofr.get().cast().getInt()); continue; } - Optional maybeConstant = getConstantIntValue(ofr.get()); + auto maybeConstant = getConstantIntValue(ofr.get()); if (maybeConstant) ofr = builder.getIndexAttr(*maybeConstant); } @@ -443,7 +437,7 @@ } void AllocaScopeOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { if (index) { regions.push_back(RegionSuccessor(getResults())); @@ -713,8 +707,7 @@ for (auto it : llvm::zip(sourceStrides, resultStrides)) { auto ss = std::get<0>(it), st = std::get<1>(it); if (ss != st) - if (ShapedType::isDynamic(ss) && - !ShapedType::isDynamic(st)) + if (ShapedType::isDynamic(ss) && !ShapedType::isDynamic(st)) return false; } @@ -747,8 +740,7 @@ // same. They are also compatible if either one is dynamic (see // description of MemRefCastOp for details). auto checkCompatible = [](int64_t a, int64_t b) { - return (ShapedType::isDynamic(a) || - ShapedType::isDynamic(b) || a == b); + return (ShapedType::isDynamic(a) || ShapedType::isDynamic(b) || a == b); }; if (!checkCompatible(aOffset, bOffset)) return false; @@ -907,7 +899,7 @@ build(builder, result, source, indexValue); } -Optional DimOp::getConstantIndex() { +std::optional DimOp::getConstantIndex() { return getConstantIntValue(getIndex()); } @@ -927,7 +919,7 @@ LogicalResult DimOp::verify() { // Assume unknown index to be in range. 
- Optional index = getConstantIndex(); + auto index = getConstantIndex(); if (!index) return success(); @@ -962,7 +954,7 @@ /// This accounts for cases where there are multiple unit-dims, but only a /// subset of those are dropped. For MemRefTypes these can be disambiguated /// using the strides. If a dimension is dropped the stride must be dropped too. -static llvm::Optional +static std::optional computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType, ArrayRef sizes) { llvm::SmallBitVector unusedDims(originalType.getRank()); @@ -1034,7 +1026,7 @@ llvm::SmallBitVector SubViewOp::getDroppedDims() { MemRefType sourceType = getSourceType(); MemRefType resultType = getType(); - llvm::Optional unusedDims = + auto unusedDims = computeMemRefRankReductionMask(sourceType, resultType, getMixedSizes()); assert(unusedDims && "unable to find unused dims of subview"); return *unusedDims; @@ -1349,7 +1341,7 @@ /// The number and type of the results are inferred from the /// shape of the source. LogicalResult ExtractStridedMetadataOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { ExtractStridedMetadataOpAdaptor extractAdaptor(operands, attributes, regions); @@ -1610,7 +1602,7 @@ } } - if (Optional alignAttr = getAlignment()) { + if (auto alignAttr = getAlignment()) { uint64_t alignment = *alignAttr; if (!llvm::isPowerOf2_64(alignment)) @@ -1847,8 +1839,7 @@ // Match offset in result memref type and in static_offsets attribute. int64_t expectedOffset = getStaticOffsets().front(); if (!ShapedType::isDynamic(resultOffset) && - !ShapedType::isDynamic(expectedOffset) && - resultOffset != expectedOffset) + !ShapedType::isDynamic(expectedOffset) && resultOffset != expectedOffset) return emitError("expected result type with offset = ") << resultOffset << " instead of " << expectedOffset; @@ -2605,7 +2596,7 @@ return inferredType; // Compute which dimensions are dropped. - Optional> dimsToProject = + auto dimsToProject = computeRankReductionMask(inferredType.getShape(), resultShape); assert(dimsToProject.has_value() && "invalid rank reduction"); @@ -2888,7 +2879,7 @@ auto nonRankReducedType = SubViewOp::inferResultType(sourceType, mixedOffsets, mixedSizes, mixedStrides) .cast(); - llvm::Optional unusedDims = + std::optional unusedDims = computeMemRefRankReductionMask(currentSourceType, currentResultType, mixedSizes); // Return nullptr as failure mode. @@ -2942,14 +2933,14 @@ // Check offsets are zero. if (llvm::any_of(mixedOffsets, [](OpFoldResult ofr) { - Optional intValue = getConstantIntValue(ofr); + auto intValue = getConstantIntValue(ofr); return !intValue || intValue.value() != 0; })) return false; // Check strides are one. if (llvm::any_of(mixedStrides, [](OpFoldResult ofr) { - Optional intValue = getConstantIntValue(ofr); + auto intValue = getConstantIntValue(ofr); return !intValue || intValue.value() != 1; })) return false; @@ -2957,7 +2948,7 @@ // Check all size values are static and matches the (static) source shape. 
ArrayRef sourceShape = subViewOp.getSourceType().getShape(); for (const auto &size : llvm::enumerate(mixedSizes)) { - Optional intValue = getConstantIntValue(size.value()); + auto intValue = getConstantIntValue(size.value()); if (!intValue || *intValue != sourceShape[size.index()]) return false; } diff --git a/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp b/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp --- a/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp @@ -98,9 +98,9 @@ } if (!candidateLoop) return failure(); - llvm::Optional inductionVar = candidateLoop.getSingleInductionVar(); - llvm::Optional lowerBound = candidateLoop.getSingleLowerBound(); - llvm::Optional singleStep = candidateLoop.getSingleStep(); + auto inductionVar = candidateLoop.getSingleInductionVar(); + auto lowerBound = candidateLoop.getSingleLowerBound(); + auto singleStep = candidateLoop.getSingleStep(); if (!inductionVar || !lowerBound || !singleStep) return failure(); @@ -125,13 +125,12 @@ AffineExpr induc = getAffineDimExpr(0, allocOp.getContext()); unsigned dimCount = 1; auto getAffineExpr = [&](OpFoldResult e) -> AffineExpr { - if (Optional constValue = getConstantIntValue(e)) { + if (auto constValue = getConstantIntValue(e)) { return getAffineConstantExpr(*constValue, allocOp.getContext()); } auto value = getOrCreateValue(e, builder, candidateLoop->getLoc()); operands.push_back(value); return getAffineDimExpr(dimCount++, allocOp.getContext()); - }; auto init = getAffineExpr(*lowerBound); auto step = getAffineExpr(*singleStep); diff --git a/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp b/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp --- a/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp @@ -46,7 +46,7 @@ if (!shapedTypeOp) return failure(); - Optional dimIndex = dimOp.getConstantIndex(); + auto dimIndex = dimOp.getConstantIndex(); if (!dimIndex) return failure(); @@ -88,7 +88,7 @@ if (!rankedShapeTypeOp) return failure(); - Optional dimIndex = dimOp.getConstantIndex(); + auto dimIndex = dimOp.getConstantIndex(); if (!dimIndex) return failure(); diff --git a/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp b/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp --- a/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp +++ b/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp @@ -149,7 +149,7 @@ MemoryEffectOpInterface iface = dyn_cast(op); if (!iface) return; - Optional effect = + std::optional effect = iface.getEffectOnValue(shmMemRef); if (effect) { readOps.push_back(op); diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -117,7 +117,7 @@ SMLoc loc = parser.getCurrentLocation(); if (parser.parseKeyword(&enumStr)) return failure(); - if (Optional enumValue = symbolizeEnum(enumStr)) { + if (auto enumValue = symbolizeEnum(enumStr)) { attr = ClauseAttr::get(parser.getContext(), *enumValue); return success(); } @@ -173,9 +173,9 @@ //===----------------------------------------------------------------------===// // Parser, verifier and printer for Aligned Clause //===----------------------------------------------------------------------===// -static LogicalResult verifyAlignedClause(Operation *op, - Optional alignmentValues, - 
OperandRange alignedVariables) { +static LogicalResult +verifyAlignedClause(Operation *op, std::optional alignmentValues, + OperandRange alignedVariables) { // Check if number of alignment values equals to number of aligned variables if (!alignedVariables.empty()) { if (!alignmentValues || alignmentValues->size() != alignedVariables.size()) @@ -236,7 +236,7 @@ static void printAlignedClause(OpAsmPrinter &p, Operation *op, ValueRange alignedVars, TypeRange alignedVarTypes, - Optional alignmentValues) { + std::optional alignmentValues) { for (unsigned i = 0; i < alignedVars.size(); ++i) { if (i != 0) p << ", "; @@ -293,12 +293,11 @@ static ParseResult parseScheduleClause( OpAsmParser &parser, ClauseScheduleKindAttr &scheduleAttr, ScheduleModifierAttr &scheduleModifier, UnitAttr &simdModifier, - Optional &chunkSize, Type &chunkType) { + std::optional &chunkSize, Type &chunkType) { StringRef keyword; if (parser.parseKeyword(&keyword)) return failure(); - llvm::Optional schedule = - symbolizeClauseScheduleKind(keyword); + auto schedule = symbolizeClauseScheduleKind(keyword); if (!schedule) return parser.emitError(parser.getNameLoc()) << " expected schedule kind"; @@ -334,8 +333,7 @@ if (!modifiers.empty()) { SMLoc loc = parser.getCurrentLocation(); - if (Optional mod = - symbolizeScheduleModifier(modifiers[0])) { + if (auto mod = symbolizeScheduleModifier(modifiers[0])) { scheduleModifier = ScheduleModifierAttr::get(parser.getContext(), *mod); } else { return parser.emitError(loc, "invalid schedule modifier"); @@ -396,7 +394,7 @@ static void printReductionVarList(OpAsmPrinter &p, Operation *op, OperandRange reductionVars, TypeRange reductionTypes, - Optional reductions) { + std::optional reductions) { for (unsigned i = 0, e = reductions->size(); i < e; ++i) { if (i != 0) p << ", "; @@ -407,7 +405,7 @@ /// Verifies Reduction Clause static LogicalResult verifyReductionVarList(Operation *op, - Optional reductions, + std::optional reductions, OperandRange reductionVars) { if (!reductionVars.empty()) { if (!reductions || reductions->size() != reductionVars.size()) diff --git a/mlir/lib/Dialect/PDL/IR/PDL.cpp b/mlir/lib/Dialect/PDL/IR/PDL.cpp --- a/mlir/lib/Dialect/PDL/IR/PDL.cpp +++ b/mlir/lib/Dialect/PDL/IR/PDL.cpp @@ -112,7 +112,7 @@ LogicalResult AttributeOp::verify() { Value attrType = getValueType(); - Optional attrValue = getValue(); + auto attrValue = getValue(); if (!attrValue) { if (isa((*this)->getParentOp())) @@ -203,11 +203,10 @@ if (resultTypes.empty()) { // If we don't know the concrete operation, don't attempt any verification. // We can't make assumptions if we don't know the concrete operation. 
- Optional rawOpName = op.getOpName(); + auto rawOpName = op.getOpName(); if (!rawOpName) return success(); - Optional opName = - RegisteredOperationName::lookup(*rawOpName, op.getContext()); + auto opName = RegisteredOperationName::lookup(*rawOpName, op.getContext()); if (!opName) return success(); @@ -290,7 +289,7 @@ } bool OperationOp::hasTypeInference() { - if (Optional rawOpName = getOpName()) { + if (auto rawOpName = getOpName()) { OperationName opName(*rawOpName, getContext()); return opName.hasInterface(); } @@ -298,7 +297,7 @@ } bool OperationOp::mightHaveTypeInference() { - if (Optional rawOpName = getOpName()) { + if (auto rawOpName = getOpName()) { OperationName opName(*rawOpName, getContext()); return opName.mightHaveInterface(); } diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp --- a/mlir/lib/Dialect/SCF/IR/SCF.cpp +++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp @@ -248,7 +248,7 @@ /// correspond to a constant value for each operand, or null if that operand is /// not a constant. void ExecuteRegionOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // If the predecessor is the ExecuteRegionOp, branch into the body. if (!index) { @@ -265,7 +265,7 @@ //===----------------------------------------------------------------------===// MutableOperandRange -ConditionOp::getMutableSuccessorOperands(Optional index) { +ConditionOp::getMutableSuccessorOperands(std::optional index) { // Pass all operands except the condition to the successor region. return getArgsMutable(); } @@ -352,17 +352,19 @@ return success(); } -Optional ForOp::getSingleInductionVar() { return getInductionVar(); } +std::optional ForOp::getSingleInductionVar() { + return getInductionVar(); +} -Optional ForOp::getSingleLowerBound() { +std::optional ForOp::getSingleLowerBound() { return OpFoldResult(getLowerBound()); } -Optional ForOp::getSingleStep() { +std::optional ForOp::getSingleStep() { return OpFoldResult(getStep()); } -Optional ForOp::getSingleUpperBound() { +std::optional ForOp::getSingleUpperBound() { return OpFoldResult(getUpperBound()); } @@ -476,7 +478,7 @@ /// correspond to the loop iterator operands, i.e., those excluding the /// induction variable. LoopOp only has one region, so 0 is the only valid value /// for `index`. -OperandRange ForOp::getSuccessorEntryOperands(Optional index) { +OperandRange ForOp::getSuccessorEntryOperands(std::optional index) { assert(index && *index == 0 && "invalid region index"); // The initial operands map to the loop arguments after the induction @@ -489,7 +491,7 @@ /// during the flow of control. `operands` is a set of optional attributes that /// correspond to a constant value for each operand, or null if that operand is /// not a constant. -void ForOp::getSuccessorRegions(Optional index, +void ForOp::getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // If the predecessor is the ForOp, branch into the body using the iterator @@ -721,7 +723,7 @@ /// Util function that tries to compute a constant diff between u and l. /// Returns std::nullopt when the difference between two AffineValueMap is /// dynamic. 
-static Optional computeConstDiff(Value l, Value u) { +static std::optional computeConstDiff(Value l, Value u) { IntegerAttr clb, cub; if (matchPattern(l, m_Constant(&clb)) && matchPattern(u, m_Constant(&cub))) { llvm::APInt lbValue = clb.getValue(); @@ -754,8 +756,7 @@ return success(); } - Optional diff = - computeConstDiff(op.getLowerBound(), op.getUpperBound()); + auto diff = computeConstDiff(op.getLowerBound(), op.getUpperBound()); if (!diff) return failure(); @@ -765,7 +766,7 @@ return success(); } - llvm::Optional maybeStepValue = op.getConstantStep(); + auto maybeStepValue = op.getConstantStep(); if (!maybeStepValue) return failure(); @@ -1068,7 +1069,7 @@ LastTensorLoadCanonicalization, ForOpTensorCastFolder>(context); } -Optional ForOp::getConstantStep() { +std::optional ForOp::getConstantStep() { IntegerAttr step; if (matchPattern(getStep(), m_Constant(&step))) return step.getValue(); @@ -1212,7 +1213,7 @@ void ForeachThreadOp::build(mlir::OpBuilder &builder, mlir::OperationState &result, ValueRange outputs, ValueRange numThreads, - Optional mapping) { + std::optional mapping) { result.addOperands(numThreads); result.addOperands(outputs); if (mapping.has_value()) { @@ -1565,7 +1566,7 @@ /// during the flow of control. `operands` is a set of optional attributes that /// correspond to a constant value for each operand, or null if that operand is /// not a constant. -void IfOp::getSuccessorRegions(Optional index, +void IfOp::getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // The `then` and the `else` region branch back to the parent operation. @@ -2722,7 +2723,7 @@ afterBuilder(odsBuilder, odsState.location, afterBlock->getArguments()); } -OperandRange WhileOp::getSuccessorEntryOperands(Optional index) { +OperandRange WhileOp::getSuccessorEntryOperands(std::optional index) { assert(index && *index == 0 && "WhileOp is expected to branch only to the first region"); @@ -2745,7 +2746,7 @@ return getAfter().front().getArguments(); } -void WhileOp::getSuccessorRegions(Optional index, +void WhileOp::getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // The parent op always branches to the condition region. @@ -3520,7 +3521,7 @@ } void IndexSwitchOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl &successors) { // All regions branch back to the parent op. if (index) { diff --git a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp --- a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp @@ -453,8 +453,8 @@ /// Return `true` if the given loop may have 0 iterations. 
bool mayHaveZeroIterations(scf::ForOp forOp) { - Optional lb = getConstantIntValue(forOp.getLowerBound()); - Optional ub = getConstantIntValue(forOp.getUpperBound()); + auto lb = getConstantIntValue(forOp.getLowerBound()); + auto ub = getConstantIntValue(forOp.getUpperBound()); if (!lb.has_value() || !ub.has_value()) return true; return *ub <= *lb; @@ -1055,7 +1055,7 @@ bool mayHaveZeroIterations(scf::ForeachThreadOp foreachThreadOp) { int64_t p = 1; for (Value v : foreachThreadOp.getNumThreads()) { - if (Optional c = getConstantIntValue(v)) { + if (auto c = getConstantIntValue(v)) { p *= *c; } else { return true; diff --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp --- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp @@ -93,13 +93,13 @@ // Check if `stride` evenly divides the trip count `size - offset`. static bool tileDividesIterationDomain(Range loopRange) { - Optional offsetAsInt = getConstantIntValue(loopRange.offset); + auto offsetAsInt = getConstantIntValue(loopRange.offset); if (!offsetAsInt) return false; - Optional sizeAsInt = getConstantIntValue(loopRange.size); + auto sizeAsInt = getConstantIntValue(loopRange.size); if (!sizeAsInt) return false; - Optional strideAsInt = getConstantIntValue(loopRange.stride); + auto strideAsInt = getConstantIntValue(loopRange.stride); if (!strideAsInt) return false; return ((sizeAsInt.value() - offsetAsInt.value()) % strideAsInt.value() == 0); @@ -110,7 +110,7 @@ static OpFoldResult getBoundedTileSize(OpBuilder &b, Location loc, Range loopRange, Value iv, Value tileSize) { - Optional ts = getConstantIntValue(tileSize); + auto ts = getConstantIntValue(tileSize); if (ts && ts.value() == 1) return getAsOpFoldResult(tileSize); @@ -513,10 +513,10 @@ /// `iter_args` of the outer most that is encountered. Traversing the iter_args /// indicates that this is a destination operand of the consumer. If there was /// no loop traversal needed, the second value of the returned tuple is empty. -static std::tuple> +static std::tuple> getUntiledProducerFromSliceSource(OpOperand *source, ArrayRef loops) { - Optional destinationIterArg; + std::optional destinationIterArg; auto loopIt = loops.rbegin(); while (auto iterArg = source->get().dyn_cast()) { scf::ForOp loop = *loopIt; @@ -662,7 +662,7 @@ // TODO: This can be modeled better if the `DestinationStyleOpInterface`. // Update to use that when it does become available. scf::ForOp outerMostLoop = tileAndFuseResult.loops.front(); - Optional iterArgNumber; + std::optional iterArgNumber; if (destinationIterArg) { iterArgNumber = outerMostLoop.getIterArgNumberForOpOperand( *destinationIterArg.value()); diff --git a/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp b/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp --- a/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp +++ b/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp @@ -218,8 +218,8 @@ : constraints.appendSymbolVar(/*num=*/1); // If loop lower/upper bounds are constant: Add EQ constraint. 
- Optional lbInt = getConstantIntValue(lb); - Optional ubInt = getConstantIntValue(ub); + auto lbInt = getConstantIntValue(lb); + auto ubInt = getConstantIntValue(ub); if (lbInt) constraints.addBound(IntegerPolyhedron::EQ, symLb, *lbInt); if (ubInt) diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp @@ -142,15 +142,15 @@ // Forward declarations. template -static Optional parseAndVerify(SPIRVDialect const &dialect, - DialectAsmParser &parser); +static std::optional parseAndVerify(SPIRVDialect const &dialect, + DialectAsmParser &parser); template <> -Optional parseAndVerify(SPIRVDialect const &dialect, - DialectAsmParser &parser); +std::optional parseAndVerify(SPIRVDialect const &dialect, + DialectAsmParser &parser); template <> -Optional parseAndVerify(SPIRVDialect const &dialect, - DialectAsmParser &parser); +std::optional parseAndVerify(SPIRVDialect const &dialect, + DialectAsmParser &parser); static Type parseAndVerifyType(SPIRVDialect const &dialect, DialectAsmParser &parser) { @@ -264,7 +264,7 @@ return failure(); SMLoc strideLoc = parser.getCurrentLocation(); - Optional optStride = parseAndVerify(dialect, parser); + auto optStride = parseAndVerify(dialect, parser); if (!optStride) return failure(); @@ -474,8 +474,8 @@ // Specialize this function to parse each of the parameters that define an // ImageType. By default it assumes this is an enum type. template -static Optional parseAndVerify(SPIRVDialect const &dialect, - DialectAsmParser &parser) { +static std::optional parseAndVerify(SPIRVDialect const &dialect, + DialectAsmParser &parser) { StringRef enumSpec; SMLoc enumLoc = parser.getCurrentLocation(); if (parser.parseKeyword(&enumSpec)) { @@ -489,8 +489,8 @@ } template <> -Optional parseAndVerify(SPIRVDialect const &dialect, - DialectAsmParser &parser) { +std::optional parseAndVerify(SPIRVDialect const &dialect, + DialectAsmParser &parser) { // TODO: Further verify that the element type can be sampled auto ty = parseAndVerifyType(dialect, parser); if (!ty) @@ -499,8 +499,8 @@ } template -static Optional parseAndVerifyInteger(SPIRVDialect const &dialect, - DialectAsmParser &parser) { +static std::optional parseAndVerifyInteger(SPIRVDialect const &dialect, + DialectAsmParser &parser) { IntTy offsetVal = std::numeric_limits::max(); if (parser.parseInteger(offsetVal)) return std::nullopt; @@ -508,8 +508,8 @@ } template <> -Optional parseAndVerify(SPIRVDialect const &dialect, - DialectAsmParser &parser) { +std::optional parseAndVerify(SPIRVDialect const &dialect, + DialectAsmParser &parser) { return parseAndVerifyInteger(dialect, parser); } @@ -520,7 +520,7 @@ // (termination condition) needs partial specialization. template struct ParseCommaSeparatedList { - Optional> + std::optional> operator()(SPIRVDialect const &dialect, DialectAsmParser &parser) const { auto parseVal = parseAndVerify(dialect, parser); if (!parseVal) @@ -541,8 +541,8 @@ // specs to parse the last element of the list. 
template struct ParseCommaSeparatedList { - Optional> operator()(SPIRVDialect const &dialect, - DialectAsmParser &parser) const { + std::optional> + operator()(SPIRVDialect const &dialect, DialectAsmParser &parser) const { if (auto value = parseAndVerify(dialect, parser)) return std::tuple(*value); return std::nullopt; diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp @@ -311,8 +311,8 @@ static void printMemoryAccessAttribute( MemoryOpTy memoryOp, OpAsmPrinter &printer, SmallVectorImpl &elidedAttrs, - Optional memoryAccessAtrrValue = None, - Optional alignmentAttrValue = None) { + std::optional memoryAccessAtrrValue = std::nullopt, + std::optional alignmentAttrValue = std::nullopt) { // Print optional memory access attribute. if (auto memAccess = (memoryAccessAtrrValue ? memoryAccessAtrrValue : memoryOp.getMemoryAccess())) { @@ -325,7 +325,7 @@ if (auto alignment = (alignmentAttrValue ? alignmentAttrValue : memoryOp.getAlignment())) { elidedAttrs.push_back(kAlignmentAttrName); - printer << ", " << alignment; + printer << ", " << *alignment; } } printer << "]"; @@ -341,8 +341,8 @@ static void printSourceMemoryAccessAttribute( MemoryOpTy memoryOp, OpAsmPrinter &printer, SmallVectorImpl &elidedAttrs, - Optional memoryAccessAtrrValue = None, - Optional alignmentAttrValue = None) { + std::optional memoryAccessAtrrValue = std::nullopt, + std::optional alignmentAttrValue = std::nullopt) { printer << ", "; @@ -358,7 +358,7 @@ if (auto alignment = (alignmentAttrValue ? alignmentAttrValue : memoryOp.getAlignment())) { elidedAttrs.push_back(kSourceAlignmentAttrName); - printer << ", " << alignment; + printer << ", " << *alignment; } } printer << "]"; @@ -910,7 +910,7 @@ parser.parseOperand(valueInfo)) return failure(); - Optional clusterSizeInfo; + std::optional clusterSizeInfo; if (succeeded(parser.parseOptionalKeyword(kClusterSize))) { clusterSizeInfo = OpAsmParser::UnresolvedOperand(); if (parser.parseLParen() || parser.parseOperand(*clusterSizeInfo) || @@ -3344,7 +3344,7 @@ //===----------------------------------------------------------------------===// void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state, - Optional name) { + std::optional name) { OpBuilder::InsertionGuard guard(builder); builder.createBlock(state.addRegion()); if (name) { @@ -3356,8 +3356,8 @@ void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state, spirv::AddressingModel addressingModel, spirv::MemoryModel memoryModel, - Optional vceTriple, - Optional name) { + std::optional vceTriple, + std::optional name) { state.addAttribute( "addressing_model", builder.getAttr(addressingModel)); @@ -3410,7 +3410,7 @@ } void spirv::ModuleOp::print(OpAsmPrinter &printer) { - if (Optional name = getName()) { + if (auto name = getName()) { printer << ' '; printer.printSymbolName(*name); } @@ -3424,7 +3424,7 @@ elidedAttrs.assign({addressingModelAttrName, memoryModelAttrName, mlir::SymbolTable::getSymbolAttrName()}); - if (Optional triple = getVceTriple()) { + if (auto triple = getVceTriple()) { printer << " requires " << *triple; elidedAttrs.push_back(spirv::ModuleOp::getVCETripleAttrName()); } diff --git a/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp --- a/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp +++ b/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp @@ -52,8 +52,8 
@@ AliasedResourceMap aliasedResources; moduleOp->walk([&aliasedResources](spirv::GlobalVariableOp varOp) { if (varOp->getAttrOfType("aliased")) { - Optional set = varOp.getDescriptorSet(); - Optional binding = varOp.getBinding(); + auto set = varOp.getDescriptorSet(); + auto binding = varOp.getBinding(); if (set && binding) aliasedResources[{*set, *binding}].push_back(varOp); } @@ -100,7 +100,7 @@ if (vectorType.getNumElements() % 2 != 0) return std::nullopt; // Odd-sized vector has special layout requirements. - Optional numBytes = type.getSizeInBytes(); + auto numBytes = type.getSizeInBytes(); if (!numBytes) return std::nullopt; @@ -279,7 +279,7 @@ elementTypes.push_back(type); } - Optional index = deduceCanonicalResource(elementTypes); + auto index = deduceCanonicalResource(elementTypes); if (!index) return; diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp --- a/mlir/lib/Dialect/Shape/IR/Shape.cpp +++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp @@ -335,7 +335,7 @@ // See RegionBranchOpInterface in Interfaces/ControlFlowInterfaces.td void AssumingOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // AssumingOp has unconditional control flow into the region and back to the // parent, so return the correct RegionSuccessor purely based on the index @@ -394,7 +394,7 @@ //===----------------------------------------------------------------------===// LogicalResult mlir::shape::AddOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType().isa() || @@ -911,7 +911,7 @@ } LogicalResult mlir::shape::ConstShapeOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { Builder b(context); @@ -1068,7 +1068,7 @@ // DimOp //===----------------------------------------------------------------------===// -Optional DimOp::getConstantIndex() { +std::optional DimOp::getConstantIndex() { if (auto constSizeOp = getIndex().getDefiningOp()) return constSizeOp.getValue().getLimitedValue(); if (auto constantOp = getIndex().getDefiningOp()) @@ -1081,7 +1081,7 @@ auto valShapedType = valType.dyn_cast(); if (!valShapedType || !valShapedType.hasRank()) return nullptr; - Optional index = getConstantIndex(); + auto index = getConstantIndex(); if (!index.has_value()) return nullptr; if (index.value() >= valShapedType.getRank()) @@ -1093,7 +1093,7 @@ } LogicalResult mlir::shape::DimOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { DimOpAdaptor dimOp(operands); @@ -1141,7 +1141,7 @@ } LogicalResult mlir::shape::DivOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType().isa() || @@ -1322,7 +1322,7 @@ // GetExtentOp //===----------------------------------------------------------------------===// -Optional 
GetExtentOp::getConstantDim() { +std::optional GetExtentOp::getConstantDim() { if (auto constSizeOp = getDim().getDefiningOp()) return constSizeOp.getValue().getLimitedValue(); if (auto constantOp = getDim().getDefiningOp()) @@ -1334,7 +1334,7 @@ auto elements = operands[0].dyn_cast_or_null(); if (!elements) return nullptr; - Optional dim = getConstantDim(); + auto dim = getConstantDim(); if (!dim.has_value()) return nullptr; if (dim.value() >= elements.getNumElements()) @@ -1357,7 +1357,7 @@ } LogicalResult mlir::shape::GetExtentOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { inferredReturnTypes.assign({IndexType::get(context)}); @@ -1395,7 +1395,7 @@ //===----------------------------------------------------------------------===// LogicalResult mlir::shape::MeetOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands.empty()) @@ -1531,7 +1531,7 @@ } LogicalResult mlir::shape::RankOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType().isa()) @@ -1567,7 +1567,7 @@ } LogicalResult mlir::shape::NumElementsOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType().isa()) @@ -1599,7 +1599,7 @@ } LogicalResult mlir::shape::MaxOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType() == operands[1].getType()) @@ -1631,7 +1631,7 @@ } LogicalResult mlir::shape::MinOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType() == operands[1].getType()) @@ -1668,7 +1668,7 @@ } LogicalResult mlir::shape::MulOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType().isa() || @@ -1755,7 +1755,7 @@ } LogicalResult mlir::shape::ShapeOfOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType().isa()) diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp 
@@ -273,11 +273,11 @@ if (isSingletonDim(rtp, r)) { return; // nothing to do } // Keep compounding the size, but nothing needs to be initialized - // at this level. We will eventually reach a compressed level or - // otherwise the values array for the from-here "all-dense" case. - assert(isDenseDim(rtp, r)); - Value size = sizeAtStoredDim(builder, loc, rtp, fields, r); - linear = builder.create(loc, linear, size); + // at this level. We will eventually reach a compressed level or + // otherwise the values array for the from-here "all-dense" case. + assert(isDenseDim(rtp, r)); + Value size = sizeAtStoredDim(builder, loc, rtp, fields, r); + linear = builder.create(loc, linear, size); } // Reached values array so prepare for an insertion. Value valZero = constantZero(builder, loc, rtp.getElementType()); @@ -658,7 +658,7 @@ LogicalResult matchAndRewrite(tensor::DimOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - Optional index = op.getConstantIndex(); + auto index = op.getConstantIndex(); if (!index) return failure(); auto sz = diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp @@ -63,7 +63,8 @@ uint64_t lvl) { // Generate the call. StringRef name = "sparseLvlSize"; - SmallVector params{ // just two + SmallVector params{ + // just two src, constantIndex(builder, loc, toStoredDim(enc, lvl))}; Type iTp = builder.getIndexType(); return createFuncCall(builder, loc, name, iTp, params, EmitCInterface::Off) @@ -665,7 +666,7 @@ auto enc = getSparseTensorEncoding(op.getSource().getType()); if (!enc) return failure(); - Optional index = op.getConstantIndex(); + auto index = op.getConstantIndex(); if (!index) return failure(); // Generate the call. diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp --- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp @@ -378,7 +378,7 @@ build(builder, result, source, indexValue); } -Optional DimOp::getConstantIndex() { +std::optional DimOp::getConstantIndex() { return getConstantIntValue(getIndex()); } @@ -398,7 +398,7 @@ LogicalResult DimOp::verify() { // Assume unknown index to be in range. - Optional index = getConstantIndex(); + auto index = getConstantIndex(); if (!index) return success(); @@ -597,7 +597,7 @@ for (int64_t i = 0; i < op.getType().getRank(); ++i) { if (op.getType().isDynamicDim(i)) { Value dynamicSize = op.getDynamicSizes()[ctr++]; - Optional cst = getConstantIntValue(dynamicSize); + auto cst = getConstantIntValue(dynamicSize); if (cst.has_value()) { staticShape[i] = *cst; changedType = true; @@ -677,7 +677,7 @@ LogicalResult matchAndRewrite(tensor::DimOp dimOp, PatternRewriter &rewriter) const override { - Optional maybeConstantIndex = dimOp.getConstantIndex(); + auto maybeConstantIndex = dimOp.getConstantIndex(); auto emptyTensorOp = dimOp.getSource().getDefiningOp(); if (!emptyTensorOp || !maybeConstantIndex) return failure(); @@ -1499,7 +1499,7 @@ return failure(); // Only constant dimension values are supported. - Optional dim = dimOp.getConstantIndex(); + auto dim = dimOp.getConstantIndex(); if (!dim.has_value()) return failure(); @@ -1543,7 +1543,7 @@ return failure(); // Only constant dimension values are supported. 
- Optional dim = dimOp.getConstantIndex(); + auto dim = dimOp.getConstantIndex(); if (!dim.has_value()) return failure(); @@ -1795,7 +1795,7 @@ llvm::SmallBitVector droppedDims(mixedSizes.size()); unsigned shapePos = 0; for (const auto &size : enumerate(mixedSizes)) { - Optional sizeVal = getConstantIntValue(size.value()); + auto sizeVal = getConstantIntValue(size.value()); // If the size is not 1, or if the current matched dimension of the result // is the same static shape as the size value (which is 1), then the // dimension is preserved. @@ -2327,15 +2327,14 @@ })) return failure(); - auto getSourceOfCastOp = [](Value v) -> Optional { + auto getSourceOfCastOp = [](Value v) -> std::optional { auto castOp = v.getDefiningOp(); if (!castOp || !canFoldIntoConsumerOp(castOp)) return std::nullopt; return castOp.getSource(); }; - Optional sourceCastSource = - getSourceOfCastOp(insertSliceOp.getSource()); - Optional destCastSource = getSourceOfCastOp(insertSliceOp.getDest()); + auto sourceCastSource = getSourceOfCastOp(insertSliceOp.getSource()); + auto destCastSource = getSourceOfCastOp(insertSliceOp.getDest()); if (!sourceCastSource && !destCastSource) return failure(); @@ -2401,8 +2400,7 @@ SmallVector newSrcShape(srcType.getShape().begin(), srcType.getShape().end()); for (int64_t i = 0; i < srcType.getRank(); ++i) { - if (Optional constInt = - getConstantIntValue(insertSliceOp.getMixedSizes()[i])) + if (auto constInt = getConstantIntValue(insertSliceOp.getMixedSizes()[i])) newSrcShape[i] = *constInt; } @@ -2469,9 +2467,10 @@ void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand, Type typeToInfer, Type typeToInferFrom) {} -ParseResult parseInferType(OpAsmParser &parser, - Optional optOperand, - Type &typeToInfer, Type typeToInferFrom) { +ParseResult +parseInferType(OpAsmParser &parser, + std::optional optOperand, + Type &typeToInfer, Type typeToInferFrom) { if (optOperand) typeToInfer = typeToInferFrom; return success(); @@ -2530,8 +2529,7 @@ SmallVector inferredShape; for (auto i : llvm::seq(0, rank)) { - if (sourceType.isDynamicDim(i) || - staticLow[i] == ShapedType::kDynamic || + if (sourceType.isDynamicDim(i) || staticLow[i] == ShapedType::kDynamic || staticHigh[i] == ShapedType::kDynamic) { inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamic : resultShape[i]); @@ -2580,8 +2578,7 @@ // This will grow staticLow and staticHigh with 1 value. If the config is // dynamic (ie not a constant), dynamicLow and dynamicHigh will grow with 1 // value as well. 
- dispatchIndexOpFoldResults(low, dynamicLow, staticLow, - ShapedType::kDynamic); + dispatchIndexOpFoldResults(low, dynamicLow, staticLow, ShapedType::kDynamic); dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh, ShapedType::kDynamic); if (!resultType) { @@ -3206,8 +3203,7 @@ llvm::zip(packedType.getShape().take_back(mixedTiles.size()), mixedTiles), [](std::tuple it) { - Optional constTileSize = - getConstantIntValue(std::get<1>(it)); + auto constTileSize = getConstantIntValue(std::get<1>(it)); int64_t shape = std::get<0>(it); if (!constTileSize) { // If specified tile size is dynamic, output shape should @@ -3288,7 +3284,7 @@ auto it = dimAndTileMapping.find(dim); if (it == dimAndTileMapping.end()) continue; - Optional constantTile = getConstantIntValue(it->second); + auto constantTile = getConstantIntValue(it->second); if (!constantTile) continue; if (inputShape[dim] % (*constantTile) != 0) @@ -3358,7 +3354,7 @@ SmallVector mixedTiles = op.getMixedTiles(); for (auto [dimDest, tile] : llvm::zip( packedType.getShape().take_back(mixedTiles.size()), mixedTiles)) { - Optional constTileSize = getConstantIntValue(tile); + auto constTileSize = getConstantIntValue(tile); if (!constTileSize || ShapedType::isDynamic(dimDest)) return false; } diff --git a/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp --- a/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp @@ -26,7 +26,7 @@ /// Returns true if the the given `attrOrValue` is a constant zero. static bool isZero(OpFoldResult attrOrValue) { - if (Optional val = getConstantIntValue(attrOrValue)) + if (auto val = getConstantIntValue(attrOrValue)) return *val == 0; return false; } diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp --- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp @@ -373,7 +373,7 @@ } LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape = operands.getShape(0); @@ -398,7 +398,7 @@ } LogicalResult tosa::ConcatOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { // Infer all dimension sizes by reducing based on inputs. @@ -455,7 +455,7 @@ } LogicalResult tosa::EqualOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outShape; @@ -476,7 +476,7 @@ } LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape = operands.getShape(0); @@ -496,9 +496,8 @@ } if (biasShape.hasRank()) { - outShape[1] = outShape[1] == ShapedType::kDynamic - ? biasShape.getDimSize(0) - : outShape[1]; + outShape[1] = outShape[1] == ShapedType::kDynamic ? 
biasShape.getDimSize(0) + : outShape[1]; } inferredReturnShapes.push_back(ShapedTypeComponents(outShape)); @@ -508,7 +507,7 @@ LogicalResult FullyConnectedOp::verify() { return verifyConvOp(*this); } LogicalResult tosa::MatMulOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor lhsShape = operands.getShape(0); @@ -524,9 +523,8 @@ } if (rhsShape.hasRank()) { - outShape[0] = outShape[0] == ShapedType::kDynamic - ? rhsShape.getDimSize(0) - : outShape[0]; + outShape[0] = outShape[0] == ShapedType::kDynamic ? rhsShape.getDimSize(0) + : outShape[0]; outShape[2] = rhsShape.getDimSize(2); } @@ -535,7 +533,7 @@ } LogicalResult tosa::PadOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape = operands.getShape(0); @@ -597,7 +595,7 @@ } LogicalResult tosa::SliceOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ArrayAttr sizes = SliceOpAdaptor(operands, attributes).getSize(); @@ -607,13 +605,13 @@ outputShape.push_back(val.cast().getValue().getSExtValue()); } - inferredReturnShapes.push_back(ShapedTypeComponents( - convertToMlirShape(outputShape))); + inferredReturnShapes.push_back( + ShapedTypeComponents(convertToMlirShape(outputShape))); return success(); } LogicalResult tosa::TableOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape = operands.getShape(0); @@ -629,7 +627,7 @@ } LogicalResult tosa::TileOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { TileOpAdaptor adaptor(operands, attributes); @@ -663,7 +661,7 @@ } LogicalResult tosa::ReshapeOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ReshapeOpAdaptor adaptor(operands, attributes); @@ -703,7 +701,7 @@ } LogicalResult tosa::TransposeOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape = operands.getShape(0); @@ -770,7 +768,7 @@ } LogicalResult tosa::GatherOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outputShape; @@ -795,7 +793,7 @@ } LogicalResult tosa::ResizeOp::inferReturnTypeComponents( - MLIRContext 
*context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ResizeOpAdaptor adaptor(operands, attributes); @@ -838,7 +836,7 @@ } LogicalResult tosa::ScatterOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outputShape; @@ -887,7 +885,7 @@ #define REDUCE_SHAPE_INFER(OP) \ LogicalResult OP::inferReturnTypeComponents( \ - MLIRContext *context, ::llvm::Optional location, \ + MLIRContext *context, ::std::optional location, \ ValueShapeRange operands, DictionaryAttr attributes, \ RegionRange regions, \ SmallVectorImpl &inferredReturnShapes) { \ @@ -918,7 +916,7 @@ #define NARY_SHAPE_INFER(OP) \ LogicalResult OP::inferReturnTypeComponents( \ - MLIRContext *context, ::llvm::Optional location, \ + MLIRContext *context, ::std::optional location, \ ValueShapeRange operands, DictionaryAttr attributes, \ RegionRange regions, \ SmallVectorImpl &inferredReturnShapes) { \ @@ -1007,7 +1005,7 @@ } LogicalResult Conv2DOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outputShape(4, ShapedType::kDynamic); @@ -1074,7 +1072,7 @@ LogicalResult Conv2DOp::verify() { return verifyConvOp(*this); } LogicalResult Conv3DOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outputShape(5, ShapedType::kDynamic); @@ -1151,21 +1149,21 @@ LogicalResult Conv3DOp::verify() { return verifyConvOp(*this); } LogicalResult AvgPool2dOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { return poolingInferReturnTypes(operands, attributes, inferredReturnShapes); } LogicalResult MaxPool2dOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { return poolingInferReturnTypes(operands, attributes, inferredReturnShapes); } LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outputShape(4, ShapedType::kDynamic); @@ -1245,7 +1243,7 @@ LogicalResult DepthwiseConv2DOp::verify() { return verifyConvOp(*this); } LogicalResult TransposeConv2DOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { TransposeConv2DOp::Adaptor adaptor(operands.getValues(), attributes); @@ -1313,7 
+1311,7 @@ } LogicalResult IfOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector yieldOps; @@ -1357,7 +1355,7 @@ } LogicalResult WhileOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector yieldOps; diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp --- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp @@ -46,7 +46,7 @@ private: void runOnOperation() override; - llvm::Optional profileType; + std::optional profileType; }; void TosaValidation::runOnOperation() { diff --git a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp --- a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp +++ b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp @@ -124,8 +124,8 @@ // AlternativesOp //===----------------------------------------------------------------------===// -OperandRange -transform::AlternativesOp::getSuccessorEntryOperands(Optional index) { +OperandRange transform::AlternativesOp::getSuccessorEntryOperands( + std::optional index) { if (index && getOperation()->getNumOperands() == 1) return getOperation()->getOperands(); return OperandRange(getOperation()->operand_end(), @@ -133,7 +133,7 @@ } void transform::AlternativesOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { for (Region &alternative : llvm::drop_begin( getAlternatives(), index.has_value() ? *index + 1 : 0)) { @@ -345,7 +345,7 @@ } void transform::ForeachOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { Region *bodyRegion = &getBody(); if (!index) { @@ -360,7 +360,7 @@ } OperandRange -transform::ForeachOp::getSuccessorEntryOperands(Optional index) { +transform::ForeachOp::getSuccessorEntryOperands(std::optional index) { // The iteration variable op handle is mapped to a subset (one op to be // precise) of the payload ops of the ForeachOp operand. 
assert(index && *index == 0 && "unexpected region index"); @@ -735,8 +735,8 @@ } } -OperandRange -transform::SequenceOp::getSuccessorEntryOperands(Optional index) { +OperandRange transform::SequenceOp::getSuccessorEntryOperands( + std::optional index) { assert(index && *index == 0 && "unexpected region index"); if (getOperation()->getNumOperands() == 1) return getOperation()->getOperands(); @@ -745,7 +745,7 @@ } void transform::SequenceOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl &regions) { if (!index) { Region *bodyRegion = &getBody(); diff --git a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp --- a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp +++ b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp @@ -24,14 +24,14 @@ if (sourceType.getRank() < targetType.getRank()) return getReassociationIndicesForCollapse(targetType.getShape(), sourceType.getShape()); - return llvm::None; + return std::nullopt; } Optional> mlir::getReassociationIndicesForCollapse(ArrayRef sourceShape, ArrayRef targetShape) { if (sourceShape.size() <= targetShape.size()) - return llvm::None; + return std::nullopt; unsigned sourceDim = 0; SmallVector reassociationMap; reassociationMap.reserve(targetShape.size()); @@ -57,20 +57,19@@ // dimensions should also be dynamic and product of all previous unprocessed // dimensions of the expanded shape should be 1. if (sourceShape[sourceDim] == ShapedType::kDynamic && - (currTargetShape != ShapedType::kDynamic || - prodOfCollapsedDims != 1) - return llvm::None; + (currTargetShape != ShapedType::kDynamic || prodOfCollapsedDims != 1) + return std::nullopt; // If the collapsed dim is dynamic, the current expanded dim should also // be dynamic. if (currTargetShape == ShapedType::kDynamic && sourceShape[sourceDim] != ShapedType::kDynamic) - return llvm::None; + return std::nullopt; // For static shapes, if the product of dimensions of the expanded shape // should match the collapsed dimension shape. if (prodOfCollapsedDims * sourceShape[sourceDim] != currTargetShape) - return llvm::None; + return std::nullopt; currIndices.push_back(sourceDim++); reassociationMap.emplace_back(ReassociationIndices{}); @@ -79,13 +78,13 @@ } // All the dimensions in the target must have been processed. if (reassociationMap.size() != targetShape.size()) - return llvm::None; + return std::nullopt; // Process any remaining entries in the source shape. They all need to be // 1 or dynamic. for (; sourceDim < sourceShape.size(); sourceDim++) { if (sourceShape[sourceDim] != ShapedType::kDynamic && sourceShape[sourceDim] != 1) - return llvm::None; + return std::nullopt; // The map is empty when the target type is a scalar. if (!reassociationMap.empty()) reassociationMap.back().push_back(sourceDim); @@ -101,7 +100,7 @@ // Make the producer the larger sized vector. If they are of same size, the // resulting reshape is not a supported reshape op.
if (producerReassociations.size() == consumerReassociations.size()) - return llvm::None; + return std::nullopt; if (producerReassociations.size() < consumerReassociations.size()) std::swap(producerReassociations, consumerReassociations); @@ -116,7 +115,7 @@ return all + indices.size(); }); if (producerReassociations.size() != consumerDims) - return llvm::None; + return std::nullopt; for (ReassociationIndicesRef consumerIndices : consumerReassociations) { ReassociationIndices reassociations; @@ -229,7 +228,7 @@ ArrayRef reassociationMaps, bool isExpandingReshape) { unsigned expandedDimStart = 0; for (const auto &map : llvm::enumerate(reassociationMaps)) { - Optional dynamicShape; + std::optional dynamicShape; int64_t linearizedStaticShape = 1; for (const auto &dim : llvm::enumerate( expandedShape.slice(expandedDimStart, map.value().size()))) { @@ -279,8 +278,8 @@ llvm::SmallBitVector mask(sliceInputShape.size()); unsigned idx = 0; for (const auto &[offset, size, stride] : sliceParams) { - Optional offsetConst = getConstantIntValue(offset); - Optional strideConst = getConstantIntValue(stride); + auto offsetConst = getConstantIntValue(offset); + auto strideConst = getConstantIntValue(stride); mask[idx] = !isEqualConstantIntOrValue(size, sliceInputShape[idx]) || (!strideConst || *strideConst != 1) || (!offsetConst || *offsetConst != 0); diff --git a/mlir/lib/Dialect/Utils/StaticValueUtils.cpp b/mlir/lib/Dialect/Utils/StaticValueUtils.cpp --- a/mlir/lib/Dialect/Utils/StaticValueUtils.cpp +++ b/mlir/lib/Dialect/Utils/StaticValueUtils.cpp @@ -93,7 +93,7 @@ } /// If ofr is a constant integer or an IntegerAttr, return the integer. -Optional getConstantIntValue(OpFoldResult ofr) { +std::optional getConstantIntValue(OpFoldResult ofr) { // Case 1: Check for Constant integer.
if (auto val = ofr.dyn_cast()) { APSInt intVal; diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -318,7 +318,8 @@ return {}; } -Optional> MultiDimReductionOp::getShapeForUnroll() { +std::optional> +MultiDimReductionOp::getShapeForUnroll() { return llvm::to_vector<4>(getSourceVectorType().getShape()); } @@ -494,7 +495,7 @@ return nullptr; } -Optional> ReductionOp::getShapeForUnroll() { +std::optional> ReductionOp::getShapeForUnroll() { return llvm::to_vector<4>(getVectorType().getShape()); } @@ -933,7 +934,7 @@ getContext()); } -Optional> ContractionOp::getShapeForUnroll() { +std::optional> ContractionOp::getShapeForUnroll() { SmallVector shape; getIterationBounds(shape); return shape; @@ -1071,7 +1072,7 @@ } LogicalResult -ExtractOp::inferReturnTypes(MLIRContext *, Optional, +ExtractOp::inferReturnTypes(MLIRContext *, std::optional, ValueRange operands, DictionaryAttr attributes, RegionRange, SmallVectorImpl &inferredReturnTypes) { @@ -1715,7 +1716,7 @@ // FmaOp //===----------------------------------------------------------------------===// -Optional> FMAOp::getShapeForUnroll() { +std::optional> FMAOp::getShapeForUnroll() { return llvm::to_vector<4>(getVectorType().getShape()); } @@ -1882,7 +1883,7 @@ } LogicalResult -ShuffleOp::inferReturnTypes(MLIRContext *, Optional, +ShuffleOp::inferReturnTypes(MLIRContext *, std::optional, ValueRange operands, DictionaryAttr attributes, RegionRange, SmallVectorImpl &inferredReturnTypes) { @@ -3059,7 +3060,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result, VectorType vectorType, Value source, ValueRange indices, AffineMap permutationMap, - Optional> inBounds) { + std::optional> inBounds) { auto permutationMapAttr = AffineMapAttr::get(permutationMap); auto inBoundsAttr = (inBounds && !inBounds.value().empty()) ? builder.getBoolArrayAttr(inBounds.value()) @@ -3072,7 +3073,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result, VectorType vectorType, Value source, ValueRange indices, Value padding, - Optional> inBounds) { + std::optional> inBounds) { AffineMap permutationMap = getTransferMinorIdentityMap( source.getType().cast(), vectorType); auto permutationMapAttr = AffineMapAttr::get(permutationMap); @@ -3089,7 +3090,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result, VectorType vectorType, Value source, ValueRange indices, - Optional> inBounds) { + std::optional> inBounds) { Type elemType = source.getType().cast().getElementType(); Value padding = builder.create( result.location, elemType, builder.getZeroAttr(elemType)); @@ -3446,7 +3447,7 @@ return OpFoldResult(); } -Optional> TransferReadOp::getShapeForUnroll() { +std::optional> TransferReadOp::getShapeForUnroll() { return llvm::to_vector<4>(getVectorType().getShape()); } @@ -3673,7 +3674,7 @@ void TransferWriteOp::build(OpBuilder &builder, OperationState &result, Value vector, Value dest, ValueRange indices, AffineMap permutationMap, - Optional> inBounds) { + std::optional> inBounds) { auto permutationMapAttr = AffineMapAttr::get(permutationMap); auto inBoundsAttr = (inBounds && !inBounds.value().empty()) ? builder.getBoolArrayAttr(inBounds.value()) @@ -3686,7 +3687,7 @@ /// map to 'getMinorIdentityMap'. 
void TransferWriteOp::build(OpBuilder &builder, OperationState &result, Value vector, Value dest, ValueRange indices, - Optional> inBounds) { + std::optional> inBounds) { auto vectorType = vector.getType().cast(); AffineMap permutationMap = getTransferMinorIdentityMap( dest.getType().cast(), vectorType); @@ -3911,7 +3912,7 @@ return memref::foldMemRefCast(*this); } -Optional> TransferWriteOp::getShapeForUnroll() { +std::optional> TransferWriteOp::getShapeForUnroll() { return llvm::to_vector<4>(getVectorType().getShape()); } @@ -4902,7 +4903,7 @@ return success(); } -Optional> TransposeOp::getShapeForUnroll() { +std::optional> TransposeOp::getShapeForUnroll() { return llvm::to_vector<4>(getResultType().getShape()); } @@ -5447,7 +5448,7 @@ } void WarpExecuteOnLane0Op::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl &regions) { if (index) { regions.push_back(RegionSuccessor(getResults())); return; } @@ -5462,7 +5463,7 @@ TypeRange resultTypes, Value laneId, int64_t warpSize) { build(builder, result, resultTypes, laneId, warpSize, - /*operands=*/std::nullopt, /*argTypes=*/llvm::None); + /*operands=*/std::nullopt, /*argTypes=*/std::nullopt); } void WarpExecuteOnLane0Op::build(OpBuilder &builder, OperationState &result, diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp @@ -44,7 +44,7 @@ using namespace mlir::vector; // Helper to find an index in an affine map. -static Optional getResultIndex(AffineMap map, int64_t index) { +static std::optional getResultIndex(AffineMap map, int64_t index) { for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) { int64_t idx = map.getDimPosition(i); if (idx == index) @@ -147,11 +147,11 @@ } /// Helper to create arithmetic operation associated with a kind of contraction. -static Optional createContractArithOp(Location loc, Value x, Value y, - Value acc, - vector::CombiningKind kind, - PatternRewriter &rewriter, - bool isInt) { +static std::optional createContractArithOp(Location loc, Value x, + Value y, Value acc, + vector::CombiningKind kind, + PatternRewriter &rewriter, + bool isInt) { using vector::CombiningKind; Value mul; if (isInt) { @@ -169,12 +169,13 @@ return None; // Special case for fused multiply-add. if (acc && acc.getType().isa() && kind == CombiningKind::ADD) { - return Optional(rewriter.create(loc, x, y, acc)); + return std::optional( rewriter.create(loc, x, y, acc)); } mul = rewriter.create(loc, x, y); } if (!acc) - return Optional(mul); + return std::optional(mul); return makeArithReduction(rewriter, loc, kind, mul, acc); } @@ -190,13 +191,13 @@ } /// Look for a given dimension in an affine map and return its position. Return -/// llvm::None if the dimension is not in the map results. -static llvm::Optional getDimPosition(AffineMap map, unsigned dim) { +/// std::nullopt if the dimension is not in the map results. +static std::optional getDimPosition(AffineMap map, unsigned dim) { for (unsigned i = 0, e = map.getNumResults(); i < e; i++) { if (map.getDimPosition(i) == dim) return i; } - return llvm::None; + return std::nullopt; } namespace { @@ -552,8 +553,8 @@ if (!rhsType) { // Special case: AXPY operation.
Value b = rewriter.create(loc, lhsType, op.getRhs()); - Optional mult = createContractArithOp(loc, op.getLhs(), b, acc, - kind, rewriter, isInt); + auto mult = createContractArithOp(loc, op.getLhs(), b, acc, kind, + rewriter, isInt); if (!mult.has_value()) return failure(); rewriter.replaceOp(op, mult.value()); @@ -570,7 +571,7 @@ Value r = nullptr; if (acc) r = rewriter.create(loc, rhsType, acc, pos); - Optional m = + auto m = createContractArithOp(loc, a, op.getRhs(), r, kind, rewriter, isInt); if (!m.has_value()) return failure(); @@ -645,8 +646,7 @@ // Loop through the parallel dimensions to calculate the dimensions to // broadcast and to permute in order to extract only parallel dimensions. for (unsigned i = 0; i < numParallelDims; i++) { - llvm::Optional lhsDim = - getDimPosition(lhsMap, accMap.getDimPosition(i)); + auto lhsDim = getDimPosition(lhsMap, accMap.getDimPosition(i)); if (lhsDim) { lhsTranspose.push_back(numLhsDimToBroadcast + *lhsDim); } else { @@ -655,8 +655,7 @@ contractOp.getResultType().cast().getDimSize(i)); lhsTranspose.push_back(lhsDims.size() - 1); } - llvm::Optional rhsDim = - getDimPosition(rhsMap, accMap.getDimPosition(i)); + auto rhsDim = getDimPosition(rhsMap, accMap.getDimPosition(i)); if (rhsDim) { rhsTranspose.push_back(numRhsDimToBroadcast + *rhsDim); } else { @@ -690,7 +689,7 @@ loc, newLhs, rewriter.getI64ArrayAttr(lhsOffsets)); newRhs = rewriter.create( loc, newRhs, rewriter.getI64ArrayAttr(rhsOffsets)); - Optional result = + auto result = createContractArithOp(loc, newLhs, newRhs, contractOp.getAcc(), contractOp.getKind(), rewriter, isInt); rewriter.replaceOp(contractOp, {*result}); @@ -2011,8 +2010,8 @@ // Use iterator index 0. int64_t iterIndex = 0; SmallVector iMap = op.getIndexingMapsArray(); - Optional lookupLhs = getResultIndex(iMap[0], iterIndex); - Optional lookupRhs = getResultIndex(iMap[1], iterIndex); + auto lookupLhs = getResultIndex(iMap[0], iterIndex); + auto lookupRhs = getResultIndex(iMap[1], iterIndex); if (!lookupLhs.has_value()) return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) { diag << "expected iterIndex=" << iterIndex << "to map to a LHS dimension"; @@ -2076,7 +2075,7 @@ struct TransferReadToVectorLoadLowering : public OpRewritePattern { TransferReadToVectorLoadLowering(MLIRContext *context, - llvm::Optional maxRank, + std::optional maxRank, PatternBenefit benefit = 1) : OpRewritePattern(context, benefit), maxTransferRank(maxRank) {} @@ -2152,7 +2151,7 @@ return success(); } - llvm::Optional maxTransferRank; + std::optional maxTransferRank; }; /// Replace a 0-d vector.load with a memref.load + vector.broadcast. @@ -2218,7 +2217,7 @@ struct TransferWriteToVectorStoreLowering : public OpRewritePattern { TransferWriteToVectorStoreLowering(MLIRContext *context, - llvm::Optional maxRank, + std::optional maxRank, PatternBenefit benefit = 1) : OpRewritePattern(context, benefit), maxTransferRank(maxRank) {} @@ -2281,7 +2280,7 @@ return success(); } - llvm::Optional maxTransferRank; + std::optional maxTransferRank; }; // Returns the values in `arrayAttr` as an integer vector. 
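The hunks above and below apply the same mechanical substitution: llvm::Optional<T> becomes std::optional<T>, llvm::None becomes std::nullopt, and the call sites keep testing and dereferencing the value exactly as before. A minimal standalone sketch of that call-site pattern, assuming only C++17 <optional>; Op, getAlignment, and kDefaultAlignment are made-up names, not MLIR declarations:

// Illustrative sketch only; Op, getAlignment, and kDefaultAlignment are
// placeholders, not MLIR declarations.
#include <cstdint>
#include <iostream>
#include <optional>

struct Op {
  std::optional<uint64_t> alignment; // may be absent, formerly llvm::Optional
  std::optional<uint64_t> getAlignment() const { return alignment; }
};

constexpr uint64_t kDefaultAlignment = 16;

uint64_t chooseAlignment(const Op &op) {
  // Same shape as the converted call sites: test the optional, then deref.
  if (auto a = op.getAlignment())
    return *a;
  return kDefaultAlignment;
}

int main() {
  Op withAttr{std::optional<uint64_t>{64}};
  Op withoutAttr{std::nullopt}; // formerly llvm::None
  std::cout << chooseAlignment(withAttr) << "\n";    // prints 64
  std::cout << chooseAlignment(withoutAttr) << "\n"; // prints 16
  // value_or collapses the test-then-default shape into a single call.
  std::cout << withoutAttr.getAlignment().value_or(kDefaultAlignment) << "\n";
}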
@@ -3027,7 +3026,7 @@ } void mlir::vector::populateVectorTransferLoweringPatterns( - RewritePatternSet &patterns, llvm::Optional maxTransferRank, + RewritePatternSet &patterns, std::optional maxTransferRank, PatternBenefit benefit) { patterns.add(patterns.getContext(), diff --git a/mlir/lib/IR/BuiltinDialect.cpp b/mlir/lib/IR/BuiltinDialect.cpp --- a/mlir/lib/IR/BuiltinDialect.cpp +++ b/mlir/lib/IR/BuiltinDialect.cpp @@ -126,7 +126,7 @@ //===----------------------------------------------------------------------===// void ModuleOp::build(OpBuilder &builder, OperationState &state, - Optional name) { + std::optional name) { state.addRegion()->emplaceBlock(); if (name) { state.attributes.push_back(builder.getNamedAttr( @@ -135,7 +135,7 @@ } /// Construct a module from the given context. -ModuleOp ModuleOp::create(Location loc, Optional name) { +ModuleOp ModuleOp::create(Location loc, std::optional name) { OpBuilder builder(loc->getContext()); return builder.create(loc, name); } diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp --- a/mlir/lib/IR/BuiltinTypes.cpp +++ b/mlir/lib/IR/BuiltinTypes.cpp @@ -246,7 +246,7 @@ return VectorType(); } -VectorType VectorType::cloneWith(Optional> shape, +VectorType VectorType::cloneWith(std::optional> shape, Type elementType) const { return VectorType::get(shape.value_or(getShape()), elementType, getNumScalableDims()); @@ -268,7 +268,7 @@ return cast().getShape(); } -TensorType TensorType::cloneWith(Optional> shape, +TensorType TensorType::cloneWith(std::optional> shape, Type elementType) const { if (auto unrankedTy = dyn_cast()) { if (shape) @@ -346,7 +346,7 @@ return cast().getShape(); } -BaseMemRefType BaseMemRefType::cloneWith(Optional> shape, +BaseMemRefType BaseMemRefType::cloneWith(std::optional> shape, Type elementType) const { if (auto unrankedTy = dyn_cast()) { if (!shape) @@ -387,7 +387,7 @@ /// which dimensions must be kept when e.g. compute MemRef strides under /// rank-reducing operations. Return None if reducedShape cannot be obtained /// by dropping only `1` entries in `originalShape`. -llvm::Optional> +std::optional> mlir::computeRankReductionMask(ArrayRef originalShape, ArrayRef reducedShape) { size_t originalRank = originalShape.size(), reducedRank = reducedShape.size(); diff --git a/mlir/lib/IR/Dialect.cpp b/mlir/lib/IR/Dialect.cpp --- a/mlir/lib/IR/Dialect.cpp +++ b/mlir/lib/IR/Dialect.cpp @@ -75,9 +75,9 @@ return Type(); } -Optional +std::optional Dialect::getParseOperationHook(StringRef opName) const { - return None; + return std::nullopt; } llvm::unique_function diff --git a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp --- a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp +++ b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp @@ -36,7 +36,7 @@ /// Returns the `BlockArgument` corresponding to operand `operandIndex` in some /// successor if 'operandIndex' is within the range of 'operands', or None if /// `operandIndex` isn't a successor operand index. -Optional +std::optional detail::getBranchSuccessorArgument(const SuccessorOperands &operands, unsigned operandIndex, Block *successor) { OperandRange forwardedOperands = operands.getForwardedOperands(); @@ -90,17 +90,17 @@ /// inputs that flow from `sourceIndex' to the given region, or std::nullopt if /// the exact type match verification is not necessary (e.g., if the Op verifies /// the match itself). 
-static LogicalResult -verifyTypesAlongAllEdges(Operation *op, Optional sourceNo, - function_ref(Optional)> - getInputsTypesForRegion) { +static LogicalResult verifyTypesAlongAllEdges( + Operation *op, std::optional sourceNo, + function_ref(std::optional)> + getInputsTypesForRegion) { auto regionInterface = cast(op); SmallVector successors; regionInterface.getSuccessorRegions(sourceNo, successors); for (RegionSuccessor &succ : successors) { - Optional succRegionNo; + std::optional succRegionNo; if (!succ.isParent()) succRegionNo = succ.getSuccessor()->getRegionNumber(); @@ -119,7 +119,8 @@ return diag; }; - Optional sourceTypes = getInputsTypesForRegion(succRegionNo); + std::optional sourceTypes = + getInputsTypesForRegion(succRegionNo); if (!sourceTypes.has_value()) continue; @@ -151,7 +152,8 @@ LogicalResult detail::verifyTypesAlongControlFlowEdges(Operation *op) { auto regionInterface = cast(op); - auto inputTypesFromParent = [&](Optional regionNo) -> TypeRange { + auto inputTypesFromParent = + [&](std::optional regionNo) -> TypeRange { return regionInterface.getSuccessorEntryOperands(regionNo).getTypes(); }; @@ -179,7 +181,7 @@ // implementing the `RegionBranchTerminatorOpInterface`, all should have the // same operand types when passing them to the same region. - Optional regionReturnOperands; + std::optional regionReturnOperands; for (Block &block : region) { Operation *terminator = block.getTerminator(); auto terminatorOperands = @@ -202,7 +204,7 @@ } auto inputTypesFromRegion = - [&](Optional regionNo) -> Optional { + [&](std::optional regionNo) -> std::optional { // If there is no return-like terminator, the op itself should verify // type consistency. if (!regionReturnOperands) @@ -307,7 +309,7 @@ } void RegionBranchOpInterface::getSuccessorRegions( - Optional index, SmallVectorImpl ®ions) { + std::optional index, SmallVectorImpl ®ions) { unsigned numInputs = 0; if (index) { // If the predecessor is a region, get the number of operands from an @@ -367,9 +369,9 @@ /// `OperandRange` represents all operands that are passed to the specified /// successor region. If `regionIndex` is `std::nullopt`, all operands that are /// passed to the parent operation will be returned. -Optional -mlir::getMutableRegionBranchSuccessorOperands(Operation *operation, - Optional regionIndex) { +std::optional +mlir::getMutableRegionBranchSuccessorOperands( + Operation *operation, std::optional regionIndex) { // Try to query a RegionBranchTerminatorOpInterface to determine // all successor operands that will be passed to the successor // input arguments. @@ -388,9 +390,9 @@ /// Returns the read only operands that are passed to the region with the given /// `regionIndex`. See `getMutableRegionBranchSuccessorOperands` for more /// information. -Optional +std::optional mlir::getRegionBranchSuccessorOperands(Operation *operation, - Optional regionIndex) { + std::optional regionIndex) { auto range = getMutableRegionBranchSuccessorOperands(operation, regionIndex); - return range ? Optional(*range) : std::nullopt; + return range ? 
std::optional(*range) : std::nullopt; } diff --git a/mlir/lib/Interfaces/InferTypeOpInterface.cpp b/mlir/lib/Interfaces/InferTypeOpInterface.cpp --- a/mlir/lib/Interfaces/InferTypeOpInterface.cpp +++ b/mlir/lib/Interfaces/InferTypeOpInterface.cpp @@ -174,12 +174,13 @@ } LogicalResult mlir::detail::inferReturnTensorTypes( - function_ref location, ValueShapeRange operands, - DictionaryAttr attributes, RegionRange regions, - SmallVectorImpl &retComponents)> + function_ref< + LogicalResult(MLIRContext *, std::optional location, + ValueShapeRange operands, DictionaryAttr attributes, + RegionRange regions, + SmallVectorImpl &retComponents)> componentTypeFn, - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { SmallVector retComponents; diff --git a/mlir/lib/Rewrite/ByteCode.cpp b/mlir/lib/Rewrite/ByteCode.cpp --- a/mlir/lib/Rewrite/ByteCode.cpp +++ b/mlir/lib/Rewrite/ByteCode.cpp @@ -46,7 +46,7 @@ llvm::to_vector<8>(generatedOpsAttr.getAsValueRange()); // Check to see if this is pattern matches a specific operation type. - if (Optional rootKind = matchOp.getRootKind()) + if (auto rootKind = matchOp.getRootKind()) return PDLByteCodePattern(rewriterAddr, configSet, *rootKind, benefit, ctx, generatedOps); return PDLByteCodePattern(rewriterAddr, configSet, MatchAnyOpTypeTag(), @@ -940,7 +940,7 @@ } void Generator::generate(pdl_interp::GetOperandsOp op, ByteCodeWriter &writer) { Value result = op.getValue(); - Optional index = op.getIndex(); + auto index = op.getIndex(); writer.append(OpCode::GetOperands, index.value_or(std::numeric_limits::max()), op.getInputOp()); @@ -960,7 +960,7 @@ } void Generator::generate(pdl_interp::GetResultsOp op, ByteCodeWriter &writer) { Value result = op.getValue(); - Optional index = op.getIndex(); + auto index = op.getIndex(); writer.append(OpCode::GetResults, index.value_or(std::numeric_limits::max()), op.getInputOp()); @@ -1449,12 +1449,11 @@ // If the result is a range, we need to copy it over to the bytecodes // range memory. - if (Optional typeRange = result.dyn_cast()) { + if (auto typeRange = result.dyn_cast()) { unsigned rangeIndex = read(); typeRangeMemory[rangeIndex] = *typeRange; memory[read()] = &typeRangeMemory[rangeIndex]; - } else if (Optional valueRange = - result.dyn_cast()) { + } else if (auto valueRange = result.dyn_cast()) { unsigned rangeIndex = read(); valueRangeMemory[rangeIndex] = *valueRange; memory[read()] = &valueRangeMemory[rangeIndex]; diff --git a/mlir/lib/TableGen/Attribute.cpp b/mlir/lib/TableGen/Attribute.cpp --- a/mlir/lib/TableGen/Attribute.cpp +++ b/mlir/lib/TableGen/Attribute.cpp @@ -70,7 +70,7 @@ // Return the type constraint corresponding to the type of this attribute, or // None if this is not a TypedAttr. -llvm::Optional Attribute::getValueType() const { +std::optional Attribute::getValueType() const { if (auto *defInit = dyn_cast(def->getValueInit("valueType"))) return Type(defInit->getDef()); return std::nullopt; diff --git a/mlir/lib/TableGen/CodeGenHelpers.cpp b/mlir/lib/TableGen/CodeGenHelpers.cpp --- a/mlir/lib/TableGen/CodeGenHelpers.cpp +++ b/mlir/lib/TableGen/CodeGenHelpers.cpp @@ -78,10 +78,10 @@ // Find a uniqued attribute constraint. Since not all attribute constraints can // be uniqued, return None if one was not found. 
-Optional StaticVerifierFunctionEmitter::getAttrConstraintFn( +std::optional StaticVerifierFunctionEmitter::getAttrConstraintFn( const Constraint &constraint) const { auto it = attrConstraints.find(constraint); - return it == attrConstraints.end() ? Optional() + return it == attrConstraints.end() ? std::optional() : StringRef(it->second); } diff --git a/mlir/lib/TableGen/Dialect.cpp b/mlir/lib/TableGen/Dialect.cpp --- a/mlir/lib/TableGen/Dialect.cpp +++ b/mlir/lib/TableGen/Dialect.cpp @@ -57,9 +57,9 @@ return dependentDialects; } -llvm::Optional Dialect::getExtraClassDeclaration() const { +std::optional Dialect::getExtraClassDeclaration() const { auto value = def->getValueAsString("extraClassDeclaration"); - return value.empty() ? llvm::Optional() : value; + return value.empty() ? std::optional() : value; } bool Dialect::hasCanonicalizer() const { diff --git a/mlir/lib/TableGen/Interfaces.cpp b/mlir/lib/TableGen/Interfaces.cpp --- a/mlir/lib/TableGen/Interfaces.cpp +++ b/mlir/lib/TableGen/Interfaces.cpp @@ -43,21 +43,21 @@ } // Return the body for this method if it has one. -llvm::Optional InterfaceMethod::getBody() const { +std::optional InterfaceMethod::getBody() const { auto value = def->getValueAsString("body"); - return value.empty() ? llvm::Optional() : value; + return value.empty() ? std::optional() : value; } // Return the default implementation for this method if it has one. -llvm::Optional InterfaceMethod::getDefaultImplementation() const { +std::optional InterfaceMethod::getDefaultImplementation() const { auto value = def->getValueAsString("defaultBody"); - return value.empty() ? llvm::Optional() : value; + return value.empty() ? std::optional() : value; } // Return the description of this method if it has one. -llvm::Optional InterfaceMethod::getDescription() const { +std::optional InterfaceMethod::getDescription() const { auto value = def->getValueAsString("description"); - return value.empty() ? llvm::Optional() : value; + return value.empty() ? std::optional() : value; } ArrayRef InterfaceMethod::getArguments() const { @@ -93,36 +93,36 @@ ArrayRef Interface::getMethods() const { return methods; } // Return the description of this method if it has one. -llvm::Optional Interface::getDescription() const { +std::optional Interface::getDescription() const { auto value = def->getValueAsString("description"); - return value.empty() ? llvm::Optional() : value; + return value.empty() ? std::optional() : value; } // Return the interfaces extra class declaration code. -llvm::Optional Interface::getExtraClassDeclaration() const { +std::optional Interface::getExtraClassDeclaration() const { auto value = def->getValueAsString("extraClassDeclaration"); - return value.empty() ? llvm::Optional() : value; + return value.empty() ? std::optional() : value; } // Return the traits extra class declaration code. -llvm::Optional Interface::getExtraTraitClassDeclaration() const { +std::optional Interface::getExtraTraitClassDeclaration() const { auto value = def->getValueAsString("extraTraitClassDeclaration"); - return value.empty() ? llvm::Optional() : value; + return value.empty() ? std::optional() : value; } // Return the shared extra class declaration code. -llvm::Optional Interface::getExtraSharedClassDeclaration() const { +std::optional Interface::getExtraSharedClassDeclaration() const { auto value = def->getValueAsString("extraSharedClassDeclaration"); - return value.empty() ? llvm::Optional() : value; + return value.empty() ? 
std::optional() : value; } // Return the body for this method if it has one. -llvm::Optional Interface::getVerify() const { +std::optional Interface::getVerify() const { // Only OpInterface supports the verify method. if (!isa(this)) return std::nullopt; auto value = def->getValueAsString("verify"); - return value.empty() ? llvm::Optional() : value; + return value.empty() ? std::optional() : value; } bool Interface::verifyWithRegions() const { diff --git a/mlir/lib/Target/LLVMIR/DebugImporter.cpp b/mlir/lib/Target/LLVMIR/DebugImporter.cpp --- a/mlir/lib/Target/LLVMIR/DebugImporter.cpp +++ b/mlir/lib/Target/LLVMIR/DebugImporter.cpp @@ -42,8 +42,7 @@ } DICompileUnitAttr DebugImporter::translateImpl(llvm::DICompileUnit *node) { - Optional emissionKind = - symbolizeDIEmissionKind(node->getEmissionKind()); + auto emissionKind = symbolizeDIEmissionKind(node->getEmissionKind()); return DICompileUnitAttr::get(context, node->getSourceLanguage(), translate(node->getFile()), StringAttr::get(context, node->getProducer()), @@ -51,7 +50,7 @@ } DICompositeTypeAttr DebugImporter::translateImpl(llvm::DICompositeType *node) { - Optional flags = symbolizeDIFlags(node->getFlags()); + auto flags = symbolizeDIFlags(node->getFlags()); SmallVector elements; for (llvm::DINode *element : node->getElements()) { assert(element && "expected a non-null element type"); @@ -102,7 +101,7 @@ } DISubprogramAttr DebugImporter::translateImpl(llvm::DISubprogram *node) { - Optional subprogramFlags = + auto subprogramFlags = symbolizeDISubprogramFlags(node->getSubprogram()->getSPFlags()); return DISubprogramAttr::get( context, translate(node->getUnit()), translate(node->getScope()), diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp --- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp @@ -27,7 +27,7 @@ namespace { static llvm::omp::ScheduleKind -convertToScheduleKind(Optional schedKind) { +convertToScheduleKind(std::optional schedKind) { if (!schedKind.has_value()) return llvm::omp::OMP_SCHEDULE_Default; switch (schedKind.value()) { @@ -398,7 +398,7 @@ static void collectReductionDecls(omp::WsLoopOp loop, SmallVectorImpl &reductions) { - Optional attr = loop.getReductions(); + auto attr = loop.getReductions(); if (!attr) return; @@ -855,7 +855,7 @@ // TODO: Handle doacross loops when the ordered clause has a parameter. bool isOrdered = loop.getOrderedVal().has_value(); - Optional scheduleModifier = loop.getScheduleModifier(); + auto scheduleModifier = loop.getScheduleModifier(); bool isSimd = loop.getSimdModifier(); ompBuilder->applyWorkshareLoop( @@ -989,11 +989,11 @@ ompBuilder->collapseLoops(ompLoc.DL, loopInfos, {}); llvm::ConstantInt *simdlen = nullptr; - if (llvm::Optional simdlenVar = loop.getSimdlen()) + if (auto simdlenVar = loop.getSimdlen()) simdlen = builder.getInt64(simdlenVar.value()); llvm::ConstantInt *safelen = nullptr; - if (llvm::Optional safelenVar = loop.getSafelen()) + if (auto safelenVar = loop.getSafelen()) safelen = builder.getInt64(safelenVar.value()); llvm::MapVector alignedVars; @@ -1009,7 +1009,7 @@ /// Convert an Atomic Ordering attribute to llvm::AtomicOrdering. 
llvm::AtomicOrdering -convertAtomicOrdering(Optional ao) { +convertAtomicOrdering(std::optional ao) { if (!ao) return llvm::AtomicOrdering::Monotonic; // Default Memory Ordering diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp --- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp @@ -119,8 +119,8 @@ unsigned preferred = dataLayout.getTypePreferredAlignment(type) * 8u; layoutStream << size << ":" << abi << ":" << preferred; - if (Optional index = extractPointerSpecValue( - entry.getValue(), PtrDLEntryPos::Index)) + if (auto index = extractPointerSpecValue(entry.getValue(), + PtrDLEntryPos::Index)) layoutStream << ":" << *index; return success(); }) @@ -687,7 +687,7 @@ addRuntimePreemptionSpecifier(op.getDsoLocal(), var); - Optional alignment = op.getAlignment(); + auto alignment = op.getAlignment(); if (alignment.has_value()) var->setAlignment(llvm::MaybeAlign(alignment.value())); @@ -783,7 +783,7 @@ /// attribute and the second string beings its value. Note that even integer /// attributes are expected to have their values expressed as strings. static LogicalResult -forwardPassthroughAttributes(Location loc, Optional attributes, +forwardPassthroughAttributes(Location loc, std::optional attributes, llvm::Function *llvmFunc) { if (!attributes) return success(); @@ -1104,7 +1104,7 @@ llvm::LLVMContext &ctx = llvmModule->getContext(); llvm::SmallVector operands; operands.push_back({}); // Placeholder for self-reference - if (Optional description = op.getDescription()) + if (auto description = op.getDescription()) operands.push_back(llvm::MDString::get(ctx, *description)); llvm::MDNode *domain = llvm::MDNode::get(ctx, operands); domain->replaceOperandWith(0, domain); // Self-reference for uniqueness @@ -1123,7 +1123,7 @@ llvm::SmallVector operands; operands.push_back({}); // Placeholder for self-reference operands.push_back(domain); - if (Optional description = op.getDescription()) + if (auto description = op.getDescription()) operands.push_back(llvm::MDString::get(ctx, *description)); llvm::MDNode *scope = llvm::MDNode::get(ctx, operands); scope->replaceOperandWith(0, scope); // Self-reference for uniqueness diff --git a/mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp b/mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp --- a/mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp +++ b/mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp @@ -79,7 +79,7 @@ int patternIndex = 0; for (pdl::PatternOp pattern : module.getOps()) { // If the pattern has a name, use that. Otherwise, generate a unique name. 
- if (Optional patternName = pattern.getSymName()) { + if (auto patternName = pattern.getSymName()) { patternNames.insert(patternName->str()); } else { std::string name; diff --git a/mlir/test/lib/Dialect/SPIRV/TestModuleCombiner.cpp b/mlir/test/lib/Dialect/SPIRV/TestModuleCombiner.cpp --- a/mlir/test/lib/Dialect/SPIRV/TestModuleCombiner.cpp +++ b/mlir/test/lib/Dialect/SPIRV/TestModuleCombiner.cpp @@ -39,8 +39,8 @@ auto listener = [](spirv::ModuleOp originalModule, StringRef oldSymbol, StringRef newSymbol) { - llvm::outs() << "[" << originalModule.getName() << "] " << oldSymbol - << " -> " << newSymbol << "\n"; + llvm::outs() << "[" << originalModule.getName().value_or("") << "] " + << oldSymbol << " -> " << newSymbol << "\n"; }; OwningOpRef combinedModule = diff --git a/mlir/test/lib/Dialect/Test/TestDialect.cpp b/mlir/test/lib/Dialect/Test/TestDialect.cpp --- a/mlir/test/lib/Dialect/Test/TestDialect.cpp +++ b/mlir/test/lib/Dialect/Test/TestDialect.cpp @@ -84,8 +84,8 @@ // Check the contents of the string attribute to see what the test alias // should be named. - Optional aliasName = - StringSwitch>(strAttr.getValue()) + auto aliasName = + StringSwitch>(strAttr.getValue()) .Case("alias_test:dot_in_name", StringRef("test.alias")) .Case("alias_test:trailing_digit", StringRef("test_alias0")) .Case("alias_test:prefixed_digit", StringRef("0_test_alias")) @@ -383,7 +383,7 @@ } ::mlir::LogicalResult FormatInferType2Op::inferReturnTypes( - ::mlir::MLIRContext *context, ::llvm::Optional<::mlir::Location> location, + ::mlir::MLIRContext *context, ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { @@ -424,7 +424,7 @@ return success(); } -Optional +std::optional TestDialect::getParseOperationHook(StringRef opName) const { if (opName == "test.dialect_custom_printer") { return ParseOpHook{[](OpAsmParser &parser, OperationState &state) { @@ -569,7 +569,8 @@ // Parsing static ParseResult parseCustomOptionalOperand( - OpAsmParser &parser, Optional &optOperand) { + OpAsmParser &parser, + std::optional &optOperand) { if (succeeded(parser.parseOptionalLParen())) { optOperand.emplace(); if (parser.parseOperand(*optOperand) || parser.parseRParen()) @@ -580,7 +581,7 @@ static ParseResult parseCustomDirectiveOperands( OpAsmParser &parser, OpAsmParser::UnresolvedOperand &operand, - Optional &optOperand, + std::optional &optOperand, SmallVectorImpl &varOperands) { if (parser.parseOperand(operand)) return failure(); @@ -633,7 +634,7 @@ } static ParseResult parseCustomDirectiveOperandsAndTypes( OpAsmParser &parser, OpAsmParser::UnresolvedOperand &operand, - Optional &optOperand, + std::optional &optOperand, SmallVectorImpl &varOperands, Type &operandType, Type &optOperandType, SmallVectorImpl &varOperandTypes) { @@ -686,7 +687,8 @@ return parser.parseOptionalAttrDict(attrs); } static ParseResult parseCustomDirectiveOptionalOperandRef( - OpAsmParser &parser, Optional &optOperand) { + OpAsmParser &parser, + std::optional &optOperand) { int64_t operandCount = 0; if (parser.parseInteger(operandCount)) return failure(); @@ -1119,7 +1121,7 @@ } LogicalResult OpWithInferTypeInterfaceOp::inferReturnTypes( - MLIRContext *, Optional location, ValueRange operands, + MLIRContext *, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType() != operands[1].getType()) { @@ -1134,7 
+1136,7 @@ // TODO: We should be able to only define either inferReturnType or // refineReturnType, currently only refineReturnType can be omitted. LogicalResult OpWithRefineTypeInterfaceOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &returnTypes) { returnTypes.clear(); @@ -1143,7 +1145,7 @@ } LogicalResult OpWithRefineTypeInterfaceOp::refineReturnTypes( - MLIRContext *, Optional location, ValueRange operands, + MLIRContext *, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &returnTypes) { if (operands[0].getType() != operands[1].getType()) { @@ -1162,8 +1164,8 @@ } LogicalResult OpWithShapedTypeInferTypeInterfaceOp::inferReturnTypeComponents( - MLIRContext *context, Optional location, ValueShapeRange operands, - DictionaryAttr attributes, RegionRange regions, + MLIRContext *context, std::optional location, + ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { // Create return type consisting of the last element of the first operand. auto operandType = operands.front().getType(); @@ -1171,8 +1173,7 @@ if (!sval) { return emitOptionalError(location, "only shaped type operands allowed"); } - int64_t dim = - sval.hasRank() ? sval.getShape().front() : ShapedType::kDynamic; + int64_t dim = sval.hasRank() ? sval.getShape().front() : ShapedType::kDynamic; auto type = IntegerType::get(context, 17); inferredReturnShapes.push_back(ShapedTypeComponents({dim}, type)); return success(); @@ -1441,13 +1442,14 @@ parser.getCurrentLocation(), result.operands); } -OperandRange RegionIfOp::getSuccessorEntryOperands(Optional index) { +OperandRange +RegionIfOp::getSuccessorEntryOperands(std::optional index) { assert(index && *index < 2 && "invalid region index"); return getOperands(); } void RegionIfOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // We always branch to the join region. if (index.has_value()) { @@ -1474,7 +1476,7 @@ // AnyCondOp //===----------------------------------------------------------------------===// -void AnyCondOp::getSuccessorRegions(Optional index, +void AnyCondOp::getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // The parent op branches into the only region, and the region branches back diff --git a/mlir/test/lib/Dialect/Test/TestDialect.td b/mlir/test/lib/Dialect/Test/TestDialect.td --- a/mlir/test/lib/Dialect/Test/TestDialect.td +++ b/mlir/test/lib/Dialect/Test/TestDialect.td @@ -31,7 +31,7 @@ void registerTypes(); // Provides a custom printing/parsing for some operations. 
- ::llvm::Optional + ::std::optional getParseOperationHook(::llvm::StringRef opName) const override; ::llvm::unique_function diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td --- a/mlir/test/lib/Dialect/Test/TestOps.td +++ b/mlir/test/lib/Dialect/Test/TestOps.td @@ -374,7 +374,7 @@ let extraClassDeclaration = [{ static mlir::LogicalResult inferReturnTypes(mlir::MLIRContext *context, - llvm::Optional<::mlir::Location> location, mlir::ValueRange operands, + std::optional<::mlir::Location> location, mlir::ValueRange operands, mlir::DictionaryAttr attributes, mlir::RegionRange regions, llvm::SmallVectorImpl &inferredReturnTypes) { inferredReturnTypes.assign({mlir::IntegerType::get(context, 16)}); @@ -2347,7 +2347,7 @@ let extraClassDeclaration = [{ static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *context, - ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands, + ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { inferredReturnTypes.assign({::mlir::IntegerType::get(context, 16)}); @@ -2370,7 +2370,7 @@ let results = (outs Variadic:$outs); let extraClassDeclaration = [{ static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *context, - ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands, + ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { ::mlir::TypeRange operandTypes = operands.getTypes(); @@ -2417,7 +2417,7 @@ let assemblyFormat = "$region attr-dict"; let extraClassDeclaration = [{ static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *context, - ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands, + ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { if (regions.empty()) @@ -2438,7 +2438,7 @@ let assemblyFormat = "`(` $a `:` type($a) `)` `(` $b `:` type($b) `)` attr-dict"; let extraClassDeclaration = [{ static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *context, - ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands, + ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { FormatInferTypeVariadicOperandsOpAdaptor adaptor(operands, attributes); @@ -2583,7 +2583,7 @@ return getBody(2)->getArguments(); } ::mlir::OperandRange getSuccessorEntryOperands( - ::llvm::Optional index); + ::std::optional index); }]; let hasCustomAssemblyFormat = 1; } @@ -2646,7 +2646,7 @@ let extraClassDeclaration = [{ static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *, - ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands, + ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { inferredReturnTypes.assign({operands[0].getType()}); diff --git a/mlir/test/mlir-tblgen/enums-gen.td b/mlir/test/mlir-tblgen/enums-gen.td --- a/mlir/test/mlir-tblgen/enums-gen.td +++ 
b/mlir/test/mlir-tblgen/enums-gen.td @@ -27,9 +27,9 @@ // DECL: Bit3 = 8, // DECL: } -// DECL: ::llvm::Optional symbolizeMyBitEnum(uint32_t); +// DECL: ::std::optional symbolizeMyBitEnum(uint32_t); // DECL: std::string stringifyMyBitEnum(MyBitEnum); -// DECL: ::llvm::Optional symbolizeMyBitEnum(::llvm::StringRef); +// DECL: ::std::optional symbolizeMyBitEnum(::llvm::StringRef); // DECL: struct FieldParser<::MyBitEnum, ::MyBitEnum> { // DECL: template @@ -40,7 +40,7 @@ // DECL: if (failed(parser.parseOptionalKeywordOrString(&enumKeyword))) // DECL: return parser.emitError(loc, "expected keyword for An example bit enum"); // DECL: // Symbolize the keyword. -// DECL: if (::llvm::Optional<::MyBitEnum> attr = ::symbolizeEnum<::MyBitEnum>(enumKeyword)) +// DECL: if (::std::optional<::MyBitEnum> attr = ::symbolizeEnum<::MyBitEnum>(enumKeyword)) // DECL: return *attr; // DECL: return parser.emitError(loc, "invalid An example bit enum specification: ") << enumKeyword; // DECL: } @@ -66,7 +66,7 @@ // DEF: if (2u == (2u & val)) // DEF-NEXT: push_back("Bit1") -// DEF-LABEL: ::llvm::Optional symbolizeMyBitEnum(::llvm::StringRef str) +// DEF-LABEL: ::std::optional symbolizeMyBitEnum(::llvm::StringRef str) // DEF: if (str == "None") return MyBitEnum::None; // DEF: .Case("tagged", 1) // DEF: .Case("Bit1", 2) diff --git a/mlir/test/mlir-tblgen/op-attribute.td b/mlir/test/mlir-tblgen/op-attribute.td --- a/mlir/test/mlir-tblgen/op-attribute.td +++ b/mlir/test/mlir-tblgen/op-attribute.td @@ -110,9 +110,9 @@ // DEF: some-attr-kind AOp::getCAttrAttr() // DEF-NEXT: ::mlir::impl::getAttrFromSortedRange((*this)->getAttrs().begin() + 1, (*this)->getAttrs().end() - 0, getCAttrAttrName()).dyn_cast_or_null() -// DEF: ::llvm::Optional AOp::getCAttr() { +// DEF: ::std::optional AOp::getCAttr() { // DEF-NEXT: auto attr = getCAttrAttr() -// DEF-NEXT: return attr ? ::llvm::Optional(attr.some-convert-from-storage()) : (::std::nullopt); +// DEF-NEXT: return attr ? ::std::optional(attr.some-convert-from-storage()) : (::std::nullopt); // DEF: some-attr-kind AOp::getDAttrAttr() // DEF-NEXT: ::mlir::impl::getAttrFromSortedRange((*this)->getAttrs().begin() + 1, (*this)->getAttrs().end() - 0, getDAttrAttrName()).dyn_cast_or_null() @@ -135,7 +135,7 @@ // DEF-NEXT: (*this)->setAttr(getBAttrAttrName(), some-const-builder-call(::mlir::Builder((*this)->getContext()), attrValue)); // DEF: void AOp::setCAttrAttr(some-attr-kind attr) { // DEF-NEXT: (*this)->setAttr(getCAttrAttrName(), attr); -// DEF: void AOp::setCAttr(::llvm::Optional attrValue) { +// DEF: void AOp::setCAttr(::std::optional attrValue) { // DEF-NEXT: if (attrValue) // DEF-NEXT: return (*this)->setAttr(getCAttrAttrName(), some-const-builder-call(::mlir::Builder((*this)->getContext()), *attrValue)); // DEF-NEXT: (*this)->removeAttr(getCAttrAttrName()); @@ -247,9 +247,9 @@ // DEF: some-attr-kind AgetOp::getCAttrAttr() // DEF-NEXT: return ::mlir::impl::getAttrFromSortedRange({{.*}}).dyn_cast_or_null() -// DEF: ::llvm::Optional AgetOp::getCAttr() { +// DEF: ::std::optional AgetOp::getCAttr() { // DEF-NEXT: auto attr = getCAttrAttr() -// DEF-NEXT: return attr ? ::llvm::Optional(attr.some-convert-from-storage()) : (::std::nullopt); +// DEF-NEXT: return attr ? 
::std::optional(attr.some-convert-from-storage()) : (::std::nullopt); // Test setter methods // --- diff --git a/mlir/test/mlir-tblgen/op-decl-and-defs.td b/mlir/test/mlir-tblgen/op-decl-and-defs.td --- a/mlir/test/mlir-tblgen/op-decl-and-defs.td +++ b/mlir/test/mlir-tblgen/op-decl-and-defs.td @@ -61,7 +61,7 @@ // CHECK: ::mlir::IntegerAttr getAttr1Attr(); // CHECK: uint32_t getAttr1(); // CHECK: ::mlir::FloatAttr getAttr2Attr(); -// CHECK: ::llvm::Optional< ::llvm::APFloat > getAttr2(); +// CHECK: ::std::optional< ::llvm::APFloat > getAttr2(); // CHECK: ::mlir::Region &getSomeRegion(); // CHECK: ::mlir::RegionRange getSomeRegions(); // CHECK: private: @@ -88,7 +88,7 @@ // CHECK: ::mlir::IntegerAttr getAttr1Attr() // CHECK: uint32_t getAttr1(); // CHECK: ::mlir::FloatAttr getAttr2Attr() -// CHECK: ::llvm::Optional< ::llvm::APFloat > getAttr2(); +// CHECK: ::std::optional< ::llvm::APFloat > getAttr2(); // CHECK: ::mlir::Attribute removeAttr2Attr(); // CHECK: static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value val); // CHECK: static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, int integer = 0); diff --git a/mlir/test/python/python_test_ops.td b/mlir/test/python/python_test_ops.td --- a/mlir/test/python/python_test_ops.td +++ b/mlir/test/python/python_test_ops.td @@ -71,7 +71,7 @@ let extraClassDeclaration = [{ static ::mlir::LogicalResult inferReturnTypes( - ::mlir::MLIRContext *context, ::llvm::Optional<::mlir::Location> location, + ::mlir::MLIRContext *context, ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { diff --git a/mlir/tools/mlir-tblgen/DialectGen.cpp b/mlir/tools/mlir-tblgen/DialectGen.cpp --- a/mlir/tools/mlir-tblgen/DialectGen.cpp +++ b/mlir/tools/mlir-tblgen/DialectGen.cpp @@ -18,7 +18,6 @@ #include "mlir/TableGen/Interfaces.h" #include "mlir/TableGen/Operator.h" #include "mlir/TableGen/Trait.h" -#include "llvm/ADT/Optional.h" #include "llvm/ADT/Sequence.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Support/CommandLine.h" @@ -56,7 +55,8 @@ DialectFilterIterator(records.end(), records.end(), filterFn)}; } -Optional tblgen::findDialectToGenerate(ArrayRef dialects) { +std::optional +tblgen::findDialectToGenerate(ArrayRef dialects) { if (dialects.empty()) { llvm::errs() << "no dialect was found\n"; return std::nullopt; @@ -216,8 +216,7 @@ os << regionResultAttrVerifierDecl; if (dialect.hasOperationInterfaceFallback()) os << operationInterfaceFallbackDecl; - if (llvm::Optional extraDecl = - dialect.getExtraClassDeclaration()) + if (auto extraDecl = dialect.getExtraClassDeclaration()) os << *extraDecl; // End the dialect decl. @@ -237,7 +236,7 @@ return false; SmallVector dialects(dialectDefs.begin(), dialectDefs.end()); - Optional dialect = findDialectToGenerate(dialects); + auto dialect = findDialectToGenerate(dialects); if (!dialect) return true; emitDialectDecl(*dialect, os); @@ -255,7 +254,7 @@ /// initialize(). /// {2}: The dialect parent class. 
static const char *const dialectConstructorStr = R"( -{0}::{0}(::mlir::MLIRContext *context) +{0}::{0}(::mlir::MLIRContext *context) : ::mlir::{2}(getDialectNamespace(), context, ::mlir::TypeID::get<{0}>()) {{ {1} initialize(); @@ -308,7 +307,7 @@ return false; SmallVector dialects(dialectDefs.begin(), dialectDefs.end()); - Optional dialect = findDialectToGenerate(dialects); + auto dialect = findDialectToGenerate(dialects); if (!dialect) return true; emitDialectDef(*dialect, os); diff --git a/mlir/tools/mlir-tblgen/DialectGenUtilities.h b/mlir/tools/mlir-tblgen/DialectGenUtilities.h --- a/mlir/tools/mlir-tblgen/DialectGenUtilities.h +++ b/mlir/tools/mlir-tblgen/DialectGenUtilities.h @@ -17,7 +17,7 @@ /// Find the dialect selected by the user to generate for. Returns None if no /// dialect was found, or if more than one potential dialect was found. -Optional findDialectToGenerate(ArrayRef dialects); +std::optional findDialectToGenerate(ArrayRef dialects); } // namespace tblgen } // namespace mlir diff --git a/mlir/tools/mlir-tblgen/EnumsGen.cpp b/mlir/tools/mlir-tblgen/EnumsGen.cpp --- a/mlir/tools/mlir-tblgen/EnumsGen.cpp +++ b/mlir/tools/mlir-tblgen/EnumsGen.cpp @@ -97,7 +97,7 @@ return parser.emitError(loc, "expected keyword for {2}"); // Symbolize the keyword. - if (::llvm::Optional<{0}> attr = {1}::symbolizeEnum<{0}>(enumKeyword)) + if (::std::optional<{0}> attr = {1}::symbolizeEnum<{0}>(enumKeyword)) return *attr; return parser.emitError(loc, "invalid {2} specification: ") << enumKeyword; } @@ -228,7 +228,7 @@ // Returns the EnumAttrCase whose value is zero if exists; returns std::nullopt // otherwise. -static llvm::Optional +static std::optional getAllBitsUnsetCase(llvm::ArrayRef cases) { for (auto attrCase : cases) { if (attrCase.getValue() == 0) @@ -382,9 +382,9 @@ StringRef strToSymFnName = enumAttr.getStringToSymbolFnName(); auto enumerants = enumAttr.getAllCases(); - os << formatv("::llvm::Optional<{0}> {1}(::llvm::StringRef str) {{\n", + os << formatv("::std::optional<{0}> {1}(::llvm::StringRef str) {{\n", enumName, strToSymFnName); - os << formatv(" return ::llvm::StringSwitch<::llvm::Optional<{0}>>(str)\n", + os << formatv(" return ::llvm::StringSwitch<::std::optional<{0}>>(str)\n", enumName); for (const auto &enumerant : enumerants) { auto symbol = enumerant.getSymbol(); @@ -406,7 +406,7 @@ auto enumerants = enumAttr.getAllCases(); auto allBitsUnsetCase = getAllBitsUnsetCase(enumerants); - os << formatv("::llvm::Optional<{0}> {1}(::llvm::StringRef str) {{\n", + os << formatv("::std::optional<{0}> {1}(::llvm::StringRef str) {{\n", enumName, strToSymFnName); if (allBitsUnsetCase) { @@ -426,7 +426,7 @@ // Convert each symbol to the bit ordinal and set the corresponding bit. os << formatv(" auto bit = " - "llvm::StringSwitch<::llvm::Optional<{0}>>(symbol.trim())\n", + "llvm::StringSwitch<::std::optional<{0}>>(symbol.trim())\n", underlyingType); for (const auto &enumerant : enumerants) { // Skip the special enumerant for None. @@ -457,7 +457,7 @@ })) return; - os << formatv("::llvm::Optional<{0}> {1}({2} value) {{\n", enumName, + os << formatv("::std::optional<{0}> {1}({2} value) {{\n", enumName, underlyingToSymFnName, underlyingType.empty() ? 
std::string("unsigned") : underlyingType) @@ -540,7 +540,7 @@ auto enumerants = enumAttr.getAllCases(); auto allBitsUnsetCase = getAllBitsUnsetCase(enumerants); - os << formatv("::llvm::Optional<{0}> {1}({2} value) {{\n", enumName, + os << formatv("::std::optional<{0}> {1}({2} value) {{\n", enumName, underlyingToSymFnName, underlyingType); if (allBitsUnsetCase) { os << " // Special case for all bits unset.\n"; @@ -580,11 +580,11 @@ return enumerant.getValue() >= 0; })) { os << formatv( - "::llvm::Optional<{0}> {1}({2});\n", enumName, underlyingToSymFnName, + "::std::optional<{0}> {1}({2});\n", enumName, underlyingToSymFnName, underlyingType.empty() ? std::string("unsigned") : underlyingType); } os << formatv("{2} {1}({0});\n", enumName, symToStrFnName, symToStrFnRetType); - os << formatv("::llvm::Optional<{0}> {1}(::llvm::StringRef);\n", enumName, + os << formatv("::std::optional<{0}> {1}(::llvm::StringRef);\n", enumName, strToSymFnName); if (enumAttr.isBitEnum()) { @@ -606,10 +606,10 @@ // specified by the user. const char *const symbolizeEnumStr = R"( template -::llvm::Optional symbolizeEnum(::llvm::StringRef); +::std::optional symbolizeEnum(::llvm::StringRef); template <> -inline ::llvm::Optional<{0}> symbolizeEnum<{0}>(::llvm::StringRef str) { +inline ::std::optional<{0}> symbolizeEnum<{0}>(::llvm::StringRef str) { return {1}(str); } )"; diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp --- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp +++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp @@ -216,7 +216,7 @@ /// Whether the attribute is required. bool isRequired; /// The ODS attribute constraint. Not present for implicit attributes. - Optional constraint; + std::optional constraint; /// The number of required attributes less than this attribute. unsigned lowerBound = 0; /// The number of required attributes greater than this attribute. @@ -731,7 +731,7 @@ StringRef varName) { std::string condition = attr.getPredicate().getCondition(); - Optional constraintFn; + std::optional constraintFn; if (emitHelper.isEmittingForOp() && (constraintFn = staticVerifierEmitter.getAttrConstraintFn(attr))) { body << formatv(verifyAttrUnique, *constraintFn, varName, attrName); @@ -1132,7 +1132,7 @@ method = createMethod("bool"); else if (isOptional) method = - createMethod("::llvm::Optional<" + baseAttr.getReturnType() + ">"); + createMethod("::std::optional<" + baseAttr.getReturnType() + ">"); else method = createMethod(attr.getReturnType()); if (!method) @@ -1901,12 +1901,12 @@ for (unsigned i = 0, e = params.size(); i < e; ++i) { // If no name is provided, generate one. - Optional paramName = params[i].getName(); + auto paramName = params[i].getName(); std::string name = paramName ? paramName->str() : "odsArg" + std::to_string(i); StringRef defaultValue; - if (Optional defaultParamValue = params[i].getDefaultValue()) + if (auto defaultParamValue = params[i].getDefaultValue()) defaultValue = *defaultParamValue; arguments.emplace_back(params[i].getCppType(), std::move(name), @@ -1921,7 +1921,7 @@ for (const Builder &builder : op.getBuilders()) { SmallVector arguments = getBuilderSignature(builder); - Optional body = builder.getBody(); + auto body = builder.getBody(); auto properties = body ? 
Method::Static : Method::StaticDeclaration; auto *method = opClass.addMethod("void", "build", properties, std::move(arguments)); @@ -2960,7 +2960,7 @@ adaptor.addField("::mlir::ValueRange", "odsOperands"); adaptor.addField("::mlir::DictionaryAttr", "odsAttrs"); adaptor.addField("::mlir::RegionRange", "odsRegions"); - adaptor.addField("::llvm::Optional<::mlir::OperationName>", "odsOpName"); + adaptor.addField("::std::optional<::mlir::OperationName>", "odsOpName"); const auto *attrSizedOperands = op.getTrait("::m::OpTrait::AttrSizedOperandSegments"); diff --git a/mlir/tools/mlir-tblgen/OpDocGen.cpp b/mlir/tools/mlir-tblgen/OpDocGen.cpp --- a/mlir/tools/mlir-tblgen/OpDocGen.cpp +++ b/mlir/tools/mlir-tblgen/OpDocGen.cpp @@ -363,7 +363,7 @@ std::vector dialectDefs = recordKeeper.getAllDerivedDefinitionsIfDefined("Dialect"); SmallVector dialects(dialectDefs.begin(), dialectDefs.end()); - Optional dialect = findDialectToGenerate(dialects); + auto dialect = findDialectToGenerate(dialects); if (!dialect) return true; diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp --- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp +++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp @@ -60,8 +60,8 @@ /// Return the constant builder call for the type of this attribute, or None /// if it doesn't have one. llvm::Optional getTypeBuilder() const { - llvm::Optional attrType = var->attr.getValueType(); - return attrType ? attrType->getBuilderCall() : std::nullopt; + auto attrType = var->attr.getValueType(); + return attrType ? attrType->getBuilderCall() : llvm::None; } /// Return if this attribute refers to a UnitAttr. @@ -252,7 +252,7 @@ TypeResolution() = default; /// Get the index into the buildable types for this type, or None. - Optional getBuilderIdx() const { return builderIdx; } + std::optional getBuilderIdx() const { return builderIdx; } void setBuilderIdx(int idx) { builderIdx = idx; } /// Get the variable this type is resolved to, or nullptr. @@ -264,10 +264,10 @@ return resolver.dyn_cast(); } /// Get the transformer for the type of the variable, or None. - Optional getVarTransformer() const { + std::optional getVarTransformer() const { return variableTransformer; } - void setResolver(ConstArgument arg, Optional transformer) { + void setResolver(ConstArgument arg, std::optional transformer) { resolver = arg; variableTransformer = transformer; assert(getVariable() || getAttribute()); @@ -276,13 +276,13 @@ private: /// If the type is resolved with a buildable type, this is the index into /// 'buildableTypes' in the parent format. - Optional builderIdx; + std::optional builderIdx; /// If the type is resolved based upon another operand or result, this is /// the variable or the attribute that this type is resolved to. ConstArgument resolver; /// If the type is resolved based upon another operand or result, this is /// a transformer to apply to the variable when resolving. - Optional variableTransformer; + std::optional variableTransformer; }; /// The context in which an element is generated. @@ -944,7 +944,7 @@ << "OperandsLoc = parser.getCurrentLocation();\n"; if (var->isOptional()) { body << llvm::formatv( - " ::llvm::Optional<::mlir::OpAsmParser::UnresolvedOperand> " + " ::std::optional<::mlir::OpAsmParser::UnresolvedOperand> " "{0}Operand;\n", var->name); } else if (var->isVariadicOfVariadic()) { @@ -973,7 +973,7 @@ body << llvm::formatv( " {0} {1}Operand = {1}Operands.empty() ? 
{0}() : " "{1}Operands[0];\n", - "::llvm::Optional<::mlir::OpAsmParser::UnresolvedOperand>", + "::std::optional<::mlir::OpAsmParser::UnresolvedOperand>", operand->getVar()->name); } else if (auto *type = dyn_cast(input)) { @@ -1102,7 +1102,7 @@ // If this attribute has a buildable type, use that when parsing the // attribute. std::string attrTypeStr; - if (Optional typeBuilder = attr->getTypeBuilder()) { + if (auto typeBuilder = attr->getTypeBuilder()) { llvm::raw_string_ostream os(attrTypeStr); os << tgfmt(*typeBuilder, &attrTypeCtx); } else { @@ -1372,7 +1372,7 @@ FmtContext verifierFCtx; for (TypeResolution &resolver : llvm::concat(resultTypes, operandTypes)) { - Optional transformer = resolver.getVarTransformer(); + auto transformer = resolver.getVarTransformer(); if (!transformer) continue; // Ensure that we don't verify the same variables twice. @@ -1405,10 +1405,10 @@ // Emit the code necessary for a type resolver. auto emitTypeResolver = [&](TypeResolution &resolver, StringRef curVar) { - if (Optional val = resolver.getBuilderIdx()) { + if (auto val = resolver.getBuilderIdx()) { body << "odsBuildableType" << *val; } else if (const NamedTypeConstraint *var = resolver.getVariable()) { - if (Optional tform = resolver.getVarTransformer()) { + if (auto tform = resolver.getVarTransformer()) { FmtContext fmtContext; fmtContext.addSubst("_ctxt", "parser.getContext()"); if (var->isVariadic()) @@ -1422,7 +1422,7 @@ body << "[0]"; } } else if (const NamedAttribute *attr = resolver.getAttribute()) { - if (Optional tform = resolver.getVarTransformer()) + if (auto tform = resolver.getVarTransformer()) body << tgfmt(*tform, &FmtContext().withSelf(attr->name + "Attr.getType()")); else @@ -2246,7 +2246,7 @@ /// properly resolve the type of a variable. struct TypeResolutionInstance { ConstArgument resolver; - Optional transformer; + std::optional transformer; }; /// Verify the state of operation attributes within the format. @@ -2472,7 +2472,7 @@ return success(); } // Return None to indicate that we reached the end. - return std::nullopt; + return llvm::None; } /// For the given elements, check whether any attributes are followed by a colon @@ -2751,9 +2751,9 @@ // Mark this value as the type resolver for the other variables. for (unsigned j = 0; j != i; ++j) - variableTyResolver[values[j]] = {arg, std::nullopt}; + variableTyResolver[values[j]] = {arg, llvm::None}; for (unsigned j = i + 1; j != e; ++j) - variableTyResolver[values[j]] = {arg, std::nullopt}; + variableTyResolver[values[j]] = {arg, llvm::None}; } } @@ -2774,11 +2774,11 @@ // Set the resolvers for each operand and result. 
for (unsigned i = 0, e = op.getNumOperands(); i != e; ++i) if (!seenOperandTypes.test(i)) - variableTyResolver[op.getOperand(i).name] = {resolver, llvm::None}; + variableTyResolver[op.getOperand(i).name] = {resolver, std::nullopt}; if (includeResults) { for (unsigned i = 0, e = op.getNumResults(); i != e; ++i) if (!seenResultTypes.test(i)) - variableTyResolver[op.getResultName(i)] = {resolver, llvm::None}; + variableTyResolver[op.getResultName(i)] = {resolver, std::nullopt}; } } diff --git a/mlir/tools/mlir-tblgen/OpInterfacesGen.cpp b/mlir/tools/mlir-tblgen/OpInterfacesGen.cpp --- a/mlir/tools/mlir-tblgen/OpInterfacesGen.cpp +++ b/mlir/tools/mlir-tblgen/OpInterfacesGen.cpp @@ -160,7 +160,7 @@ static void emitInterfaceMethodDoc(const InterfaceMethod &method, raw_ostream &os, StringRef prefix = "") { - if (Optional description = method.getDescription()) + if (auto description = method.getDescription()) tblgen::emitDescriptionComment(*description, os, prefix); } @@ -305,7 +305,7 @@ os << " {\n "; // Check for a provided body to the function. - if (Optional body = method.getBody()) { + if (auto body = method.getBody()) { if (method.isStatic()) os << body->trim(); else @@ -494,10 +494,9 @@ } // Emit any extra declarations. - if (Optional extraDecls = interface.getExtraClassDeclaration()) + if (auto extraDecls = interface.getExtraClassDeclaration()) os << *extraDecls << "\n"; - if (Optional extraDecls = - interface.getExtraSharedClassDeclaration()) + if (auto extraDecls = interface.getExtraSharedClassDeclaration()) os << tblgen::tgfmt(*extraDecls, &extraDeclsFmt); os << "};\n"; diff --git a/mlir/tools/mlir-tblgen/RewriterGen.cpp b/mlir/tools/mlir-tblgen/RewriterGen.cpp --- a/mlir/tools/mlir-tblgen/RewriterGen.cpp +++ b/mlir/tools/mlir-tblgen/RewriterGen.cpp @@ -1743,7 +1743,7 @@ StringRef StaticMatcherHelper::getVerifierName(DagLeaf leaf) { if (leaf.isAttrMatcher()) { - Optional constraint = + auto constraint = staticVerifierEmitter.getAttrConstraintFn(leaf.getAsConstraint()); assert(constraint && "attribute constraint was not uniqued"); return *constraint; diff --git a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp --- a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp +++ b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp @@ -346,7 +346,7 @@ for (const auto &classCasePair : classCaseMap) { Availability avail = classCasePair.getValue().front().second; - os << formatv("llvm::Optional<{0}> {1}({2} value) {{\n", + os << formatv("std::optional<{0}> {1}({2} value) {{\n", avail.getMergeInstanceType(), avail.getQueryFnName(), enumName); @@ -388,7 +388,7 @@ for (const auto &classCasePair : classCaseMap) { Availability avail = classCasePair.getValue().front().second; - os << formatv("llvm::Optional<{0}> {1}({2} value) {{\n", + os << formatv("std::optional<{0}> {1}({2} value) {{\n", avail.getMergeInstanceType(), avail.getQueryFnName(), enumName); @@ -433,7 +433,7 @@ StringRef className = avail.getClass(); if (handledClasses.count(className)) continue; - os << formatv("llvm::Optional<{0}> {1}({2} value);\n", + os << formatv("std::optional<{0}> {1}({2} value);\n", avail.getMergeInstanceType(), avail.getQueryFnName(), enumName); handledClasses.insert(className); diff --git a/mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp b/mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp --- a/mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp +++ b/mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp @@ -37,7 +37,7 @@ } // Regions have no successors.
- void getSuccessorRegions(Optional index, + void getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) {} }; @@ -52,7 +52,7 @@ static StringRef getOperationName() { return "cftest.loop_regions_op"; } - void getSuccessorRegions(Optional index, + void getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { if (index) { @@ -76,7 +76,7 @@ return "cftest.double_loop_regions_op"; } - void getSuccessorRegions(Optional index, + void getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { if (index.has_value()) { @@ -95,7 +95,7 @@ static StringRef getOperationName() { return "cftest.sequential_regions_op"; } // Region 0 has Region 1 as a successor. - void getSuccessorRegions(Optional index, + void getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { if (index == 0u) { diff --git a/mlir/unittests/TableGen/EnumsGenTest.cpp b/mlir/unittests/TableGen/EnumsGenTest.cpp --- a/mlir/unittests/TableGen/EnumsGenTest.cpp +++ b/mlir/unittests/TableGen/EnumsGenTest.cpp @@ -54,8 +54,8 @@ } TEST(EnumsGenTest, GeneratedStringToSymbolFn) { - EXPECT_EQ(llvm::Optional(FooEnum::CaseA), ConvertToEnum("CaseA")); - EXPECT_EQ(llvm::Optional(FooEnum::CaseB), ConvertToEnum("CaseB")); + EXPECT_EQ(std::optional(FooEnum::CaseA), ConvertToEnum("CaseA")); + EXPECT_EQ(std::optional(FooEnum::CaseB), ConvertToEnum("CaseB")); EXPECT_EQ(std::nullopt, ConvertToEnum("X")); }