diff --git a/flang/include/flang/Optimizer/Dialect/FIROps.td b/flang/include/flang/Optimizer/Dialect/FIROps.td --- a/flang/include/flang/Optimizer/Dialect/FIROps.td +++ b/flang/include/flang/Optimizer/Dialect/FIROps.td @@ -483,15 +483,15 @@ // The number of blocks that may be branched to unsigned getNumDest() { return (*this)->getNumSuccessors(); } - llvm::Optional getCompareOperands(unsigned cond); - llvm::Optional> getCompareOperands( + std::optional getCompareOperands(unsigned cond); + std::optional> getCompareOperands( llvm::ArrayRef operands, unsigned cond); - llvm::Optional getCompareOperands( + std::optional getCompareOperands( mlir::ValueRange operands, unsigned cond); - llvm::Optional> getSuccessorOperands( + std::optional> getSuccessorOperands( llvm::ArrayRef operands, unsigned cond); - llvm::Optional getSuccessorOperands( + std::optional getSuccessorOperands( mlir::ValueRange operands, unsigned cond); // Helper function to deal with Optional operand forms @@ -2426,16 +2426,16 @@ let builders = [ OpBuilder<(ins "fir::CharacterType":$inType, "llvm::StringRef":$value, - CArg<"llvm::Optional", "{}">:$len)>, + CArg<"std::optional", "{}">:$len)>, OpBuilder<(ins "fir::CharacterType":$inType, "llvm::ArrayRef":$xlist, - CArg<"llvm::Optional", "{}">:$len)>, + CArg<"std::optional", "{}">:$len)>, OpBuilder<(ins "fir::CharacterType":$inType, "llvm::ArrayRef":$xlist, - CArg<"llvm::Optional", "{}">:$len)>, + CArg<"std::optional", "{}">:$len)>, OpBuilder<(ins "fir::CharacterType":$inType, "llvm::ArrayRef":$xlist, - CArg<"llvm::Optional", "{}">:$len)>]; + CArg<"std::optional", "{}">:$len)>]; let extraClassDeclaration = [{ static constexpr const char *size() { return "size"; } diff --git a/flang/include/flang/Optimizer/Dialect/FortranVariableInterface.td b/flang/include/flang/Optimizer/Dialect/FortranVariableInterface.td --- a/flang/include/flang/Optimizer/Dialect/FortranVariableInterface.td +++ b/flang/include/flang/Optimizer/Dialect/FortranVariableInterface.td @@ -36,7 +36,7 @@ >, InterfaceMethod< /*desc=*/"Get Fortran attributes", - /*retTy=*/"llvm::Optional", + /*retTy=*/"std::optional", /*methodName=*/"getFortranAttrs", /*args=*/(ins), /*methodBody=*/[{}], @@ -91,7 +91,7 @@ } /// Return the rank of the entity if it is known at compile time. - llvm::Optional getRank() { + std::optional getRank() { if (auto sequenceType = getElementOrSequenceType().dyn_cast()) { if (sequenceType.hasUnknownShape()) diff --git a/flang/include/flang/Optimizer/HLFIR/HLFIROps.td b/flang/include/flang/Optimizer/HLFIR/HLFIROps.td --- a/flang/include/flang/Optimizer/HLFIR/HLFIROps.td +++ b/flang/include/flang/Optimizer/HLFIR/HLFIROps.td @@ -202,7 +202,7 @@ "llvm::StringRef":$component, "mlir::Value":$component_shape, "llvm::ArrayRef>>":$subscripts, CArg<"mlir::ValueRange", "{}">:$substring, - CArg<"llvm::Optional", "{}">:$complex_part, + CArg<"std::optional", "{}">:$complex_part, CArg<"mlir::Value", "{}">:$shape, CArg<"mlir::ValueRange", "{}">:$typeparams, CArg<"fir::FortranVariableFlagsAttr", "{}">:$fortran_attrs)>, diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp --- a/flang/lib/Frontend/FrontendActions.cpp +++ b/flang/lib/Frontend/FrontendActions.cpp @@ -552,7 +552,7 @@ } // Translate to LLVM IR - llvm::Optional moduleName = mlirModule->getName(); + std::optional moduleName = mlirModule->getName(); llvmModule = mlir::translateModuleToLLVMIR( *mlirModule, *llvmCtx, moduleName ? 
*moduleName : "FIRModule"); diff --git a/flang/lib/Lower/ConvertExprToHLFIR.cpp b/flang/lib/Lower/ConvertExprToHLFIR.cpp --- a/flang/lib/Lower/ConvertExprToHLFIR.cpp +++ b/flang/lib/Lower/ConvertExprToHLFIR.cpp @@ -99,7 +99,7 @@ else resultType = fir::ReferenceType::get(resultValueType); - llvm::Optional complexPart; + std::optional complexPart; llvm::SmallVector substring; auto designate = getBuilder().create( getLoc(), resultType, partInfo.base.getBase(), "", diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp --- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp +++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp @@ -1618,7 +1618,7 @@ mlir::Value base, mlir::Value outerOffset, mlir::ValueRange cstInteriorIndices, mlir::ValueRange componentIndices, - llvm::Optional substringOffset) const { + std::optional substringOffset) const { llvm::SmallVector gepArgs{outerOffset}; mlir::Type resultTy = base.getType().cast().getElementType(); @@ -1907,7 +1907,7 @@ if (hasSlice || hasSubcomp || hasSubstr) { // Shift the base address. llvm::SmallVector fieldIndices; - llvm::Optional substringOffset; + std::optional substringOffset; if (hasSubcomp) getSubcomponentIndices(xbox, xbox.getMemref(), operands, fieldIndices); if (hasSubstr) @@ -2047,7 +2047,7 @@ base = rewriter.create(loc, llvmElePtrTy, base); llvm::SmallVector fieldIndices; - llvm::Optional substringOffset; + std::optional substringOffset; if (!rebox.getSubcomponent().empty()) getSubcomponentIndices(rebox, rebox.getBox(), operands, fieldIndices); if (!rebox.getSubstr().empty()) @@ -2725,7 +2725,7 @@ if (hasKnownShape && hasSubdimension) { offs.push_back(0); } - llvm::Optional dims; + std::optional dims; llvm::SmallVector arrIdx; for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) { mlir::Value nxtOpnd = operands[i]; @@ -2930,7 +2930,7 @@ // TODO: String comparaison should be avoided. Replace linkName with an // enumeration. 
mlir::LLVM::Linkage - convertLinkage(llvm::Optional optLinkage) const { + convertLinkage(std::optional optLinkage) const { if (optLinkage) { auto name = *optLinkage; if (name == "internal") @@ -3002,7 +3002,7 @@ }; static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest, - llvm::Optional destOps, + std::optional destOps, mlir::ConversionPatternRewriter &rewriter, mlir::Block *newBlock) { if (destOps) @@ -3013,7 +3013,7 @@ } template -static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional destOps, +static void genBrOp(A caseOp, mlir::Block *dest, std::optional destOps, mlir::ConversionPatternRewriter &rewriter) { if (destOps) rewriter.replaceOpWithNewOp(caseOp, *destOps, dest); @@ -3023,7 +3023,7 @@ static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp, mlir::Block *dest, - llvm::Optional destOps, + std::optional destOps, mlir::ConversionPatternRewriter &rewriter) { auto *thisBlock = rewriter.getInsertionBlock(); auto *newBlock = createBlock(rewriter, dest); @@ -3069,9 +3069,9 @@ auto loc = caseOp.getLoc(); for (unsigned t = 0; t != conds; ++t) { mlir::Block *dest = caseOp.getSuccessor(t); - llvm::Optional destOps = + std::optional destOps = caseOp.getSuccessorOperands(adaptor.getOperands(), t); - llvm::Optional cmpOps = + std::optional cmpOps = *caseOp.getCompareOperands(adaptor.getOperands(), t); mlir::Value caseArg = *(cmpOps.value().begin()); mlir::Attribute attr = cases[t]; diff --git a/flang/lib/Optimizer/Dialect/FIROps.cpp b/flang/lib/Optimizer/Dialect/FIROps.cpp --- a/flang/lib/Optimizer/Dialect/FIROps.cpp +++ b/flang/lib/Optimizer/Dialect/FIROps.cpp @@ -2529,11 +2529,11 @@ mlir::MutableOperandRange::OperandSegment(pos, targetOffsetAttr)); } -llvm::Optional fir::SelectOp::getCompareOperands(unsigned) { +std::optional fir::SelectOp::getCompareOperands(unsigned) { return {}; } -llvm::Optional> +std::optional> fir::SelectOp::getCompareOperands(llvm::ArrayRef, unsigned) { return {}; } @@ -2543,7 +2543,7 @@ oper, getTargetArgsMutable(), getTargetOffsetAttr())); } -llvm::Optional> +std::optional> fir::SelectOp::getSuccessorOperands(llvm::ArrayRef operands, unsigned oper) { auto a = @@ -2553,7 +2553,7 @@ return {getSubOperands(oper, getSubOperands(2, operands, segments), a)}; } -llvm::Optional +std::optional fir::SelectOp::getSuccessorOperands(mlir::ValueRange operands, unsigned oper) { auto a = (*this)->getAttrOfType(getTargetOffsetAttr()); @@ -2572,14 +2572,14 @@ // SelectCaseOp //===----------------------------------------------------------------------===// -llvm::Optional +std::optional fir::SelectCaseOp::getCompareOperands(unsigned cond) { auto a = (*this)->getAttrOfType(getCompareOffsetAttr()); return {getSubOperands(cond, getCompareArgs(), a)}; } -llvm::Optional> +std::optional> fir::SelectCaseOp::getCompareOperands(llvm::ArrayRef operands, unsigned cond) { auto a = @@ -2589,7 +2589,7 @@ return {getSubOperands(cond, getSubOperands(1, operands, segments), a)}; } -llvm::Optional +std::optional fir::SelectCaseOp::getCompareOperands(mlir::ValueRange operands, unsigned cond) { auto a = @@ -2604,7 +2604,7 @@ oper, getTargetArgsMutable(), getTargetOffsetAttr())); } -llvm::Optional> +std::optional> fir::SelectCaseOp::getSuccessorOperands(llvm::ArrayRef operands, unsigned oper) { auto a = @@ -2614,7 +2614,7 @@ return {getSubOperands(oper, getSubOperands(2, operands, segments), a)}; } -llvm::Optional +std::optional fir::SelectCaseOp::getSuccessorOperands(mlir::ValueRange operands, unsigned oper) { auto a = @@ -2864,12 +2864,12 @@ 
printIntegralSwitchTerminator(*this, p); } -llvm::Optional +std::optional fir::SelectRankOp::getCompareOperands(unsigned) { return {}; } -llvm::Optional> +std::optional> fir::SelectRankOp::getCompareOperands(llvm::ArrayRef, unsigned) { return {}; } @@ -2879,7 +2879,7 @@ oper, getTargetArgsMutable(), getTargetOffsetAttr())); } -llvm::Optional> +std::optional> fir::SelectRankOp::getSuccessorOperands(llvm::ArrayRef operands, unsigned oper) { auto a = @@ -2889,7 +2889,7 @@ return {getSubOperands(oper, getSubOperands(2, operands, segments), a)}; } -llvm::Optional +std::optional fir::SelectRankOp::getSuccessorOperands(mlir::ValueRange operands, unsigned oper) { auto a = @@ -2909,12 +2909,12 @@ // SelectTypeOp //===----------------------------------------------------------------------===// -llvm::Optional +std::optional fir::SelectTypeOp::getCompareOperands(unsigned) { return {}; } -llvm::Optional> +std::optional> fir::SelectTypeOp::getCompareOperands(llvm::ArrayRef, unsigned) { return {}; } @@ -2924,7 +2924,7 @@ oper, getTargetArgsMutable(), getTargetOffsetAttr())); } -llvm::Optional> +std::optional> fir::SelectTypeOp::getSuccessorOperands(llvm::ArrayRef operands, unsigned oper) { auto a = @@ -2934,7 +2934,7 @@ return {getSubOperands(oper, getSubOperands(2, operands, segments), a)}; } -llvm::Optional +std::optional fir::SelectTypeOp::getSuccessorOperands(mlir::ValueRange operands, unsigned oper) { auto a = @@ -3225,7 +3225,7 @@ void fir::StringLitOp::build(mlir::OpBuilder &builder, mlir::OperationState &result, fir::CharacterType inType, llvm::StringRef val, - llvm::Optional len) { + std::optional len) { auto valAttr = builder.getNamedAttr(value(), builder.getStringAttr(val)); int64_t length = len ? *len : inType.getLen(); auto lenAttr = mkNamedIntegerAttr(builder, size(), length); @@ -3247,7 +3247,7 @@ mlir::OperationState &result, fir::CharacterType inType, llvm::ArrayRef vlist, - llvm::Optional len) { + std::optional len) { auto valAttr = builder.getNamedAttr(xlist(), convertToArrayAttr(builder, vlist)); std::int64_t length = len ? *len : inType.getLen(); @@ -3260,7 +3260,7 @@ mlir::OperationState &result, fir::CharacterType inType, llvm::ArrayRef vlist, - llvm::Optional len) { + std::optional len) { auto valAttr = builder.getNamedAttr(xlist(), convertToArrayAttr(builder, vlist)); std::int64_t length = len ? *len : inType.getLen(); @@ -3273,7 +3273,7 @@ mlir::OperationState &result, fir::CharacterType inType, llvm::ArrayRef vlist, - llvm::Optional len) { + std::optional len) { auto valAttr = builder.getNamedAttr(xlist(), convertToArrayAttr(builder, vlist)); std::int64_t length = len ? 
*len : inType.getLen(); diff --git a/flang/lib/Optimizer/Dialect/FortranVariableInterface.cpp b/flang/lib/Optimizer/Dialect/FortranVariableInterface.cpp --- a/flang/lib/Optimizer/Dialect/FortranVariableInterface.cpp +++ b/flang/lib/Optimizer/Dialect/FortranVariableInterface.cpp @@ -57,7 +57,7 @@ shapeRank = shape.getType().cast().getRank(); } - llvm::Optional rank = getRank(); + std::optional rank = getRank(); if (!rank || *rank != shapeRank) return emitOpError("has conflicting shape and base operand ranks"); } else if (!sourceIsBox) { diff --git a/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp b/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp --- a/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp +++ b/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp @@ -87,7 +87,7 @@ mlir::OpBuilder &builder, mlir::OperationState &result, mlir::Type result_type, mlir::Value memref, llvm::StringRef component, mlir::Value component_shape, llvm::ArrayRef subscripts, - mlir::ValueRange substring, llvm::Optional complex_part, + mlir::ValueRange substring, std::optional complex_part, mlir::Value shape, mlir::ValueRange typeparams, fir::FortranVariableFlagsAttr fortran_attrs) { auto componentAttr = diff --git a/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp b/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp --- a/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp +++ b/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp @@ -397,7 +397,7 @@ for (unsigned idx : orderedTypeGuards) { auto *dest = selectType.getSuccessor(idx); - llvm::Optional destOps = + std::optional destOps = selectType.getSuccessorOperands(operands, idx); if (typeGuards[idx].dyn_cast()) rewriter.replaceOpWithNewOp(selectType, dest); @@ -470,12 +470,13 @@ return 0; } - mlir::LogicalResult - genTypeLadderStep(mlir::Location loc, mlir::Value selector, - mlir::Attribute attr, mlir::Block *dest, - llvm::Optional destOps, - mlir::ModuleOp mod, mlir::PatternRewriter &rewriter, - fir::KindMapping &kindMap) const { + mlir::LogicalResult genTypeLadderStep(mlir::Location loc, + mlir::Value selector, + mlir::Attribute attr, mlir::Block *dest, + std::optional destOps, + mlir::ModuleOp mod, + mlir::PatternRewriter &rewriter, + fir::KindMapping &kindMap) const { mlir::Value cmp; // TYPE IS type guard comparison are all done inlined. 
if (auto a = attr.dyn_cast()) { diff --git a/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp b/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp --- a/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp +++ b/flang/lib/Optimizer/Transforms/MemoryAllocation.cpp @@ -116,7 +116,7 @@ auto loc = alloca.getLoc(); mlir::Type varTy = alloca.getInType(); auto unpackName = - [](llvm::Optional opt) -> llvm::StringRef { + [](std::optional opt) -> llvm::StringRef { if (opt) return *opt; return {}; diff --git a/mlir/docs/DefiningDialects/Operations.md b/mlir/docs/DefiningDialects/Operations.md --- a/mlir/docs/DefiningDialects/Operations.md +++ b/mlir/docs/DefiningDialects/Operations.md @@ -1341,9 +1341,9 @@ Case20 = 20, }; -llvm::Optional symbolizeMyIntEnum(uint32_t); +std::optional symbolizeMyIntEnum(uint32_t); llvm::StringRef ConvertToString(MyIntEnum); -llvm::Optional ConvertToEnum(llvm::StringRef); +std::optional ConvertToEnum(llvm::StringRef); inline constexpr unsigned getMaxEnumValForMyIntEnum() { return 20; } @@ -1387,13 +1387,13 @@ return ""; } -llvm::Optional ConvertToEnum(llvm::StringRef str) { - return llvm::StringSwitch>(str) +std::optional ConvertToEnum(llvm::StringRef str) { + return llvm::StringSwitch>(str) .Case("Case15", MyIntEnum::Case15) .Case("Case20", MyIntEnum::Case20) .Default(std::nullopt); } -llvm::Optional symbolizeMyIntEnum(uint32_t value) { +std::optional symbolizeMyIntEnum(uint32_t value) { switch (value) { case 15: return MyIntEnum::Case15; case 20: return MyIntEnum::Case20; @@ -1430,9 +1430,9 @@ Bit3 = 8, }; -llvm::Optional symbolizeMyBitEnum(uint32_t); +std::optional symbolizeMyBitEnum(uint32_t); std::string stringifyMyBitEnum(MyBitEnum); -llvm::Optional symbolizeMyBitEnum(llvm::StringRef); +std::optional symbolizeMyBitEnum(llvm::StringRef); inline constexpr MyBitEnum operator|(MyBitEnum a, MyBitEnum b) { return static_cast(static_cast(a) | static_cast(b)); @@ -1462,10 +1462,10 @@ } template -::llvm::Optional symbolizeEnum(::llvm::StringRef); +::std::optional symbolizeEnum(::llvm::StringRef); template <> -inline ::llvm::Optional symbolizeEnum(::llvm::StringRef str) { +inline ::std::optional symbolizeEnum(::llvm::StringRef str) { return symbolizeMyBitEnum(str); } @@ -1506,7 +1506,7 @@ return llvm::join(strs, "|"); } -llvm::Optional symbolizeMyBitEnum(llvm::StringRef str) { +std::optional symbolizeMyBitEnum(llvm::StringRef str) { // Special case for all bits unset. if (str == "None") return MyBitEnum::None; @@ -1515,7 +1515,7 @@ uint32_t val = 0; for (auto symbol : symbols) { - auto bit = llvm::StringSwitch>(symbol) + auto bit = llvm::StringSwitch>(symbol) .Case("tagged", 1) .Case("Bit1", 2) .Case("Bit2", 4) @@ -1526,7 +1526,7 @@ return static_cast(val); } -llvm::Optional symbolizeMyBitEnum(uint32_t value) { +std::optional symbolizeMyBitEnum(uint32_t value) { // Special case for all bits unset. 
if (value == 0) return MyBitEnum::None; diff --git a/mlir/docs/Dialects/Linalg/_index.md b/mlir/docs/Dialects/Linalg/_index.md --- a/mlir/docs/Dialects/Linalg/_index.md +++ b/mlir/docs/Dialects/Linalg/_index.md @@ -631,14 +631,14 @@ When `mlir-linalg-ods-gen -gen-impl=1` is called, the following C++ is produced: ``` -llvm::Optional> batchmatmul::referenceIterators() { +std::optional> batchmatmul::referenceIterators() { return SmallVector{ getParallelIteratorTypeName(), getParallelIteratorTypeName(), getParallelIteratorTypeName(), getReductionIteratorTypeName() }; } -llvm::Optional> batchmatmul::referenceIndexingMaps() { +std::optional> batchmatmul::referenceIndexingMaps() { MLIRContext *context = getContext(); AffineExpr d0, d1, d2, d3; bindDims(context, d0, d1, d2, d3); diff --git a/mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h b/mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h --- a/mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h +++ b/mlir/include/mlir/Analysis/DataFlow/SparseAnalysis.h @@ -243,7 +243,7 @@ /// regions or the parent operation itself, and set either the argument or /// parent result lattices. void visitRegionSuccessors(ProgramPoint point, RegionBranchOpInterface branch, - Optional successorIndex, + std::optional successorIndex, ArrayRef lattices); }; diff --git a/mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h b/mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h --- a/mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h +++ b/mlir/include/mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h @@ -61,7 +61,7 @@ int64_t alignedAllocationGetAlignment(ConversionPatternRewriter &rewriter, Location loc, OpType op, const DataLayout *defaultLayout) const { - if (Optional alignment = op.getAlignment()) + if (std::optional alignment = op.getAlignment()) return *alignment; // Whenever we don't have alignment set, we will use an alignment diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td --- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td +++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td @@ -708,7 +708,7 @@ unsigned getNumDims(); /// Get ranges as constants, may fail in dynamic case. 
- Optional> getConstantRanges(); + std::optional> getConstantRanges(); Block *getBody(); OpBuilder getBodyBuilder(); diff --git a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td --- a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td +++ b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td @@ -268,7 +268,7 @@ let hasCanonicalizer = 1; let extraClassDeclaration = [{ - Optional> getShapeForUnroll(); + std::optional> getShapeForUnroll(); }]; } @@ -330,7 +330,7 @@ let hasCanonicalizer = 1; let extraClassDeclaration = [{ - Optional> getShapeForUnroll(); + std::optional> getShapeForUnroll(); }]; } @@ -373,7 +373,7 @@ let hasCanonicalizer = 1; let extraClassDeclaration = [{ - Optional> getShapeForUnroll(); + std::optional> getShapeForUnroll(); }]; } diff --git a/mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h b/mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h --- a/mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h +++ b/mlir/include/mlir/Dialect/GPU/TransformOps/GPUTransformOps.h @@ -43,7 +43,7 @@ DiagnosedSilenceableFailure mapNestedForeachToThreadsImpl( RewriterBase &rewriter, Operation *target, const SmallVectorImpl &blockDim, bool syncAfterDistribute, - llvm::Optional transformOp, + std::optional transformOp, const ArrayRef &threadMappingAttributes); /// Maps the top level `scf.foreach_thread` op to GPU Thread Blocks. Mapping is diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td --- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td @@ -990,8 +990,8 @@ let extraClassDeclaration = !strconcat([{ static llvm::Intrinsic::ID getIntrinsicID( int64_t m, int64_t n, uint64_t k, - llvm::Optional b1Op, - llvm::Optional sat, + std::optional b1Op, + std::optional sat, mlir::NVVM::MMALayout layoutAEnum, mlir::NVVM::MMALayout layoutBEnum, mlir::NVVM::MMATypes eltypeAEnum, mlir::NVVM::MMATypes eltypeBEnum, mlir::NVVM::MMATypes eltypeCEnum, mlir::NVVM::MMATypes eltypeDEnum) { @@ -1006,7 +1006,7 @@ return 0; } - static Optional inferOperandMMAType(Type operandElType, + static std::optional inferOperandMMAType(Type operandElType, bool isAccumulator); MMATypes accumPtxType(); @@ -1016,10 +1016,10 @@ let builders = [ OpBuilder<(ins "Type":$resultType, "ValueRange":$operandA, "ValueRange":$operandB, "ValueRange":$operandC, - "ArrayRef":$shape, "Optional":$b1Op, - "Optional":$intOverflow, - "Optional>":$multiplicandPtxTypes, - "Optional>":$multiplicandLayouts)> + "ArrayRef":$shape, "std::optional":$b1Op, + "std::optional":$intOverflow, + "std::optional>":$multiplicandPtxTypes, + "std::optional>":$multiplicandLayouts)> ]; string llvmBuilder = [{ diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td @@ -47,7 +47,7 @@ } void getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // Op has a region, but conceptually the control flow does not enter the // region. 
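
The hunks above all apply one mechanical pattern: the spelling `llvm::Optional<T>` (or the unqualified `Optional<T>`) becomes `std::optional<T>`, while `std::nullopt`, `*opt`, `opt.value()`, and `opt.has_value()` stay untouched, since those are shared by both types and the diff changes only the type name. A minimal before/after sketch of that pattern, using hypothetical names rather than code from this patch:

```
#include <cstdint>
#include <optional>

// Before the migration this would have been spelled
//   llvm::Optional<int64_t> getConstantIndex(bool isConstant, int64_t value);
// Only the optional type's spelling changes; the call sites below compile
// unmodified because std::nullopt and the accessors are the same.
std::optional<int64_t> getConstantIndex(bool isConstant, int64_t value) {
  if (!isConstant)
    return std::nullopt; // "no value" is spelled identically in both types
  return value;
}

int main() {
  std::optional<int64_t> idx = getConstantIndex(/*isConstant=*/true, 42);
  return idx.has_value() && *idx == 42 ? 0 : 1;
}
```
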
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h --- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h +++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.h @@ -48,7 +48,7 @@ RewriterBase &rewriter, transform::TransformState &state, TransformOpInterface transformOp, ArrayRef targets, ArrayRef mixedNumThreads, - ArrayRef mixedTileSizes, Optional mapping, + ArrayRef mixedTileSizes, std::optional mapping, SmallVector &tileOps, SmallVector &tiledOps); } // namespace transform diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h --- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h @@ -244,7 +244,7 @@ /// smallest constant value for the size of the buffer needed for each /// dimension. If that is not possible, contains the dynamic size of the /// subview. The call back should return the buffer to use. -using AllocBufferCallbackFn = std::function( +using AllocBufferCallbackFn = std::function( OpBuilder &b, memref::SubViewOp subView, ArrayRef boundingSubViewSize, DataLayout &layout)>; @@ -262,7 +262,7 @@ struct LinalgPromotionOptions { /// Indices of subViews to promote. If `None`, try to promote all operands. - Optional> operandsToPromote = std::nullopt; + std::optional> operandsToPromote = std::nullopt; LinalgPromotionOptions &setOperandsToPromote(ArrayRef operands) { operandsToPromote = DenseSet(); operandsToPromote->insert(operands.begin(), operands.end()); @@ -273,7 +273,7 @@ /// Otherwise the partial view will be used. The decision is defaulted to /// `useFullTileBuffersDefault` when `useFullTileBuffers` is None and for /// operands missing from `useFullTileBuffers`. - Optional useFullTileBuffers = std::nullopt; + std::optional useFullTileBuffers = std::nullopt; LinalgPromotionOptions &setUseFullTileBuffers(ArrayRef useFullTiles) { unsigned size = useFullTiles.size(); llvm::SmallBitVector tmp(size, false); @@ -290,7 +290,7 @@ return *this; } /// Alignment of promoted buffer. If `None` do not specify alignment. - Optional alignment = std::nullopt; + std::optional alignment = std::nullopt; LinalgPromotionOptions &setAlignment(unsigned align) { alignment = align; return *this; @@ -304,8 +304,8 @@ /// Callback function to do the allocation of the promoted buffer. If None, /// then the default allocation scheme of allocating a memref buffer /// followed by a view operation is used. - Optional allocationFn = std::nullopt; - Optional deallocationFn = std::nullopt; + std::optional allocationFn = std::nullopt; + std::optional deallocationFn = std::nullopt; LinalgPromotionOptions & setAllocationDeallocationFns(AllocBufferCallbackFn const &allocFn, DeallocBufferCallbackFn const &deallocFn) { @@ -315,8 +315,8 @@ } /// Callback function to do the copy of data to and from the promoted /// subview. If None then a memref.copy is used. 
- Optional copyInFn = std::nullopt; - Optional copyOutFn = std::nullopt; + std::optional copyInFn = std::nullopt; + std::optional copyOutFn = std::nullopt; LinalgPromotionOptions &setCopyInOutFns(CopyCallbackFn const ©In, CopyCallbackFn const ©Out) { copyInFn = copyIn; @@ -469,14 +469,14 @@ FailureOr tileToForeachThreadOp(RewriterBase &builder, TilingInterface op, ArrayRef numThreads, - Optional mapping); + std::optional mapping); /// Same as `tileToForeachThreadOp`, but calculate the number of threads /// required using the given tileSizes. FailureOr tileToForeachThreadOpUsingTileSizes(RewriterBase &builder, TilingInterface op, ArrayRef tileSizes, - Optional mapping); + std::optional mapping); /// Transformation information returned after reduction tiling. struct ForeachThreadReductionTilingResult { @@ -514,11 +514,10 @@ /// %6 = linalg.generic %1 ["parallel", "reduction"] /// : tensor<7x4xf32> -> tensor<7xf32> /// ``` -FailureOr -tileReductionUsingForeachThread(RewriterBase &b, PartialReductionOpInterface op, - ArrayRef numThreads, - ArrayRef tileSizes = {}, - Optional mapping = std::nullopt); +FailureOr tileReductionUsingForeachThread( + RewriterBase &b, PartialReductionOpInterface op, + ArrayRef numThreads, ArrayRef tileSizes = {}, + std::optional mapping = std::nullopt); /// All indices returned by IndexOp should be invariant with respect to /// tiling. Therefore, if an operation is tiled, we have to transform the @@ -623,7 +622,7 @@ SmallVector tileInterchange; /// When specified, specifies distribution of generated tile loops to /// processors. - Optional tileDistribution = std::nullopt; + std::optional tileDistribution = std::nullopt; LinalgTilingAndFusionOptions & setDistributionOptions(LinalgLoopDistributionOptions distributionOptions) { tileDistribution = std::move(distributionOptions); @@ -676,7 +675,7 @@ /// When specified, specifies distribution of generated tile loops to /// processors. - Optional distribution = std::nullopt; + std::optional distribution = std::nullopt; LinalgTilingOptions & setDistributionOptions(LinalgLoopDistributionOptions distributionOptions) { @@ -806,7 +805,7 @@ }; /// Return vector::CombiningKind for the given op. -llvm::Optional getCombinerOpKind(Operation *combinerOp); +std::optional getCombinerOpKind(Operation *combinerOp); //===----------------------------------------------------------------------===// // Transformations exposed as rewrite patterns. @@ -966,7 +965,7 @@ /// /// See the documentation for tensor::bubbleUpPadSlice regarding zero slice /// guard. - using ControlFn = std::function(tensor::ExtractSliceOp)>; + using ControlFn = std::function(tensor::ExtractSliceOp)>; ExtractSliceOfPadTensorSwapPattern(MLIRContext *context, ControlFn controlFn = nullptr, diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td --- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td +++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td @@ -586,7 +586,7 @@ let extraClassDeclaration = [{ /// Helper function to get the index as a simple integer if it is constant. - Optional getConstantIndex(); + std::optional getConstantIndex(); /// Interface method of ShapedDimOpInterface: Return the source memref. 
Value getShapedValue() { return getSource(); } diff --git a/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td b/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td --- a/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td +++ b/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td @@ -816,7 +816,7 @@ let assemblyFormat = "($index^)? `of` $inputOp `:` type($value) attr-dict"; let builders = [ OpBuilder<(ins "Type":$resultType, "Value":$inputOp, - "Optional":$index), [{ + "std::optional":$index), [{ build($_builder, $_state, resultType, inputOp, index ? $_builder.getI32IntegerAttr(*index) : IntegerAttr()); }]>, @@ -883,7 +883,7 @@ let assemblyFormat = "($index^)? `of` $inputOp `:` type($value) attr-dict"; let builders = [ OpBuilder<(ins "Type":$resultType, "Value":$inputOp, - "Optional":$index), [{ + "std::optional":$index), [{ build($_builder, $_state, resultType, inputOp, index ? $_builder.getI32IntegerAttr(*index) : IntegerAttr()); }]>, diff --git a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td --- a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td +++ b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td @@ -274,7 +274,7 @@ } /// Get the iter arg number for an operand. If it isnt an iter arg /// operand return std::nullopt. - Optional getIterArgNumberForOpOperand(OpOperand &opOperand) { + std::optional getIterArgNumberForOpOperand(OpOperand &opOperand) { if (opOperand.getOwner() != getOperation()) return std::nullopt; unsigned operandNumber = opOperand.getOperandNumber(); @@ -331,10 +331,10 @@ /// correspond to the loop iterator operands, i.e., those exclusing the /// induction variable. LoopOp only has one region, so 0 is the only valid /// value for `index`. - OperandRange getSuccessorEntryOperands(Optional index); + OperandRange getSuccessorEntryOperands(std::optional index); /// Returns the step as an `APInt` if it is constant. - Optional getConstantStep(); + std::optional getConstantStep(); /// Interface method for ConditionallySpeculatable. Speculation::Speculatability getSpeculatability(); @@ -496,7 +496,7 @@ let builders = [ // Bodyless builder, outputs must be specified. OpBuilder<(ins "ValueRange":$outputs, "ValueRange":$num_threads, - "Optional":$mapping)>, + "std::optional":$mapping)>, // Builder that takes a bodyBuilder lambda. 
OpBuilder<(ins "ValueRange":$outputs, "ValueRange":$num_threads, "ArrayRef":$mapping, @@ -1003,7 +1003,7 @@ using BodyBuilderFn = function_ref; - OperandRange getSuccessorEntryOperands(Optional index); + OperandRange getSuccessorEntryOperands(std::optional index); ConditionOp getConditionOp(); YieldOp getYieldOp(); Block::BlockArgListType getBeforeArguments(); diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h b/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h --- a/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h +++ b/mlir/include/mlir/Dialect/SPIRV/IR/ParserUtils.h @@ -31,7 +31,8 @@ auto loc = parser.getCurrentLocation(); if (parser.parseKeyword(&keyword)) return failure(); - if (Optional attr = spirv::symbolizeEnum(keyword)) { + if (std::optional attr = + spirv::symbolizeEnum(keyword)) { value = *attr; return success(); } diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td @@ -492,11 +492,11 @@ let regions = (region AnyRegion); let builders = [ - OpBuilder<(ins CArg<"Optional", "std::nullopt">:$name)>, + OpBuilder<(ins CArg<"std::optional", "std::nullopt">:$name)>, OpBuilder<(ins "spirv::AddressingModel":$addressing_model, "spirv::MemoryModel":$memory_model, - CArg<"Optional", "std::nullopt">:$vce_triple, - CArg<"Optional", "std::nullopt">:$name)> + CArg<"std::optional", "std::nullopt">:$vce_triple, + CArg<"std::optional", "std::nullopt">:$name)> ]; // We need to ensure the block inside the region is properly terminated; @@ -511,7 +511,7 @@ bool isOptionalSymbol() { return true; } - Optional getName() { return getSymName(); } + std::optional getName() { return getSymName(); } static StringRef getVCETripleAttrName() { return "vce_triple"; } }]; diff --git a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td --- a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td +++ b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td @@ -352,7 +352,7 @@ let extraClassDeclaration = [{ /// Get the `index` value as integer if it is constant. - Optional getConstantIndex(); + std::optional getConstantIndex(); /// Returns when two result types are compatible for this op; method used by /// InferTypeOpInterface @@ -383,7 +383,7 @@ let extraClassDeclaration = [{ /// Get the `dim` value as integer if it is constant. - Optional getConstantDim(); + std::optional getConstantDim(); /// Returns when two result types are compatible for this op; method used by /// InferTypeOpInterface static bool isCompatibleReturnTypes(TypeRange l, TypeRange r); diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td @@ -67,8 +67,8 @@ let extraClassDeclaration = [{ // Get the integer type used to store memory and dimension sizes. 
IntegerType getSizesType() const; - Type getFieldType(StorageSpecifierKind kind, Optional dim) const; - Type getFieldType(StorageSpecifierKind kind, Optional dim) const; + Type getFieldType(StorageSpecifierKind kind, std::optional dim) const; + Type getFieldType(StorageSpecifierKind kind, std::optional dim) const; }]; let assemblyFormat="`<` qualified($encoding) `>`"; diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td --- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td +++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td @@ -129,7 +129,7 @@ let extraClassDeclaration = [{ /// Helper function to get the index as a simple integer if it is constant. - Optional getConstantIndex(); + std::optional getConstantIndex(); /// Interface method of ShapedDimOpInterface: Return the source tensor. Value getShapedValue() { return getSource(); } @@ -380,7 +380,7 @@ /// Compute the rank-reduction mask that can be applied to map the source /// tensor type to the result tensor type by dropping unit dims. - llvm::Optional> + std::optional> computeRankReductionMask() { return ::mlir::computeRankReductionMask(getSourceType().getShape(), getType().getShape()); diff --git a/mlir/include/mlir/Dialect/Utils/StaticValueUtils.h b/mlir/include/mlir/Dialect/Utils/StaticValueUtils.h --- a/mlir/include/mlir/Dialect/Utils/StaticValueUtils.h +++ b/mlir/include/mlir/Dialect/Utils/StaticValueUtils.h @@ -67,7 +67,7 @@ SmallVector getAsOpFoldResult(ArrayAttr arrayAttr); /// If ofr is a constant integer or an IntegerAttr, return the integer. -Optional getConstantIntValue(OpFoldResult ofr); +std::optional getConstantIntValue(OpFoldResult ofr); /// Return true if `ofr` is constant integer equal to `value`. bool isConstantIntValue(OpFoldResult ofr, int64_t value); diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h @@ -118,7 +118,7 @@ /// VectorToSCF, which reduces the rank of vector transfer ops. void populateVectorTransferLoweringPatterns( RewritePatternSet &patterns, - llvm::Optional maxTransferRank = std::nullopt, + std::optional maxTransferRank = std::nullopt, PatternBenefit benefit = 1); /// These patterns materialize masks for various vector ops such as transfers. diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td @@ -1370,19 +1370,19 @@ "Value":$source, "ValueRange":$indices, "AffineMap":$permutationMap, - CArg<"Optional>", "::std::nullopt">:$inBounds)>, + CArg<"std::optional>", "::std::nullopt">:$inBounds)>, /// 3. Builder that sets permutation map to 'getMinorIdentityMap'. OpBuilder<(ins "VectorType":$vectorType, "Value":$source, "ValueRange":$indices, "Value":$padding, - CArg<"Optional>", "::std::nullopt">:$inBounds)>, + CArg<"std::optional>", "::std::nullopt">:$inBounds)>, /// 4. Builder that sets padding to zero and permutation map to /// 'getMinorIdentityMap'. 
OpBuilder<(ins "VectorType":$vectorType, "Value":$source, "ValueRange":$indices, - CArg<"Optional>", "::std::nullopt">:$inBounds)>, + CArg<"std::optional>", "::std::nullopt">:$inBounds)>, ]; let extraClassDeclaration = [{ @@ -1521,13 +1521,13 @@ "Value":$dest, "ValueRange":$indices, "AffineMap":$permutationMap, - CArg<"Optional>", "::std::nullopt">:$inBounds)>, + CArg<"std::optional>", "::std::nullopt">:$inBounds)>, /// 4. Builder with type inference that sets an empty mask and sets permutation /// map to 'getMinorIdentityMap'. OpBuilder<(ins "Value":$vector, "Value":$dest, "ValueRange":$indices, - CArg<"Optional>", "::std::nullopt">:$inBounds)>, + CArg<"std::optional>", "::std::nullopt">:$inBounds)>, ]; let extraClassDeclaration = [{ diff --git a/mlir/include/mlir/IR/BuiltinOps.td b/mlir/include/mlir/IR/BuiltinOps.td --- a/mlir/include/mlir/IR/BuiltinOps.td +++ b/mlir/include/mlir/IR/BuiltinOps.td @@ -58,13 +58,13 @@ let regions = (region SizedRegion<1>:$bodyRegion); let assemblyFormat = "($sym_name^)? attr-dict-with-keyword $bodyRegion"; - let builders = [OpBuilder<(ins CArg<"Optional", "{}">:$name)>]; + let builders = [OpBuilder<(ins CArg<"std::optional", "{}">:$name)>]; let extraClassDeclaration = [{ /// Construct a module from the given location with an optional name. - static ModuleOp create(Location loc, Optional name = std::nullopt); + static ModuleOp create(Location loc, std::optional name = std::nullopt); /// Return the name of this module if present. - Optional getName() { return getSymName(); } + std::optional getName() { return getSymName(); } //===------------------------------------------------------------------===// // SymbolOpInterface Methods diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td --- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td +++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td @@ -63,7 +63,7 @@ type. If a shape is not provided, the current shape of the type is used. }], "::mlir::ShapedType", "cloneWith", (ins - "::llvm::Optional<::llvm::ArrayRef>":$shape, + "::std::optional<::llvm::ArrayRef>":$shape, "::mlir::Type":$elementType )>, diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h --- a/mlir/include/mlir/IR/BuiltinTypes.h +++ b/mlir/include/mlir/IR/BuiltinTypes.h @@ -90,7 +90,7 @@ /// Clone this type with the given shape and element type. If the /// provided shape is `None`, the current shape of the type is used. - TensorType cloneWith(Optional> shape, + TensorType cloneWith(std::optional> shape, Type elementType) const; /// Return true if the specified element type is ok in a tensor. @@ -126,7 +126,7 @@ /// Clone this type with the given shape and element type. If the /// provided shape is `None`, the current shape of the type is used. - BaseMemRefType cloneWith(Optional> shape, + BaseMemRefType cloneWith(std::optional> shape, Type elementType) const; /// Return true if the specified element type is ok in a memref. @@ -337,7 +337,7 @@ /// which dimensions must be kept when e.g. compute MemRef strides under /// rank-reducing operations. Return std::nullopt if reducedShape cannot be /// obtained by dropping only `1` entries in `originalShape`. 
-llvm::Optional> +std::optional> computeRankReductionMask(ArrayRef originalShape, ArrayRef reducedShape); diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td --- a/mlir/include/mlir/IR/BuiltinTypes.td +++ b/mlir/include/mlir/IR/BuiltinTypes.td @@ -1031,7 +1031,7 @@ /// Clone this vector type with the given shape and element type. If the /// provided shape is `None`, the current shape of the type is used. - VectorType cloneWith(Optional> shape, + VectorType cloneWith(std::optional> shape, Type elementType) const; }]; let skipDefaultBuilders = 1; diff --git a/mlir/include/mlir/IR/Diagnostics.h b/mlir/include/mlir/IR/Diagnostics.h --- a/mlir/include/mlir/IR/Diagnostics.h +++ b/mlir/include/mlir/IR/Diagnostics.h @@ -487,19 +487,19 @@ /// the diagnostic arguments directly instead of relying on the returned /// InFlightDiagnostic. template -LogicalResult emitOptionalError(Optional loc, Args &&...args) { +LogicalResult emitOptionalError(std::optional loc, Args &&...args) { if (loc) return emitError(*loc).append(std::forward(args)...); return failure(); } template -LogicalResult emitOptionalWarning(Optional loc, Args &&...args) { +LogicalResult emitOptionalWarning(std::optional loc, Args &&...args) { if (loc) return emitWarning(*loc).append(std::forward(args)...); return failure(); } template -LogicalResult emitOptionalRemark(Optional loc, Args &&...args) { +LogicalResult emitOptionalRemark(std::optional loc, Args &&...args) { if (loc) return emitRemark(*loc).append(std::forward(args)...); return failure(); diff --git a/mlir/include/mlir/IR/Dialect.h b/mlir/include/mlir/IR/Dialect.h --- a/mlir/include/mlir/IR/Dialect.h +++ b/mlir/include/mlir/IR/Dialect.h @@ -115,7 +115,8 @@ /// By default this will lookup for registered operations and return the /// `parse()` method registered on the RegisteredOperationName. Dialects can /// override this behavior and handle unregistered operations as well. - virtual Optional getParseOperationHook(StringRef opName) const; + virtual std::optional + getParseOperationHook(StringRef opName) const; /// Print an operation registered to this dialect. /// This hook is invoked for registered operation which don't override the diff --git a/mlir/include/mlir/IR/EnumAttr.td b/mlir/include/mlir/IR/EnumAttr.td --- a/mlir/include/mlir/IR/EnumAttr.td +++ b/mlir/include/mlir/IR/EnumAttr.td @@ -130,7 +130,7 @@ // type to the corresponding symbol. It will have the following signature: // // ```c++ - // llvm::Optional<> (); + // std::optional<> (); // ``` string underlyingToSymbolFnName = "symbolize" # name; @@ -138,7 +138,7 @@ // corresponding symbol. It will have the following signature: // // ```c++ - // llvm::Optional<> (llvm::StringRef); + // std::optional<> (llvm::StringRef); // ``` string stringToSymbolFnName = "symbolize" # name; diff --git a/mlir/include/mlir/IR/OpBase.td b/mlir/include/mlir/IR/OpBase.td --- a/mlir/include/mlir/IR/OpBase.td +++ b/mlir/include/mlir/IR/OpBase.td @@ -1033,7 +1033,7 @@ // Rewrite the attribute to be optional. // Note: this has to be kept up to date with Attr above. let storageType = attr.storageType; - let returnType = "::llvm::Optional<" # attr.returnType #">"; + let returnType = "::std::optional<" # attr.returnType #">"; let convertFromStorage = "$_self ? 
" # returnType # "(" # attr.convertFromStorage # ") : (::std::nullopt)"; let valueType = attr.valueType; diff --git a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.h b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.h --- a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.h +++ b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.h @@ -128,7 +128,7 @@ /// Return the `BlockArgument` corresponding to operand `operandIndex` in some /// successor if `operandIndex` is within the range of `operands`, or /// std::nullopt if `operandIndex` isn't a successor operand index. -Optional +std::optional getBranchSuccessorArgument(const SuccessorOperands &operands, unsigned operandIndex, Block *successor); @@ -164,8 +164,10 @@ RegionSuccessor(Region *region, Block::BlockArgListType regionInputs = {}) : region(region), inputs(regionInputs) {} /// Initialize a successor that branches back to/out of the parent operation. - RegionSuccessor(Optional results = {}) - : inputs(results ? ValueRange(*results) : ValueRange()) {} + RegionSuccessor(Operation::result_range results) + : inputs(ValueRange(results)) {} + /// Constructor with no arguments. + RegionSuccessor() : inputs(ValueRange()) {} /// Return the given region successor. Returns nullptr if the successor is the /// parent operation. @@ -190,7 +192,8 @@ public: /// Create invocation bounds. The lower bound must be at least 0 and only the /// upper bound can be unknown. - InvocationBounds(unsigned lb, Optional ub) : lower(lb), upper(ub) { + InvocationBounds(unsigned lb, std::optional ub) + : lower(lb), upper(ub) { assert((!ub || ub >= lb) && "upper bound cannot be less than lower bound"); } @@ -198,7 +201,7 @@ unsigned getLowerBound() const { return lower; } /// Return the upper bound. - Optional getUpperBound() const { return upper; } + std::optional getUpperBound() const { return upper; } /// Returns the unknown invocation bounds, i.e., there is no information on /// how many times a region may be invoked. @@ -209,7 +212,7 @@ unsigned lower; /// The maximum number of times the successor region will be invoked or /// `std::nullopt` if an upper bound is not known. - Optional upper; + std::optional upper; }; /// Return `true` if `a` and `b` are in mutually exclusive regions as per @@ -241,16 +244,16 @@ /// `OperandRange` represents all operands that are passed to the specified /// successor region. If `regionIndex` is `std::nullopt`, all operands that are /// passed to the parent operation will be returned. -Optional +std::optional getMutableRegionBranchSuccessorOperands(Operation *operation, - Optional regionIndex); + std::optional regionIndex); /// Returns the read only operands that are passed to the region with the given /// `regionIndex`. See `getMutableRegionBranchSuccessorOperands` for more /// information. -Optional +std::optional getRegionBranchSuccessorOperands(Operation *operation, - Optional regionIndex); + std::optional regionIndex); //===----------------------------------------------------------------------===// // ControlFlow Traits diff --git a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td --- a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td +++ b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td @@ -70,11 +70,11 @@ some successor, or None if `operandIndex` isn't a successor operand index. 
}], - "::llvm::Optional<::mlir::BlockArgument>", "getSuccessorBlockArgument", + "::std::optional<::mlir::BlockArgument>", "getSuccessorBlockArgument", (ins "unsigned":$operandIndex), [{ ::mlir::Operation *opaqueOp = $_op; for (unsigned i = 0, e = opaqueOp->getNumSuccessors(); i != e; ++i) { - if (::llvm::Optional<::mlir::BlockArgument> arg = ::mlir::detail::getBranchSuccessorArgument( + if (::std::optional<::mlir::BlockArgument> arg = ::mlir::detail::getBranchSuccessorArgument( $_op.getSuccessorOperands(i), operandIndex, opaqueOp->getSuccessor(i))) return arg; @@ -140,7 +140,7 @@ `getSuccessorRegions`. }], "::mlir::OperandRange", "getSuccessorEntryOperands", - (ins "::llvm::Optional":$index), [{}], + (ins "::std::optional":$index), [{}], /*defaultImplementation=*/[{ auto operandEnd = this->getOperation()->operand_end(); return ::mlir::OperandRange(operandEnd, operandEnd); @@ -161,7 +161,7 @@ successor region must be non-empty. }], "void", "getSuccessorRegions", - (ins "::llvm::Optional":$index, + (ins "::std::optional":$index, "::llvm::ArrayRef<::mlir::Attribute>":$operands, "::llvm::SmallVectorImpl<::mlir::RegionSuccessor> &":$regions) >, @@ -208,7 +208,7 @@ let extraClassDeclaration = [{ /// Convenience helper in case none of the operands is known. - void getSuccessorRegions(Optional index, + void getSuccessorRegions(std::optional index, SmallVectorImpl ®ions); /// Return `true` if control flow originating from the given region may @@ -239,7 +239,7 @@ the parent operation. }], "::mlir::MutableOperandRange", "getMutableSuccessorOperands", - (ins "::llvm::Optional":$index) + (ins "::std::optional":$index) >, InterfaceMethod<[{ Returns a range of operands that are semantically "returned" by passing @@ -248,7 +248,7 @@ operation. }], "::mlir::OperandRange", "getSuccessorOperands", - (ins "::llvm::Optional":$index), [{}], + (ins "::std::optional":$index), [{}], /*defaultImplementation=*/[{ return $_op.getMutableSuccessorOperands(index); }] diff --git a/mlir/include/mlir/Interfaces/InferTypeOpInterface.h b/mlir/include/mlir/Interfaces/InferTypeOpInterface.h --- a/mlir/include/mlir/Interfaces/InferTypeOpInterface.h +++ b/mlir/include/mlir/Interfaces/InferTypeOpInterface.h @@ -235,12 +235,13 @@ // TODO: Consider generating typedefs for trait member functions if this usage // becomes more common. 
LogicalResult inferReturnTensorTypes( - function_ref location, ValueShapeRange operands, - DictionaryAttr attributes, RegionRange regions, - SmallVectorImpl &retComponents)> + function_ref< + LogicalResult(MLIRContext *, std::optional location, + ValueShapeRange operands, DictionaryAttr attributes, + RegionRange regions, + SmallVectorImpl &retComponents)> componentTypeFn, - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes); @@ -272,7 +273,7 @@ class InferTensorType : public TraitBase { public: static LogicalResult - inferReturnTypes(MLIRContext *context, Optional location, + inferReturnTypes(MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { diff --git a/mlir/include/mlir/Interfaces/InferTypeOpInterface.td b/mlir/include/mlir/Interfaces/InferTypeOpInterface.td --- a/mlir/include/mlir/Interfaces/InferTypeOpInterface.td +++ b/mlir/include/mlir/Interfaces/InferTypeOpInterface.td @@ -41,7 +41,7 @@ /*retTy=*/"::mlir::LogicalResult", /*methodName=*/"inferReturnTypes", /*args=*/(ins "::mlir::MLIRContext *":$context, - "::llvm::Optional<::mlir::Location>":$location, + "::std::optional<::mlir::Location>":$location, "::mlir::ValueRange":$operands, "::mlir::DictionaryAttr":$attributes, "::mlir::RegionRange":$regions, @@ -72,7 +72,7 @@ /*retTy=*/"::mlir::LogicalResult", /*methodName=*/"refineReturnTypes", /*args=*/(ins "::mlir::MLIRContext *":$context, - "::llvm::Optional<::mlir::Location>":$location, + "::std::optional<::mlir::Location>":$location, "::mlir::ValueRange":$operands, "::mlir::DictionaryAttr":$attributes, "::mlir::RegionRange":$regions, @@ -144,7 +144,7 @@ /*retTy=*/"::mlir::LogicalResult", /*methodName=*/"inferReturnTypeComponents", /*args=*/(ins "::mlir::MLIRContext*":$context, - "::llvm::Optional<::mlir::Location>":$location, + "::std::optional<::mlir::Location>":$location, "::mlir::ValueShapeRange":$operands, "::mlir::DictionaryAttr":$attributes, "::mlir::RegionRange":$regions, diff --git a/mlir/include/mlir/Interfaces/LoopLikeInterface.td b/mlir/include/mlir/Interfaces/LoopLikeInterface.td --- a/mlir/include/mlir/Interfaces/LoopLikeInterface.td +++ b/mlir/include/mlir/Interfaces/LoopLikeInterface.td @@ -52,7 +52,7 @@ If there is a single induction variable return it, otherwise return std::nullopt. }], - /*retTy=*/"::llvm::Optional<::mlir::Value>", + /*retTy=*/"::std::optional<::mlir::Value>", /*methodName=*/"getSingleInductionVar", /*args=*/(ins), /*methodBody=*/"", @@ -64,7 +64,7 @@ Return the single lower bound value or attribute if it exists, otherwise return std::nullopt. }], - /*retTy=*/"::llvm::Optional<::mlir::OpFoldResult>", + /*retTy=*/"::std::optional<::mlir::OpFoldResult>", /*methodName=*/"getSingleLowerBound", /*args=*/(ins), /*methodBody=*/"", @@ -76,7 +76,7 @@ Return the single step value or attribute if it exists, otherwise return std::nullopt. }], - /*retTy=*/"::llvm::Optional<::mlir::OpFoldResult>", + /*retTy=*/"::std::optional<::mlir::OpFoldResult>", /*methodName=*/"getSingleStep", /*args=*/(ins), /*methodBody=*/"", @@ -88,7 +88,7 @@ Return the single upper bound value or attribute if it exists, otherwise return std::nullopt. 
}], - /*retTy=*/"::llvm::Optional<::mlir::OpFoldResult>", + /*retTy=*/"::std::optional<::mlir::OpFoldResult>", /*methodName=*/"getSingleUpperBound", /*args=*/(ins), /*methodBody=*/"", diff --git a/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td b/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td --- a/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td +++ b/mlir/include/mlir/Interfaces/SideEffectInterfaceBase.td @@ -106,7 +106,7 @@ /// Return the effect of the given type `Effect` that is applied to the /// given value, or std::nullopt if no effect exists. template - ::llvm::Optional<::mlir::SideEffects::EffectInstance<}] # baseEffect # [{>> + ::std::optional<::mlir::SideEffects::EffectInstance<}] # baseEffect # [{>> getEffectOnValue(::mlir::Value value) { llvm::SmallVector<::mlir::SideEffects::EffectInstance< }] # baseEffect # [{>, 4> effects; diff --git a/mlir/include/mlir/Interfaces/VectorInterfaces.td b/mlir/include/mlir/Interfaces/VectorInterfaces.td --- a/mlir/include/mlir/Interfaces/VectorInterfaces.td +++ b/mlir/include/mlir/Interfaces/VectorInterfaces.td @@ -28,7 +28,7 @@ `targetShape`. Return `None` if the op cannot be unrolled to the target vector shape. }], - /*retTy=*/"::llvm::Optional<::llvm::SmallVector>", + /*retTy=*/"::std::optional<::llvm::SmallVector>", /*methodName=*/"getShapeForUnroll", /*args=*/(ins), /*methodBody=*/"", @@ -143,7 +143,7 @@ >, InterfaceMethod< /*desc=*/"Return the `in_bounds` boolean ArrayAttr.", - /*retTy=*/"::llvm::Optional<::mlir::ArrayAttr>", + /*retTy=*/"::std::optional<::mlir::ArrayAttr>", /*methodName=*/"in_bounds", /*args=*/(ins), /*methodBody=*/"return $_op.getInBounds();" diff --git a/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp b/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp --- a/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp +++ b/mlir/lib/Analysis/AliasAnalysis/LocalAliasAnalysis.cpp @@ -45,7 +45,7 @@ // this region predecessor that correspond to the input values of `region`. If // an index could not be found, std::nullopt is returned instead. auto getOperandIndexIfPred = - [&](Optional predIndex) -> Optional { + [&](std::optional predIndex) -> std::optional { SmallVector successors; branch.getSuccessorRegions(predIndex, successors); for (RegionSuccessor &successor : successors) { @@ -75,12 +75,12 @@ }; // Check branches from the parent operation. - Optional regionIndex; + std::optional regionIndex; if (region) { // Determine the actual region number from the passed region. regionIndex = region->getRegionNumber(); } - if (Optional operandIndex = + if (std::optional operandIndex = getOperandIndexIfPred(/*predIndex=*/std::nullopt)) { collectUnderlyingAddressValues( branch.getSuccessorEntryOperands(regionIndex)[*operandIndex], maxDepth, @@ -89,7 +89,7 @@ // Check branches from each child region. Operation *op = branch.getOperation(); for (int i = 0, e = op->getNumRegions(); i != e; ++i) { - if (Optional operandIndex = getOperandIndexIfPred(i)) { + if (std::optional operandIndex = getOperandIndexIfPred(i)) { for (Block &block : op->getRegion(i)) { Operation *term = block.getTerminator(); // Try to determine possible region-branch successor operands for the @@ -211,7 +211,8 @@ /// non-null it specifies the parent operation that the allocation does not /// escape. If no scope is found, `allocScopeOp` is set to nullptr. 
static LogicalResult -getAllocEffectFor(Value value, Optional &effect, +getAllocEffectFor(Value value, + std::optional &effect, Operation *&allocScopeOp) { // Try to get a memory effect interface for the parent operation. Operation *op; @@ -249,7 +250,7 @@ if (lhs == rhs) return AliasResult::MustAlias; Operation *lhsAllocScope = nullptr, *rhsAllocScope = nullptr; - Optional lhsAlloc, rhsAlloc; + std::optional lhsAlloc, rhsAlloc; // Handle the case where lhs is a constant. Attribute lhsAttr, rhsAttr; diff --git a/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp b/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp --- a/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp +++ b/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp @@ -165,7 +165,7 @@ /// Given the results of getConstant{Lower,Upper}Bound() or getConstantStep() /// on a LoopLikeInterface return the lower/upper bound for that result if /// possible. - auto getLoopBoundFromFold = [&](Optional loopBound, + auto getLoopBoundFromFold = [&](std::optional loopBound, Type boundType, bool getUpper) { unsigned int width = ConstantIntRanges::getStorageBitwidth(boundType); if (loopBound.has_value()) { @@ -190,14 +190,14 @@ // Infer bounds for loop arguments that have static bounds if (auto loop = dyn_cast(op)) { - Optional iv = loop.getSingleInductionVar(); + std::optional iv = loop.getSingleInductionVar(); if (!iv) { return SparseDataFlowAnalysis ::visitNonControlFlowArguments( op, successor, argLattices, firstIndex); } - Optional lowerBound = loop.getSingleLowerBound(); - Optional upperBound = loop.getSingleUpperBound(); - Optional step = loop.getSingleStep(); + std::optional lowerBound = loop.getSingleLowerBound(); + std::optional upperBound = loop.getSingleUpperBound(); + std::optional step = loop.getSingleStep(); APInt min = getLoopBoundFromFold(lowerBound, iv->getType(), /*getUpper=*/false); APInt max = getLoopBoundFromFold(upperBound, iv->getType(), diff --git a/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp b/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp --- a/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp +++ b/mlir/lib/Analysis/DataFlow/SparseAnalysis.cpp @@ -208,7 +208,7 @@ void AbstractSparseDataFlowAnalysis::visitRegionSuccessors( ProgramPoint point, RegionBranchOpInterface branch, - Optional successorIndex, + std::optional successorIndex, ArrayRef lattices) { const auto *predecessors = getOrCreateFor(point, point); assert(predecessors->allPredecessorsKnown() && @@ -216,7 +216,7 @@ for (Operation *op : predecessors->getKnownPredecessors()) { // Get the incoming successor operands. - Optional operands; + std::optional operands; // Check if the predecessor is the parent op. 
if (op == branch) { @@ -390,7 +390,7 @@ forwarded.getBeginOperandIndex(), forwarded.size()); for (OpOperand &operand : operands) { unaccounted.reset(operand.getOperandNumber()); - if (Optional blockArg = + if (std::optional blockArg = detail::getBranchSuccessorArgument( successorOperands, operand.getOperandNumber(), block)) { meet(getLatticeElement(operand.get()), diff --git a/mlir/lib/AsmParser/Parser.cpp b/mlir/lib/AsmParser/Parser.cpp --- a/mlir/lib/AsmParser/Parser.cpp +++ b/mlir/lib/AsmParser/Parser.cpp @@ -1871,7 +1871,7 @@ if (iface && !iface->getDefaultDialect().empty()) defaultDialect = iface->getDefaultDialect(); } else { - Optional dialectHook; + std::optional dialectHook; Dialect *dialect = opNameInfo->getDialect(); if (!dialect) { InFlightDiagnostic diag = diff --git a/mlir/lib/CAPI/Interfaces/Interfaces.cpp b/mlir/lib/CAPI/Interfaces/Interfaces.cpp --- a/mlir/lib/CAPI/Interfaces/Interfaces.cpp +++ b/mlir/lib/CAPI/Interfaces/Interfaces.cpp @@ -46,7 +46,7 @@ if (!info) return mlirLogicalResultFailure(); - llvm::Optional maybeLocation; + std::optional maybeLocation; if (!mlirLocationIsNull(location)) maybeLocation = unwrap(location); SmallVector unwrappedOperands; diff --git a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp --- a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp +++ b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp @@ -223,7 +223,7 @@ // initialization of the result values. Attribute reduction = std::get<0>(pair); Type resultType = std::get<1>(pair); - Optional reductionOp = + std::optional reductionOp = arith::symbolizeAtomicRMWKind( static_cast(reduction.cast().getInt())); assert(reductionOp && "Reduction operation cannot be of None Type"); @@ -243,7 +243,7 @@ "Unequal number of reductions and operands."); for (unsigned i = 0, end = reductions.size(); i < end; i++) { // For each of the reduction operations get the respective mlir::Value. - Optional reductionOp = + std::optional reductionOp = arith::symbolizeAtomicRMWKind( reductions[i].cast().getInt()); assert(reductionOp && "Reduction Operation cannot be of None Type"); diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp --- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp +++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp @@ -442,8 +442,8 @@ return rewriter.create(loc, sizePtr); } - Optional getConstantDimIndex(memref::DimOp dimOp) const { - if (Optional idx = dimOp.getConstantIndex()) + std::optional getConstantDimIndex(memref::DimOp dimOp) const { + if (auto idx = dimOp.getConstantIndex()) return idx; if (auto constantOp = dimOp.getIndex().getDefiningOp()) @@ -462,7 +462,7 @@ // Take advantage if index is constant. MemRefType memRefType = operandType.cast(); - if (Optional index = getConstantDimIndex(dimOp)) { + if (std::optional index = getConstantDimIndex(dimOp)) { int64_t i = *index; if (memRefType.isDynamicDim(i)) { // extract dynamic size from the memref descriptor. 
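// Illustrative sketch, not part of the patch: why this migration is mostly
// mechanical, shown with plain std::optional. llvm::Optional already exposed
// the std::optional interface (has_value(), value_or(), operator*), so the
// rewrite is largely a type-name change plus llvm::None -> std::nullopt.
#include <optional>

int demo() {
  std::optional<int> x;         // was: llvm::Optional<int> x;
  x = 5;
  int fallback = x.value_or(0); // same spelling on both types
  x = std::nullopt;             // was: x = llvm::None;
  return x.has_value() ? *x : fallback;
}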
diff --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp --- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp +++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp @@ -298,14 +298,15 @@ FailureOr ptxTypeB = getNvvmMmaType(bType); if (failed(ptxTypeB)) return op->emitOpError("failed to deduce operand PTX types"); - Optional ptxTypeC = NVVM::MmaOp::inferOperandMMAType( - cType.getElementType(), /*isAccumulator=*/true); + std::optional ptxTypeC = + NVVM::MmaOp::inferOperandMMAType(cType.getElementType(), + /*isAccumulator=*/true); if (!ptxTypeC) return op->emitError( "could not infer the PTX type for the accumulator/result"); // TODO: add an attribute to the op to customize this behavior. - Optional overflow(std::nullopt); + std::optional overflow(std::nullopt); if (aType.getElementType().isa()) overflow = NVVM::MMAIntOverflow::satfinite; @@ -413,7 +414,7 @@ unsigned matBSize, unsigned matCSize, NVVM::MMATypes ptxTypeA, NVVM::MMATypes ptxTypeB, NVVM::MMATypes ptxTypeC, NVVM::MMATypes ptxTypeD, - Optional overflow) { + std::optional overflow) { auto ptxTypeStr = [](NVVM::MMATypes ptxType) { return NVVM::stringifyMMATypes(ptxType); }; @@ -449,7 +450,7 @@ static FailureOr emitMmaSparseSyncOpAsm( Location loc, NVVM::MMATypes ptxTypeA, NVVM::MMATypes ptxTypeB, NVVM::MMATypes ptxTypeC, NVVM::MMATypes ptxTypeD, - Optional overflow, ArrayRef unpackedAData, + std::optional overflow, ArrayRef unpackedAData, ArrayRef unpackedB, ArrayRef unpackedC, Value indexData, int64_t metadataSelector, const std::array &shape, Type intrinsicResultType, ConversionPatternRewriter &rewriter) { @@ -505,8 +506,9 @@ FailureOr ptxTypeB = getNvvmMmaType(bType); if (failed(ptxTypeB)) return op->emitOpError("failed to deduce operand PTX types"); - Optional ptxTypeC = NVVM::MmaOp::inferOperandMMAType( - cType.getElementType(), /*isAccumulator=*/true); + std::optional ptxTypeC = + NVVM::MmaOp::inferOperandMMAType(cType.getElementType(), + /*isAccumulator=*/true); if (!ptxTypeC) return op->emitError( "could not infer the PTX type for the accumulator/result"); @@ -517,7 +519,7 @@ return failure(); // TODO: add an attribute to the op to customize this behavior. - Optional overflow(std::nullopt); + std::optional overflow(std::nullopt); if (aType.getElementType().isa()) overflow = NVVM::MMAIntOverflow::satfinite; diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp --- a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp +++ b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp @@ -595,7 +595,7 @@ // Grab the root kind if present. StringAttr rootKindAttr; if (pdl::OperationOp rootOp = root.getDefiningOp()) - if (Optional rootKind = rootOp.getOpName()) + if (std::optional rootKind = rootOp.getOpName()) rootKindAttr = builder.getStringAttr(*rootKind); builder.setInsertionPointToEnd(currentBlock); diff --git a/mlir/lib/Conversion/PDLToPDLInterp/Predicate.h b/mlir/lib/Conversion/PDLToPDLInterp/Predicate.h --- a/mlir/lib/Conversion/PDLToPDLInterp/Predicate.h +++ b/mlir/lib/Conversion/PDLToPDLInterp/Predicate.h @@ -222,7 +222,7 @@ struct OperandGroupPosition : public PredicateBase< OperandGroupPosition, Position, - std::tuple, bool>, + std::tuple, bool>, Predicates::OperandGroupPos> { explicit OperandGroupPosition(const KeyTy &key); @@ -233,7 +233,9 @@ /// Returns the group number of this position. If std::nullopt, this group /// refers to all operands. 
- Optional getOperandGroupNumber() const { return std::get<1>(key); } + std::optional getOperandGroupNumber() const { + return std::get<1>(key); + } /// Returns if the operand group has unknown size. If false, the operand group /// has at max one element. @@ -298,7 +300,7 @@ struct ResultGroupPosition : public PredicateBase< ResultGroupPosition, Position, - std::tuple, bool>, + std::tuple, bool>, Predicates::ResultGroupPos> { explicit ResultGroupPosition(const KeyTy &key) : Base(key) { parent = std::get<0>(key); @@ -311,7 +313,9 @@ /// Returns the group number of this position. If std::nullopt, this group /// refers to all results. - Optional getResultGroupNumber() const { return std::get<1>(key); } + std::optional getResultGroupNumber() const { + return std::get<1>(key); + } /// Returns if the result group has unknown size. If false, the result group /// has at max one element. @@ -595,7 +599,7 @@ } /// Returns a position for a group of operands of the given operation. - Position *getOperandGroup(OperationPosition *p, Optional group, + Position *getOperandGroup(OperationPosition *p, std::optional group, bool isVariadic) { return OperandGroupPosition::get(uniquer, p, group, isVariadic); } @@ -609,7 +613,7 @@ } /// Returns a position for a group of results of the given operation. - Position *getResultGroup(OperationPosition *p, Optional group, + Position *getResultGroup(OperationPosition *p, std::optional group, bool isVariadic) { return ResultGroupPosition::get(uniquer, p, group, isVariadic); } diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp --- a/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp +++ b/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp @@ -81,7 +81,7 @@ builder.getType(pos)); }) .Case([&](auto op) { - Optional index = op.getIndex(); + std::optional index = op.getIndex(); // Prevent traversal into a null value if the result has a proper index. if (index) @@ -106,11 +106,11 @@ }); } -static void getTreePredicates(std::vector &predList, - Value val, PredicateBuilder &builder, - DenseMap &inputs, - OperationPosition *pos, - Optional ignoreOperand = std::nullopt) { +static void +getTreePredicates(std::vector &predList, Value val, + PredicateBuilder &builder, + DenseMap &inputs, OperationPosition *pos, + std::optional ignoreOperand = std::nullopt) { assert(val.getType().isa() && "expected operation"); pdl::OperationOp op = cast(val.getDefiningOp()); OperationPosition *opPos = cast(pos); @@ -120,7 +120,7 @@ predList.emplace_back(pos, builder.getIsNotNull()); // Check that this is the correct root operation. - if (Optional opName = op.getOpName()) + if (std::optional opName = op.getOpName()) predList.emplace_back(pos, builder.getOperationName(*opName)); // Check that the operation has the proper number of operands. If there are @@ -302,7 +302,7 @@ // Ensure that the result isn't null if the result has an index. auto *parentPos = cast(inputs.lookup(op.getParent())); bool isVariadic = op.getType().isa(); - Optional index = op.getIndex(); + std::optional index = op.getIndex(); resultPos = builder.getResultGroup(parentPos, index, isVariadic); if (index) predList.emplace_back(resultPos, builder.getIsNotNull()); @@ -356,7 +356,7 @@ /// An op accepting a value at an optional index. 
 struct OpIndex {
   Value parent;
-  Optional index;
+  std::optional index;
 };

 /// The parent and operand index of each operation for each root, stored
@@ -408,12 +408,13 @@
   // * the operand index of the value in its parent;
   // * the depth of the visited value.
   struct Entry {
-    Entry(Value value, Value parent, Optional index, unsigned depth)
+    Entry(Value value, Value parent, std::optional index,
+          unsigned depth)
         : value(value), parent(parent), index(index), depth(depth) {}

     Value value;
     Value parent;
-    Optional index;
+    std::optional index;
     unsigned depth;
   };

diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -335,7 +335,7 @@
   // The dim op is also okay if its operand memref is a view/subview whose
   // corresponding size is a valid symbol.
-  Optional index = getConstantIntValue(dimOp.getDimension());
+  std::optional index = getConstantIntValue(dimOp.getDimension());
   assert(index.has_value() &&
          "expect only `dim` operations with a constant index");
   int64_t i = index.value();
@@ -923,7 +923,7 @@
 static std::enable_if_t(), OpFoldResult>
 createOrFold(OpBuilder &b, Location loc, ValueRange operands,
-             Args &&... leadingArguments) {
+             Args &&...leadingArguments) {
   // Identify the constant operands and extract their values as attributes.
   // Note that we cannot use the original values directly because the list of
   // operands may have changed due to canonicalization and composition.
@@ -2009,7 +2009,7 @@
 namespace {
 /// Returns constant trip count in trivial cases.
-static Optional getTrivialConstantTripCount(AffineForOp forOp) {
+static std::optional getTrivialConstantTripCount(AffineForOp forOp) {
   int64_t step = forOp.getStep();
   if (!forOp.hasConstantBounds() || step <= 0)
     return std::nullopt;
@@ -2030,7 +2030,7 @@
       return failure();
     if (forOp.getNumResults() == 0)
       return success();
-    Optional tripCount = getTrivialConstantTripCount(forOp);
+    std::optional tripCount = getTrivialConstantTripCount(forOp);
     if (tripCount && *tripCount == 0) {
       // The initial values of the iteration arguments would be the op's
       // results.
@@ -2082,7 +2082,8 @@
 /// correspond to the loop iterator operands, i.e., those excluding the
 /// induction variable. AffineForOp only has one region, so zero is the only
 /// valid value for `index`.
-OperandRange AffineForOp::getSuccessorEntryOperands(Optional index) {
+OperandRange
+AffineForOp::getSuccessorEntryOperands(std::optional index) {
   assert((!index || *index == 0) && "invalid region index");

   // The initial operands map to the loop arguments after the induction
@@ -2096,14 +2097,14 @@
 /// correspond to a constant value for each operand, or null if that operand is
 /// not a constant.
 void AffineForOp::getSuccessorRegions(
-    Optional index, ArrayRef operands,
+    std::optional index, ArrayRef operands,
     SmallVectorImpl &regions) {
   assert((!index.has_value() || index.value() == 0) && "expected loop region");
   // The loop may typically branch back to its body or to the parent operation.
   // If the predecessor is the parent op and the trip count is known to be at
   // least one, branch into the body using the iterator arguments. And in cases
   // we know the trip count is zero, it can only branch back to its parent.
-  Optional tripCount = getTrivialConstantTripCount(*this);
+  std::optional tripCount = getTrivialConstantTripCount(*this);
   if (!index.has_value() && tripCount.has_value()) {
     if (tripCount.value() > 0) {
       regions.push_back(RegionSuccessor(&getLoopBody(), getRegionIterArgs()));
@@ -2130,7 +2131,7 @@
 /// Returns true if the affine.for has zero iterations in trivial cases.
 static bool hasTrivialZeroTripCount(AffineForOp op) {
-  Optional tripCount = getTrivialConstantTripCount(op);
+  std::optional tripCount = getTrivialConstantTripCount(op);
   return tripCount && *tripCount == 0;
 }

@@ -2262,23 +2263,23 @@
 Region &AffineForOp::getLoopBody() { return getRegion(); }

-Optional AffineForOp::getSingleInductionVar() {
+std::optional AffineForOp::getSingleInductionVar() {
   return getInductionVar();
 }

-Optional AffineForOp::getSingleLowerBound() {
+std::optional AffineForOp::getSingleLowerBound() {
   if (!hasConstantLowerBound())
     return std::nullopt;
   OpBuilder b(getContext());
   return OpFoldResult(b.getI64IntegerAttr(getConstantLowerBound()));
 }

-Optional AffineForOp::getSingleStep() {
+std::optional AffineForOp::getSingleStep() {
   OpBuilder b(getContext());
   return OpFoldResult(b.getI64IntegerAttr(getStep()));
 }

-Optional AffineForOp::getSingleUpperBound() {
+std::optional AffineForOp::getSingleUpperBound() {
   if (!hasConstantUpperBound())
     return std::nullopt;
   OpBuilder b(getContext());
@@ -2541,7 +2542,7 @@
 /// AffineIfOp has two regions -- `then` and `else`. The flow of data should be
 /// as follows: AffineIfOp -> `then`/`else` -> AffineIfOp
 void AffineIfOp::getSuccessorRegions(
-    Optional index, ArrayRef operands,
+    std::optional index, ArrayRef operands,
     SmallVectorImpl &regions) {
   // If the predecessor is an AffineIfOp, then branching into both `then` and
   // `else` region is valid.
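// Illustrative sketch, not part of the patch: consuming the std::optional
// bound accessors that AffineForOp implements above (and scf::ForOp further
// down); this mirrors the use in IntegerRangeAnalysis.cpp earlier in the
// patch. The helper name is hypothetical.
static bool hasConstantBounds(mlir::LoopLikeOpInterface loop) {
  std::optional<mlir::OpFoldResult> lb = loop.getSingleLowerBound();
  std::optional<mlir::OpFoldResult> ub = loop.getSingleUpperBound();
  if (!lb || !ub)
    return false;
  // getConstantIntValue(OpFoldResult) also returns std::optional<int64_t> now.
  return mlir::getConstantIntValue(*lb).has_value() &&
         mlir::getConstantIntValue(*ub).has_value();
}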
@@ -3567,7 +3568,7 @@ return AffineValueMap(getUpperBoundsMap(), getUpperBoundsOperands()); } -Optional> AffineParallelOp::getConstantRanges() { +std::optional> AffineParallelOp::getConstantRanges() { if (hasMinMaxBounds()) return std::nullopt; @@ -3985,7 +3986,7 @@ if (parser.parseAttribute(attrVal, builder.getNoneType(), "reduce", attrStorage)) return failure(); - llvm::Optional reduction = + std::optional reduction = arith::symbolizeAtomicRMWKind(attrVal.getValue()); if (!reduction) return parser.emitError(loc, "invalid reduction value: ") << attrVal; @@ -4231,7 +4232,7 @@ result.addOperands(linearIndex); SmallVector basisValues = llvm::to_vector(llvm::map_range(basis, [&](OpFoldResult ofr) -> Value { - Optional staticDim = getConstantIntValue(ofr); + std::optional staticDim = getConstantIntValue(ofr); if (staticDim.has_value()) return builder.create(result.location, *staticDim); diff --git a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp --- a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp +++ b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp @@ -246,7 +246,8 @@ // AddUIExtendedOp //===----------------------------------------------------------------------===// -Optional> arith::AddUIExtendedOp::getShapeForUnroll() { +std::optional> +arith::AddUIExtendedOp::getShapeForUnroll() { if (auto vt = getType(0).dyn_cast()) return llvm::to_vector<4>(vt.getShape()); return std::nullopt; @@ -378,7 +379,8 @@ // MulSIExtendedOp //===----------------------------------------------------------------------===// -Optional> arith::MulSIExtendedOp::getShapeForUnroll() { +std::optional> +arith::MulSIExtendedOp::getShapeForUnroll() { if (auto vt = getType(0).dyn_cast()) return llvm::to_vector<4>(vt.getShape()); return std::nullopt; @@ -424,7 +426,8 @@ // MulUIExtendedOp //===----------------------------------------------------------------------===// -Optional> arith::MulUIExtendedOp::getShapeForUnroll() { +std::optional> +arith::MulUIExtendedOp::getShapeForUnroll() { if (auto vt = getType(0).dyn_cast()) return llvm::to_vector<4>(vt.getShape()); return std::nullopt; @@ -1639,7 +1642,7 @@ return DenseElementsAttr::get(shapedType, boolAttr); } -static Optional getIntegerWidth(Type t) { +static std::optional getIntegerWidth(Type t) { if (auto intType = t.dyn_cast()) { return intType.getWidth(); } @@ -1661,7 +1664,7 @@ if (matchPattern(getRhs(), m_Zero())) { if (auto extOp = getLhs().getDefiningOp()) { // extsi(%x : i1 -> iN) != 0 -> %x - Optional integerWidth = + std::optional integerWidth = getIntegerWidth(extOp.getOperand().getType()); if (integerWidth && integerWidth.value() == 1 && getPredicate() == arith::CmpIPredicate::ne) @@ -1669,7 +1672,7 @@ } if (auto extOp = getLhs().getDefiningOp()) { // extui(%x : i1 -> iN) != 0 -> %x - Optional integerWidth = + std::optional integerWidth = getIntegerWidth(extOp.getOperand().getType()); if (integerWidth && integerWidth.value() == 1 && getPredicate() == arith::CmpIPredicate::ne) diff --git a/mlir/lib/Dialect/Async/IR/Async.cpp b/mlir/lib/Dialect/Async/IR/Async.cpp --- a/mlir/lib/Dialect/Async/IR/Async.cpp +++ b/mlir/lib/Dialect/Async/IR/Async.cpp @@ -53,7 +53,7 @@ } MutableOperandRange -YieldOp::getMutableSuccessorOperands(Optional index) { +YieldOp::getMutableSuccessorOperands(std::optional index) { return getOperandsMutable(); } @@ -63,7 +63,8 @@ constexpr char kOperandSegmentSizesAttr[] = "operand_segment_sizes"; -OperandRange ExecuteOp::getSuccessorEntryOperands(Optional index) { +OperandRange +ExecuteOp::getSuccessorEntryOperands(std::optional 
index) { assert(index && *index == 0 && "invalid region index"); return getBodyOperands(); } @@ -77,7 +78,7 @@ return getValueOrTokenType(lhs) == getValueOrTokenType(rhs); } -void ExecuteOp::getSuccessorRegions(Optional index, +void ExecuteOp::getSuccessorRegions(std::optional index, ArrayRef, SmallVectorImpl ®ions) { // The `body` region branch back to the parent operation. diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp @@ -348,7 +348,7 @@ LogicalResult matchAndRewrite(tensor::DimOp dimOp, PatternRewriter &rewriter) const override { - Optional maybeConstantIndex = dimOp.getConstantIndex(); + std::optional maybeConstantIndex = dimOp.getConstantIndex(); auto allocTensorOp = dimOp.getSource().getDefiningOp(); if (!allocTensorOp || !maybeConstantIndex) return failure(); diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp --- a/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp @@ -104,7 +104,7 @@ successorRegions); for (RegionSuccessor &successorRegion : successorRegions) { // Determine the current region index (if any). - Optional regionIndex; + std::optional regionIndex; Region *regionSuccessor = successorRegion.getSuccessor(); if (regionSuccessor) regionIndex = regionSuccessor->getRegionNumber(); diff --git a/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp b/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp --- a/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp +++ b/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp @@ -595,7 +595,7 @@ } Block *SwitchOp::getSuccessorForOperands(ArrayRef operands) { - Optional caseValues = getCaseValues(); + std::optional caseValues = getCaseValues(); if (!caseValues) return getDefaultDestination(); @@ -805,7 +805,8 @@ SuccessorRange predDests = predSwitch.getCaseDestinations(); auto it = llvm::find(predDests, currentBlock); if (it != predDests.end()) { - Optional predCaseValues = predSwitch.getCaseValues(); + std::optional predCaseValues = + predSwitch.getCaseValues(); foldSwitch(op, rewriter, predCaseValues->getValues()[it - predDests.begin()]); } else { diff --git a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp --- a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp +++ b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp @@ -84,7 +84,7 @@ if (getCallee().empty()) return emitOpError("callee must not be empty"); - if (Optional argsAttr = getArgs()) { + if (std::optional argsAttr = getArgs()) { for (Attribute arg : *argsAttr) { auto intAttr = arg.dyn_cast(); if (intAttr && intAttr.getType().isa()) { @@ -102,7 +102,7 @@ } } - if (Optional templateArgsAttr = getTemplateArgs()) { + if (std::optional templateArgsAttr = getTemplateArgs()) { for (Attribute tArg : *templateArgsAttr) { if (!tArg.isa()) return emitOpError("template argument has invalid type"); diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp --- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp +++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp @@ -367,7 +367,8 @@ AllReduceOperationAttr &attr) { StringRef enumStr; if (!parser.parseOptionalKeyword(&enumStr)) { - Optional op = gpu::symbolizeAllReduceOperation(enumStr); + std::optional op = + 
gpu::symbolizeAllReduceOperation(enumStr); if (!op) return parser.emitError(parser.getCurrentLocation(), "invalid op kind"); attr = AllReduceOperationAttr::get(parser.getContext(), *op); diff --git a/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp b/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp --- a/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp +++ b/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp @@ -26,8 +26,8 @@ /// Check if given mapping attributes are one of the desired attributes static DiagnosedSilenceableFailure checkAttributeType(ArrayRef threadMappingAttributes, - const Optional &foreachMapping, - Optional transformOp) { + const std::optional &foreachMapping, + std::optional transformOp) { if (!foreachMapping.has_value()) return transformOp->emitSilenceableError() << "mapping must be present"; @@ -52,11 +52,11 @@ /// Determines if the size of the kernel configuration is supported by the GPU /// architecture being used. It presently makes use of CUDA limitations, however /// that aspect may be enhanced for other GPUs. -static DiagnosedSilenceableFailure -checkGpuLimits(TransformOpInterface transformOp, Optional gridDimX, - Optional gridDimY, Optional gridDimZ, - Optional blockDimX, Optional blockDimY, - Optional blockDimZ) { +static DiagnosedSilenceableFailure checkGpuLimits( + TransformOpInterface transformOp, std::optional gridDimX, + std::optional gridDimY, std::optional gridDimZ, + std::optional blockDimX, std::optional blockDimY, + std::optional blockDimZ) { static constexpr int maxTotalBlockdim = 1024; static constexpr int maxBlockdimx = 1024; @@ -92,12 +92,12 @@ static DiagnosedSilenceableFailure createGpuLaunch(RewriterBase &rewriter, Location loc, TransformOpInterface transformOp, LaunchOp &launchOp, - Optional gridDimX = std::nullopt, - Optional gridDimY = std::nullopt, - Optional gridDimZ = std::nullopt, - Optional blockDimX = std::nullopt, - Optional blockDimY = std::nullopt, - Optional blockDimZ = std::nullopt) { + std::optional gridDimX = std::nullopt, + std::optional gridDimY = std::nullopt, + std::optional gridDimZ = std::nullopt, + std::optional blockDimX = std::nullopt, + std::optional blockDimY = std::nullopt, + std::optional blockDimZ = std::nullopt) { DiagnosedSilenceableFailure diag = checkGpuLimits(transformOp, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ); @@ -126,12 +126,12 @@ static DiagnosedSilenceableFailure alterGpuLaunch(TrivialPatternRewriter &rewriter, LaunchOp gpuLaunch, TransformOpInterface transformOp, - Optional gridDimX = std::nullopt, - Optional gridDimY = std::nullopt, - Optional gridDimZ = std::nullopt, - Optional blockDimX = std::nullopt, - Optional blockDimY = std::nullopt, - Optional blockDimZ = std::nullopt) { + std::optional gridDimX = std::nullopt, + std::optional gridDimY = std::nullopt, + std::optional gridDimZ = std::nullopt, + std::optional blockDimX = std::nullopt, + std::optional blockDimY = std::nullopt, + std::optional blockDimZ = std::nullopt) { DiagnosedSilenceableFailure diag = checkGpuLimits(transformOp, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ); @@ -370,7 +370,7 @@ static DiagnosedSilenceableFailure rewriteOneForeachThreadToGpuThreads( RewriterBase &rewriter, scf::ForeachThreadOp foreachThreadOp, const SmallVectorImpl &globalBlockDims, bool syncAfterDistribute, - llvm::Optional transformOp, + std::optional transformOp, const ArrayRef &threadMappingAttributes) { // Step 0. Target-specific verifications. 
There is no good place to anchor // those right now: the ForeachThreadOp is target-independent and the @@ -502,7 +502,7 @@ DiagnosedSilenceableFailure mlir::transform::gpu::mapNestedForeachToThreadsImpl( RewriterBase &rewriter, Operation *target, const SmallVectorImpl &blockDim, bool syncAfterDistribute, - llvm::Optional transformOp, + std::optional transformOp, const ArrayRef &threadMappingAttributes) { DiagnosedSilenceableFailure diag = DiagnosedSilenceableFailure::success(); target->walk([&](scf::ForeachThreadOp foreachThreadOp) { diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -147,7 +147,7 @@ // Replace the string attribute `predicate` with an integer attribute. int64_t predicateValue = 0; if (std::is_same()) { - Optional predicate = + std::optional predicate = symbolizeICmpPredicate(predicateAttr.getValue()); if (!predicate) return parser.emitError(predicateLoc) @@ -155,7 +155,7 @@ << "' is an incorrect value of the 'predicate' attribute"; predicateValue = static_cast(*predicate); } else { - Optional predicate = + std::optional predicate = symbolizeFCmpPredicate(predicateAttr.getValue()); if (!predicate) return parser.emitError(predicateLoc) @@ -253,7 +253,7 @@ /// Checks that the elemental type is present in either the pointer type or /// the attribute, but not both. static LogicalResult verifyOpaquePtr(Operation *op, LLVMPointerType ptrType, - Optional ptrElementType) { + std::optional ptrElementType) { if (ptrType.isOpaque() && !ptrElementType.has_value()) { return op->emitOpError() << "expected '" << kElemTypeAttrName << "' attribute if opaque pointer type is used"; @@ -665,7 +665,7 @@ } Type LLVM::GEPOp::getSourceElementType() { - if (Optional elemType = getElemType()) + if (std::optional elemType = getElemType()) return *elemType; return extractVectorElementType(getBase().getType()) @@ -1853,7 +1853,7 @@ } } - Optional alignAttr = getAlignment(); + std::optional alignAttr = getAlignment(); if (alignAttr.has_value()) { uint64_t value = alignAttr.value(); if (!llvm::isPowerOf2_64(value)) diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp --- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp @@ -77,8 +77,8 @@ // Given the element type of an operand and whether or not it is an accumulator, // this function returns the PTX type (`NVVM::MMATypes`) that corresponds to the // operand's element type. 
-Optional MmaOp::inferOperandMMAType(Type operandElType, - bool isAccumulator) { +std::optional +MmaOp::inferOperandMMAType(Type operandElType, bool isAccumulator) { auto half2Type = LLVM::getFixedVectorType(Float16Type::get(operandElType.getContext()), 2); if (operandElType.isF64()) @@ -118,14 +118,14 @@ } MMATypes MmaOp::accumPtxType() { - Optional val = inferOperandMMAType( + std::optional val = inferOperandMMAType( getODSOperands(2).getTypes().front(), /*isAccum=*/true); assert(val.has_value() && "accumulator PTX type should always be inferrable"); return val.value(); } MMATypes MmaOp::resultPtxType() { - Optional val = + std::optional val = inferOperandMMAType(getResult().getType(), /*isAccum=*/true); assert(val.has_value() && "result PTX type should always be inferrable"); return val.value(); @@ -159,7 +159,7 @@ regTypes.push_back(this->getOperand(operandIdx).getType()); } } - Optional inferredType = + std::optional inferredType = inferOperandMMAType(regTypes.back(), /*isAccum=*/fragIdx >= 2); if (inferredType) ignoreAttrNames.push_back(frag.ptxTypeAttr); @@ -191,10 +191,10 @@ void MmaOp::build(OpBuilder &builder, OperationState &result, Type resultType, ValueRange operandA, ValueRange operandB, ValueRange operandC, - ArrayRef shape, Optional b1Op, - Optional intOverflow, - Optional> multiplicandPtxTypes, - Optional> multiplicandLayouts) { + ArrayRef shape, std::optional b1Op, + std::optional intOverflow, + std::optional> multiplicandPtxTypes, + std::optional> multiplicandLayouts) { assert(shape.size() == 3 && "expected shape to have size 3 (m, n, k)"); MLIRContext *ctx = builder.getContext(); @@ -247,7 +247,7 @@ // `->` type($res) ParseResult MmaOp::parse(OpAsmParser &parser, OperationState &result) { struct OperandFragment { - Optional elemtype; + std::optional elemtype; SmallVector regs; SmallVector regTypes; }; diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp --- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp +++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp @@ -1569,7 +1569,7 @@ RewriterBase &rewriter, transform::TransformState &state, TransformOpInterface transformOp, ArrayRef targets, ArrayRef mixedNumThreads, - ArrayRef mixedTileSizes, Optional mapping, + ArrayRef mixedTileSizes, std::optional mapping, SmallVector &tileOps, SmallVector &tiledOps) { if (targets.empty()) return DiagnosedSilenceableFailure::success(); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp @@ -44,7 +44,7 @@ static Value allocBuffer(ImplicitLocOpBuilder &b, const LinalgPromotionOptions &options, Type elementType, Value allocSize, DataLayout &layout, - Optional alignment = std::nullopt) { + std::optional alignment = std::nullopt) { auto width = layout.getTypeSize(elementType); IntegerAttr alignmentAttr; @@ -77,11 +77,10 @@ /// no call back to do so is provided. The default is to allocate a /// memref<..xi8> and return a view to get a memref type of shape /// boundingSubViewSize. 
-static Optional -defaultAllocBufferCallBack(const LinalgPromotionOptions &options, - OpBuilder &builder, memref::SubViewOp subView, - ArrayRef boundingSubViewSize, - Optional alignment, DataLayout &layout) { +static std::optional defaultAllocBufferCallBack( + const LinalgPromotionOptions &options, OpBuilder &builder, + memref::SubViewOp subView, ArrayRef boundingSubViewSize, + std::optional alignment, DataLayout &layout) { ShapedType viewType = subView.getType(); ImplicitLocOpBuilder b(subView.getLoc(), builder); auto zero = b.createOrFold(0); @@ -136,7 +135,7 @@ CopyCallbackFn copyOutFn; /// Alignment of promoted buffer. - Optional alignment; + std::optional alignment; }; } // namespace @@ -166,7 +165,7 @@ } else { allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp, ArrayRef boundingSubViewSize, - DataLayout &layout) -> Optional { + DataLayout &layout) -> std::optional { return defaultAllocBufferCallBack(options, b, subViewOp, boundingSubViewSize, alignment, layout); }; @@ -246,7 +245,8 @@ SmallVector dynSizes(fullSizes.size(), ShapedType::kDynamic); // If a callback is not specified, then use the default implementation for // allocating the promoted buffer. - Optional fullLocalView = allocationFn(b, subView, fullSizes, layout); + std::optional fullLocalView = + allocationFn(b, subView, fullSizes, layout); if (!fullLocalView) return failure(); SmallVector zeros(fullSizes.size(), b.getIndexAttr(0)); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp @@ -191,9 +191,9 @@ static bool canOmitTileOffsetInBoundsCheck(OpFoldResult tileSize, OpFoldResult numThreads, OpFoldResult iterationSize) { - Optional tileSizeConst = getConstantIntValue(tileSize); - Optional numThreadsConst = getConstantIntValue(numThreads); - Optional iterSizeConst = getConstantIntValue(iterationSize); + std::optional tileSizeConst = getConstantIntValue(tileSize); + std::optional numThreadsConst = getConstantIntValue(numThreads); + std::optional iterSizeConst = getConstantIntValue(iterationSize); if (!tileSizeConst || !numThreadsConst || !iterSizeConst) return false; return *tileSizeConst * (*numThreadsConst - 1) < *iterSizeConst; @@ -221,7 +221,7 @@ RewriterBase &b, Location loc, scf::ForeachThreadOp foreachThreadOp, ArrayRef numThreads, SmallVector loopRanges, bool omitTileOffsetBoundsCheck, - Optional> nominalTileSizes, + std::optional> nominalTileSizes, SmallVector &tiledOffsets, SmallVector &tiledSizes) { OpBuilder::InsertionGuard g(b); @@ -302,8 +302,8 @@ /// assume that `tileSize[i] * (numThread[i] -1) <= dimSize[i]` holds. 
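// Illustrative sketch, not part of the patch: the arithmetic behind
// canOmitTileOffsetInBoundsCheck above, restated over plain std::optional.
// With tileSize = 4, numThreads = 8, iterationSize = 32 the last thread starts
// at 4 * (8 - 1) = 28 < 32, so the tile offset needs no bounds check; with
// iterationSize = 28 it does (28 < 28 is false).
static bool canOmitCheck(std::optional<int64_t> tileSize,
                         std::optional<int64_t> numThreads,
                         std::optional<int64_t> iterationSize) {
  if (!tileSize || !numThreads || !iterationSize)
    return false; // any dynamic quantity forces the conservative answer
  return *tileSize * (*numThreads - 1) < *iterationSize;
}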
static FailureOr tileToForeachThreadOpImpl( RewriterBase &b, TilingInterface op, ArrayRef numThreads, - Optional> nominalTileSizes, - Optional mapping, bool omitTileOffsetBoundsCheck) { + std::optional> nominalTileSizes, + std::optional mapping, bool omitTileOffsetBoundsCheck) { Location loc = op->getLoc(); OpBuilder::InsertionGuard g(b); @@ -399,7 +399,7 @@ FailureOr linalg::tileToForeachThreadOp(RewriterBase &b, TilingInterface op, ArrayRef numThreads, - Optional mapping) { + std::optional mapping) { return tileToForeachThreadOpImpl(b, op, numThreads, /*nominalTileSizes=*/std::nullopt, mapping, /*omitTileOffsetBoundsCheck=*/false); @@ -408,7 +408,7 @@ FailureOr linalg::tileToForeachThreadOpUsingTileSizes(RewriterBase &b, TilingInterface op, ArrayRef tileSizes, - Optional mapping) { + std::optional mapping) { SmallVector loopRanges = op.getIterationDomain(b); unsigned nLoops = loopRanges.size(); SmallVector numThreads; @@ -586,7 +586,7 @@ PartialReductionOpInterface op, ArrayRef numThreads, ArrayRef tileSizes, - Optional mapping) { + std::optional mapping) { Location loc = op.getLoc(); OpBuilder::InsertionGuard g(b); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp @@ -460,7 +460,7 @@ bool zeroSliceGuard = true; if (controlFn) { - if (Optional control = controlFn(sliceOp)) + if (std::optional control = controlFn(sliceOp)) zeroSliceGuard = *control; else return failure(); @@ -501,7 +501,7 @@ } // The size is less than or equal to tileSize because outer dims are all 1s. - Optional tileSize = + std::optional tileSize = getConstantIntValue(tileAndPosMapping.lookup(dim)); assert(tileSize.has_value() && "dynamic inner tile size is not supported"); paddedShape.push_back(tileSize.value()); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -373,14 +373,13 @@ Operation *newOp; }; -llvm::Optional +std::optional mlir::linalg::getCombinerOpKind(Operation *combinerOp) { using ::mlir::vector::CombiningKind; if (!combinerOp) return std::nullopt; - return llvm::TypeSwitch>( - combinerOp) + return llvm::TypeSwitch>(combinerOp) .Case( [&](auto op) { return CombiningKind::ADD; }) .Case([&](auto op) { return CombiningKind::AND; }) @@ -1847,7 +1846,7 @@ Operation *reduceOp = matchLinalgReduction(linalgOp.getDpsInitOperand(0)); if (!reduceOp) return; - llvm::Optional maybeKind; + std::optional maybeKind; maybeKind = getCombinerOpKind(reduceOp); if (!maybeKind || *maybeKind != vector::CombiningKind::ADD) return; diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp --- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp +++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp @@ -815,7 +815,7 @@ // b. The subshape size is 1. According to the way the loops are set up, // tensors with "0" dimensions would never be constructed. 
int64_t shapeSize = shape[r]; - Optional sizeCst = getConstantIntValue(size); + std::optional sizeCst = getConstantIntValue(size); auto hasTileSizeOne = sizeCst && *sizeCst == 1; auto dividesEvenly = sizeCst && !ShapedType::isDynamic(shapeSize) && ((shapeSize % *sizeCst) == 0); diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -184,7 +184,8 @@ ofr.get().cast().getInt()); continue; } - Optional maybeConstant = getConstantIntValue(ofr.get()); + std::optional maybeConstant = + getConstantIntValue(ofr.get()); if (maybeConstant) ofr = builder.getIndexAttr(*maybeConstant); } @@ -458,7 +459,7 @@ } void AllocaScopeOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { if (index) { regions.push_back(RegionSuccessor(getResults())); @@ -922,7 +923,7 @@ build(builder, result, source, indexValue); } -Optional DimOp::getConstantIndex() { +std::optional DimOp::getConstantIndex() { return getConstantIntValue(getIndex()); } @@ -942,7 +943,7 @@ LogicalResult DimOp::verify() { // Assume unknown index to be in range. - Optional index = getConstantIndex(); + std::optional index = getConstantIndex(); if (!index) return success(); @@ -977,7 +978,7 @@ /// This accounts for cases where there are multiple unit-dims, but only a /// subset of those are dropped. For MemRefTypes these can be disambiguated /// using the strides. If a dimension is dropped the stride must be dropped too. -static llvm::Optional +static std::optional computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType, ArrayRef sizes) { llvm::SmallBitVector unusedDims(originalType.getRank()); @@ -1049,7 +1050,7 @@ llvm::SmallBitVector SubViewOp::getDroppedDims() { MemRefType sourceType = getSourceType(); MemRefType resultType = getType(); - llvm::Optional unusedDims = + std::optional unusedDims = computeMemRefRankReductionMask(sourceType, resultType, getMixedSizes()); assert(unusedDims && "unable to find unused dims of subview"); return *unusedDims; @@ -1364,7 +1365,7 @@ /// The number and type of the results are inferred from the /// shape of the source. LogicalResult ExtractStridedMetadataOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { ExtractStridedMetadataOpAdaptor extractAdaptor(operands, attributes, regions); @@ -1625,7 +1626,7 @@ } } - if (Optional alignAttr = getAlignment()) { + if (std::optional alignAttr = getAlignment()) { uint64_t alignment = *alignAttr; if (!llvm::isPowerOf2_64(alignment)) @@ -2610,7 +2611,7 @@ return inferredType; // Compute which dimensions are dropped. - Optional> dimsToProject = + std::optional> dimsToProject = computeRankReductionMask(inferredType.getShape(), resultShape); assert(dimsToProject.has_value() && "invalid rank reduction"); @@ -2887,7 +2888,7 @@ auto nonRankReducedType = SubViewOp::inferResultType(sourceType, mixedOffsets, mixedSizes, mixedStrides) .cast(); - llvm::Optional unusedDims = + std::optional unusedDims = computeMemRefRankReductionMask(currentSourceType, currentResultType, mixedSizes); // Return nullptr as failure mode. @@ -2970,14 +2971,14 @@ // Check offsets are zero. 
if (llvm::any_of(mixedOffsets, [](OpFoldResult ofr) { - Optional intValue = getConstantIntValue(ofr); + std::optional intValue = getConstantIntValue(ofr); return !intValue || intValue.value() != 0; })) return false; // Check strides are one. if (llvm::any_of(mixedStrides, [](OpFoldResult ofr) { - Optional intValue = getConstantIntValue(ofr); + std::optional intValue = getConstantIntValue(ofr); return !intValue || intValue.value() != 1; })) return false; @@ -2985,7 +2986,7 @@ // Check all size values are static and matches the (static) source shape. ArrayRef sourceShape = subViewOp.getSourceType().getShape(); for (const auto &size : llvm::enumerate(mixedSizes)) { - Optional intValue = getConstantIntValue(size.value()); + std::optional intValue = getConstantIntValue(size.value()); if (!intValue || *intValue != sourceShape[size.index()]) return false; } diff --git a/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp b/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp --- a/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp @@ -98,9 +98,9 @@ } if (!candidateLoop) return failure(); - llvm::Optional inductionVar = candidateLoop.getSingleInductionVar(); - llvm::Optional lowerBound = candidateLoop.getSingleLowerBound(); - llvm::Optional singleStep = candidateLoop.getSingleStep(); + std::optional inductionVar = candidateLoop.getSingleInductionVar(); + std::optional lowerBound = candidateLoop.getSingleLowerBound(); + std::optional singleStep = candidateLoop.getSingleStep(); if (!inductionVar || !lowerBound || !singleStep) return failure(); @@ -125,13 +125,12 @@ AffineExpr induc = getAffineDimExpr(0, allocOp.getContext()); unsigned dimCount = 1; auto getAffineExpr = [&](OpFoldResult e) -> AffineExpr { - if (Optional constValue = getConstantIntValue(e)) { + if (std::optional constValue = getConstantIntValue(e)) { return getAffineConstantExpr(*constValue, allocOp.getContext()); } auto value = getOrCreateValue(e, builder, candidateLoop->getLoc()); operands.push_back(value); return getAffineDimExpr(dimCount++, allocOp.getContext()); - }; auto init = getAffineExpr(*lowerBound); auto step = getAffineExpr(*singleStep); diff --git a/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp b/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp --- a/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/ResolveShapedTypeResultDims.cpp @@ -46,7 +46,7 @@ if (!shapedTypeOp) return failure(); - Optional dimIndex = dimOp.getConstantIndex(); + std::optional dimIndex = dimOp.getConstantIndex(); if (!dimIndex) return failure(); @@ -88,7 +88,7 @@ if (!rankedShapeTypeOp) return failure(); - Optional dimIndex = dimOp.getConstantIndex(); + std::optional dimIndex = dimOp.getConstantIndex(); if (!dimIndex) return failure(); diff --git a/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp b/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp --- a/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp +++ b/mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp @@ -149,7 +149,7 @@ MemoryEffectOpInterface iface = dyn_cast(op); if (!iface) return; - Optional effect = + std::optional effect = iface.getEffectOnValue(shmMemRef); if (effect) { readOps.push_back(op); diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ 
-117,7 +117,7 @@ SMLoc loc = parser.getCurrentLocation(); if (parser.parseKeyword(&enumStr)) return failure(); - if (Optional enumValue = symbolizeEnum(enumStr)) { + if (std::optional enumValue = symbolizeEnum(enumStr)) { attr = ClauseAttr::get(parser.getContext(), *enumValue); return success(); } @@ -173,9 +173,9 @@ //===----------------------------------------------------------------------===// // Parser, verifier and printer for Aligned Clause //===----------------------------------------------------------------------===// -static LogicalResult verifyAlignedClause(Operation *op, - Optional alignmentValues, - OperandRange alignedVariables) { +static LogicalResult +verifyAlignedClause(Operation *op, std::optional alignmentValues, + OperandRange alignedVariables) { // Check if number of alignment values equals to number of aligned variables if (!alignedVariables.empty()) { if (!alignmentValues || alignmentValues->size() != alignedVariables.size()) @@ -236,7 +236,7 @@ static void printAlignedClause(OpAsmPrinter &p, Operation *op, ValueRange alignedVars, TypeRange alignedVarTypes, - Optional alignmentValues) { + std::optional alignmentValues) { for (unsigned i = 0; i < alignedVars.size(); ++i) { if (i != 0) p << ", "; @@ -293,11 +293,11 @@ static ParseResult parseScheduleClause( OpAsmParser &parser, ClauseScheduleKindAttr &scheduleAttr, ScheduleModifierAttr &scheduleModifier, UnitAttr &simdModifier, - Optional &chunkSize, Type &chunkType) { + std::optional &chunkSize, Type &chunkType) { StringRef keyword; if (parser.parseKeyword(&keyword)) return failure(); - llvm::Optional schedule = + std::optional schedule = symbolizeClauseScheduleKind(keyword); if (!schedule) return parser.emitError(parser.getNameLoc()) << " expected schedule kind"; @@ -334,7 +334,7 @@ if (!modifiers.empty()) { SMLoc loc = parser.getCurrentLocation(); - if (Optional mod = + if (std::optional mod = symbolizeScheduleModifier(modifiers[0])) { scheduleModifier = ScheduleModifierAttr::get(parser.getContext(), *mod); } else { @@ -396,7 +396,7 @@ static void printReductionVarList(OpAsmPrinter &p, Operation *op, OperandRange reductionVars, TypeRange reductionTypes, - Optional reductions) { + std::optional reductions) { for (unsigned i = 0, e = reductions->size(); i < e; ++i) { if (i != 0) p << ", "; @@ -407,7 +407,7 @@ /// Verifies Reduction Clause static LogicalResult verifyReductionVarList(Operation *op, - Optional reductions, + std::optional reductions, OperandRange reductionVars) { if (!reductionVars.empty()) { if (!reductions || reductions->size() != reductionVars.size()) diff --git a/mlir/lib/Dialect/PDL/IR/PDL.cpp b/mlir/lib/Dialect/PDL/IR/PDL.cpp --- a/mlir/lib/Dialect/PDL/IR/PDL.cpp +++ b/mlir/lib/Dialect/PDL/IR/PDL.cpp @@ -112,7 +112,7 @@ LogicalResult AttributeOp::verify() { Value attrType = getValueType(); - Optional attrValue = getValue(); + std::optional attrValue = getValue(); if (!attrValue) { if (isa((*this)->getParentOp())) @@ -203,7 +203,7 @@ if (resultTypes.empty()) { // If we don't know the concrete operation, don't attempt any verification. // We can't make assumptions if we don't know the concrete operation. 
- Optional rawOpName = op.getOpName(); + std::optional rawOpName = op.getOpName(); if (!rawOpName) return success(); Optional opName = @@ -290,7 +290,7 @@ } bool OperationOp::hasTypeInference() { - if (Optional rawOpName = getOpName()) { + if (std::optional rawOpName = getOpName()) { OperationName opName(*rawOpName, getContext()); return opName.hasInterface(); } @@ -298,7 +298,7 @@ } bool OperationOp::mightHaveTypeInference() { - if (Optional rawOpName = getOpName()) { + if (std::optional rawOpName = getOpName()) { OperationName opName(*rawOpName, getContext()); return opName.mightHaveInterface(); } diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp --- a/mlir/lib/Dialect/SCF/IR/SCF.cpp +++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp @@ -248,7 +248,7 @@ /// correspond to a constant value for each operand, or null if that operand is /// not a constant. void ExecuteRegionOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // If the predecessor is the ExecuteRegionOp, branch into the body. if (!index) { @@ -265,7 +265,7 @@ //===----------------------------------------------------------------------===// MutableOperandRange -ConditionOp::getMutableSuccessorOperands(Optional index) { +ConditionOp::getMutableSuccessorOperands(std::optional index) { // Pass all operands except the condition to the successor region. return getArgsMutable(); } @@ -352,17 +352,19 @@ return success(); } -Optional ForOp::getSingleInductionVar() { return getInductionVar(); } +std::optional ForOp::getSingleInductionVar() { + return getInductionVar(); +} -Optional ForOp::getSingleLowerBound() { +std::optional ForOp::getSingleLowerBound() { return OpFoldResult(getLowerBound()); } -Optional ForOp::getSingleStep() { +std::optional ForOp::getSingleStep() { return OpFoldResult(getStep()); } -Optional ForOp::getSingleUpperBound() { +std::optional ForOp::getSingleUpperBound() { return OpFoldResult(getUpperBound()); } @@ -476,7 +478,7 @@ /// correspond to the loop iterator operands, i.e., those excluding the /// induction variable. LoopOp only has one region, so 0 is the only valid value /// for `index`. -OperandRange ForOp::getSuccessorEntryOperands(Optional index) { +OperandRange ForOp::getSuccessorEntryOperands(std::optional index) { assert(index && *index == 0 && "invalid region index"); // The initial operands map to the loop arguments after the induction @@ -489,7 +491,7 @@ /// during the flow of control. `operands` is a set of optional attributes that /// correspond to a constant value for each operand, or null if that operand is /// not a constant. -void ForOp::getSuccessorRegions(Optional index, +void ForOp::getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // If the predecessor is the ForOp, branch into the body using the iterator @@ -721,7 +723,7 @@ /// Util function that tries to compute a constant diff between u and l. /// Returns std::nullopt when the difference between two AffineValueMap is /// dynamic. 
-static Optional computeConstDiff(Value l, Value u) { +static std::optional computeConstDiff(Value l, Value u) { IntegerAttr clb, cub; if (matchPattern(l, m_Constant(&clb)) && matchPattern(u, m_Constant(&cub))) { llvm::APInt lbValue = clb.getValue(); @@ -754,7 +756,7 @@ return success(); } - Optional diff = + std::optional diff = computeConstDiff(op.getLowerBound(), op.getUpperBound()); if (!diff) return failure(); @@ -765,7 +767,7 @@ return success(); } - llvm::Optional maybeStepValue = op.getConstantStep(); + std::optional maybeStepValue = op.getConstantStep(); if (!maybeStepValue) return failure(); @@ -1068,7 +1070,7 @@ LastTensorLoadCanonicalization, ForOpTensorCastFolder>(context); } -Optional ForOp::getConstantStep() { +std::optional ForOp::getConstantStep() { IntegerAttr step; if (matchPattern(getStep(), m_Constant(&step))) return step.getValue(); @@ -1212,7 +1214,7 @@ void ForeachThreadOp::build(mlir::OpBuilder &builder, mlir::OperationState &result, ValueRange outputs, ValueRange numThreads, - Optional mapping) { + std::optional mapping) { result.addOperands(numThreads); result.addOperands(outputs); if (mapping.has_value()) { @@ -1565,7 +1567,7 @@ /// during the flow of control. `operands` is a set of optional attributes that /// correspond to a constant value for each operand, or null if that operand is /// not a constant. -void IfOp::getSuccessorRegions(Optional index, +void IfOp::getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // The `then` and the `else` region branch back to the parent operation. @@ -2723,7 +2725,7 @@ afterBuilder(odsBuilder, odsState.location, afterBlock->getArguments()); } -OperandRange WhileOp::getSuccessorEntryOperands(Optional index) { +OperandRange WhileOp::getSuccessorEntryOperands(std::optional index) { assert(index && *index == 0 && "WhileOp is expected to branch only to the first region"); @@ -2746,7 +2748,7 @@ return getAfter().front().getArguments(); } -void WhileOp::getSuccessorRegions(Optional index, +void WhileOp::getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // The parent op always branches to the condition region. @@ -3524,7 +3526,7 @@ } void IndexSwitchOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl &successors) { // All regions branch back to the parent op. if (index) { diff --git a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp --- a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp @@ -453,8 +453,8 @@ /// Return `true` if the given loop may have 0 iterations. 
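// Illustrative sketch, not part of the patch: the std::optional style used by
// computeConstDiff and ForOp::getConstantStep above, in plain C++. std::nullopt
// means "not a compile-time constant" and simply propagates to the caller,
// which then bails out of the fold.
static std::optional<int64_t> constantTripCount(std::optional<int64_t> lb,
                                                std::optional<int64_t> ub,
                                                std::optional<int64_t> step) {
  if (!lb || !ub || !step || *step <= 0)
    return std::nullopt;
  if (*ub <= *lb)
    return 0;
  return (*ub - *lb + *step - 1) / *step; // ceildiv(ub - lb, step)
}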
bool mayHaveZeroIterations(scf::ForOp forOp) { - Optional lb = getConstantIntValue(forOp.getLowerBound()); - Optional ub = getConstantIntValue(forOp.getUpperBound()); + std::optional lb = getConstantIntValue(forOp.getLowerBound()); + std::optional ub = getConstantIntValue(forOp.getUpperBound()); if (!lb.has_value() || !ub.has_value()) return true; return *ub <= *lb; @@ -1055,7 +1055,7 @@ bool mayHaveZeroIterations(scf::ForeachThreadOp foreachThreadOp) { int64_t p = 1; for (Value v : foreachThreadOp.getNumThreads()) { - if (Optional c = getConstantIntValue(v)) { + if (std::optional c = getConstantIntValue(v)) { p *= *c; } else { return true; diff --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp --- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp @@ -66,13 +66,13 @@ // Check if `stride` evenly divides the trip count `size - offset`. static bool tileDividesIterationDomain(Range loopRange) { - Optional offsetAsInt = getConstantIntValue(loopRange.offset); + std::optional offsetAsInt = getConstantIntValue(loopRange.offset); if (!offsetAsInt) return false; - Optional sizeAsInt = getConstantIntValue(loopRange.size); + std::optional sizeAsInt = getConstantIntValue(loopRange.size); if (!sizeAsInt) return false; - Optional strideAsInt = getConstantIntValue(loopRange.stride); + std::optional strideAsInt = getConstantIntValue(loopRange.stride); if (!strideAsInt) return false; return ((sizeAsInt.value() - offsetAsInt.value()) % strideAsInt.value() == 0); @@ -83,7 +83,7 @@ static OpFoldResult getBoundedTileSize(OpBuilder &b, Location loc, Range loopRange, Value iv, Value tileSize) { - Optional ts = getConstantIntValue(tileSize); + std::optional ts = getConstantIntValue(tileSize); if (ts && ts.value() == 1) return getAsOpFoldResult(tileSize); @@ -484,10 +484,10 @@ /// `iter_args` of the outer most that is encountered. Traversing the iter_args /// indicates that this is a destination operand of the consumer. If there was /// no loop traversal needed, the second value of the returned tuple is empty. -static std::tuple> +static std::tuple> getUntiledProducerFromSliceSource(OpOperand *source, ArrayRef loops) { - Optional destinationIterArg; + std::optional destinationIterArg; auto loopIt = loops.rbegin(); while (auto iterArg = source->get().dyn_cast()) { scf::ForOp loop = *loopIt; @@ -633,7 +633,7 @@ // TODO: This can be modeled better if the `DestinationStyleOpInterface`. // Update to use that when it does become available. scf::ForOp outerMostLoop = tileAndFuseResult.loops.front(); - Optional iterArgNumber; + std::optional iterArgNumber; if (destinationIterArg) { iterArgNumber = outerMostLoop.getIterArgNumberForOpOperand( *destinationIterArg.value()); diff --git a/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp b/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp --- a/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp +++ b/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp @@ -218,8 +218,8 @@ : constraints.appendSymbolVar(/*num=*/1); // If loop lower/upper bounds are constant: Add EQ constraint. 
- Optional lbInt = getConstantIntValue(lb); - Optional ubInt = getConstantIntValue(ub); + std::optional lbInt = getConstantIntValue(lb); + std::optional ubInt = getConstantIntValue(ub); if (lbInt) constraints.addBound(IntegerPolyhedron::EQ, symLb, *lbInt); if (ubInt) diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp @@ -142,15 +142,15 @@ // Forward declarations. template -static Optional parseAndVerify(SPIRVDialect const &dialect, - DialectAsmParser &parser); +static std::optional parseAndVerify(SPIRVDialect const &dialect, + DialectAsmParser &parser); template <> -Optional parseAndVerify(SPIRVDialect const &dialect, - DialectAsmParser &parser); +std::optional parseAndVerify(SPIRVDialect const &dialect, + DialectAsmParser &parser); template <> -Optional parseAndVerify(SPIRVDialect const &dialect, - DialectAsmParser &parser); +std::optional parseAndVerify(SPIRVDialect const &dialect, + DialectAsmParser &parser); static Type parseAndVerifyType(SPIRVDialect const &dialect, DialectAsmParser &parser) { @@ -264,7 +264,7 @@ return failure(); SMLoc strideLoc = parser.getCurrentLocation(); - Optional optStride = parseAndVerify(dialect, parser); + std::optional optStride = parseAndVerify(dialect, parser); if (!optStride) return failure(); @@ -474,8 +474,8 @@ // Specialize this function to parse each of the parameters that define an // ImageType. By default it assumes this is an enum type. template -static Optional parseAndVerify(SPIRVDialect const &dialect, - DialectAsmParser &parser) { +static std::optional parseAndVerify(SPIRVDialect const &dialect, + DialectAsmParser &parser) { StringRef enumSpec; SMLoc enumLoc = parser.getCurrentLocation(); if (parser.parseKeyword(&enumSpec)) { @@ -489,8 +489,8 @@ } template <> -Optional parseAndVerify(SPIRVDialect const &dialect, - DialectAsmParser &parser) { +std::optional parseAndVerify(SPIRVDialect const &dialect, + DialectAsmParser &parser) { // TODO: Further verify that the element type can be sampled auto ty = parseAndVerifyType(dialect, parser); if (!ty) @@ -499,8 +499,8 @@ } template -static Optional parseAndVerifyInteger(SPIRVDialect const &dialect, - DialectAsmParser &parser) { +static std::optional parseAndVerifyInteger(SPIRVDialect const &dialect, + DialectAsmParser &parser) { IntTy offsetVal = std::numeric_limits::max(); if (parser.parseInteger(offsetVal)) return std::nullopt; @@ -508,8 +508,8 @@ } template <> -Optional parseAndVerify(SPIRVDialect const &dialect, - DialectAsmParser &parser) { +std::optional parseAndVerify(SPIRVDialect const &dialect, + DialectAsmParser &parser) { return parseAndVerifyInteger(dialect, parser); } @@ -520,7 +520,7 @@ // (termination condition) needs partial specialization. template struct ParseCommaSeparatedList { - Optional> + std::optional> operator()(SPIRVDialect const &dialect, DialectAsmParser &parser) const { auto parseVal = parseAndVerify(dialect, parser); if (!parseVal) @@ -541,8 +541,8 @@ // specs to parse the last element of the list. 
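// Illustrative sketch, not part of the patch: the recursion scheme used by
// ParseCommaSeparatedList above, reduced to plain C++17. The real code drives
// the recursion through partial specialization of a functor; this sketch uses
// `if constexpr` instead, and `parseOne` is a hypothetical stand-in for
// parseAndVerify. Requires <optional> and <tuple>.
template <typename T>
std::optional<T> parseOne();

template <typename ParseType, typename... Args>
std::optional<std::tuple<ParseType, Args...>> parseList() {
  std::optional<ParseType> head = parseOne<ParseType>();
  if (!head)
    return std::nullopt; // any element failing collapses the whole list
  if constexpr (sizeof...(Args) == 0) {
    return std::tuple<ParseType>(*head);
  } else {
    auto tail = parseList<Args...>();
    if (!tail)
      return std::nullopt;
    return std::tuple_cat(std::tuple<ParseType>(*head), *tail);
  }
}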
template struct ParseCommaSeparatedList { - Optional> operator()(SPIRVDialect const &dialect, - DialectAsmParser &parser) const { + std::optional> + operator()(SPIRVDialect const &dialect, DialectAsmParser &parser) const { if (auto value = parseAndVerify(dialect, parser)) return std::tuple(*value); return std::nullopt; diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp @@ -313,8 +313,8 @@ static void printMemoryAccessAttribute( MemoryOpTy memoryOp, OpAsmPrinter &printer, SmallVectorImpl &elidedAttrs, - Optional memoryAccessAtrrValue = std::nullopt, - Optional alignmentAttrValue = std::nullopt) { + std::optional memoryAccessAtrrValue = std::nullopt, + std::optional alignmentAttrValue = std::nullopt) { // Print optional memory access attribute. if (auto memAccess = (memoryAccessAtrrValue ? memoryAccessAtrrValue : memoryOp.getMemoryAccess())) { @@ -343,8 +343,8 @@ static void printSourceMemoryAccessAttribute( MemoryOpTy memoryOp, OpAsmPrinter &printer, SmallVectorImpl &elidedAttrs, - Optional memoryAccessAtrrValue = std::nullopt, - Optional alignmentAttrValue = std::nullopt) { + std::optional memoryAccessAtrrValue = std::nullopt, + std::optional alignmentAttrValue = std::nullopt) { printer << ", "; @@ -912,7 +912,7 @@ parser.parseOperand(valueInfo)) return failure(); - Optional clusterSizeInfo; + std::optional clusterSizeInfo; if (succeeded(parser.parseOptionalKeyword(kClusterSize))) { clusterSizeInfo = OpAsmParser::UnresolvedOperand(); if (parser.parseLParen() || parser.parseOperand(*clusterSizeInfo) || @@ -3348,7 +3348,7 @@ //===----------------------------------------------------------------------===// void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state, - Optional name) { + std::optional name) { OpBuilder::InsertionGuard guard(builder); builder.createBlock(state.addRegion()); if (name) { @@ -3360,8 +3360,8 @@ void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state, spirv::AddressingModel addressingModel, spirv::MemoryModel memoryModel, - Optional vceTriple, - Optional name) { + std::optional vceTriple, + std::optional name) { state.addAttribute( "addressing_model", builder.getAttr(addressingModel)); @@ -3414,7 +3414,7 @@ } void spirv::ModuleOp::print(OpAsmPrinter &printer) { - if (Optional name = getName()) { + if (std::optional name = getName()) { printer << ' '; printer.printSymbolName(*name); } @@ -3428,7 +3428,7 @@ elidedAttrs.assign({addressingModelAttrName, memoryModelAttrName, mlir::SymbolTable::getSymbolAttrName()}); - if (Optional triple = getVceTriple()) { + if (std::optional triple = getVceTriple()) { printer << " requires " << *triple; elidedAttrs.push_back(spirv::ModuleOp::getVCETripleAttrName()); } @@ -3806,7 +3806,7 @@ ParseResult spirv::VariableOp::parse(OpAsmParser &parser, OperationState &result) { // Parse optional initializer - Optional initInfo; + std::optional initInfo; if (succeeded(parser.parseOptionalKeyword("init"))) { initInfo = OpAsmParser::UnresolvedOperand(); if (parser.parseLParen() || parser.parseOperand(*initInfo) || diff --git a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp --- a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp +++ b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp @@ -148,7 +148,7 @@ // Specifies the spirv.ExecutionModeOp. 
if (DenseI32ArrayAttr workgroupSizeAttr = entryPointAttr.getWorkgroupSize()) { - Optional> caps = + std::optional> caps = spirv::getCapabilities(spirv::ExecutionMode::LocalSize); if (!caps || targetEnv.allows(*caps)) { builder.create(funcOp.getLoc(), funcOp, @@ -161,7 +161,7 @@ } } if (Optional subgroupSize = entryPointAttr.getSubgroupSize()) { - Optional> caps = + std::optional> caps = spirv::getCapabilities(spirv::ExecutionMode::SubgroupSize); if (!caps || targetEnv.allows(*caps)) { builder.create(funcOp.getLoc(), funcOp, diff --git a/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp --- a/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp +++ b/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp @@ -52,8 +52,8 @@ AliasedResourceMap aliasedResources; moduleOp->walk([&aliasedResources](spirv::GlobalVariableOp varOp) { if (varOp->getAttrOfType("aliased")) { - Optional set = varOp.getDescriptorSet(); - Optional binding = varOp.getBinding(); + std::optional set = varOp.getDescriptorSet(); + std::optional binding = varOp.getBinding(); if (set && binding) aliasedResources[{*set, *binding}].push_back(varOp); } diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp --- a/mlir/lib/Dialect/Shape/IR/Shape.cpp +++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp @@ -335,7 +335,7 @@ // See RegionBranchOpInterface in Interfaces/ControlFlowInterfaces.td void AssumingOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // AssumingOp has unconditional control flow into the region and back to the // parent, so return the correct RegionSuccessor purely based on the index @@ -394,7 +394,7 @@ //===----------------------------------------------------------------------===// LogicalResult mlir::shape::AddOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType().isa() || @@ -911,7 +911,7 @@ } LogicalResult mlir::shape::ConstShapeOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { Builder b(context); @@ -1068,7 +1068,7 @@ // DimOp //===----------------------------------------------------------------------===// -Optional DimOp::getConstantIndex() { +std::optional DimOp::getConstantIndex() { if (auto constSizeOp = getIndex().getDefiningOp()) return constSizeOp.getValue().getLimitedValue(); if (auto constantOp = getIndex().getDefiningOp()) @@ -1081,7 +1081,7 @@ auto valShapedType = valType.dyn_cast(); if (!valShapedType || !valShapedType.hasRank()) return nullptr; - Optional index = getConstantIndex(); + std::optional index = getConstantIndex(); if (!index.has_value()) return nullptr; if (index.value() >= valShapedType.getRank()) @@ -1093,7 +1093,7 @@ } LogicalResult mlir::shape::DimOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { DimOpAdaptor dimOp(operands); @@ -1141,7 +1141,7 @@ } LogicalResult mlir::shape::DivOp::inferReturnTypes( - 
MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType().isa() || @@ -1327,7 +1327,7 @@ // GetExtentOp //===----------------------------------------------------------------------===// -Optional GetExtentOp::getConstantDim() { +std::optional GetExtentOp::getConstantDim() { if (auto constSizeOp = getDim().getDefiningOp()) return constSizeOp.getValue().getLimitedValue(); if (auto constantOp = getDim().getDefiningOp()) @@ -1339,7 +1339,7 @@ auto elements = operands[0].dyn_cast_or_null(); if (!elements) return nullptr; - Optional dim = getConstantDim(); + std::optional dim = getConstantDim(); if (!dim.has_value()) return nullptr; if (dim.value() >= elements.getNumElements()) @@ -1362,7 +1362,7 @@ } LogicalResult mlir::shape::GetExtentOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { inferredReturnTypes.assign({IndexType::get(context)}); @@ -1400,7 +1400,7 @@ //===----------------------------------------------------------------------===// LogicalResult mlir::shape::MeetOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands.empty()) @@ -1536,7 +1536,7 @@ } LogicalResult mlir::shape::RankOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType().isa()) @@ -1572,7 +1572,7 @@ } LogicalResult mlir::shape::NumElementsOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType().isa()) @@ -1604,7 +1604,7 @@ } LogicalResult mlir::shape::MaxOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType() == operands[1].getType()) @@ -1636,7 +1636,7 @@ } LogicalResult mlir::shape::MinOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType() == operands[1].getType()) @@ -1673,7 +1673,7 @@ } LogicalResult mlir::shape::MulOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType().isa() || @@ -1760,7 +1760,7 @@ } LogicalResult mlir::shape::ShapeOfOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr 
attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType().isa()) diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp --- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp +++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp @@ -333,7 +333,7 @@ } Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind, - Optional dim) const { + std::optional dim) const { if (kind != StorageSpecifierKind::ValMemSize) assert(dim); @@ -344,8 +344,8 @@ } Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind, - Optional dim) const { - Optional intDim = std::nullopt; + std::optional dim) const { + std::optional intDim = std::nullopt; if (dim) intDim = dim.value().getZExtValue(); return getFieldType(kind, intDim); @@ -369,10 +369,9 @@ return failure(); } -static LogicalResult -verifySparsifierGetterSetter(StorageSpecifierKind mdKind, Optional dim, - TypedValue md, - Operation *op) { +static LogicalResult verifySparsifierGetterSetter( + StorageSpecifierKind mdKind, std::optional dim, + TypedValue md, Operation *op) { if (mdKind == StorageSpecifierKind::ValMemSize && dim) { return op->emitError( "redundant dimension argument for querying value memory size"); @@ -482,7 +481,7 @@ OpFoldResult GetStorageSpecifierOp::fold(ArrayRef operands) { StorageSpecifierKind kind = getSpecifierKind(); - Optional dim = getDim(); + std::optional dim = getDim(); for (auto op = getSpecifierSetDef(*this); op; op = getSpecifierSetDef(op)) if (kind == op.getSpecifierKind() && dim == op.getDim()) return op.getValue(); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp @@ -133,9 +133,10 @@ /// Gets the dimension size for the given sparse tensor at the given /// original dimension 'dim'. Returns std::nullopt if no sparse encoding is /// attached to the given tensor type. -static Optional sizeFromTensorAtDim(OpBuilder &builder, Location loc, - SparseTensorDescriptor desc, - unsigned dim) { +static std::optional sizeFromTensorAtDim(OpBuilder &builder, + Location loc, + SparseTensorDescriptor desc, + unsigned dim) { RankedTensorType rtp = desc.getTensorType(); // Access into static dimension can query original type directly. // Note that this is typically already done by DimOp's folding. @@ -681,7 +682,7 @@ LogicalResult matchAndRewrite(tensor::DimOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - Optional index = op.getConstantIndex(); + std::optional index = op.getConstantIndex(); if (!index || !getSparseTensorEncoding(adaptor.getSource().getType())) return failure(); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp @@ -706,7 +706,7 @@ if (!enc) return failure(); // Only rewrite DimOp with constant index. - Optional dim = op.getConstantIndex(); + std::optional dim = op.getConstantIndex(); if (!dim) return failure(); // Generate the call. 
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp --- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp @@ -380,7 +380,7 @@ build(builder, result, source, indexValue); } -Optional DimOp::getConstantIndex() { +std::optional DimOp::getConstantIndex() { return getConstantIntValue(getIndex()); } @@ -400,7 +400,7 @@ LogicalResult DimOp::verify() { // Assume unknown index to be in range. - Optional index = getConstantIndex(); + std::optional index = getConstantIndex(); if (!index) return success(); @@ -598,7 +598,7 @@ for (int64_t i = 0; i < op.getType().getRank(); ++i) { if (op.getType().isDynamicDim(i)) { Value dynamicSize = op.getDynamicSizes()[ctr++]; - Optional cst = getConstantIntValue(dynamicSize); + std::optional cst = getConstantIntValue(dynamicSize); if (cst.has_value()) { staticShape[i] = *cst; changedType = true; @@ -626,7 +626,7 @@ LogicalResult matchAndRewrite(tensor::DimOp dimOp, PatternRewriter &rewriter) const override { - Optional maybeConstantIndex = dimOp.getConstantIndex(); + std::optional maybeConstantIndex = dimOp.getConstantIndex(); auto emptyTensorOp = dimOp.getSource().getDefiningOp(); if (!emptyTensorOp || !maybeConstantIndex) return failure(); @@ -1445,7 +1445,7 @@ return failure(); // Only constant dimension values are supported. - Optional dim = dimOp.getConstantIndex(); + std::optional dim = dimOp.getConstantIndex(); if (!dim.has_value()) return failure(); @@ -1489,7 +1489,7 @@ return failure(); // Only constant dimension values are supported. - Optional dim = dimOp.getConstantIndex(); + std::optional dim = dimOp.getConstantIndex(); if (!dim.has_value()) return failure(); @@ -1732,7 +1732,7 @@ llvm::SmallBitVector droppedDims(mixedSizes.size()); unsigned shapePos = 0; for (const auto &size : enumerate(mixedSizes)) { - Optional sizeVal = getConstantIntValue(size.value()); + std::optional sizeVal = getConstantIntValue(size.value()); // If the size is not 1, or if the current matched dimension of the result // is the same static shape as the size value (which is 1), then the // dimension is preserved. 
@@ -2278,15 +2278,16 @@ })) return failure(); - auto getSourceOfCastOp = [](Value v) -> Optional { + auto getSourceOfCastOp = [](Value v) -> std::optional { auto castOp = v.getDefiningOp(); if (!castOp || !canFoldIntoConsumerOp(castOp)) return std::nullopt; return castOp.getSource(); }; - Optional sourceCastSource = + std::optional sourceCastSource = getSourceOfCastOp(insertSliceOp.getSource()); - Optional destCastSource = getSourceOfCastOp(insertSliceOp.getDest()); + std::optional destCastSource = + getSourceOfCastOp(insertSliceOp.getDest()); if (!sourceCastSource && !destCastSource) return failure(); @@ -2352,7 +2353,7 @@ SmallVector newSrcShape(srcType.getShape().begin(), srcType.getShape().end()); for (int64_t i = 0; i < srcType.getRank(); ++i) { - if (Optional constInt = + if (std::optional constInt = getConstantIntValue(insertSliceOp.getMixedSizes()[i])) newSrcShape[i] = *constInt; } @@ -2419,9 +2420,10 @@ void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand, Type typeToInfer, Type typeToInferFrom) {} -ParseResult parseInferType(OpAsmParser &parser, - Optional optOperand, - Type &typeToInfer, Type typeToInferFrom) { +ParseResult +parseInferType(OpAsmParser &parser, + std::optional optOperand, + Type &typeToInfer, Type typeToInferFrom) { if (optOperand) typeToInfer = typeToInferFrom; return success(); @@ -3151,7 +3153,7 @@ llvm::zip(packedType.getShape().take_back(mixedTiles.size()), mixedTiles), [](std::tuple it) { - Optional constTileSize = + std::optional constTileSize = getConstantIntValue(std::get<1>(it)); int64_t shape = std::get<0>(it); if (!constTileSize) { @@ -3232,7 +3234,7 @@ auto it = dimAndTileMapping.find(dim); if (it == dimAndTileMapping.end()) continue; - Optional constantTile = getConstantIntValue(it->second); + std::optional constantTile = getConstantIntValue(it->second); if (!constantTile) continue; if (inputShape[dim] % (*constantTile) != 0) @@ -3333,7 +3335,7 @@ SmallVector mixedTiles = op.getMixedTiles(); for (auto [dimDest, tile] : llvm::zip( packedType.getShape().take_back(mixedTiles.size()), mixedTiles)) { - Optional constTileSize = getConstantIntValue(tile); + std::optional constTileSize = getConstantIntValue(tile); if (!constTileSize || ShapedType::isDynamic(dimDest)) return false; } diff --git a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp --- a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp @@ -265,7 +265,7 @@ info.isAlignedToInnerTileSize = false; FailureOr cstSize = linalg::getConstantUpperBoundForIndex( getValueOrCreateConstantIndexOp(b, loc, tileSize)); - Optional cstInnerSize = getConstantIntValue(innerTileSize); + std::optional cstInnerSize = getConstantIntValue(innerTileSize); if (!failed(cstSize) && cstInnerSize) { if (cstSize.value() % cstInnerSize.value() == 0) info.isAlignedToInnerTileSize = true; diff --git a/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp --- a/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp @@ -26,7 +26,7 @@ /// Returns true if the given `attrOrValue` is a constant zero.
static bool isZero(OpFoldResult attrOrValue) { - if (Optional val = getConstantIntValue(attrOrValue)) + if (std::optional val = getConstantIntValue(attrOrValue)) return *val == 0; return false; } diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp --- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp @@ -373,7 +373,7 @@ } LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape = operands.getShape(0); @@ -398,7 +398,7 @@ } LogicalResult tosa::ConcatOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { // Infer all dimension sizes by reducing based on inputs. @@ -455,7 +455,7 @@ } LogicalResult tosa::EqualOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outShape; @@ -476,7 +476,7 @@ } LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape = operands.getShape(0); @@ -496,9 +496,8 @@ } if (biasShape.hasRank()) { - outShape[1] = outShape[1] == ShapedType::kDynamic - ? biasShape.getDimSize(0) - : outShape[1]; + outShape[1] = outShape[1] == ShapedType::kDynamic ? biasShape.getDimSize(0) + : outShape[1]; } inferredReturnShapes.push_back(ShapedTypeComponents(outShape)); @@ -508,7 +507,7 @@ LogicalResult FullyConnectedOp::verify() { return verifyConvOp(*this); } LogicalResult tosa::MatMulOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor lhsShape = operands.getShape(0); @@ -524,9 +523,8 @@ } if (rhsShape.hasRank()) { - outShape[0] = outShape[0] == ShapedType::kDynamic - ? rhsShape.getDimSize(0) - : outShape[0]; + outShape[0] = outShape[0] == ShapedType::kDynamic ? 
rhsShape.getDimSize(0) + : outShape[0]; outShape[2] = rhsShape.getDimSize(2); } @@ -535,7 +533,7 @@ } LogicalResult tosa::PadOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape = operands.getShape(0); @@ -597,7 +595,7 @@ } LogicalResult tosa::SliceOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ArrayAttr sizes = SliceOpAdaptor(operands, attributes).getSize(); @@ -607,13 +605,13 @@ outputShape.push_back(val.cast().getValue().getSExtValue()); } - inferredReturnShapes.push_back(ShapedTypeComponents( - convertToMlirShape(outputShape))); + inferredReturnShapes.push_back( + ShapedTypeComponents(convertToMlirShape(outputShape))); return success(); } LogicalResult tosa::TableOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape = operands.getShape(0); @@ -629,7 +627,7 @@ } LogicalResult tosa::TileOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { TileOpAdaptor adaptor(operands, attributes); @@ -663,7 +661,7 @@ } LogicalResult tosa::ReshapeOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ReshapeOpAdaptor adaptor(operands, attributes); @@ -703,7 +701,7 @@ } LogicalResult tosa::TransposeOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ShapeAdaptor inputShape = operands.getShape(0); @@ -770,7 +768,7 @@ } LogicalResult tosa::GatherOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outputShape; @@ -795,7 +793,7 @@ } LogicalResult tosa::ResizeOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { ResizeOpAdaptor adaptor(operands, attributes); @@ -838,7 +836,7 @@ } LogicalResult tosa::ScatterOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outputShape; @@ -887,7 +885,7 @@ #define REDUCE_SHAPE_INFER(OP) \ LogicalResult OP::inferReturnTypeComponents( \ - MLIRContext *context, ::llvm::Optional 
location, \ + MLIRContext *context, ::std::optional location, \ ValueShapeRange operands, DictionaryAttr attributes, \ RegionRange regions, \ SmallVectorImpl &inferredReturnShapes) { \ @@ -918,7 +916,7 @@ #define NARY_SHAPE_INFER(OP) \ LogicalResult OP::inferReturnTypeComponents( \ - MLIRContext *context, ::llvm::Optional location, \ + MLIRContext *context, ::std::optional location, \ ValueShapeRange operands, DictionaryAttr attributes, \ RegionRange regions, \ SmallVectorImpl &inferredReturnShapes) { \ @@ -1007,7 +1005,7 @@ } LogicalResult Conv2DOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outputShape(4, ShapedType::kDynamic); @@ -1074,7 +1072,7 @@ LogicalResult Conv2DOp::verify() { return verifyConvOp(*this); } LogicalResult Conv3DOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outputShape(5, ShapedType::kDynamic); @@ -1151,21 +1149,21 @@ LogicalResult Conv3DOp::verify() { return verifyConvOp(*this); } LogicalResult AvgPool2dOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { return poolingInferReturnTypes(operands, attributes, inferredReturnShapes); } LogicalResult MaxPool2dOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { return poolingInferReturnTypes(operands, attributes, inferredReturnShapes); } LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector outputShape(4, ShapedType::kDynamic); @@ -1245,7 +1243,7 @@ LogicalResult DepthwiseConv2DOp::verify() { return verifyConvOp(*this); } LogicalResult TransposeConv2DOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { TransposeConv2DOp::Adaptor adaptor(operands.getValues(), attributes); @@ -1313,7 +1311,7 @@ } LogicalResult IfOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector yieldOps; @@ -1357,7 +1355,7 @@ } LogicalResult WhileOp::inferReturnTypeComponents( - MLIRContext *context, ::llvm::Optional location, + MLIRContext *context, ::std::optional location, ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { llvm::SmallVector yieldOps; diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp 
b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp --- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp @@ -46,7 +46,7 @@ private: void runOnOperation() override; - llvm::Optional profileType; + std::optional profileType; }; void TosaValidation::runOnOperation() { diff --git a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp --- a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp +++ b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp @@ -117,8 +117,8 @@ // AlternativesOp //===----------------------------------------------------------------------===// -OperandRange -transform::AlternativesOp::getSuccessorEntryOperands(Optional index) { +OperandRange transform::AlternativesOp::getSuccessorEntryOperands( + std::optional index) { if (index && getOperation()->getNumOperands() == 1) return getOperation()->getOperands(); return OperandRange(getOperation()->operand_end(), @@ -126,7 +126,7 @@ } void transform::AlternativesOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { for (Region &alternative : llvm::drop_begin( getAlternatives(), index.has_value() ? *index + 1 : 0)) { @@ -338,7 +338,7 @@ } void transform::ForeachOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { Region *bodyRegion = &getBody(); if (!index) { @@ -353,7 +353,7 @@ } OperandRange -transform::ForeachOp::getSuccessorEntryOperands(Optional index) { +transform::ForeachOp::getSuccessorEntryOperands(std::optional index) { // The iteration variable op handle is mapped to a subset (one op to be // precise) of the payload ops of the ForeachOp operand. assert(index && *index == 0 && "unexpected region index"); @@ -737,8 +737,8 @@ } } -OperandRange -transform::SequenceOp::getSuccessorEntryOperands(Optional index) { +OperandRange transform::SequenceOp::getSuccessorEntryOperands( + std::optional index) { assert(index && *index == 0 && "unexpected region index"); if (getOperation()->getNumOperands() == 1) return getOperation()->getOperands(); @@ -747,7 +747,7 @@ } void transform::SequenceOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { if (!index) { Region *bodyRegion = &getBody(); diff --git a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp --- a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp +++ b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp @@ -57,8 +57,7 @@ // dimensions should also be dynamic and product of all previous unprocessed // dimensions of the expanded shape should be 1. 
if (sourceShape[sourceDim] == ShapedType::kDynamic && - (currTargetShape != ShapedType::kDynamic || - prodOfCollapsedDims != 1)) + (currTargetShape != ShapedType::kDynamic || prodOfCollapsedDims != 1)) return std::nullopt; // If the collapsed dim is dynamic, the current expanded dim should also @@ -229,7 +228,7 @@ ArrayRef reassociationMaps, bool isExpandingReshape) { unsigned expandedDimStart = 0; for (const auto &map : llvm::enumerate(reassociationMaps)) { - Optional dynamicShape; + std::optional dynamicShape; int64_t linearizedStaticShape = 1; for (const auto &dim : llvm::enumerate( expandedShape.slice(expandedDimStart, map.value().size()))) { @@ -279,8 +278,8 @@ llvm::SmallBitVector mask(sliceInputShape.size()); unsigned idx = 0; for (const auto &[offset, size, stride] : sliceParams) { - Optional offsetConst = getConstantIntValue(offset); - Optional strideConst = getConstantIntValue(stride); + std::optional offsetConst = getConstantIntValue(offset); + std::optional strideConst = getConstantIntValue(stride); mask[idx] = !isEqualConstantIntOrValue(size, sliceInputShape[idx]) || (!strideConst || *strideConst != 1) || (!offsetConst || *offsetConst != 0); diff --git a/mlir/lib/Dialect/Utils/StaticValueUtils.cpp b/mlir/lib/Dialect/Utils/StaticValueUtils.cpp --- a/mlir/lib/Dialect/Utils/StaticValueUtils.cpp +++ b/mlir/lib/Dialect/Utils/StaticValueUtils.cpp @@ -91,7 +91,7 @@ } /// If ofr is a constant integer or an IntegerAttr, return the integer. -Optional getConstantIntValue(OpFoldResult ofr) { +std::optional getConstantIntValue(OpFoldResult ofr) { // Case 1: Check for Constant integer. if (auto val = ofr.dyn_cast()) { APSInt intVal; diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -315,7 +315,8 @@ return {}; } -Optional> MultiDimReductionOp::getShapeForUnroll() { +std::optional> +MultiDimReductionOp::getShapeForUnroll() { return llvm::to_vector<4>(getSourceVectorType().getShape()); } @@ -500,7 +501,7 @@ return nullptr; } -Optional> ReductionOp::getShapeForUnroll() { +std::optional> ReductionOp::getShapeForUnroll() { return llvm::to_vector<4>(getVectorType().getShape()); } @@ -939,7 +940,7 @@ getContext()); } -Optional> ContractionOp::getShapeForUnroll() { +std::optional> ContractionOp::getShapeForUnroll() { SmallVector shape; getIterationBounds(shape); return shape; @@ -1077,7 +1078,7 @@ } LogicalResult -ExtractOp::inferReturnTypes(MLIRContext *, Optional, +ExtractOp::inferReturnTypes(MLIRContext *, std::optional, ValueRange operands, DictionaryAttr attributes, RegionRange, SmallVectorImpl &inferredReturnTypes) { @@ -1721,7 +1722,7 @@ // FmaOp //===----------------------------------------------------------------------===// -Optional> FMAOp::getShapeForUnroll() { +std::optional> FMAOp::getShapeForUnroll() { return llvm::to_vector<4>(getVectorType().getShape()); } @@ -2001,7 +2002,7 @@ } LogicalResult -ShuffleOp::inferReturnTypes(MLIRContext *, Optional, +ShuffleOp::inferReturnTypes(MLIRContext *, std::optional, ValueRange operands, DictionaryAttr attributes, RegionRange, SmallVectorImpl &inferredReturnTypes) { @@ -3178,7 +3179,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result, VectorType vectorType, Value source, ValueRange indices, AffineMap permutationMap, - Optional> inBounds) { + std::optional> inBounds) { auto permutationMapAttr = AffineMapAttr::get(permutationMap); auto inBoundsAttr = (inBounds && 
!inBounds.value().empty()) ? builder.getBoolArrayAttr(inBounds.value()) @@ -3191,7 +3192,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result, VectorType vectorType, Value source, ValueRange indices, Value padding, - Optional> inBounds) { + std::optional> inBounds) { AffineMap permutationMap = getTransferMinorIdentityMap( source.getType().cast(), vectorType); auto permutationMapAttr = AffineMapAttr::get(permutationMap); @@ -3208,7 +3209,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result, VectorType vectorType, Value source, ValueRange indices, - Optional> inBounds) { + std::optional> inBounds) { Type elemType = source.getType().cast().getElementType(); Value padding = builder.create( result.location, elemType, builder.getZeroAttr(elemType)); @@ -3573,7 +3574,7 @@ return OpFoldResult(); } -Optional> TransferReadOp::getShapeForUnroll() { +std::optional> TransferReadOp::getShapeForUnroll() { return llvm::to_vector<4>(getVectorType().getShape()); } @@ -3800,7 +3801,7 @@ void TransferWriteOp::build(OpBuilder &builder, OperationState &result, Value vector, Value dest, ValueRange indices, AffineMap permutationMap, - Optional> inBounds) { + std::optional> inBounds) { auto permutationMapAttr = AffineMapAttr::get(permutationMap); auto inBoundsAttr = (inBounds && !inBounds.value().empty()) ? builder.getBoolArrayAttr(inBounds.value()) @@ -3813,7 +3814,7 @@ /// map to 'getMinorIdentityMap'. void TransferWriteOp::build(OpBuilder &builder, OperationState &result, Value vector, Value dest, ValueRange indices, - Optional> inBounds) { + std::optional> inBounds) { auto vectorType = vector.getType().cast(); AffineMap permutationMap = getTransferMinorIdentityMap( dest.getType().cast(), vectorType); @@ -4046,7 +4047,7 @@ return memref::foldMemRefCast(*this); } -Optional> TransferWriteOp::getShapeForUnroll() { +std::optional> TransferWriteOp::getShapeForUnroll() { return llvm::to_vector<4>(getVectorType().getShape()); } @@ -5037,7 +5038,7 @@ return success(); } -Optional> TransposeOp::getShapeForUnroll() { +std::optional> TransposeOp::getShapeForUnroll() { return llvm::to_vector<4>(getResultType().getShape()); } @@ -5580,7 +5581,7 @@ } void WarpExecuteOnLane0Op::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { if (index) { regions.push_back(RegionSuccessor(getResults())); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp @@ -44,7 +44,7 @@ using namespace mlir::vector; // Helper to find an index in an affine map. -static Optional getResultIndex(AffineMap map, int64_t index) { +static std::optional getResultIndex(AffineMap map, int64_t index) { for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) { int64_t idx = map.getDimPosition(i); if (idx == index) @@ -147,11 +147,11 @@ } /// Helper to create arithmetic operation associated with a kind of contraction. 
-static Optional createContractArithOp(Location loc, Value x, Value y, - Value acc, - vector::CombiningKind kind, - PatternRewriter &rewriter, - bool isInt) { +static std::optional createContractArithOp(Location loc, Value x, + Value y, Value acc, + vector::CombiningKind kind, + PatternRewriter &rewriter, + bool isInt) { using vector::CombiningKind; Value mul; if (isInt) { @@ -169,12 +169,13 @@ return std::nullopt; // Special case for fused multiply-add. if (acc && acc.getType().isa() && kind == CombiningKind::ADD) { - return Optional(rewriter.create(loc, x, y, acc)); + return std::optional( + rewriter.create(loc, x, y, acc)); } mul = rewriter.create(loc, x, y); } if (!acc) - return Optional(mul); + return std::optional(mul); return makeArithReduction(rewriter, loc, kind, mul, acc); } @@ -191,7 +192,7 @@ /// Look for a given dimension in an affine map and return its position. Return /// std::nullopt if the dimension is not in the map results. -static llvm::Optional getDimPosition(AffineMap map, unsigned dim) { +static std::optional getDimPosition(AffineMap map, unsigned dim) { for (unsigned i = 0, e = map.getNumResults(); i < e; i++) { if (map.getDimPosition(i) == dim) return i; @@ -552,8 +553,8 @@ if (!rhsType) { // Special case: AXPY operation. Value b = rewriter.create(loc, lhsType, op.getRhs()); - Optional mult = createContractArithOp(loc, op.getLhs(), b, acc, - kind, rewriter, isInt); + std::optional mult = createContractArithOp( + loc, op.getLhs(), b, acc, kind, rewriter, isInt); if (!mult.has_value()) return failure(); rewriter.replaceOp(op, mult.value()); @@ -570,7 +571,7 @@ Value r = nullptr; if (acc) r = rewriter.create(loc, rhsType, acc, pos); - Optional m = + std::optional m = createContractArithOp(loc, a, op.getRhs(), r, kind, rewriter, isInt); if (!m.has_value()) return failure(); @@ -645,7 +646,7 @@ // Loop through the parallel dimensions to calculate the dimensions to // broadcast and to permute in order to extract only parallel dimensions. for (unsigned i = 0; i < numParallelDims; i++) { - llvm::Optional lhsDim = + std::optional lhsDim = getDimPosition(lhsMap, accMap.getDimPosition(i)); if (lhsDim) { lhsTranspose.push_back(numLhsDimToBroadcast + *lhsDim); @@ -655,7 +656,7 @@ contractOp.getResultType().cast().getDimSize(i)); lhsTranspose.push_back(lhsDims.size() - 1); } - llvm::Optional rhsDim = + std::optional rhsDim = getDimPosition(rhsMap, accMap.getDimPosition(i)); if (rhsDim) { rhsTranspose.push_back(numRhsDimToBroadcast + *rhsDim); @@ -690,7 +691,7 @@ loc, newLhs, rewriter.getI64ArrayAttr(lhsOffsets)); newRhs = rewriter.create( loc, newRhs, rewriter.getI64ArrayAttr(rhsOffsets)); - Optional result = + std::optional result = createContractArithOp(loc, newLhs, newRhs, contractOp.getAcc(), contractOp.getKind(), rewriter, isInt); rewriter.replaceOp(contractOp, {*result}); @@ -2010,8 +2011,8 @@ // Use iterator index 0. 
int64_t iterIndex = 0; SmallVector iMap = op.getIndexingMapsArray(); - Optional lookupLhs = getResultIndex(iMap[0], iterIndex); - Optional lookupRhs = getResultIndex(iMap[1], iterIndex); + std::optional lookupLhs = getResultIndex(iMap[0], iterIndex); + std::optional lookupRhs = getResultIndex(iMap[1], iterIndex); if (!lookupLhs.has_value()) return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) { diag << "expected iterIndex=" << iterIndex << "to map to a LHS dimension"; @@ -2075,7 +2076,7 @@ struct TransferReadToVectorLoadLowering : public OpRewritePattern { TransferReadToVectorLoadLowering(MLIRContext *context, - llvm::Optional maxRank, + std::optional maxRank, PatternBenefit benefit = 1) : OpRewritePattern(context, benefit), maxTransferRank(maxRank) {} @@ -2151,7 +2152,7 @@ return success(); } - llvm::Optional maxTransferRank; + std::optional maxTransferRank; }; /// Replace a 0-d vector.load with a memref.load + vector.broadcast. @@ -2217,7 +2218,7 @@ struct TransferWriteToVectorStoreLowering : public OpRewritePattern { TransferWriteToVectorStoreLowering(MLIRContext *context, - llvm::Optional maxRank, + std::optional maxRank, PatternBenefit benefit = 1) : OpRewritePattern(context, benefit), maxTransferRank(maxRank) {} @@ -2280,7 +2281,7 @@ return success(); } - llvm::Optional maxTransferRank; + std::optional maxTransferRank; }; // Returns the values in `arrayAttr` as an integer vector. @@ -3026,7 +3027,7 @@ } void mlir::vector::populateVectorTransferLoweringPatterns( - RewritePatternSet &patterns, llvm::Optional maxTransferRank, + RewritePatternSet &patterns, std::optional maxTransferRank, PatternBenefit benefit) { patterns.add(patterns.getContext(), diff --git a/mlir/lib/IR/BuiltinDialect.cpp b/mlir/lib/IR/BuiltinDialect.cpp --- a/mlir/lib/IR/BuiltinDialect.cpp +++ b/mlir/lib/IR/BuiltinDialect.cpp @@ -126,7 +126,7 @@ //===----------------------------------------------------------------------===// void ModuleOp::build(OpBuilder &builder, OperationState &state, - Optional name) { + std::optional name) { state.addRegion()->emplaceBlock(); if (name) { state.attributes.push_back(builder.getNamedAttr( @@ -135,7 +135,7 @@ } /// Construct a module from the given context. -ModuleOp ModuleOp::create(Location loc, Optional name) { +ModuleOp ModuleOp::create(Location loc, std::optional name) { OpBuilder builder(loc->getContext()); return builder.create(loc, name); } diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp --- a/mlir/lib/IR/BuiltinTypes.cpp +++ b/mlir/lib/IR/BuiltinTypes.cpp @@ -246,7 +246,7 @@ return VectorType(); } -VectorType VectorType::cloneWith(Optional> shape, +VectorType VectorType::cloneWith(std::optional> shape, Type elementType) const { return VectorType::get(shape.value_or(getShape()), elementType, getNumScalableDims()); @@ -268,7 +268,7 @@ return cast().getShape(); } -TensorType TensorType::cloneWith(Optional> shape, +TensorType TensorType::cloneWith(std::optional> shape, Type elementType) const { if (auto unrankedTy = dyn_cast()) { if (shape) @@ -346,7 +346,7 @@ return cast().getShape(); } -BaseMemRefType BaseMemRefType::cloneWith(Optional> shape, +BaseMemRefType BaseMemRefType::cloneWith(std::optional> shape, Type elementType) const { if (auto unrankedTy = dyn_cast()) { if (!shape) @@ -387,7 +387,7 @@ /// which dimensions must be kept when e.g. compute MemRef strides under /// rank-reducing operations. Return std::nullopt if reducedShape cannot be /// obtained by dropping only `1` entries in `originalShape`. 
-llvm::Optional> +std::optional> mlir::computeRankReductionMask(ArrayRef originalShape, ArrayRef reducedShape) { size_t originalRank = originalShape.size(), reducedRank = reducedShape.size(); diff --git a/mlir/lib/IR/Dialect.cpp b/mlir/lib/IR/Dialect.cpp --- a/mlir/lib/IR/Dialect.cpp +++ b/mlir/lib/IR/Dialect.cpp @@ -75,7 +75,7 @@ return Type(); } -Optional +std::optional Dialect::getParseOperationHook(StringRef opName) const { return std::nullopt; } diff --git a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp --- a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp +++ b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp @@ -36,7 +36,7 @@ /// Returns the `BlockArgument` corresponding to operand `operandIndex` in some /// successor if 'operandIndex' is within the range of 'operands', or /// std::nullopt if `operandIndex` isn't a successor operand index. -Optional +std::optional detail::getBranchSuccessorArgument(const SuccessorOperands &operands, unsigned operandIndex, Block *successor) { OperandRange forwardedOperands = operands.getForwardedOperands(); @@ -90,17 +90,17 @@ /// inputs that flow from `sourceIndex' to the given region, or std::nullopt if /// the exact type match verification is not necessary (e.g., if the Op verifies /// the match itself). -static LogicalResult -verifyTypesAlongAllEdges(Operation *op, Optional sourceNo, - function_ref(Optional)> - getInputsTypesForRegion) { +static LogicalResult verifyTypesAlongAllEdges( + Operation *op, std::optional sourceNo, + function_ref(std::optional)> + getInputsTypesForRegion) { auto regionInterface = cast(op); SmallVector successors; regionInterface.getSuccessorRegions(sourceNo, successors); for (RegionSuccessor &succ : successors) { - Optional succRegionNo; + std::optional succRegionNo; if (!succ.isParent()) succRegionNo = succ.getSuccessor()->getRegionNumber(); @@ -119,7 +119,8 @@ return diag; }; - Optional sourceTypes = getInputsTypesForRegion(succRegionNo); + std::optional sourceTypes = + getInputsTypesForRegion(succRegionNo); if (!sourceTypes.has_value()) continue; @@ -151,7 +152,8 @@ LogicalResult detail::verifyTypesAlongControlFlowEdges(Operation *op) { auto regionInterface = cast(op); - auto inputTypesFromParent = [&](Optional regionNo) -> TypeRange { + auto inputTypesFromParent = + [&](std::optional regionNo) -> TypeRange { return regionInterface.getSuccessorEntryOperands(regionNo).getTypes(); }; @@ -179,7 +181,7 @@ // implementing the `RegionBranchTerminatorOpInterface`, all should have the // same operand types when passing them to the same region. - Optional regionReturnOperands; + std::optional regionReturnOperands; for (Block &block : region) { Operation *terminator = block.getTerminator(); auto terminatorOperands = @@ -202,7 +204,7 @@ } auto inputTypesFromRegion = - [&](Optional regionNo) -> Optional { + [&](std::optional regionNo) -> std::optional { // If there is no return-like terminator, the op itself should verify // type consistency. if (!regionReturnOperands) @@ -307,7 +309,7 @@ } void RegionBranchOpInterface::getSuccessorRegions( - Optional index, SmallVectorImpl ®ions) { + std::optional index, SmallVectorImpl ®ions) { unsigned numInputs = 0; if (index) { // If the predecessor is a region, get the number of operands from an @@ -367,9 +369,9 @@ /// `OperandRange` represents all operands that are passed to the specified /// successor region. If `regionIndex` is `std::nullopt`, all operands that are /// passed to the parent operation will be returned. 
-Optional -mlir::getMutableRegionBranchSuccessorOperands(Operation *operation, - Optional regionIndex) { +std::optional +mlir::getMutableRegionBranchSuccessorOperands( + Operation *operation, std::optional regionIndex) { // Try to query a RegionBranchTerminatorOpInterface to determine // all successor operands that will be passed to the successor // input arguments. @@ -388,9 +390,9 @@ /// Returns the read only operands that are passed to the region with the given /// `regionIndex`. See `getMutableRegionBranchSuccessorOperands` for more /// information. -Optional +std::optional mlir::getRegionBranchSuccessorOperands(Operation *operation, - Optional regionIndex) { + std::optional regionIndex) { auto range = getMutableRegionBranchSuccessorOperands(operation, regionIndex); - return range ? Optional(*range) : std::nullopt; + return range ? std::optional(*range) : std::nullopt; } diff --git a/mlir/lib/Interfaces/InferTypeOpInterface.cpp b/mlir/lib/Interfaces/InferTypeOpInterface.cpp --- a/mlir/lib/Interfaces/InferTypeOpInterface.cpp +++ b/mlir/lib/Interfaces/InferTypeOpInterface.cpp @@ -174,12 +174,13 @@ } LogicalResult mlir::detail::inferReturnTensorTypes( - function_ref location, ValueShapeRange operands, - DictionaryAttr attributes, RegionRange regions, - SmallVectorImpl &retComponents)> + function_ref< + LogicalResult(MLIRContext *, std::optional location, + ValueShapeRange operands, DictionaryAttr attributes, + RegionRange regions, + SmallVectorImpl &retComponents)> componentTypeFn, - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { SmallVector retComponents; diff --git a/mlir/lib/Rewrite/ByteCode.cpp b/mlir/lib/Rewrite/ByteCode.cpp --- a/mlir/lib/Rewrite/ByteCode.cpp +++ b/mlir/lib/Rewrite/ByteCode.cpp @@ -46,7 +46,7 @@ llvm::to_vector<8>(generatedOpsAttr.getAsValueRange()); // Check to see if this pattern matches a specific operation type.
- if (Optional rootKind = matchOp.getRootKind()) + if (std::optional rootKind = matchOp.getRootKind()) return PDLByteCodePattern(rewriterAddr, configSet, *rootKind, benefit, ctx, generatedOps); return PDLByteCodePattern(rewriterAddr, configSet, MatchAnyOpTypeTag(), @@ -940,7 +940,7 @@ } void Generator::generate(pdl_interp::GetOperandsOp op, ByteCodeWriter &writer) { Value result = op.getValue(); - Optional index = op.getIndex(); + std::optional index = op.getIndex(); writer.append(OpCode::GetOperands, index.value_or(std::numeric_limits::max()), op.getInputOp()); @@ -960,7 +960,7 @@ } void Generator::generate(pdl_interp::GetResultsOp op, ByteCodeWriter &writer) { Value result = op.getValue(); - Optional index = op.getIndex(); + std::optional index = op.getIndex(); writer.append(OpCode::GetResults, index.value_or(std::numeric_limits::max()), op.getInputOp()); diff --git a/mlir/lib/Target/LLVMIR/DebugImporter.cpp b/mlir/lib/Target/LLVMIR/DebugImporter.cpp --- a/mlir/lib/Target/LLVMIR/DebugImporter.cpp +++ b/mlir/lib/Target/LLVMIR/DebugImporter.cpp @@ -42,7 +42,7 @@ } DICompileUnitAttr DebugImporter::translateImpl(llvm::DICompileUnit *node) { - Optional emissionKind = + std::optional emissionKind = symbolizeDIEmissionKind(node->getEmissionKind()); return DICompileUnitAttr::get(context, node->getSourceLanguage(), translate(node->getFile()), @@ -51,7 +51,7 @@ } DICompositeTypeAttr DebugImporter::translateImpl(llvm::DICompositeType *node) { - Optional flags = symbolizeDIFlags(node->getFlags()); + std::optional flags = symbolizeDIFlags(node->getFlags()); SmallVector elements; for (llvm::DINode *element : node->getElements()) { assert(element && "expected a non-null element type"); @@ -102,7 +102,7 @@ } DISubprogramAttr DebugImporter::translateImpl(llvm::DISubprogram *node) { - Optional subprogramFlags = + std::optional subprogramFlags = symbolizeDISubprogramFlags(node->getSubprogram()->getSPFlags()); return DISubprogramAttr::get( context, translate(node->getUnit()), translate(node->getScope()), diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp --- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp @@ -27,7 +27,7 @@ namespace { static llvm::omp::ScheduleKind -convertToScheduleKind(Optional schedKind) { +convertToScheduleKind(std::optional schedKind) { if (!schedKind.has_value()) return llvm::omp::OMP_SCHEDULE_Default; switch (schedKind.value()) { @@ -398,7 +398,7 @@ static void collectReductionDecls(omp::WsLoopOp loop, SmallVectorImpl &reductions) { - Optional attr = loop.getReductions(); + std::optional attr = loop.getReductions(); if (!attr) return; @@ -855,7 +855,8 @@ // TODO: Handle doacross loops when the ordered clause has a parameter. 
bool isOrdered = loop.getOrderedVal().has_value(); - Optional scheduleModifier = loop.getScheduleModifier(); + std::optional scheduleModifier = + loop.getScheduleModifier(); bool isSimd = loop.getSimdModifier(); ompBuilder->applyWorkshareLoop( @@ -989,11 +990,11 @@ ompBuilder->collapseLoops(ompLoc.DL, loopInfos, {}); llvm::ConstantInt *simdlen = nullptr; - if (llvm::Optional simdlenVar = loop.getSimdlen()) + if (std::optional simdlenVar = loop.getSimdlen()) simdlen = builder.getInt64(simdlenVar.value()); llvm::ConstantInt *safelen = nullptr; - if (llvm::Optional safelenVar = loop.getSafelen()) + if (std::optional safelenVar = loop.getSafelen()) safelen = builder.getInt64(safelenVar.value()); llvm::MapVector alignedVars; @@ -1009,7 +1010,7 @@ /// Convert an Atomic Ordering attribute to llvm::AtomicOrdering. llvm::AtomicOrdering -convertAtomicOrdering(Optional ao) { +convertAtomicOrdering(std::optional ao) { if (!ao) return llvm::AtomicOrdering::Monotonic; // Default Memory Ordering diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp --- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp @@ -687,7 +687,7 @@ addRuntimePreemptionSpecifier(op.getDsoLocal(), var); - Optional alignment = op.getAlignment(); + std::optional alignment = op.getAlignment(); if (alignment.has_value()) var->setAlignment(llvm::MaybeAlign(alignment.value())); @@ -783,7 +783,7 @@ /// attribute and the second string being its value. Note that even integer /// attributes are expected to have their values expressed as strings. static LogicalResult -forwardPassthroughAttributes(Location loc, Optional attributes, +forwardPassthroughAttributes(Location loc, std::optional attributes, llvm::Function *llvmFunc) { if (!attributes) return success(); @@ -1111,7 +1111,7 @@ llvm::LLVMContext &ctx = llvmModule->getContext(); llvm::SmallVector operands; operands.push_back({}); // Placeholder for self-reference - if (Optional description = op.getDescription()) + if (std::optional description = op.getDescription()) operands.push_back(llvm::MDString::get(ctx, *description)); llvm::MDNode *domain = llvm::MDNode::get(ctx, operands); domain->replaceOperandWith(0, domain); // Self-reference for uniqueness @@ -1130,7 +1130,7 @@ llvm::SmallVector operands; operands.push_back({}); // Placeholder for self-reference operands.push_back(domain); - if (Optional description = op.getDescription()) + if (std::optional description = op.getDescription()) operands.push_back(llvm::MDString::get(ctx, *description)); llvm::MDNode *scope = llvm::MDNode::get(ctx, operands); scope->replaceOperandWith(0, scope); // Self-reference for uniqueness diff --git a/mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp b/mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp --- a/mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp +++ b/mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp @@ -79,7 +79,7 @@ int patternIndex = 0; for (pdl::PatternOp pattern : module.getOps()) { // If the pattern has a name, use that. Otherwise, generate a unique name. - if (Optional patternName = pattern.getSymName()) { + if (std::optional patternName = pattern.getSymName()) { patternNames.insert(patternName->str()); } else { std::string name; diff --git a/mlir/test/lib/Dialect/Test/TestDialect.cpp b/mlir/test/lib/Dialect/Test/TestDialect.cpp --- a/mlir/test/lib/Dialect/Test/TestDialect.cpp +++ b/mlir/test/lib/Dialect/Test/TestDialect.cpp @@ -84,8 +84,8 @@ // Check the contents of the string attribute to see what the test alias // should be named.
- Optional aliasName = - StringSwitch>(strAttr.getValue()) + std::optional aliasName = + StringSwitch>(strAttr.getValue()) .Case("alias_test:dot_in_name", StringRef("test.alias")) .Case("alias_test:trailing_digit", StringRef("test_alias0")) .Case("alias_test:prefixed_digit", StringRef("0_test_alias")) @@ -383,7 +383,7 @@ } ::mlir::LogicalResult FormatInferType2Op::inferReturnTypes( - ::mlir::MLIRContext *context, ::llvm::Optional<::mlir::Location> location, + ::mlir::MLIRContext *context, ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { @@ -424,7 +424,7 @@ return success(); } -Optional +std::optional TestDialect::getParseOperationHook(StringRef opName) const { if (opName == "test.dialect_custom_printer") { return ParseOpHook{[](OpAsmParser &parser, OperationState &state) { @@ -569,7 +569,8 @@ // Parsing static ParseResult parseCustomOptionalOperand( - OpAsmParser &parser, Optional &optOperand) { + OpAsmParser &parser, + std::optional &optOperand) { if (succeeded(parser.parseOptionalLParen())) { optOperand.emplace(); if (parser.parseOperand(*optOperand) || parser.parseRParen()) @@ -580,7 +581,7 @@ static ParseResult parseCustomDirectiveOperands( OpAsmParser &parser, OpAsmParser::UnresolvedOperand &operand, - Optional &optOperand, + std::optional &optOperand, SmallVectorImpl &varOperands) { if (parser.parseOperand(operand)) return failure(); @@ -633,7 +634,7 @@ } static ParseResult parseCustomDirectiveOperandsAndTypes( OpAsmParser &parser, OpAsmParser::UnresolvedOperand &operand, - Optional &optOperand, + std::optional &optOperand, SmallVectorImpl &varOperands, Type &operandType, Type &optOperandType, SmallVectorImpl &varOperandTypes) { @@ -689,7 +690,8 @@ return parser.parseOptionalAttrDict(attrs); } static ParseResult parseCustomDirectiveOptionalOperandRef( - OpAsmParser &parser, Optional &optOperand) { + OpAsmParser &parser, + std::optional &optOperand) { int64_t operandCount = 0; if (parser.parseInteger(operandCount)) return failure(); @@ -1125,7 +1127,7 @@ } LogicalResult OpWithInferTypeInterfaceOp::inferReturnTypes( - MLIRContext *, Optional location, ValueRange operands, + MLIRContext *, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnTypes) { if (operands[0].getType() != operands[1].getType()) { @@ -1140,7 +1142,7 @@ // TODO: We should be able to only define either inferReturnType or // refineReturnType, currently only refineReturnType can be omitted. 
LogicalResult OpWithRefineTypeInterfaceOp::inferReturnTypes( - MLIRContext *context, Optional location, ValueRange operands, + MLIRContext *context, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &returnTypes) { returnTypes.clear(); @@ -1149,7 +1151,7 @@ } LogicalResult OpWithRefineTypeInterfaceOp::refineReturnTypes( - MLIRContext *, Optional location, ValueRange operands, + MLIRContext *, std::optional location, ValueRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &returnTypes) { if (operands[0].getType() != operands[1].getType()) { @@ -1168,8 +1170,8 @@ } LogicalResult OpWithShapedTypeInferTypeInterfaceOp::inferReturnTypeComponents( - MLIRContext *context, Optional location, ValueShapeRange operands, - DictionaryAttr attributes, RegionRange regions, + MLIRContext *context, std::optional location, + ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions, SmallVectorImpl &inferredReturnShapes) { // Create return type consisting of the last element of the first operand. auto operandType = operands.front().getType(); @@ -1177,8 +1179,7 @@ if (!sval) { return emitOptionalError(location, "only shaped type operands allowed"); } - int64_t dim = - sval.hasRank() ? sval.getShape().front() : ShapedType::kDynamic; + int64_t dim = sval.hasRank() ? sval.getShape().front() : ShapedType::kDynamic; auto type = IntegerType::get(context, 17); Attribute encoding; @@ -1451,13 +1452,14 @@ parser.getCurrentLocation(), result.operands); } -OperandRange RegionIfOp::getSuccessorEntryOperands(Optional index) { +OperandRange +RegionIfOp::getSuccessorEntryOperands(std::optional index) { assert(index && *index < 2 && "invalid region index"); return getOperands(); } void RegionIfOp::getSuccessorRegions( - Optional index, ArrayRef operands, + std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // We always branch to the join region. if (index.has_value()) { @@ -1484,7 +1486,7 @@ // AnyCondOp //===----------------------------------------------------------------------===// -void AnyCondOp::getSuccessorRegions(Optional index, +void AnyCondOp::getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { // The parent op branches into the only region, and the region branches back diff --git a/mlir/test/lib/Dialect/Test/TestDialect.td b/mlir/test/lib/Dialect/Test/TestDialect.td --- a/mlir/test/lib/Dialect/Test/TestDialect.td +++ b/mlir/test/lib/Dialect/Test/TestDialect.td @@ -31,7 +31,7 @@ void registerTypes(); // Provides a custom printing/parsing for some operations. 
- ::llvm::Optional + ::std::optional getParseOperationHook(::llvm::StringRef opName) const override; ::llvm::unique_function diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td --- a/mlir/test/lib/Dialect/Test/TestOps.td +++ b/mlir/test/lib/Dialect/Test/TestOps.td @@ -421,7 +421,7 @@ let extraClassDeclaration = [{ static mlir::LogicalResult inferReturnTypes(mlir::MLIRContext *context, - llvm::Optional<::mlir::Location> location, mlir::ValueRange operands, + std::optional<::mlir::Location> location, mlir::ValueRange operands, mlir::DictionaryAttr attributes, mlir::RegionRange regions, llvm::SmallVectorImpl &inferredReturnTypes) { inferredReturnTypes.assign({mlir::IntegerType::get(context, 16)}); @@ -2404,7 +2404,7 @@ let extraClassDeclaration = [{ static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *context, - ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands, + ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { inferredReturnTypes.assign({::mlir::IntegerType::get(context, 16)}); @@ -2427,7 +2427,7 @@ let results = (outs Variadic:$outs); let extraClassDeclaration = [{ static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *context, - ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands, + ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { ::mlir::TypeRange operandTypes = operands.getTypes(); @@ -2474,7 +2474,7 @@ let assemblyFormat = "$region attr-dict"; let extraClassDeclaration = [{ static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *context, - ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands, + ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { if (regions.empty()) @@ -2495,7 +2495,7 @@ let assemblyFormat = "`(` $a `:` type($a) `)` `(` $b `:` type($b) `)` attr-dict"; let extraClassDeclaration = [{ static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *context, - ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands, + ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { FormatInferTypeVariadicOperandsOpAdaptor adaptor(operands, attributes); @@ -2640,7 +2640,7 @@ return getBody(2)->getArguments(); } ::mlir::OperandRange getSuccessorEntryOperands( - ::llvm::Optional index); + ::std::optional index); }]; let hasCustomAssemblyFormat = 1; } @@ -2703,7 +2703,7 @@ let extraClassDeclaration = [{ static ::mlir::LogicalResult inferReturnTypes(::mlir::MLIRContext *, - ::llvm::Optional<::mlir::Location> location, ::mlir::ValueRange operands, + ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { inferredReturnTypes.assign({operands[0].getType()}); diff --git a/mlir/test/mlir-tblgen/enums-gen.td b/mlir/test/mlir-tblgen/enums-gen.td --- a/mlir/test/mlir-tblgen/enums-gen.td +++ 
b/mlir/test/mlir-tblgen/enums-gen.td @@ -27,9 +27,9 @@ // DECL: Bit3 = 8, // DECL: } -// DECL: ::llvm::Optional symbolizeMyBitEnum(uint32_t); +// DECL: ::std::optional symbolizeMyBitEnum(uint32_t); // DECL: std::string stringifyMyBitEnum(MyBitEnum); -// DECL: ::llvm::Optional symbolizeMyBitEnum(::llvm::StringRef); +// DECL: ::std::optional symbolizeMyBitEnum(::llvm::StringRef); // DECL: struct FieldParser<::MyBitEnum, ::MyBitEnum> { // DECL: template @@ -40,7 +40,7 @@ // DECL: if (failed(parser.parseOptionalKeywordOrString(&enumKeyword))) // DECL: return parser.emitError(loc, "expected keyword for An example bit enum"); // DECL: // Symbolize the keyword. -// DECL: if (::llvm::Optional<::MyBitEnum> attr = ::symbolizeEnum<::MyBitEnum>(enumKeyword)) +// DECL: if (::std::optional<::MyBitEnum> attr = ::symbolizeEnum<::MyBitEnum>(enumKeyword)) // DECL: return *attr; // DECL: return parser.emitError(loc, "invalid An example bit enum specification: ") << enumKeyword; // DECL: } @@ -66,7 +66,7 @@ // DEF: if (2u == (2u & val)) // DEF-NEXT: push_back("Bit1") -// DEF-LABEL: ::llvm::Optional symbolizeMyBitEnum(::llvm::StringRef str) +// DEF-LABEL: ::std::optional symbolizeMyBitEnum(::llvm::StringRef str) // DEF: if (str == "none") return MyBitEnum::None; // DEF: .Case("tagged", 1) // DEF: .Case("Bit1", 2) diff --git a/mlir/test/mlir-tblgen/op-attribute.td b/mlir/test/mlir-tblgen/op-attribute.td --- a/mlir/test/mlir-tblgen/op-attribute.td +++ b/mlir/test/mlir-tblgen/op-attribute.td @@ -110,9 +110,9 @@ // DEF: some-attr-kind AOp::getCAttrAttr() // DEF-NEXT: ::mlir::impl::getAttrFromSortedRange((*this)->getAttrs().begin() + 1, (*this)->getAttrs().end() - 0, getCAttrAttrName()).dyn_cast_or_null() -// DEF: ::llvm::Optional AOp::getCAttr() { +// DEF: ::std::optional AOp::getCAttr() { // DEF-NEXT: auto attr = getCAttrAttr() -// DEF-NEXT: return attr ? ::llvm::Optional(attr.some-convert-from-storage()) : (::std::nullopt); +// DEF-NEXT: return attr ? ::std::optional(attr.some-convert-from-storage()) : (::std::nullopt); // DEF: some-attr-kind AOp::getDAttrAttr() // DEF-NEXT: ::mlir::impl::getAttrFromSortedRange((*this)->getAttrs().begin() + 1, (*this)->getAttrs().end() - 0, getDAttrAttrName()).dyn_cast_or_null() @@ -135,7 +135,7 @@ // DEF-NEXT: (*this)->setAttr(getBAttrAttrName(), some-const-builder-call(::mlir::Builder((*this)->getContext()), attrValue)); // DEF: void AOp::setCAttrAttr(some-attr-kind attr) { // DEF-NEXT: (*this)->setAttr(getCAttrAttrName(), attr); -// DEF: void AOp::setCAttr(::llvm::Optional attrValue) { +// DEF: void AOp::setCAttr(::std::optional attrValue) { // DEF-NEXT: if (attrValue) // DEF-NEXT: return (*this)->setAttr(getCAttrAttrName(), some-const-builder-call(::mlir::Builder((*this)->getContext()), *attrValue)); // DEF-NEXT: (*this)->removeAttr(getCAttrAttrName()); @@ -247,9 +247,9 @@ // DEF: some-attr-kind AgetOp::getCAttrAttr() // DEF-NEXT: return ::mlir::impl::getAttrFromSortedRange({{.*}}).dyn_cast_or_null() -// DEF: ::llvm::Optional AgetOp::getCAttr() { +// DEF: ::std::optional AgetOp::getCAttr() { // DEF-NEXT: auto attr = getCAttrAttr() -// DEF-NEXT: return attr ? ::llvm::Optional(attr.some-convert-from-storage()) : (::std::nullopt); +// DEF-NEXT: return attr ? 
::std::optional(attr.some-convert-from-storage()) : (::std::nullopt); // Test setter methods // --- diff --git a/mlir/test/mlir-tblgen/op-decl-and-defs.td b/mlir/test/mlir-tblgen/op-decl-and-defs.td --- a/mlir/test/mlir-tblgen/op-decl-and-defs.td +++ b/mlir/test/mlir-tblgen/op-decl-and-defs.td @@ -61,7 +61,7 @@ // CHECK: ::mlir::IntegerAttr getAttr1Attr(); // CHECK: uint32_t getAttr1(); // CHECK: ::mlir::FloatAttr getSomeAttr2Attr(); -// CHECK: ::llvm::Optional< ::llvm::APFloat > getSomeAttr2(); +// CHECK: ::std::optional< ::llvm::APFloat > getSomeAttr2(); // CHECK: ::mlir::Region &getSomeRegion(); // CHECK: ::mlir::RegionRange getSomeRegions(); // CHECK: private: @@ -88,7 +88,7 @@ // CHECK: ::mlir::IntegerAttr getAttr1Attr() // CHECK: uint32_t getAttr1(); // CHECK: ::mlir::FloatAttr getSomeAttr2Attr() -// CHECK: ::llvm::Optional< ::llvm::APFloat > getSomeAttr2(); +// CHECK: ::std::optional< ::llvm::APFloat > getSomeAttr2(); // CHECK: ::mlir::Attribute removeSomeAttr2Attr(); // CHECK: static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value val); // CHECK: static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, int integer = 0); diff --git a/mlir/test/python/python_test_ops.td b/mlir/test/python/python_test_ops.td --- a/mlir/test/python/python_test_ops.td +++ b/mlir/test/python/python_test_ops.td @@ -71,7 +71,7 @@ let extraClassDeclaration = [{ static ::mlir::LogicalResult inferReturnTypes( - ::mlir::MLIRContext *context, ::llvm::Optional<::mlir::Location> location, + ::mlir::MLIRContext *context, ::std::optional<::mlir::Location> location, ::mlir::ValueRange operands, ::mlir::DictionaryAttr attributes, ::mlir::RegionRange regions, ::llvm::SmallVectorImpl<::mlir::Type> &inferredReturnTypes) { diff --git a/mlir/tools/mlir-tblgen/DialectGen.cpp b/mlir/tools/mlir-tblgen/DialectGen.cpp --- a/mlir/tools/mlir-tblgen/DialectGen.cpp +++ b/mlir/tools/mlir-tblgen/DialectGen.cpp @@ -254,7 +254,7 @@ /// initialize(). /// {2}: The dialect parent class. static const char *const dialectConstructorStr = R"( -{0}::{0}(::mlir::MLIRContext *context) +{0}::{0}(::mlir::MLIRContext *context) : ::mlir::{2}(getDialectNamespace(), context, ::mlir::TypeID::get<{0}>()) {{ {1} initialize(); diff --git a/mlir/tools/mlir-tblgen/EnumsGen.cpp b/mlir/tools/mlir-tblgen/EnumsGen.cpp --- a/mlir/tools/mlir-tblgen/EnumsGen.cpp +++ b/mlir/tools/mlir-tblgen/EnumsGen.cpp @@ -97,7 +97,7 @@ return parser.emitError(loc, "expected keyword for {2}"); // Symbolize the keyword. - if (::llvm::Optional<{0}> attr = {1}::symbolizeEnum<{0}>(enumKeyword)) + if (::std::optional<{0}> attr = {1}::symbolizeEnum<{0}>(enumKeyword)) return *attr; return parser.emitError(loc, "invalid {2} specification: ") << enumKeyword; } @@ -227,7 +227,7 @@ // Returns the EnumAttrCase whose value is zero if exists; returns std::nullopt // otherwise. 
-static llvm::Optional +static std::optional getAllBitsUnsetCase(llvm::ArrayRef cases) { for (auto attrCase : cases) { if (attrCase.getValue() == 0) @@ -381,9 +381,9 @@ StringRef strToSymFnName = enumAttr.getStringToSymbolFnName(); auto enumerants = enumAttr.getAllCases(); - os << formatv("::llvm::Optional<{0}> {1}(::llvm::StringRef str) {{\n", + os << formatv("::std::optional<{0}> {1}(::llvm::StringRef str) {{\n", enumName, strToSymFnName); - os << formatv(" return ::llvm::StringSwitch<::llvm::Optional<{0}>>(str)\n", + os << formatv(" return ::llvm::StringSwitch<::std::optional<{0}>>(str)\n", enumName); for (const auto &enumerant : enumerants) { auto symbol = enumerant.getSymbol(); @@ -405,7 +405,7 @@ auto enumerants = enumAttr.getAllCases(); auto allBitsUnsetCase = getAllBitsUnsetCase(enumerants); - os << formatv("::llvm::Optional<{0}> {1}(::llvm::StringRef str) {{\n", + os << formatv("::std::optional<{0}> {1}(::llvm::StringRef str) {{\n", enumName, strToSymFnName); if (allBitsUnsetCase) { @@ -425,7 +425,7 @@ // Convert each symbol to the bit ordinal and set the corresponding bit. os << formatv(" auto bit = " - "llvm::StringSwitch<::llvm::Optional<{0}>>(symbol.trim())\n", + "llvm::StringSwitch<::std::optional<{0}>>(symbol.trim())\n", underlyingType); for (const auto &enumerant : enumerants) { // Skip the special enumerant for None. @@ -456,7 +456,7 @@ })) return; - os << formatv("::llvm::Optional<{0}> {1}({2} value) {{\n", enumName, + os << formatv("::std::optional<{0}> {1}({2} value) {{\n", enumName, underlyingToSymFnName, underlyingType.empty() ? std::string("unsigned") : underlyingType) @@ -539,7 +539,7 @@ auto enumerants = enumAttr.getAllCases(); auto allBitsUnsetCase = getAllBitsUnsetCase(enumerants); - os << formatv("::llvm::Optional<{0}> {1}({2} value) {{\n", enumName, + os << formatv("::std::optional<{0}> {1}({2} value) {{\n", enumName, underlyingToSymFnName, underlyingType); if (allBitsUnsetCase) { os << " // Special case for all bits unset.\n"; @@ -579,11 +579,11 @@ return enumerant.getValue() >= 0; })) { os << formatv( - "::llvm::Optional<{0}> {1}({2});\n", enumName, underlyingToSymFnName, + "::std::optional<{0}> {1}({2});\n", enumName, underlyingToSymFnName, underlyingType.empty() ? std::string("unsigned") : underlyingType); } os << formatv("{2} {1}({0});\n", enumName, symToStrFnName, symToStrFnRetType); - os << formatv("::llvm::Optional<{0}> {1}(::llvm::StringRef);\n", enumName, + os << formatv("::std::optional<{0}> {1}(::llvm::StringRef);\n", enumName, strToSymFnName); if (enumAttr.isBitEnum()) { @@ -605,10 +605,10 @@ // specified by the user. 
const char *const symbolizeEnumStr = R"( template -::llvm::Optional symbolizeEnum(::llvm::StringRef); +::std::optional symbolizeEnum(::llvm::StringRef); template <> -inline ::llvm::Optional<{0}> symbolizeEnum<{0}>(::llvm::StringRef str) { +inline ::std::optional<{0}> symbolizeEnum<{0}>(::llvm::StringRef str) { return {1}(str); } )"; diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp --- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp +++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp @@ -1129,7 +1129,7 @@ method = createMethod("bool"); else if (isOptional) method = - createMethod("::llvm::Optional<" + baseAttr.getReturnType() + ">"); + createMethod("::std::optional<" + baseAttr.getReturnType() + ">"); else method = createMethod(attr.getReturnType()); if (!method) @@ -1148,7 +1148,7 @@ // TODO: Handle unit attr parameters specially, given that it is treated as // optional but not in the same way as the others (i.e. it uses bool over - // llvm::Optional<>). + // std::optional<>). StringRef paramStr = isUnitAttr ? "attrValue" : "*attrValue"; const char *optionalCodeBody = R"( if (attrValue) @@ -2949,7 +2949,7 @@ adaptor.addField("::mlir::ValueRange", "odsOperands"); adaptor.addField("::mlir::DictionaryAttr", "odsAttrs"); adaptor.addField("::mlir::RegionRange", "odsRegions"); - adaptor.addField("::llvm::Optional<::mlir::OperationName>", "odsOpName"); + adaptor.addField("::std::optional<::mlir::OperationName>", "odsOpName"); const auto *attrSizedOperands = op.getTrait("::m::OpTrait::AttrSizedOperandSegments"); diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp --- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp +++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp @@ -944,7 +944,7 @@ << "OperandsLoc = parser.getCurrentLocation();\n"; if (var->isOptional()) { body << llvm::formatv( - " ::llvm::Optional<::mlir::OpAsmParser::UnresolvedOperand> " + " ::std::optional<::mlir::OpAsmParser::UnresolvedOperand> " "{0}Operand;\n", var->name); } else if (var->isVariadicOfVariadic()) { @@ -973,7 +973,7 @@ body << llvm::formatv( " {0} {1}Operand = {1}Operands.empty() ? 
{0}() : " "{1}Operands[0];\n", - "::llvm::Optional<::mlir::OpAsmParser::UnresolvedOperand>", + "::std::optional<::mlir::OpAsmParser::UnresolvedOperand>", operand->getVar()->name); } else if (auto *type = dyn_cast(input)) { diff --git a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp --- a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp +++ b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp @@ -346,7 +346,7 @@ for (const auto &classCasePair : classCaseMap) { Availability avail = classCasePair.getValue().front().second; - os << formatv("llvm::Optional<{0}> {1}({2} value) {{\n", + os << formatv("std::optional<{0}> {1}({2} value) {{\n", avail.getMergeInstanceType(), avail.getQueryFnName(), enumName); @@ -388,7 +388,7 @@ for (const auto &classCasePair : classCaseMap) { Availability avail = classCasePair.getValue().front().second; - os << formatv("llvm::Optional<{0}> {1}({2} value) {{\n", + os << formatv("std::optional<{0}> {1}({2} value) {{\n", avail.getMergeInstanceType(), avail.getQueryFnName(), enumName); @@ -433,7 +433,7 @@ StringRef className = avail.getClass(); if (handledClasses.count(className)) continue; - os << formatv("llvm::Optional<{0}> {1}({2} value);\n", + os << formatv("std::optional<{0}> {1}({2} value);\n", avail.getMergeInstanceType(), avail.getQueryFnName(), enumName); handledClasses.insert(className); diff --git a/mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp b/mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp --- a/mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp +++ b/mlir/unittests/Interfaces/ControlFlowInterfacesTest.cpp @@ -37,7 +37,7 @@ } // Regions have no successors. - void getSuccessorRegions(Optional index, + void getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) {} }; @@ -52,7 +52,7 @@ static StringRef getOperationName() { return "cftest.loop_regions_op"; } - void getSuccessorRegions(Optional index, + void getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { if (index) { @@ -76,7 +76,7 @@ return "cftest.double_loop_regions_op"; } - void getSuccessorRegions(Optional index, + void getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { if (index.has_value()) { @@ -95,7 +95,7 @@ static StringRef getOperationName() { return "cftest.sequential_regions_op"; } // Region 0 has Region 1 as a successor. - void getSuccessorRegions(Optional index, + void getSuccessorRegions(std::optional index, ArrayRef operands, SmallVectorImpl ®ions) { if (index == 0u) { diff --git a/mlir/unittests/TableGen/EnumsGenTest.cpp b/mlir/unittests/TableGen/EnumsGenTest.cpp --- a/mlir/unittests/TableGen/EnumsGenTest.cpp +++ b/mlir/unittests/TableGen/EnumsGenTest.cpp @@ -54,8 +54,8 @@ } TEST(EnumsGenTest, GeneratedStringToSymbolFn) { - EXPECT_EQ(llvm::Optional(FooEnum::CaseA), ConvertToEnum("CaseA")); - EXPECT_EQ(llvm::Optional(FooEnum::CaseB), ConvertToEnum("CaseB")); + EXPECT_EQ(std::optional(FooEnum::CaseA), ConvertToEnum("CaseA")); + EXPECT_EQ(std::optional(FooEnum::CaseB), ConvertToEnum("CaseB")); EXPECT_EQ(std::nullopt, ConvertToEnum("X")); }
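
Note: the hunks above change only the spelled type. The call sites keep the same member surface (value_or, has_value, operator*, std::nullopt, and StringSwitch over an optional enum), so the migration reads as a textual rename of llvm::Optional to std::optional. For reference, below is a minimal standalone C++17 sketch of those idioms. It is not part of the patch; MyEnum, symbolizeMyEnum, and the use of std::string_view in place of llvm::StringRef are illustrative assumptions only.

#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>
#include <string_view>

enum class MyEnum { CaseA, CaseB };

// Shape of the generated symbolize*() helpers: return the enumerant on a
// match, std::nullopt otherwise (the generated code builds this with
// llvm::StringSwitch over ::std::optional<Enum>).
std::optional<MyEnum> symbolizeMyEnum(std::string_view str) {
  if (str == "CaseA")
    return MyEnum::CaseA;
  if (str == "CaseB")
    return MyEnum::CaseB;
  return std::nullopt;
}

int main() {
  // GetOperandsOp/GetResultsOp lowering style: an absent index is encoded as
  // the maximum value via value_or().
  std::optional<uint32_t> index;  // stands in for op.getIndex()
  uint32_t encoded = index.value_or(std::numeric_limits<uint32_t>::max());
  std::cout << "encoded index: " << encoded << '\n';

  // Global/alignment lowering style: act only when a value is present.
  std::optional<uint64_t> alignment = 16;
  if (alignment.has_value())
    std::cout << "alignment: " << *alignment << '\n';

  // Symbolize-and-check pattern from the EnumsGen unit test.
  if (std::optional<MyEnum> parsed = symbolizeMyEnum("CaseA"))
    std::cout << "parsed CaseA: " << (*parsed == MyEnum::CaseA) << '\n';

  return 0;
}

Because only the type name changes in these hunks, anything that relied on an llvm::Optional-specific helper rather than this common interface would need a genuine rewrite; none appears in this section of the patch.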