diff --git a/mlir/docs/DefiningDialects/Operations.md b/mlir/docs/DefiningDialects/Operations.md
--- a/mlir/docs/DefiningDialects/Operations.md
+++ b/mlir/docs/DefiningDialects/Operations.md
@@ -910,9 +910,9 @@
 ```tablegen
 def FooOp : ... {
-  let arguments = (ins UnitAttr:$is_read_only);
+  let arguments = (ins UnitAttr:$read_only);
 
-  let assemblyFormat = "attr-dict (`is_read_only` $is_read_only^)?";
+  let assemblyFormat = "attr-dict (`read_only` $read_only^)?";
 }
 ```
@@ -920,7 +920,7 @@
 ```mlir
 // When the unit attribute is present:
-foo.op is_read_only
+foo.op read_only
 
 // When the unit attribute is not present:
 foo.op
diff --git a/mlir/include/mlir/Dialect/AMX/AMX.td b/mlir/include/mlir/Dialect/AMX/AMX.td
--- a/mlir/include/mlir/Dialect/AMX/AMX.td
+++ b/mlir/include/mlir/Dialect/AMX/AMX.td
@@ -231,8 +231,8 @@
   let arguments = (ins VectorOfRankAndType<[2], [I32, I8]>:$lhs,
                    VectorOfRankAndType<[2], [I32, I8]>:$rhs,
                    VectorOfRankAndType<[2], [I32, I8]>:$acc,
-                   UnitAttr:$isZextLhs,
-                   UnitAttr:$isZextRhs
+                   UnitAttr:$zextLhs,
+                   UnitAttr:$zextRhs
                    );
   let results = (outs VectorOfRankAndType<[2], [I32, I8]>:$res);
   let extraClassDeclaration = [{
@@ -246,7 +246,7 @@
       return ::llvm::cast(getRes().getType());
     }
   }];
-  let assemblyFormat = "$lhs (`zext` $isZextLhs^)? `,` $rhs (`zext` $isZextRhs^)? `,` $acc attr-dict `:` "
+  let assemblyFormat = "$lhs (`zext` $zextLhs^)? `,` $rhs (`zext` $zextRhs^)? `,` $acc attr-dict `:` "
                        "type($lhs) `,` type($rhs) `,` type($acc) ";
   let hasVerifier = 1;
 }
diff --git a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td
--- a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td
+++ b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td
@@ -159,7 +159,7 @@
     emitc.include <"myheader.h">
 
     // Generic form of the same operation.
-    "emitc.include" (){include = "myheader.h", is_standard_include} : () -> ()
+    "emitc.include" (){include = "myheader.h", standard_include} : () -> ()
 
     // Custom form defining the inclusion of `"myheader"`.
     emitc.include "myheader.h"
@@ -170,7 +170,7 @@
  }];
  let arguments = (ins
    Arg:$include,
-    UnitAttr:$is_standard_include
+    UnitAttr:$standard_include
  );
  let hasCustomAssemblyFormat = 1;
 }
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -1905,7 +1905,7 @@
                        StrAttr:$asm_string,
                        StrAttr:$constraints,
                        UnitAttr:$has_side_effects,
-                       UnitAttr:$is_align_stack,
+                       UnitAttr:$align_stack,
                        OptionalAttr< DefaultValuedAttr>:$asm_dialect,
                        OptionalAttr:$operand_attrs);
@@ -1914,7 +1914,7 @@
   let assemblyFormat = [{
     (`has_side_effects` $has_side_effects^)?
-    (`is_align_stack` $is_align_stack^)?
+    (`align_stack` $align_stack^)?
     (`asm_dialect` `=` $asm_dialect^)?
     (`operand_attrs` `=` $operand_attrs^)?
    attr-dict
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.td
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.td
@@ -181,15 +181,15 @@
   let arguments = (ins TransformHandleTypeInterface:$operand_handle,
                        DenseI64ArrayAttr:$raw_dim_list,
-                       UnitAttr:$is_inverted,
-                       UnitAttr:$is_all,
+                       UnitAttr:$inverted,
+                       UnitAttr:$all,
                        UnitAttr:$parallel,
                        UnitAttr:$reduction);
   let results = (outs Optional:$result);
   let assemblyFormat = "$operand_handle `[`"
-      "custom($raw_dim_list, $is_inverted, $is_all)"
+      "custom($raw_dim_list, $inverted, $all)"
      "`]` attr-dict `:` "
      "custom(type($operand_handle), type($result))";
@@ -237,8 +237,8 @@
   // out-of-bounds accesses.
   let arguments = (ins TransformHandleTypeInterface:$operand_handle,
                        DenseI64ArrayAttr:$raw_position_list,
-                       UnitAttr:$is_inverted,
-                       UnitAttr:$is_all,
+                       UnitAttr:$inverted,
+                       UnitAttr:$all,
                        UnitAttr:$permutation,
                        UnitAttr:$projected_permutation);
@@ -247,7 +247,7 @@
   let results = (outs Optional:$result);
   let assemblyFormat = "$operand_handle `[`"
-      "custom($raw_position_list, $is_inverted, $is_all)"
+      "custom($raw_position_list, $inverted, $all)"
      "`]` attr-dict "
      "`:` custom(type($operand_handle), type($result))";
diff --git a/mlir/include/mlir/Dialect/MLProgram/IR/MLProgramOps.td b/mlir/include/mlir/Dialect/MLProgram/IR/MLProgramOps.td
--- a/mlir/include/mlir/Dialect/MLProgram/IR/MLProgramOps.td
+++ b/mlir/include/mlir/Dialect/MLProgram/IR/MLProgramOps.td
@@ -152,14 +152,14 @@
   let arguments = (ins
     SymbolNameAttr:$sym_name,
     TypeAttr:$type,
-    UnitAttr:$is_mutable,
+    UnitAttr:$global_mutable,
     OptionalAttr:$value,
     OptionalAttr:$sym_visibility
   );
 
   let assemblyFormat = [{
     custom($sym_visibility)
-    (`mutable` $is_mutable^)?
+    (`mutable` $global_mutable^)?
     $sym_name ``
     custom($type, $value)
     attr-dict
diff --git a/mlir/include/mlir/TableGen/Operator.h b/mlir/include/mlir/TableGen/Operator.h
--- a/mlir/include/mlir/TableGen/Operator.h
+++ b/mlir/include/mlir/TableGen/Operator.h
@@ -344,6 +344,13 @@
   /// Returns the getter name for the accessor of `name`.
   std::string getGetterName(StringRef name) const;
 
+  /// Returns the getter name for the accessor of `name`.
+  /// Searches for the `name` in attributes to check for the UnitAttr type.
+  std::string getGetterNameUnitAttr(StringRef name) const;
+
+  /// Returns the getter name for the accessor of `name`.
+  std::string getGetterNameAttr(const NamedAttribute &attr) const;
+
   /// Returns the setter name for the accessor of `name`.
std::string getSetterName(StringRef name) const; diff --git a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp --- a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp +++ b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp @@ -349,7 +349,7 @@ destElem = destType.getElementType(); if (sourceElem.isF32() && destElem.isF32()) { - if (mfma.getReducePrecision() && chipset.minorVersion >= 0x40) { + if (mfma.isReducePrecision() && chipset.minorVersion >= 0x40) { if (m == 32 && n == 32 && k == 4 && b == 1) return ROCDL::mfma_f32_32x32x4_xf32::getOperationName(); if (m == 16 && n == 16 && k == 8 && b == 1) @@ -487,11 +487,11 @@ if (chipset.majorVersion != 9 || chipset.minorVersion < 0x08) return op->emitOpError("MFMA only supported on gfx908+"); uint32_t getBlgpField = static_cast(op.getBlgp()); - if (op.getNegateA() || op.getNegateB() || op.getNegateC()) { + if (op.isNegateA() || op.isNegateB() || op.isNegateC()) { if (chipset.minorVersion < 0x40) return op.emitOpError("negation unsupported on older than gfx840"); getBlgpField |= - op.getNegateA() | (op.getNegateB() << 1) | (op.getNegateC() << 2); + op.isNegateA() | (op.isNegateB() << 1) | (op.isNegateC() << 2); } std::optional maybeIntrinsic = mfmaOpToIntrinsic(op, chipset); if (!maybeIntrinsic.has_value()) diff --git a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp --- a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp +++ b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp @@ -649,7 +649,7 @@ LogicalResult ConvertAllocOpToGpuRuntimeCallPattern::matchAndRewrite( gpu::AllocOp allocOp, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const { - if (adaptor.getHostShared()) + if (adaptor.isHostShared()) return rewriter.notifyMatchFailure( allocOp, "host_shared allocation is not supported"); diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp --- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp +++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp @@ -89,7 +89,7 @@ matchAndRewrite(gpu::SubgroupReduceOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - if (!op.getUniform()) + if (!op.isUniform()) return rewriter.notifyMatchFailure( op, "cannot be lowered to redux as the op must be run " "uniformly (entire subgroup)."); diff --git a/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp b/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp --- a/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp +++ b/mlir/lib/Conversion/GPUToNVVM/WmmaOpsToNvvm.cpp @@ -87,7 +87,7 @@ // Get the shape of the MMAMatrix type being returned. The shape will // choose which intrinsic this op will be lowered to. - NVVM::MMALayout layout = subgroupMmaLoadMatrixOp.getTranspose() + NVVM::MMALayout layout = subgroupMmaLoadMatrixOp.isTranspose() ? NVVM::MMALayout::col : NVVM::MMALayout::row; gpu::MMAMatrixType retType = @@ -159,7 +159,7 @@ gpu::MMAMatrixType srcType = cast(subgroupMmaStoreMatrixOp.getSrc().getType()); ArrayRef srcTypeShape = srcType.getShape(); - NVVM::MMALayout layout = subgroupMmaStoreMatrixOp.getTranspose() + NVVM::MMALayout layout = subgroupMmaStoreMatrixOp.isTranspose() ? 
NVVM::MMALayout::col : NVVM::MMALayout::row; NVVM::MMATypes eltype = getElementType(srcType); @@ -231,10 +231,10 @@ int64_t m = cTypeShape[0]; int64_t n = cTypeShape[1]; int64_t k = aTypeShape[1]; - NVVM::MMALayout aLayout = subgroupMmaComputeOp.getATranspose() + NVVM::MMALayout aLayout = subgroupMmaComputeOp.isATranspose() ? NVVM::MMALayout::col : NVVM::MMALayout::row; - NVVM::MMALayout bLayout = subgroupMmaComputeOp.getBTranspose() + NVVM::MMALayout bLayout = subgroupMmaComputeOp.isBTranspose() ? NVVM::MMALayout::col : NVVM::MMALayout::row; NVVM::MMATypes sourceType = getElementType(aType); diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp --- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp +++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp @@ -547,7 +547,7 @@ auto result = createGroupReduceOp(rewriter, op.getLoc(), adaptor.getValue(), *opType, - /*isGroup*/ true, op.getUniform()); + /*isGroup*/ true, op.isUniform()); if (!result) return failure(); @@ -568,7 +568,7 @@ auto opType = op.getOp(); auto result = createGroupReduceOp(rewriter, op.getLoc(), adaptor.getValue(), opType, - /*isGroup*/ false, op.getUniform()); + /*isGroup*/ false, op.isUniform()); if (!result) return failure(); diff --git a/mlir/lib/Conversion/GPUToSPIRV/WmmaOpsToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/WmmaOpsToSPIRV.cpp --- a/mlir/lib/Conversion/GPUToSPIRV/WmmaOpsToSPIRV.cpp +++ b/mlir/lib/Conversion/GPUToSPIRV/WmmaOpsToSPIRV.cpp @@ -92,7 +92,7 @@ auto i32Type = rewriter.getI32Type(); auto strideValue = rewriter.create( loc, i32Type, IntegerAttr::get(i32Type, stride)); - bool isColMajor = static_cast(subgroupMmaLoadMatrixOp.getTranspose()); + bool isColMajor = static_cast(subgroupMmaLoadMatrixOp.isTranspose()); auto columnMajor = rewriter.create( loc, rewriter.getI1Type(), rewriter.getBoolAttr(isColMajor)); rewriter.replaceOpWithNewOp( @@ -123,7 +123,7 @@ auto strideValue = rewriter.create( loc, i32Type, IntegerAttr::get(i32Type, stride)); bool useColMajor = - static_cast(subgroupMmaStoreMatrixOp.getTranspose()); + static_cast(subgroupMmaStoreMatrixOp.isTranspose()); auto columnMajor = rewriter.create( loc, rewriter.getI1Type(), rewriter.getBoolAttr(useColMajor)); rewriter.replaceOpWithNewOp( diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp --- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp +++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp @@ -685,7 +685,7 @@ return global.emitOpError( "memory space cannot be converted to an integer address space"); auto newGlobal = rewriter.replaceOpWithNewOp( - global, arrayTy, global.getConstant(), linkage, global.getSymName(), + global, arrayTy, global.isConstant(), linkage, global.getSymName(), initialValue, alignment, *addressSpace); if (!global.isExternal() && global.isUninitialized()) { Block *blk = new Block(); diff --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp --- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp +++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp @@ -623,7 +623,7 @@ // bypass L1 is only supported for byte sizes of 16, we drop the hint // otherwise. UnitAttr bypassL1 = - sizeInBytes == 16 ? adaptor.getBypassL1Attr() : UnitAttr(); + sizeInBytes == 16 ? adaptor.isBypassL1Attr() : UnitAttr(); // When the optional SrcElements argument is present, the source (global // memory) of CpAsyncOp is read only for SrcElements number of elements. 
The diff --git a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp --- a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp +++ b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp @@ -276,7 +276,7 @@ return emitOpError( "block ID for permuting A (abid) must be below 2 ** cbsz"); - if ((getNegateA() || getNegateB() || getNegateC()) && !destElem.isF64()) + if ((isNegateA() || isNegateB() || isNegateC()) && !destElem.isF64()) return emitOpError( "negation flags only available for double-precision operations"); diff --git a/mlir/lib/Dialect/AMX/Transforms/LegalizeForLLVMExport.cpp b/mlir/lib/Dialect/AMX/Transforms/LegalizeForLLVMExport.cpp --- a/mlir/lib/Dialect/AMX/Transforms/LegalizeForLLVMExport.cpp +++ b/mlir/lib/Dialect/AMX/Transforms/LegalizeForLLVMExport.cpp @@ -187,8 +187,8 @@ getTileSizes(rewriter, *getTypeConverter(), bType, op.getLoc()); // Replace operation with intrinsic. Type resType = typeConverter->convertType(cType); - bool zexta = op.getIsZextLhs(); - bool zextb = op.getIsZextRhs(); + bool zexta = op.isZextLhs(); + bool zextb = op.isZextRhs(); if (zexta && zextb) rewriter.replaceOpWithNewOp( op, resType, tsza.first, tszb.second, tsza.second, adaptor.getAcc(), diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp @@ -562,7 +562,7 @@ //===----------------------------------------------------------------------===// bool ToTensorOp::isWritable(Value value, const AnalysisState &state) { - return getWritable(); + return isWritable(); } OpFoldResult ToTensorOp::fold(FoldAdaptor) { diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp --- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp @@ -949,7 +949,7 @@ // attribute. Such tensors may alias any other tensor, which is currently // not handled in the analysis. 
if (auto toTensorOp = dyn_cast(op.getOperation())) { - if (!toTensorOp.getRestrict()) { + if (!toTensorOp.isRestrict()) { op->emitError("to_tensor ops without `restrict` are not supported by " "One-Shot Analysis"); return WalkResult::interrupt(); diff --git a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp --- a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp +++ b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp @@ -148,7 +148,7 @@ //===----------------------------------------------------------------------===// void IncludeOp::print(OpAsmPrinter &p) { - bool standardInclude = getIsStandardInclude(); + bool standardInclude = isStandardInclude(); p << " "; if (standardInclude) @@ -172,8 +172,7 @@ << "expected trailing '>' for standard include"; if (standardInclude) - result.addAttribute("is_standard_include", - UnitAttr::get(parser.getContext())); + result.addAttribute("standard_include", UnitAttr::get(parser.getContext())); return success(); } diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp --- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp +++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp @@ -449,7 +449,7 @@ } OpFoldResult gpu::AllReduceOp::fold(FoldAdaptor /*adaptor*/) { - if (!getUniform() && canMakeGroupOpUniform(*this)) { + if (!isUniform() && canMakeGroupOpUniform(*this)) { setUniform(true); return getResult(); } @@ -491,7 +491,7 @@ } OpFoldResult gpu::SubgroupReduceOp::fold(FoldAdaptor /*adaptor*/) { - if (!getUniform() && canMakeGroupOpUniform(*this)) { + if (!isUniform() && canMakeGroupOpUniform(*this)) { setUniform(true); return getResult(); } diff --git a/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp b/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp --- a/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp +++ b/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp @@ -669,7 +669,7 @@ IRRewriter rewriter(getContext()); auto transformOp = cast(getOperation()); - if (!getGenerateGpuLaunch() && !gpuLaunch) { + if (!isGenerateGpuLaunch() && !gpuLaunch) { DiagnosedSilenceableFailure diag = emitSilenceableError() << "Given target is not gpu.launch, set `generate_gpu_launch` " @@ -687,14 +687,14 @@ } SmallVector gridDims{getGridDims()}; - if (!getGenerateGpuLaunch() && gridDims.size() != 3) + if (!isGenerateGpuLaunch() && gridDims.size() != 3) return transformOp.emitDefiniteFailure("transform require size-3 mapping"); OpBuilder::InsertionGuard guard(rewriter); rewriter.setInsertionPoint(topLevelForallOp); // Generate gpu launch here and move the forall inside - if (getGenerateGpuLaunch()) { + if (isGenerateGpuLaunch()) { DiagnosedSilenceableFailure diag = createGpuLaunch(rewriter, target->getLoc(), transformOp, gpuLaunch); if (!diag.succeeded()) { diff --git a/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp b/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp --- a/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp +++ b/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp @@ -397,7 +397,7 @@ SmallVector reduceOps; auto callback = [&](gpu::AllReduceOp reduceOp) -> WalkResult { - if (!reduceOp.getUniform()) + if (!reduceOp.isUniform()) return WalkResult::interrupt(); reduceOps.emplace_back(reduceOp); diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -186,7 +186,7 @@ auto funcTy = FunctionType::get(getContext(), {getArraySize().getType()}, {getType()}); - if (getInalloca()) + 
if (isInalloca()) p << " inalloca"; p << ' ' << getArraySize() << " x " << elemTy; @@ -540,8 +540,7 @@ result.addAttribute(getRawConstantIndicesAttrName(result.name), builder.getDenseI32ArrayAttr(rawConstantIndices)); if (inbounds) { - result.addAttribute(getInboundsAttrName(result.name), - builder.getUnitAttr()); + result.addAttribute(getInboundsAttrName(result.name), builder.getUnitAttr()); } if (llvm::cast(extractVectorElementType(basePtr.getType())) .isOpaque()) @@ -1285,7 +1284,7 @@ // Consistency of llvm.landingpad result types is checked in // LLVMFuncOp::verify(). - if (!getCleanup() && getOperands().empty()) + if (!isCleanup() && getOperands().empty()) return emitError("landingpad instruction expects at least one clause or " "cleanup attribute"); @@ -1317,7 +1316,7 @@ } void LandingpadOp::print(OpAsmPrinter &p) { - p << (getCleanup() ? " cleanup " : " "); + p << (isCleanup() ? " cleanup " : " "); // Clauses for (auto value : getOperands()) { @@ -1606,13 +1605,11 @@ builder.getStringAttr(name)); result.addAttribute(getGlobalTypeAttrName(result.name), TypeAttr::get(type)); if (isConstant) - result.addAttribute(getConstantAttrName(result.name), - builder.getUnitAttr()); + result.addAttribute(getConstantAttrName(result.name), builder.getUnitAttr()); if (value) result.addAttribute(getValueAttrName(result.name), value); if (dsoLocal) - result.addAttribute(getDsoLocalAttrName(result.name), - builder.getUnitAttr()); + result.addAttribute(getDsoLocalAttrName(result.name), builder.getUnitAttr()); if (threadLocal) result.addAttribute(getThreadLocal_AttrName(result.name), builder.getUnitAttr()); @@ -1638,14 +1635,14 @@ StringRef visibility = stringifyVisibility(getVisibility_()); if (!visibility.empty()) p << visibility << ' '; - if (getThreadLocal_()) + if (isThreadLocal_()) p << "thread_local "; if (auto unnamedAddr = getUnnamedAddr()) { StringRef str = stringifyUnnamedAddr(*unnamedAddr); if (!str.empty()) p << str << ' '; } - if (getConstant()) + if (isConstant()) p << "constant "; p.printSymbolName(getSymName()); p << '('; @@ -2003,8 +2000,7 @@ CConvAttr::get(builder.getContext(), cconv)); result.attributes.append(attrs.begin(), attrs.end()); if (dsoLocal) - result.addAttribute(getDsoLocalAttrName(result.name), - builder.getUnitAttr()); + result.addAttribute(getDsoLocalAttrName(result.name), builder.getUnitAttr()); if (functionEntryCount) result.addAttribute(getFunctionEntryCountAttrName(result.name), builder.getI64IntegerAttr(functionEntryCount.value())); diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp @@ -83,7 +83,7 @@ // be removed (provided it loads the exact stored value and is not // volatile). return blockingUse == slot.ptr && getAddr() == slot.ptr && - getResult().getType() == slot.elemType && !getVolatile_(); + getResult().getType() == slot.elemType && !isVolatile_(); } DeletionKind LLVM::LoadOp::removeBlockingUses( @@ -106,7 +106,7 @@ // store OF the slot pointer, only INTO the slot pointer. 
return blockingUse == slot.ptr && getAddr() == slot.ptr && getValue() != slot.ptr && getValue().getType() == slot.elemType && - !getVolatile_(); + !isVolatile_(); } DeletionKind LLVM::StoreOp::removeBlockingUses( diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp --- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp @@ -70,7 +70,7 @@ LogicalResult CpAsyncOp::verify() { if (getSize() != 4 && getSize() != 8 && getSize() != 16) return emitError("expected byte size to be either 4, 8 or 16."); - if (getBypassL1() && getSize() != 16) + if (isBypassL1() && getSize() != 16) return emitError("bypass l1 is only support for 16 bytes copy."); return success(); } diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgMatchOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgMatchOps.cpp --- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgMatchOps.cpp +++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgMatchOps.cpp @@ -179,7 +179,7 @@ } return DiagnosedSilenceableFailure::success(); } - if (getPassthrough()) { + if (isPassthrough()) { Block &body = linalgOp->getRegion(0).front(); if (body.getTerminator()->getOperands() != linalgOp.getRegionInputArgs()) { return emitSilenceableError() << "not a passthrough"; @@ -190,7 +190,7 @@ } LogicalResult transform::MatchStructuredBodyOp::verify() { - if (getReductionPosition() && getPassthrough()) { + if (getReductionPosition() && isPassthrough()) { return emitOpError() << "reduction position and passthrough conditions are " "mutually exclusive"; } @@ -320,17 +320,17 @@ return diag; // If asked to check for the kind of dimension, perform the check. - if (getParallel() || getReduction()) { + if (isParallel() || isReduction()) { SmallVector reference; - if (getParallel()) + if (isParallel()) linalgOp.getParallelDims(reference); - else if (getReduction()) + else if (isReduction()) linalgOp.getReductionDims(reference); DiagnosedSilenceableFailure diag = containsAll(reference, dimensions, getLoc(), - getParallel() ? "expects dimension #{0} to be parallel" - : "expects dimension #{0} to be reduction"); + isParallel() ? 
"expects dimension #{0} to be parallel" + : "expects dimension #{0} to be reduction"); if (!diag.succeeded()) return diag; } @@ -351,9 +351,8 @@ DiagnosedSilenceableFailure transform::MatchStructuredDimOp::getDimensionsFor( linalg::LinalgOp op, SmallVectorImpl &dims) { - DiagnosedSilenceableFailure diag = - expandTargetSpecification(getLoc(), getIsAll(), getIsInverted(), - getRawDimList(), op.getNumLoops(), dims); + DiagnosedSilenceableFailure diag = expandTargetSpecification( + getLoc(), isAll(), isInverted(), getRawDimList(), op.getNumLoops(), dims); if (diag.isSilenceableFailure()) { diag.attachNote(op->getLoc()) << "while considering dimensions of this payload operation"; @@ -362,12 +361,12 @@ } LogicalResult transform::MatchStructuredDimOp::verify() { - if (getParallel() && getReduction()) { + if (isParallel() && isReduction()) { return emitOpError() << "cannot request the same dimension to be both " "parallel and reduction"; } return verifyStructuredTransformDimsOp(getOperation(), getRawDimList(), - getIsInverted(), getIsAll()); + isInverted(), isAll()); } //===----------------------------------------------------------------------===// @@ -414,11 +413,11 @@ for (int64_t position : positions) { AffineMap indexingMap = linalgOp.getMatchingIndexingMap(linalgOp.getDpsInputOperand(position)); - if (getPermutation() && !indexingMap.isPermutation()) { + if (isPermutation() && !indexingMap.isPermutation()) { return emitSilenceableError() << "the indexing map for input #" << position << " is not a permutation"; } - if (getProjectedPermutation() && !indexingMap.isProjectedPermutation()) { + if (isProjectedPermutation() && !indexingMap.isProjectedPermutation()) { return emitSilenceableError() << "the indexing map for input #" << position << " is not a projected permutation"; @@ -449,7 +448,7 @@ DiagnosedSilenceableFailure transform::MatchStructuredInputOp::getPositionsFor( linalg::LinalgOp op, SmallVectorImpl &positions) { DiagnosedSilenceableFailure diag = expandTargetSpecification( - getLoc(), getIsAll(), getIsInverted(), getRawPositionList(), + getLoc(), isAll(), isInverted(), getRawPositionList(), op.getNumDpsInputs(), positions); if (diag.isSilenceableFailure()) { diag.attachNote(op->getLoc()) @@ -462,7 +461,7 @@ /// attributes specifying the operand positions. 
template LogicalResult verifyStructuredOperandOp(OpTy op) { - if (op.getPermutation() && op.getProjectedPermutation()) { + if (op.isPermutation() && op.isProjectedPermutation()) { return op.emitOpError() << op.getPermutationAttrName() << " and " << op.getProjectedPermutationAttrName() << " are mutually exclusive"; @@ -479,7 +478,7 @@ if (failed(verifyStructuredOperandOp(*this))) return failure(); return verifyStructuredTransformDimsOp(getOperation(), getRawPositionList(), - getIsInverted(), getIsAll()); + isInverted(), isAll()); } //===----------------------------------------------------------------------===// @@ -500,11 +499,11 @@ for (int64_t position : positions) { AffineMap indexingMap = linalgOp.getMatchingIndexingMap(linalgOp.getDpsInitOperand(position)); - if (getPermutation() && !indexingMap.isPermutation()) { + if (isPermutation() && !indexingMap.isPermutation()) { return emitSilenceableError() << "the indexing map for output(init) #" << position << " is not a permutation"; } - if (getProjectedPermutation() && !indexingMap.isProjectedPermutation()) { + if (isProjectedPermutation() && !indexingMap.isProjectedPermutation()) { return emitSilenceableError() << "the indexing map for output(init) #" << position << " is not a permutation"; } @@ -534,7 +533,7 @@ DiagnosedSilenceableFailure transform::MatchStructuredInitOp::getPositionsFor( linalg::LinalgOp op, SmallVectorImpl &positions) { DiagnosedSilenceableFailure diag = expandTargetSpecification( - getLoc(), getIsAll(), getIsInverted(), getRawPositionList(), + getLoc(), isAll(), isInverted(), getRawPositionList(), op.getNumDpsInits(), positions); if (diag.isSilenceableFailure()) { diag.attachNote(op->getLoc()) @@ -547,7 +546,7 @@ if (failed(verifyStructuredOperandOp(*this))) return failure(); return verifyStructuredTransformDimsOp(getOperation(), getRawPositionList(), - getIsInverted(), getIsAll()); + isInverted(), isAll()); } //===----------------------------------------------------------------------===// @@ -618,11 +617,11 @@ << "no users of the result #" << getPosition(); } Operation *firstUser = *result.getUsers().begin(); - if (getAny()) { + if (isAny()) { results.set(cast(getResult()), {firstUser}); return DiagnosedSilenceableFailure::success(); } - if (getSingle()) { + if (isSingle()) { if (!llvm::hasSingleElement(result.getUsers())) { return emitSilenceableError() << "more than one result user with single user requested"; @@ -648,12 +647,12 @@ } LogicalResult transform::MatchStructuredResultOp::verify() { - if ((getAny() || getSingle()) ^ + if ((isAny() || isSingle()) ^ isa(getResult().getType())) { return emitOpError() << "expects either the any/single keyword or the type " "value handle result type"; } - if (getAny() && getSingle()) { + if (isAny() && isSingle()) { return emitOpError() << "'any' and 'single' are mutually exclusive"; } return success(); diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp --- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp +++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp @@ -1615,11 +1615,11 @@ if (!getOperandsToPromote().empty()) promotionOptions = promotionOptions.setOperandsToPromote( extractFromI64ArrayAttr(getOperandsToPromote())); - if (getUseFullTilesByDefault()) + if (isUseFullTilesByDefault()) promotionOptions = promotionOptions.setUseFullTileBuffersByDefault( - getUseFullTilesByDefault()); - if (getUseAlloca()) - promotionOptions = 
promotionOptions.setUseAlloca(getUseAlloca()); + isUseFullTilesByDefault()); + if (isUseAlloca()) + promotionOptions = promotionOptions.setUseAlloca(isUseAlloca()); if (!getUseFullTileBuffers().empty()) promotionOptions = promotionOptions.setUseFullTileBuffers( llvm::to_vector(getUseFullTileBuffers().getAsValueRange())); @@ -2018,15 +2018,15 @@ ControlSplitReductionFn splitFn = [&](LinalgOp) { return linalg::SplitReductionOptions{int64_t(getSplitFactor()), unsigned(getInsertSplitDimension()), - bool(getInnerParallel())}; + bool(isInnerParallel())}; }; TrackingListener listener(state, *this); IRRewriter rewriter(getContext(), &listener); rewriter.setInsertionPoint(target); FailureOr splitResult = - (getUseScalingAlgorithm()) - ? splitReductionByScaling(rewriter, target, splitFn, getUseAlloc()) - : splitReduction(rewriter, target, splitFn, getUseAlloc()); + (isUseScalingAlgorithm()) + ? splitReductionByScaling(rewriter, target, splitFn, isUseAlloc()) + : splitReduction(rewriter, target, splitFn, isUseAlloc()); if (failed(splitResult)) return emitDefaultDefiniteFailure(target); @@ -2868,12 +2868,12 @@ MLIRContext *ctx = getContext(); RewritePatternSet patterns(ctx); - patterns.add(ctx, getVectorizeNdExtract()); + patterns.add(ctx, isVectorizeNdExtract()); - if (!getDisableTransferPermutationMapLoweringPatterns()) + if (!isDisableTransferPermutationMapLoweringPatterns()) vector::populateVectorTransferPermutationMapLoweringPatterns(patterns); - if (!getDisableMultiReductionToContractPatterns()) + if (!isDisableMultiReductionToContractPatterns()) vector::populateVectorReductionToContractPatterns(patterns); patterns.add(ctx); - if (getVectorizePadding()) + if (isVectorizePadding()) linalg::populatePadOpVectorizationPatterns(patterns); TrackingListener listener(state, *this); @@ -2967,7 +2967,7 @@ } if (failed(linalg::vectorize(rewriter, linalgOp, vectorSizes, - getVectorizeNdExtract()))) { + isVectorizeNdExtract()))) { return mlir::emitSilenceableFailure(target->getLoc()) << "failed to vectorize linalg op"; } diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp @@ -301,7 +301,7 @@ // If the `padOp` has a nofold attribute and all paddings are known to be 0, // explicitly insert a `linalg.copy`. - if (padOp.getNofoldAttr() && + if (padOp.isNofoldAttr() && llvm::all_of(padOp.getMixedLowPad(), isZeroIndex) && llvm::all_of(padOp.getMixedHighPad(), isZeroIndex)) { using bufferization::AllocTensorOp; diff --git a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp @@ -726,7 +726,7 @@ auto newPadOp = rewriter.create( loc, /*result=*/Type(), unpackOp.getSource(), lowPad, highPad, - paddingVal, padOp.getNofold()); + paddingVal, padOp.isNofold()); // Inject the tensor.unpack right after the packed padOp. 
Value outputUnPack = rewriter.create( diff --git a/mlir/lib/Dialect/MLProgram/IR/MLProgramOps.cpp b/mlir/lib/Dialect/MLProgram/IR/MLProgramOps.cpp --- a/mlir/lib/Dialect/MLProgram/IR/MLProgramOps.cpp +++ b/mlir/lib/Dialect/MLProgram/IR/MLProgramOps.cpp @@ -168,7 +168,7 @@ //===----------------------------------------------------------------------===// LogicalResult GlobalOp::verify() { - if (!getIsMutable() && !getValue()) + if (!isGlobalMutable() && !getValue()) return emitOpError() << "immutable global must have an initial value"; return success(); } @@ -212,7 +212,7 @@ if (!referrent) return emitOpError() << "undefined global: " << getGlobal(); - if (referrent.getIsMutable()) + if (referrent.isGlobalMutable()) return emitOpError() << "cannot load as const from mutable global " << getGlobal(); @@ -263,7 +263,7 @@ if (!referrent) return emitOpError() << "undefined global: " << getGlobal(); - if (!referrent.getIsMutable()) { + if (!referrent.isGlobalMutable()) { return emitOpError() << "cannot store to an immutable global " << getGlobal(); } @@ -292,7 +292,7 @@ if (!referrent) return emitOpError() << "undefined global: " << getGlobal(); - if (!referrent.getIsMutable()) { + if (!referrent.isGlobalMutable()) { return emitOpError() << "cannot store to an immutable global " << getGlobal(); } diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -1631,7 +1631,7 @@ ElementsAttr GlobalOp::getConstantInitValue() { auto initVal = getInitialValue(); - if (getConstant() && initVal.has_value()) + if (isConstant() && initVal.has_value()) return llvm::cast(initVal.value()); return {}; } diff --git a/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp b/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp --- a/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp +++ b/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp @@ -57,7 +57,7 @@ } auto newBuffer = - memref::multiBuffer(rewriter, target, getFactor(), getSkipAnalysis()); + memref::multiBuffer(rewriter, target, getFactor(), isSkipAnalysis()); if (failed(newBuffer)) { LLVM_DEBUG(DBGS() << "--op failed to multibuffer\n";); diff --git a/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp b/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp --- a/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp @@ -404,7 +404,7 @@ .Case([&](gpu::SubgroupMmaLoadMatrixOp op) { rewriter.replaceOpWithNewOp( op, op.getType(), subViewOp.getSource(), sourceIndices, - op.getLeadDimension(), op.getTransposeAttr()); + op.getLeadDimension(), op.isTransposeAttr()); }) .Default([](Operation *) { llvm_unreachable("unexpected operation."); }); return success(); @@ -528,7 +528,7 @@ .Case([&](gpu::SubgroupMmaStoreMatrixOp op) { rewriter.replaceOpWithNewOp( op, op.getSrc(), subViewOp.getSource(), sourceIndices, - op.getLeadDimension(), op.getTransposeAttr()); + op.getLeadDimension(), op.isTransposeAttr()); }) .Default([](Operation *) { llvm_unreachable("unexpected operation."); }); return success(); @@ -650,7 +650,7 @@ foldedDstIndices, (srcSubViewOp ? 
srcSubViewOp.getSource() : copyOp.getSrc()), foldedSrcIndices, copyOp.getDstElements(), copyOp.getSrcElements(), - copyOp.getBypassL1Attr()); + copyOp.isBypassL1Attr()); return success(); } diff --git a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp --- a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp +++ b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp @@ -604,16 +604,15 @@ LogicalResult acc::LoopOp::verify() { // auto, independent and seq attribute are mutually exclusive. - if ((getAuto_() && (getIndependent() || getSeq())) || - (getIndependent() && getSeq())) { + if ((isAuto_() && (isIndependent() || isSeq())) || + (isIndependent() && isSeq())) { return emitError() << "only one of \"" << acc::LoopOp::getAutoAttrStrName() << "\", " << getIndependentAttrName() << ", " - << getSeqAttrName() - << " can be present at the same time"; + << getSeqAttrName() << " can be present at the same time"; } // Gang, worker and vector are incompatible with seq. - if (getSeq() && (getHasGang() || getHasWorker() || getHasVector())) + if (isSeq() && (isHasGang() || isHasWorker() || isHasVector())) return emitError("gang, worker or vector cannot appear with the seq attr"); // Check non-empty body(). @@ -667,12 +666,12 @@ // The async attribute represent the async clause without value. Therefore the // attribute and operand cannot appear at the same time. - if (getAsyncOperand() && getAsync()) + if (getAsyncOperand() && isAsync()) return emitError("async attribute cannot appear with asyncOperand"); // The wait attribute represent the wait clause without values. Therefore the // attribute and operands cannot appear at the same time. - if (!getWaitOperands().empty() && getWait()) + if (!getWaitOperands().empty() && isWait()) return emitError("wait attribute cannot appear with waitOperands"); if (getWaitDevnum() && getWaitOperands().empty()) @@ -711,12 +710,12 @@ // The async attribute represent the async clause without value. Therefore the // attribute and operand cannot appear at the same time. - if (getAsyncOperand() && getAsync()) + if (getAsyncOperand() && isAsync()) return emitError("async attribute cannot appear with asyncOperand"); // The wait attribute represent the wait clause without values. Therefore the // attribute and operands cannot appear at the same time. - if (!getWaitOperands().empty() && getWait()) + if (!getWaitOperands().empty() && isWait()) return emitError("wait attribute cannot appear with waitOperands"); if (getWaitDevnum() && getWaitOperands().empty()) @@ -781,12 +780,12 @@ // The async attribute represent the async clause without value. Therefore the // attribute and operand cannot appear at the same time. - if (getAsyncOperand() && getAsync()) + if (getAsyncOperand() && isAsync()) return emitError("async attribute cannot appear with asyncOperand"); // The wait attribute represent the wait clause without values. Therefore the // attribute and operands cannot appear at the same time. - if (!getWaitOperands().empty() && getWait()) + if (!getWaitOperands().empty() && isWait()) return emitError("wait attribute cannot appear with waitOperands"); if (getWaitDevnum() && getWaitOperands().empty()) @@ -825,7 +824,7 @@ LogicalResult acc::WaitOp::verify() { // The async attribute represent the async clause without value. Therefore the // attribute and operand cannot appear at the same time. 
- if (getAsyncOperand() && getAsync()) + if (getAsyncOperand() && isAsync()) return emitError("async attribute cannot appear with asyncOperand"); if (getWaitDevnum() && getWaitOperands().empty()) diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -1084,7 +1084,7 @@ getInReductionVars()))) return failure(); - if (!getReductionVars().empty() && getNogroup()) + if (!getReductionVars().empty() && isNogroup()) return emitError("if a reduction clause is present on the taskloop " "directive, the nogroup clause must not be specified"); for (auto var : getReductionVars()) { @@ -1166,7 +1166,7 @@ LogicalResult OrderedRegionOp::verify() { // TODO: The code generation for ordered simd directive is not supported yet. - if (getSimd()) + if (isSimd()) return failure(); if (auto container = (*this)->getParentOfType()) { @@ -1388,7 +1388,7 @@ return emitOpError() << "cancel loop must appear " << "inside a worksharing-loop region"; } - if (cast(parentOp).getNowaitAttr()) { + if (cast(parentOp).isNowaitAttr()) { return emitError() << "A worksharing construct that is canceled " << "must not have a nowait clause"; } @@ -1403,7 +1403,7 @@ << "inside a sections region"; } if (isa_and_nonnull(parentOp->getParentOp()) && - cast(parentOp->getParentOp()).getNowaitAttr()) { + cast(parentOp->getParentOp()).isNowaitAttr()) { return emitError() << "A sections construct that is canceled " << "must not have a nowait clause"; } diff --git a/mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp b/mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp --- a/mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp +++ b/mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp @@ -48,7 +48,7 @@ //===----------------------------------------------------------------------===// LogicalResult CreateOperationOp::verify() { - if (!getInferredResultTypes()) + if (!isInferredResultTypes()) return success(); if (!getInputResultTypes().empty()) { return emitOpError("with inferred results cannot also have " diff --git a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp --- a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp +++ b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp @@ -274,11 +274,11 @@ rewriter.setInsertionPoint(ifOp); Region ®ion = - getTakeElseBranch() ? ifOp.getElseRegion() : ifOp.getThenRegion(); + isTakeElseBranch() ? ifOp.getElseRegion() : ifOp.getThenRegion(); if (!llvm::hasSingleElement(region)) { return emitDefiniteFailure() << "requires an scf.if op with a single-block " - << ((getTakeElseBranch()) ? "`else`" : "`then`") << " region"; + << ((isTakeElseBranch()) ? 
"`else`" : "`then`") << " region"; } replaceOpWithRegion(rewriter, ifOp, region); return DiagnosedSilenceableFailure::success(); diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp --- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp +++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp @@ -881,7 +881,7 @@ if (!left.empty()) { RETURN_FAILURE_IF_FAILED( verifyNumBlockArgs(this, left, "left", TypeRange{leftType}, outputType)) - } else if (getLeftIdentity()) { + } else if (isLeftIdentity()) { if (leftType != outputType) return emitError("left=identity requires first argument to have the same " "type as the output"); @@ -889,7 +889,7 @@ if (!right.empty()) { RETURN_FAILURE_IF_FAILED(verifyNumBlockArgs( this, right, "right", TypeRange{rightType}, outputType)) - } else if (getRightIdentity()) { + } else if (isRightIdentity()) { if (rightType != outputType) return emitError("right=identity requires second argument to have the " "same type as the output"); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp @@ -1307,7 +1307,7 @@ auto nValue = dyn_cast_or_null(n.getDefiningOp()); bool nIsOne = (nValue && nValue.value() == 1); - if (!op.getInbounds()) { + if (!op.isInbounds()) { Value cond = rewriter.create( loc, arith::CmpIPredicate::ugt, newSize, capacity); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp @@ -825,7 +825,7 @@ // Prepare descriptor. auto desc = getDescriptorFromTensorTuple(adaptor.getTensor()); // Generate optional insertion finalization code. - if (op.getHasInserts()) + if (op.isHasInserts()) genEndInsert(rewriter, op.getLoc(), desc); // Replace operation with resulting memrefs. rewriter.replaceOp(op, genTuple(rewriter, op.getLoc(), desc)); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp @@ -1129,7 +1129,7 @@ LogicalResult matchAndRewrite(LoadOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - if (op.getHasInserts()) { + if (op.isHasInserts()) { // Finalize any pending insertions. 
StringRef name = "endInsert"; createFuncCall(rewriter, op->getLoc(), name, {}, adaptor.getOperands(), diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp --- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp +++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp @@ -1030,8 +1030,8 @@ Block &rightBlock = rightRegion.front(); rightYield = rightBlock.getTerminator(); } - bool includeLeft = binop.getLeftIdentity() || !leftRegion.empty(); - bool includeRight = binop.getRightIdentity() || !rightRegion.empty(); + bool includeLeft = binop.isLeftIdentity() || !leftRegion.empty(); + bool includeRight = binop.isRightIdentity() || !rightRegion.empty(); return combiSet(TensorExp::Kind::kBinary, child0, child1, binop, includeLeft, TensorExp::Kind::kBinaryBranch, leftYield, includeRight, TensorExp::Kind::kBinaryBranch, rightYield); @@ -1262,9 +1262,9 @@ return addExp(TensorExp::Kind::kShlI, e0, e1); if (auto binop = dyn_cast(def)) { if (isAdmissibleBranch(binop, binop.getOverlapRegion()) && - (binop.getLeftIdentity() || + (binop.isLeftIdentity() || isAdmissibleBranch(binop, binop.getLeftRegion())) && - (binop.getRightIdentity() || + (binop.isRightIdentity() || isAdmissibleBranch(binop, binop.getRightRegion()))) return addExp(TensorExp::Kind::kBinary, e0, e1, def); } diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp --- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp @@ -2678,7 +2678,7 @@ PatternRewriter &rewriter) const override { if (!padTensorOp.hasZeroLowPad() || !padTensorOp.hasZeroHighPad()) return failure(); - if (padTensorOp.getNofold()) + if (padTensorOp.isNofold()) return failure(); rewriter.replaceOpWithNewOp( padTensorOp, padTensorOp.getResult().getType(), @@ -2710,7 +2710,7 @@ auto newOp = rewriter.create( padTensorOp->getLoc(), newResultType, padTensorOp.getSource(), padTensorOp.getStaticLow(), padTensorOp.getStaticHigh(), - padTensorOp.getLow(), padTensorOp.getHigh(), padTensorOp.getNofold(), + padTensorOp.getLow(), padTensorOp.getHigh(), padTensorOp.isNofold(), getPrunedAttributeList(padTensorOp, PadOp::getAttributeNames())); IRMapping mapper; padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper); @@ -2743,7 +2743,7 @@ padTensorOp.getLoc(), tensorCastOp.getDest().getType(), padTensorOp.getSource(), padTensorOp.getStaticLow(), padTensorOp.getStaticHigh(), padTensorOp.getLow(), - padTensorOp.getHigh(), padTensorOp.getNofold(), + padTensorOp.getHigh(), padTensorOp.isNofold(), getPrunedAttributeList(padTensorOp, PadOp::getAttributeNames())); replacementOp.getRegion().takeBody(padTensorOp.getRegion()); @@ -2797,7 +2797,7 @@ if (!innerSliceOp) return failure(); auto outerPadOp = innerSliceOp.getSource().getDefiningOp(); - if (!outerPadOp || outerPadOp.getNofold()) + if (!outerPadOp || outerPadOp.isNofold()) return failure(); auto outerSliceOp = outerPadOp.getSource().getDefiningOp(); if (!outerSliceOp) @@ -2902,7 +2902,7 @@ innerSliceOp.getMixedStrides()); auto newPadOp = rewriter.create( padOp.getLoc(), padOp.getResultType(), newSliceOp.getResult(), - padOp.getMixedLowPad(), newHighPad, padOp.getNofold(), + padOp.getMixedLowPad(), newHighPad, padOp.isNofold(), getPrunedAttributeList(padOp, PadOp::getAttributeNames())); rewriter.inlineRegionBefore(padOp.getRegion(), newPadOp.getRegion(), newPadOp.getRegion().begin()); @@ -2995,7 +2995,7 @@ newOutDims, padTensorOp.getType().getElementType()); auto newOp = rewriter.create( padTensorOp->getLoc(), 
newResultType, input, staticLow, staticHigh, - padTensorOp.getLow(), padTensorOp.getHigh(), padTensorOp.getNofold(), + padTensorOp.getLow(), padTensorOp.getHigh(), padTensorOp.isNofold(), getPrunedAttributeList(padTensorOp, PadOp::getAttributeNames())); IRMapping mapper; @@ -3041,7 +3041,7 @@ OpFoldResult PadOp::fold(FoldAdaptor) { if (getResultType().hasStaticShape() && getResultType() == getSourceType() && - !getNofold()) + !isNofold()) return getSource(); return {}; } @@ -3144,7 +3144,7 @@ "scatter", "dest"))) return failure(); - if (!getUnique()) + if (!isUnique()) return emitOpError("requires 'unique' attribute to be set"); // TODO: we could also check statically that there are fewer leading index // tensor dims than the dest dims. If this is not the case, the unique diff --git a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp --- a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp @@ -619,7 +619,7 @@ loc, padOp.getSource(), newOffsets, newLengths, newStrides); auto newPadOp = b.create( loc, Type(), newSliceOp, newLows, newHighs, - /*nofold=*/padOp.getNofold(), + /*nofold=*/padOp.isNofold(), getPrunedAttributeList(padOp, PadOp::getAttributeNames())); // Copy region to new PadOp. diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp --- a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp @@ -29,7 +29,7 @@ PatternRewriter &rewriter) const override { auto padOp = packOp.getSource().getDefiningOp(); - if (!padOp || padOp.getNofold() || !padOp.hasZeroLowPad()) + if (!padOp || padOp.isNofold() || !padOp.hasZeroLowPad()) return failure(); Value constantPaddingValue = padOp.getConstantPaddingValue(); diff --git a/mlir/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp b/mlir/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp --- a/mlir/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp @@ -66,7 +66,7 @@ // Create a new tensor::PadOp. auto newPadOp = b.create( loc, padOp.getResultType(), padOp.getSource(), newMixedLow, newMixedHigh, - constantPadding, padOp.getNofold(), /*attrs=*/ArrayRef{}); + constantPadding, padOp.isNofold(), /*attrs=*/ArrayRef{}); // Create a tensor::ExtractSliceOp. // Reify the result sizes of the old tensor::PadOp. diff --git a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp --- a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp +++ b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp @@ -1274,7 +1274,7 @@ SmallVector operations; for (Value operand : getHandles()) llvm::append_range(operations, state.getPayloadOps(operand)); - if (!getDeduplicate()) { + if (!isDeduplicate()) { results.set(llvm::cast(getResult()), operations); return DiagnosedSilenceableFailure::success(); } @@ -1286,7 +1286,7 @@ bool transform::MergeHandlesOp::allowsRepeatedHandleOperands() { // Handles may be the same if deduplicating is enabled. 
- return getDeduplicate(); + return isDeduplicate(); } void transform::MergeHandlesOp::getEffects( @@ -1299,7 +1299,7 @@ } OpFoldResult transform::MergeHandlesOp::fold(FoldAdaptor adaptor) { - if (getDeduplicate() || getHandles().size() != 1) + if (isDeduplicate() || getHandles().size() != 1) return {}; // If deduplication is not required and there is only one operand, it can be diff --git a/mlir/lib/Rewrite/ByteCode.cpp b/mlir/lib/Rewrite/ByteCode.cpp --- a/mlir/lib/Rewrite/ByteCode.cpp +++ b/mlir/lib/Rewrite/ByteCode.cpp @@ -819,7 +819,7 @@ void Generator::generate(pdl_interp::CheckOperandCountOp op, ByteCodeWriter &writer) { writer.append(OpCode::CheckOperandCount, op.getInputOp(), op.getCount(), - static_cast(op.getCompareAtLeast()), + static_cast(op.isCompareAtLeast()), op.getSuccessors()); } void Generator::generate(pdl_interp::CheckOperationNameOp op, @@ -830,7 +830,7 @@ void Generator::generate(pdl_interp::CheckResultCountOp op, ByteCodeWriter &writer) { writer.append(OpCode::CheckResultCount, op.getInputOp(), op.getCount(), - static_cast(op.getCompareAtLeast()), + static_cast(op.isCompareAtLeast()), op.getSuccessors()); } void Generator::generate(pdl_interp::CheckTypeOp op, ByteCodeWriter &writer) { @@ -864,7 +864,7 @@ // Add the result types. If the operation has inferred results, we use a // marker "size" value. Otherwise, we add the list of explicit result types. - if (op.getInferredResultTypes()) + if (op.isInferredResultTypes()) writer.append(kInferTypesMarker); else writer.appendPDLValueList(op.getInputResultTypes()); diff --git a/mlir/lib/TableGen/Operator.cpp b/mlir/lib/TableGen/Operator.cpp --- a/mlir/lib/TableGen/Operator.cpp +++ b/mlir/lib/TableGen/Operator.cpp @@ -845,6 +845,26 @@ return "get" + convertToCamelFromSnakeCase(name, /*capitalizeFirst=*/true); } +std::string Operator::getGetterNameUnitAttr(StringRef name) const { + std::string partialName = + convertToCamelFromSnakeCase(name, + /*capitalizeFirst=*/true); + for (auto &namedAttr : getAttributes()) { + if (namedAttr.name == name && + namedAttr.attr.getBaseAttr().getAttrDefName() == "UnitAttr") { + return "is" + partialName; + } + } + return "get" + partialName; +} + +std::string Operator::getGetterNameAttr(const NamedAttribute &attr) const { + std::string name = convertToCamelFromSnakeCase(attr.name, + /*capitalizeFirst=*/true); + return attr.attr.getBaseAttr().getAttrDefName() == "UnitAttr" ? 
"is" + name + : "get" + name; +} + std::string Operator::getSetterName(StringRef name) const { return "set" + convertToCamelFromSnakeCase(name, /*capitalizeFirst=*/true); } diff --git a/mlir/lib/Target/Cpp/TranslateToCpp.cpp b/mlir/lib/Target/Cpp/TranslateToCpp.cpp --- a/mlir/lib/Target/Cpp/TranslateToCpp.cpp +++ b/mlir/lib/Target/Cpp/TranslateToCpp.cpp @@ -405,7 +405,7 @@ raw_ostream &os = emitter.ostream(); os << "#include "; - if (includeOp.getIsStandardInclude()) + if (includeOp.isStandardInclude()) os << "<" << includeOp.getInclude() << ">"; else os << "\"" << includeOp.getInclude() << "\""; diff --git a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp --- a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp @@ -214,15 +214,13 @@ static_cast( moduleTranslation.convertType(ft)), inlineAsmOp.getAsmString(), inlineAsmOp.getConstraints(), - inlineAsmOp.getHasSideEffects(), - inlineAsmOp.getIsAlignStack(), + inlineAsmOp.isHasSideEffects(), inlineAsmOp.isAlignStack(), convertAsmDialectToLLVM(*inlineAsmOp.getAsmDialect())) - : llvm::InlineAsm::get(static_cast( - moduleTranslation.convertType(ft)), - inlineAsmOp.getAsmString(), - inlineAsmOp.getConstraints(), - inlineAsmOp.getHasSideEffects(), - inlineAsmOp.getIsAlignStack()); + : llvm::InlineAsm::get( + static_cast( + moduleTranslation.convertType(ft)), + inlineAsmOp.getAsmString(), inlineAsmOp.getConstraints(), + inlineAsmOp.isHasSideEffects(), inlineAsmOp.isAlignStack()); llvm::CallInst *inst = builder.CreateCall( inlineAsmInst, moduleTranslation.lookupValues(inlineAsmOp.getOperands())); @@ -286,7 +284,7 @@ llvm::Type *ty = moduleTranslation.convertType(lpOp.getType()); llvm::LandingPadInst *lpi = builder.CreateLandingPad(ty, lpOp.getNumOperands()); - lpi->setCleanup(lpOp.getCleanup()); + lpi->setCleanup(lpOp.isCleanup()); // Add clauses for (llvm::Value *operand : diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp --- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp @@ -571,7 +571,7 @@ auto orderedRegionOp = cast(opInst); // TODO: The code generation for ordered simd directive is not supported yet. 
- if (orderedRegionOp.getSimd()) + if (orderedRegionOp.isSimd()) return failure(); // TODO: support error propagation in OpenMPIRBuilder and use it instead of @@ -593,7 +593,7 @@ llvm::OpenMPIRBuilder::LocationDescription ompLoc(builder); builder.restoreIP( moduleTranslation.getOpenMPBuilder()->createOrderedThreadsSimd( - ompLoc, bodyGenCB, finiCB, !orderedRegionOp.getSimd())); + ompLoc, bodyGenCB, finiCB, !orderedRegionOp.isSimd())); return bodyGenStatus; } @@ -660,7 +660,7 @@ llvm::OpenMPIRBuilder::LocationDescription ompLoc(builder); builder.restoreIP(moduleTranslation.getOpenMPBuilder()->createSections( ompLoc, allocaIP, sectionCBs, privCB, finiCB, false, - sectionsOp.getNowait())); + sectionsOp.isNowait())); return bodyGenStatus; } @@ -678,7 +678,7 @@ }; auto finiCB = [&](InsertPointTy codeGenIP) {}; builder.restoreIP(moduleTranslation.getOpenMPBuilder()->createSingle( - ompLoc, bodyCB, finiCB, singleOp.getNowait(), /*DidIt=*/nullptr)); + ompLoc, bodyCB, finiCB, singleOp.isNowait(), /*DidIt=*/nullptr)); return bodyGenStatus; } @@ -688,8 +688,8 @@ LLVM::ModuleTranslation &moduleTranslation) { using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; LogicalResult bodyGenStatus = success(); - if (taskOp.getIfExpr() || taskOp.getFinalExpr() || taskOp.getUntiedAttr() || - taskOp.getMergeableAttr() || taskOp.getInReductions() || + if (taskOp.getIfExpr() || taskOp.getFinalExpr() || taskOp.isUntiedAttr() || + taskOp.isMergeableAttr() || taskOp.getInReductions() || taskOp.getPriority() || !taskOp.getAllocateVars().empty()) { return taskOp.emitError("unhandled clauses for translation to LLVM IR"); } @@ -727,7 +727,7 @@ findAllocaInsertPoint(builder, moduleTranslation); llvm::OpenMPIRBuilder::LocationDescription ompLoc(builder); builder.restoreIP(moduleTranslation.getOpenMPBuilder()->createTask( - ompLoc, allocaIP, bodyCB, !taskOp.getUntied(), /*Final*/ nullptr, + ompLoc, allocaIP, bodyCB, !taskOp.isUntied(), /*Final*/ nullptr, /*IfCondition*/ nullptr, dds)); return bodyGenStatus; } @@ -868,7 +868,7 @@ } loopInfos.push_back(ompBuilder->createCanonicalLoop( loc, bodyGen, lowerBound, upperBound, step, - /*IsSigned=*/true, loop.getInclusive(), computeIP)); + /*IsSigned=*/true, loop.isInclusive(), computeIP)); if (failed(bodyGenStatus)) return failure(); @@ -886,10 +886,10 @@ bool isOrdered = loop.getOrderedVal().has_value(); std::optional scheduleModifier = loop.getScheduleModifier(); - bool isSimd = loop.getSimdModifier(); + bool isSimd = loop.isSimdModifier(); ompBuilder->applyWorkshareLoop( - ompLoc.DL, loopInfo, allocaIP, !loop.getNowait(), + ompLoc.DL, loopInfo, allocaIP, !loop.isNowait(), convertToScheduleKind(schedule), chunk, isSimd, scheduleModifier == omp::ScheduleModifier::monotonic, scheduleModifier == omp::ScheduleModifier::nonmonotonic, isOrdered); @@ -936,7 +936,7 @@ builder.SetInsertPoint(tempTerminator); llvm::OpenMPIRBuilder::InsertPointTy contInsertPoint = ompBuilder->createReductions(builder.saveIP(), allocaIP, reductionInfos, - loop.getNowait()); + loop.isNowait()); if (!contInsertPoint.getBlock()) return loop->emitOpError() << "failed to convert reductions"; auto nextInsertionPoint = @@ -1454,7 +1454,7 @@ return success(); }) .Case([&](omp::EnterDataOp enterDataOp) { - if (enterDataOp.getNowait()) + if (enterDataOp.isNowait()) return failure(); if (auto ifExprVar = enterDataOp.getIfExpr()) @@ -1473,7 +1473,7 @@ return success(); }) .Case([&](omp::ExitDataOp exitDataOp) { - if (exitDataOp.getNowait()) + if (exitDataOp.isNowait()) return failure(); if (auto ifExprVar = 
exitDataOp.getIfExpr()) @@ -1614,7 +1614,7 @@ return false; } - if (targetOp.getNowait()) { + if (targetOp.isNowait()) { opInst.emitError("Nowait clause not yet supported"); return false; } diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp --- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp @@ -708,10 +708,10 @@ cst = nullptr; auto *var = new llvm::GlobalVariable( - *llvmModule, type, op.getConstant(), linkage, cst, op.getSymName(), + *llvmModule, type, op.isConstant(), linkage, cst, op.getSymName(), /*InsertBefore=*/nullptr, - op.getThreadLocal_() ? llvm::GlobalValue::GeneralDynamicTLSModel - : llvm::GlobalValue::NotThreadLocal, + op.isThreadLocal_() ? llvm::GlobalValue::GeneralDynamicTLSModel + : llvm::GlobalValue::NotThreadLocal, addrSpace); if (op.getUnnamedAddr().has_value()) @@ -720,7 +720,7 @@ if (op.getSection().has_value()) var->setSection(*op.getSection()); - addRuntimePreemptionSpecifier(op.getDsoLocal(), var); + addRuntimePreemptionSpecifier(op.isDsoLocal(), var); std::optional alignment = op.getAlignment(); if (alignment.has_value()) @@ -989,7 +989,7 @@ llvmFunc->setLinkage(convertLinkageToLLVM(function.getLinkage())); llvmFunc->setCallingConv(convertCConvToLLVM(function.getCConv())); mapFunction(function.getName(), llvmFunc); - addRuntimePreemptionSpecifier(function.getDsoLocal(), llvmFunc); + addRuntimePreemptionSpecifier(function.isDsoLocal(), llvmFunc); // Convert function attributes. convertFunctionAttributes(function, llvmFunc); @@ -1185,7 +1185,7 @@ llvm::MDNode::getTemporary(ctx, std::nullopt).release()); operands.push_back(llvm::ConstantAsMetadata::get( llvm::ConstantInt::get(offsetTy, tagOp.getOffset()))); - if (tagOp.getConstant()) + if (tagOp.isConstant()) operands.push_back(llvm::ConstantAsMetadata::get( llvm::ConstantInt::get(offsetTy, 1))); } diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir --- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir +++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir @@ -464,8 +464,8 @@ // CHECK-NEXT: llvm.inline_asm has_side_effects {{.*}} (i32, i32) -> i8 %2 = llvm.inline_asm has_side_effects "foo", "bar" %arg0, %arg0 : (i32, i32) -> i8 - // CHECK-NEXT: llvm.inline_asm is_align_stack {{.*}} (i32, i32) -> i8 - %3 = llvm.inline_asm is_align_stack "foo", "bar" %arg0, %arg0 : (i32, i32) -> i8 + // CHECK-NEXT: llvm.inline_asm align_stack {{.*}} (i32, i32) -> i8 + %3 = llvm.inline_asm align_stack "foo", "bar" %arg0, %arg0 : (i32, i32) -> i8 // CHECK-NEXT: llvm.inline_asm "foo", "=r,=r,r" {{.*}} : (i32) -> !llvm.struct<(i8, i8)> %5 = llvm.inline_asm "foo", "=r,=r,r" %arg0 : (i32) -> !llvm.struct<(i8, i8)> diff --git a/mlir/test/Dialect/Linalg/match-ops-invalid.mlir b/mlir/test/Dialect/Linalg/match-ops-invalid.mlir --- a/mlir/test/Dialect/Linalg/match-ops-invalid.mlir +++ b/mlir/test/Dialect/Linalg/match-ops-invalid.mlir @@ -78,7 +78,7 @@ transform.match.structured %arg0 : !transform.any_op { ^bb1(%arg1: !transform.any_op): // expected-error @below {{cannot request both 'all' and 'inverted' values in the list}} - "transform.match.structured.dim"(%arg1) { is_all, is_inverted, raw_dim_list = array } : (!transform.any_op) -> () + "transform.match.structured.dim"(%arg1) { all, inverted, raw_dim_list = array } : (!transform.any_op) -> () transform.match.structured.yield } transform.yield @@ -91,7 +91,7 @@ transform.match.structured %arg0 : !transform.any_op { ^bb1(%arg1: !transform.any_op): // expected-error 
@below {{cannot both request 'all' and specific values in the list}} - "transform.match.structured.dim"(%arg1) { is_all, raw_dim_list = array } : (!transform.any_op) -> () + "transform.match.structured.dim"(%arg1) { all, raw_dim_list = array } : (!transform.any_op) -> () transform.match.structured.yield } transform.yield @@ -129,7 +129,7 @@ transform.match.structured %arg0 : !transform.any_op { ^bb1(%arg1: !transform.any_op): // expected-error @below {{cannot request the same dimension to be both parallel and reduction}} - "transform.match.structured.dim"(%arg1) { is_all, parallel, reduction, raw_dim_list = array } : (!transform.any_op) -> () + "transform.match.structured.dim"(%arg1) { all, parallel, reduction, raw_dim_list = array } : (!transform.any_op) -> () transform.match.structured.yield } transform.yield diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir --- a/mlir/test/Target/LLVMIR/llvmir.mlir +++ b/mlir/test/Target/LLVMIR/llvmir.mlir @@ -1892,7 +1892,7 @@ %2 = llvm.inline_asm has_side_effects "foo", "=r,r,r" %arg0, %arg0 : (i32, i32) -> i8 // CHECK-NEXT: call i8 asm alignstack "foo", "=r,r,r"(i32 {{.*}}, i32 {{.*}}) - %3 = llvm.inline_asm is_align_stack "foo", "=r,r,r" %arg0, %arg0 : (i32, i32) -> i8 + %3 = llvm.inline_asm align_stack "foo", "=r,r,r" %arg0, %arg0 : (i32, i32) -> i8 // CHECK-NEXT: call i8 asm inteldialect "foo", "=r,r,r"(i32 {{.*}}, i32 {{.*}}) %4 = llvm.inline_asm asm_dialect = "intel" "foo", "=r,r,r" %arg0, %arg0 : (i32, i32) -> i8 diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td --- a/mlir/test/lib/Dialect/Test/TestOps.td +++ b/mlir/test/lib/Dialect/Test/TestOps.td @@ -2301,8 +2301,8 @@ } def FormatOptionalWithElse : TEST_Op<"format_optional_else"> { - let arguments = (ins UnitAttr:$isFirstBranchPresent); - let assemblyFormat = "(`then` $isFirstBranchPresent^):(`else`)? attr-dict"; + let arguments = (ins UnitAttr:$firstBranchPresent); + let assemblyFormat = "(`then` $firstBranchPresent^):(`else`)? 
attr-dict"; } def FormatCompoundAttr : TEST_Op<"format_compound_attr"> { diff --git a/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.cpp b/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.cpp --- a/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.cpp +++ b/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.cpp @@ -368,7 +368,7 @@ for (Operation *op : state.getPayloadOps(getTarget())) op->erase(); - if (getFailAfterErase()) + if (isFailAfterErase()) return emitSilenceableError() << "silenceable error"; return DiagnosedSilenceableFailure::success(); } @@ -596,15 +596,15 @@ Operation *target, ::transform::ApplyToEachResultList &results, ::transform::TransformState &state) { Builder builder(getContext()); - if (getFirstResultIsParam()) { + if (isFirstResultIsParam()) { results.push_back(builder.getI64IntegerAttr(0)); - } else if (getFirstResultIsNull()) { + } else if (isFirstResultIsNull()) { results.push_back(nullptr); } else { results.push_back(*state.getPayloadOps(getIn()).begin()); } - if (getSecondResultIsHandle()) { + if (isSecondResultIsHandle()) { results.push_back(*state.getPayloadOps(getIn()).begin()); } else { results.push_back(builder.getI64IntegerAttr(42)); @@ -651,15 +651,15 @@ void mlir::test::TestRequiredMemoryEffectsOp::getEffects( SmallVectorImpl &effects) { - if (getHasOperandEffect()) + if (isHasOperandEffect()) transform::consumesHandle(getIn(), effects); - if (getHasResultEffect()) + if (isHasResultEffect()) transform::producesHandle(getOut(), effects); else transform::onlyReadsHandle(getOut(), effects); - if (getModifiesPayload()) + if (isModifiesPayload()) transform::modifiesPayload(effects); } diff --git a/mlir/test/mlir-tblgen/op-attribute.td b/mlir/test/mlir-tblgen/op-attribute.td --- a/mlir/test/mlir-tblgen/op-attribute.td +++ b/mlir/test/mlir-tblgen/op-attribute.td @@ -510,7 +510,7 @@ } // DEF-LABEL: UnitAttrOp definitions -// DEF: bool UnitAttrOp::getAttr() { +// DEF: bool UnitAttrOp::isAttr() { // DEF: return {{.*}} != nullptr // DEF: ::mlir::Attribute UnitAttrOp::removeAttrAttr() { diff --git a/mlir/test/mlir-tblgen/op-format.td b/mlir/test/mlir-tblgen/op-format.td --- a/mlir/test/mlir-tblgen/op-format.td +++ b/mlir/test/mlir-tblgen/op-format.td @@ -64,7 +64,7 @@ // CHECK-NEXT: result.addAttribute("a", parser.getBuilder().getUnitAttr()) // CHECK: parser.parseKeyword("bar") // CHECK-LABEL: OptionalGroupB::print -// CHECK: if (!getAAttr()) +// CHECK: if (!isAAttr()) // CHECK-NEXT: odsPrinter << ' ' << "foo" // CHECK-NEXT: else // CHECK-NEXT: odsPrinter << ' ' << "bar" diff --git a/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp b/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp --- a/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp +++ b/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp @@ -133,7 +133,7 @@ llvm::raw_string_ostream bs(builder); while (StringLoc loc = findNextVariable(builderStrRef)) { auto name = loc.in(builderStrRef).drop_front(); - auto getterName = op.getGetterName(name); + auto getterName = op.getGetterNameUnitAttr(name); // First, insert the non-matched part as is. bs << builderStrRef.substr(0, loc.pos); // Then, rewrite the name based on its kind. 
diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp --- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp +++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp @@ -620,7 +620,7 @@ auto &op = emitHelper.getOp(); for (const auto &namedAttr : op.getAttributes()) ctx.addSubst(namedAttr.name, - emitHelper.getOp().getGetterName(namedAttr.name) + "()"); + emitHelper.getOp().getGetterNameAttr(namedAttr) + "()"); // Populate substitutions for named operands. for (int i = 0, e = op.getNumOperands(); i < e; ++i) { @@ -816,7 +816,8 @@ body << formatv("::mlir::Attribute {0};\n", getVarName(optional->attrName)); } - body << formatv(findRequiredAttr, emitHelper.getAttrName(it.first), + body << formatv(findRequiredAttr, + emitHelper.getAttrName(it.first), emitHelper.emitErrorPrefix(), it.first); for (const AttributeMetadata *optional : optionalAttrs) { body << formatv(checkOptionalAttr, @@ -1038,6 +1039,38 @@ << ";\n"; } +template <typename OpClassOrAdaptor> +static void emitAttrGetterWithReturnTypeDeprecated( + FmtContext &fctx, OpClassOrAdaptor &opClass, const Operator &op, + StringRef name, StringRef deprecatedName, Attribute attr) { + auto *method = opClass.addMethod(attr.getReturnType(), deprecatedName); + ERROR_IF_PRUNED(method, deprecatedName, op); + std::string deprecatedMessage = std::string(deprecatedName) + + " is deprecated. Use " + std::string(name) + + " instead."; + method->setDeprecated(deprecatedMessage); + auto &body = method->body(); + body << " auto attr = " << deprecatedName << "Attr();\n"; + if (attr.hasDefaultValue() && attr.isOptional()) { + // Returns the default value if not set. + // TODO: this is inefficient, we are recreating the attribute for every + // call. This should be set instead. + if (!attr.isConstBuildable()) { + PrintFatalError("DefaultValuedAttr of type " + attr.getAttrDefName() + + " must have a constBuilder"); + } + std::string defaultValue = std::string( + tgfmt(attr.getConstBuilderTemplate(), &fctx, attr.getDefaultValue())); + body << " if (!attr)\n return " + << tgfmt(attr.getConvertFromStorageCall(), + &fctx.withSelf(defaultValue)) + << ";\n"; + } + body << " return " + << tgfmt(attr.getConvertFromStorageCall(), &fctx.withSelf("attr")) + << ";\n"; +} + void OpEmitter::genPropertiesSupport() { if (!emitHelper.hasProperties()) return; @@ -1291,7 +1324,8 @@ continue; if (canEmitAttrVerifier(attr, /*isEmittingForOp=*/false)) { - std::string name = op.getGetterName(namedAttr->attrName); + std::string name = + op.getGetterName(namedAttr->attrName); verifyInherentAttrsMethod << formatv(R"( {{ @@ -1316,6 +1350,20 @@ method->body() << " " << attr.getDerivedCodeBody() << "\n"; }; + // Emit the deprecated derived attribute body for unit attributes. + // TODO: Method will be removed 2023-06-23. + auto emitDerivedAttrDeprecated = [&](StringRef name, StringRef deprecatedName, + Attribute attr) { + if (auto *method = + opClass.addMethod(attr.getReturnType(), deprecatedName)) { + method->body() << " " << attr.getDerivedCodeBody() << "\n"; + std::string deprecatedMessage = std::string(deprecatedName) + + " is deprecated. Use " + + std::string(name) + " instead."; + method->setDeprecated(deprecatedMessage); + } + }; + // Generate named accessor with Attribute return type. This is a wrapper // class that allows referring to the attributes via accessors instead of // having to use the string interface for better compile time verification.
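The deprecated-accessor helpers above keep the old `get`-prefixed spellings compiling while steering users toward the new `is`-prefixed ones. For an op declared with `UnitAttr:$read_only`, the resulting accessor pair behaves roughly like this hand-written sketch (illustrative only, not actual mlir-tblgen output; attribute storage is modeled with `std::optional`):

```c++
#include <optional>

// Hand-written stand-in for an op declared with `UnitAttr:$read_only`;
// presence of the optional models presence of the unit attribute.
class FooOpSketch {
  std::optional<bool> readOnlyStorage;

public:
  // New spelling: a unit attribute reads as a boolean "is" query.
  bool isReadOnly() const { return readOnlyStorage.has_value(); }

  // Old spelling kept as a deprecated wrapper for the migration window,
  // mirroring what the deprecated-accessor helpers emit.
  [[deprecated("getReadOnly is deprecated. Use isReadOnly instead.")]]
  bool getReadOnly() const { return isReadOnly(); }
};
```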
@@ -1331,13 +1379,51 @@ attr.getStorageType()); }; + // Generate deprecated named accessor with Attribute return type for unit + // attributes. + // TODO: Method will be removed 2023-06-23. + auto emitAttrWithStorageTypeDeprecated = + [&](StringRef name, StringRef deprecatedName, StringRef attrName, + Attribute attr) { + auto *method = + opClass.addMethod(attr.getStorageType(), deprecatedName + "Attr"); + if (!method) + return; + std::string deprecatedMessage = std::string(deprecatedName) + + "Attr is deprecated. Use " + + std::string(name) + "Attr instead."; + method->setDeprecated(deprecatedMessage); + method->body() << formatv( + " return {0}.{1}<{2}>();", emitHelper.getAttr(attrName), + attr.isOptional() || attr.hasDefaultValue() ? "dyn_cast_or_null" + : "cast", + attr.getStorageType()); + }; + for (const NamedAttribute &namedAttr : op.getAttributes()) { - std::string name = op.getGetterName(namedAttr.name); + std::string name = op.getGetterNameAttr(namedAttr); if (namedAttr.attr.isDerivedAttr()) { emitDerivedAttr(name, namedAttr.attr); + + // Emit deprecated derived attribute for unit attributes. + // TODO: Method will be removed 2023-06-23. + if (namedAttr.attr.getBaseAttr().getAttrDefName() == "UnitAttr") { + std::string deprecatedName = op.getGetterName(namedAttr.name); + emitDerivedAttrDeprecated(name, deprecatedName, namedAttr.attr); + } } else { emitAttrWithStorageType(name, namedAttr.name, namedAttr.attr); emitAttrGetterWithReturnType(fctx, opClass, op, name, namedAttr.attr); + + // Emit deprecated attribute for unit attributes. + // TODO: Method will be removed 2023-06-23. + if (namedAttr.attr.getBaseAttr().getAttrDefName() == "UnitAttr") { + std::string deprecatedName = op.getGetterName(namedAttr.name); + emitAttrWithStorageTypeDeprecated(name, deprecatedName, namedAttr.name, + namedAttr.attr); + emitAttrGetterWithReturnTypeDeprecated(fctx, opClass, op, name, + deprecatedName, namedAttr.attr); + } } } @@ -1377,7 +1463,7 @@ std::string attrs; llvm::raw_string_ostream os(attrs); interleaveComma(nonMaterializable, os, [&](const NamedAttribute &attr) { - os << op.getGetterName(attr.name); + os << op.getGetterNameAttr(attr); }); PrintWarning( op.getLoc(), @@ -1493,10 +1579,11 @@ void OpEmitter::genOptionalAttrRemovers() { // Generate methods for removing optional attributes, instead of having to // use the string interface. Enables better compile time verification.
- auto emitRemoveAttr = [&](StringRef name, bool useProperties) { - auto upperInitial = name.take_front().upper(); - auto *method = opClass.addMethod("::mlir::Attribute", - op.getRemoverName(name) + "Attr"); + auto emitRemoveAttr = [&](const NamedAttribute &namedAttr, + bool useProperties) { + auto upperInitial = namedAttr.name.take_front().upper(); + auto *method = opClass.addMethod( + "::mlir::Attribute", op.getRemoverName(namedAttr.name) + "Attr"); if (!method) return; if (useProperties) { @@ -1505,17 +1592,16 @@ attr = {{}; return attr; )", - name); + namedAttr.name); return; } method->body() << formatv("return (*this)->removeAttr({0}AttrName());", - op.getGetterName(name)); + op.getGetterName(namedAttr.name)); }; for (const NamedAttribute &namedAttr : op.getAttributes()) if (namedAttr.attr.isOptional()) - emitRemoveAttr(namedAttr.name, - op.getDialect().usePropertiesForAttributes()); + emitRemoveAttr(namedAttr, op.getDialect().usePropertiesForAttributes()); } // Generates the code to compute the start and end index of an operand or result @@ -2268,8 +2354,8 @@ std::string resultType; const auto &namedAttr = op.getAttribute(0); - body << " auto attrName = " << op.getGetterName(namedAttr.name) - << "AttrName(" << builderOpState + body << " auto attrName = " << op.getGetterName(namedAttr.name) << "AttrName(" + << builderOpState << ".name);\n" " for (auto attr : attributes) {\n" " if (attr.getName() != attrName) continue;\n"; @@ -3663,7 +3749,7 @@ const auto &attr = namedAttr.attr; if (attr.isDerivedAttr()) continue; - std::string emitName = op.getGetterName(name); + std::string emitName = op.getGetterNameAttr(namedAttr); emitAttrWithStorageType(name, emitName, attr); emitAttrGetterWithReturnType(fctx, genericAdaptorBase, op, emitName, attr); } diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp --- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp +++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp @@ -1778,8 +1778,8 @@ tgfmt(attr.getConstBuilderTemplate(), &fctx, attr.getDefaultValue())); body << " {\n"; body << " ::mlir::Builder odsBuilder(getContext());\n"; - body << " ::mlir::Attribute attr = " << op.getGetterName(name) - << "Attr();\n"; + body << " ::mlir::Attribute attr = " + << op.getGetterNameAttr(namedAttr) << "Attr();\n"; body << " if(attr && (attr == " << defaultValue << "))\n"; body << " elidedAttrs.push_back(\"" << name << "\");\n"; body << " }\n"; @@ -1826,7 +1826,7 @@ const Operator &op, MethodBody &body) { if (auto *attr = dyn_cast(element)) { - body << op.getGetterName(attr->getVar()->name) << "Attr()"; + body << op.getGetterNameAttr(*attr->getVar()) << "Attr()"; } else if (isa(element)) { body << "getOperation()->getAttrDictionary()"; @@ -1931,7 +1931,7 @@ body << llvm::formatv(enumAttrBeginPrinterCode, (var->attr.isOptional() ? "*" : "") + - op.getGetterName(var->name), + op.getGetterNameAttr(*var), enumAttr.getSymbolToStringFnName()); // Get a string containing all of the cases that can't be represented with a @@ -2008,7 +2008,7 @@ }) .Case([&](AttributeVariable *element) { Attribute attr = element->getVar()->attr; - body << op.getGetterName(element->getVar()->name) << "Attr()"; + body << op.getGetterNameAttr(*element->getVar()) << "Attr()"; if (attr.isOptional()) return; // done if (attr.hasDefaultValue()) { @@ -2016,7 +2016,7 @@ // default value. 
FmtContext fctx; fctx.withBuilder("::mlir::OpBuilder((*this)->getContext())"); - body << " && " << op.getGetterName(element->getVar()->name) + body << " && " << op.getGetterNameAttr(*element->getVar()) << "Attr() != " << tgfmt(attr.getConstBuilderTemplate(), &fctx, attr.getDefaultValue()); @@ -2130,7 +2130,7 @@ for (VariableElement *var : vars) { TypeSwitch(var) .Case([&](AttributeVariable *attrEle) { - body << " || " << op.getGetterName(attrEle->getVar()->name) + body << " || " << op.getGetterNameAttr(*attrEle->getVar()) << "Attr()"; }) .Case([&](OperandVariable *ele) { @@ -2197,7 +2197,7 @@ // If we are formatting as a symbol name, handle it as a symbol name. if (shouldFormatSymbolNameAttr(var)) { - body << " _odsPrinter.printSymbolName(" << op.getGetterName(var->name) + body << " _odsPrinter.printSymbolName(" << op.getGetterNameAttr(*var) << "Attr().getValue());\n"; return; } @@ -2205,14 +2205,14 @@ // Elide the attribute type if it is buildable. if (attr->getTypeBuilder()) body << " _odsPrinter.printAttributeWithoutType(" - << op.getGetterName(var->name) << "Attr());\n"; + << op.getGetterNameAttr(*var) << "Attr());\n"; else if (attr->shouldBeQualified() || var->attr.getStorageType() == "::mlir::Attribute") - body << " _odsPrinter.printAttribute(" << op.getGetterName(var->name) + body << " _odsPrinter.printAttribute(" << op.getGetterNameAttr(*var) << "Attr());\n"; else body << "_odsPrinter.printStrippedAttrOrType(" - << op.getGetterName(var->name) << "Attr());\n"; + << op.getGetterNameAttr(*var) << "Attr());\n"; } else if (auto *operand = dyn_cast(element)) { if (operand->getVar()->isVariadicOfVariadic()) { body << " ::llvm::interleaveComma(" diff --git a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp --- a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp +++ b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp @@ -1365,7 +1365,7 @@ os << formatv(" {0}::{1} tblgen_attrVal = this->{2}() & " "static_cast<{0}::{1}>(1 << i);\n", enumAttr.getCppNamespace(), enumAttr.getEnumClassName(), - srcOp.getGetterName(namedAttr.name)); + srcOp.getGetterNameAttr(namedAttr)); os << formatv( " if (static_cast<{0}>(tblgen_attrVal) == 0) continue;\n", enumAttr.getUnderlyingType()); @@ -1373,7 +1373,7 @@ // For IntEnumAttr, we just need to query the value as a whole. os << " {\n"; os << formatv(" auto tblgen_attrVal = this->{0}();\n", - srcOp.getGetterName(namedAttr.name)); + srcOp.getGetterNameAttr(namedAttr)); } os << formatv(" auto tblgen_instance = {0}::{1}(tblgen_attrVal);\n", enumAttr.getCppNamespace(), avail.getQueryFnName());
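At call sites the migration is mechanical: every boolean query of a unit attribute switches from the `get` spelling to the `is` spelling, exactly as in the translation and test updates above, and code that keeps the old spelling still builds but emits a deprecation diagnostic during the grace period. A self-contained toy example of that behavior (all names here are invented for illustration):

```c++
#include <cstdio>

// Toy op with a unit-attribute flag; invented purely for illustration.
struct ToyLoopOp {
  bool nowaitSet = false;
  bool isNowait() const { return nowaitSet; }
  [[deprecated("getNowait is deprecated. Use isNowait instead.")]]
  bool getNowait() const { return isNowait(); }
};

int main() {
  ToyLoopOp loop;
  std::printf("nowait: %d\n", loop.isNowait()); // preferred spelling
  // loop.getNowait() still compiles, but warns under -Wdeprecated-declarations.
  return 0;
}
```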