diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp --- a/mlir/lib/Analysis/Utils.cpp +++ b/mlir/lib/Analysis/Utils.cpp @@ -564,7 +564,7 @@ for (auto id : ids) { AffineForOp iv; if ((iv = getForInductionVarOwner(id)) && - llvm::is_contained(enclosingIVs, iv) == false) { + !llvm::is_contained(enclosingIVs, iv)) { cst.projectOut(id); } } diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp @@ -1014,11 +1014,7 @@ // If both iterators didn't reach the end, we have leftover dimentions which // implies that we have a mismatch in shape. - if (currSrcDim != srcShape.size() || currDstDim != dstShape.size()) { - return false; - } - - return true; + return !(currSrcDim != srcShape.size() || currDstDim != dstShape.size()); } namespace { diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp --- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp +++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp @@ -84,9 +84,7 @@ readOp.getContext()); // TODO: Support transpose once it is added to GPU dialect ops. // For now we only support (d0, d1) -> (d0, d1) and (d0, d1) -> (0, d1). - if (!map.isMinorIdentity() && map != broadcastInnerDim) - return false; - return true; + return !(!map.isMinorIdentity() && map != broadcastInnerDim); } // Return true if the transfer op can be converted to a MMA matrix store. 
diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ComprehensiveBufferize.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ComprehensiveBufferize.cpp --- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ComprehensiveBufferize.cpp +++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ComprehensiveBufferize.cpp @@ -472,10 +472,7 @@ bool hasWrite = aliasesInPlaceWrite(opResult, aliasInfo, state) || aliasesInPlaceWrite(opOperand.get(), aliasInfo, state) || state.bufferizesToMemoryWrite(opOperand); - if (!hasWrite) - return false; - - return true; + return hasWrite; } //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/LinalgInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/LinalgInterfaceImpl.cpp --- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/LinalgInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/LinalgInterfaceImpl.cpp @@ -425,12 +425,10 @@ // TODO: Support cases such as extract_slice(init_tensor). 
SmallVector<OpOperand *> opOperands = state.getAliasingOpOperand(opResult); - if (!llvm::all_of(opOperands, [&](OpOperand *operand) { - return aliasInfo.areEquivalentBufferizedValues(operand->get(), - opResult); - })) - return true; - return false; + return !llvm::all_of(opOperands, [&](OpOperand *operand) { + return aliasInfo.areEquivalentBufferizedValues(operand->get(), + opResult); + }); }); // Replace only if the reverse use-def chain ends at exactly one diff --git a/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp @@ -543,14 +543,11 @@ if (op->hasTrait<OpTrait::FunctionLike>()) { auto &body = function_like_impl::getFunctionBody(op); return llvm::all_of(llvm::drop_begin(body, 1), [&](Block &block) { - if (llvm::any_of( - blockArgsToDetensor, [&](BlockArgument blockArgument) { - return blockArgument.getOwner() == &block && - !typeConverter.isLegal(blockArgument.getType()); - })) { - return false; - } - return true; + return !llvm::any_of( + blockArgsToDetensor, [&](BlockArgument blockArgument) { + return blockArgument.getOwner() == &block && + !typeConverter.isLegal(blockArgument.getType()); + }); }); } diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -428,10 +428,7 @@ auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace(); auto bMemSpace = (bT) ?
bT.getMemorySpace() : ubT.getMemorySpace(); - if (aMemSpace != bMemSpace) - return false; - - return true; + return aMemSpace == bMemSpace; } return false; diff --git a/mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp b/mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp --- a/mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp +++ b/mlir/lib/Dialect/StandardOps/Transforms/FuncConversions.cpp @@ -146,10 +146,7 @@ // ReturnLike operations have to be legalized with their parent. For // return this is handled, for other ops they remain as is. - if (op->hasTrait<OpTrait::ReturnLike>()) - return true; - - return false; + return op->hasTrait<OpTrait::ReturnLike>(); } bool mlir::isNotBranchOpInterfaceOrReturnLikeOp(Operation *op) { diff --git a/mlir/lib/Dialect/Vector/VectorUtils.cpp b/mlir/lib/Dialect/Vector/VectorUtils.cpp --- a/mlir/lib/Dialect/Vector/VectorUtils.cpp +++ b/mlir/lib/Dialect/Vector/VectorUtils.cpp @@ -302,11 +302,7 @@ // This could be useful information if we wanted to reshape at the level of // the vector type (but we would have to look at the compute and distinguish // between parallel, reduction and possibly other cases. - if (!ratio.hasValue()) { - return false; - } - - return true; + return ratio.hasValue(); } bool mlir::isDisjointTransferIndices(VectorTransferOpInterface transferA, diff --git a/mlir/lib/Transforms/NormalizeMemRefs.cpp b/mlir/lib/Transforms/NormalizeMemRefs.cpp --- a/mlir/lib/Transforms/NormalizeMemRefs.cpp +++ b/mlir/lib/Transforms/NormalizeMemRefs.cpp @@ -92,13 +92,11 @@ /// are satisfied will the value become a candidate for replacement. /// TODO: Extend this for DimOps.
static bool isMemRefNormalizable(Value::user_range opUsers) { - if (llvm::any_of(opUsers, [](Operation *op) { - if (op->hasTrait<OpTrait::MemRefsNormalizable>()) - return false; - return true; - })) - return false; - return true; + return !llvm::any_of(opUsers, [](Operation *op) { + if (op->hasTrait<OpTrait::MemRefsNormalizable>()) + return false; + return true; + }); } /// Set all the calling functions and the callees of the function as not diff --git a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp --- a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp +++ b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp @@ -54,8 +54,7 @@ static bool isDependentLoadOrStoreOp(Operation *op, DenseMap<Value, bool> &values) { if (auto loadOp = dyn_cast<AffineReadOpInterface>(op)) { - return values.count(loadOp.getMemRef()) > 0 && - values[loadOp.getMemRef()] == true; + return values.count(loadOp.getMemRef()) > 0 && values[loadOp.getMemRef()]; } if (auto storeOp = dyn_cast<AffineWriteOpInterface>(op)) { return values.count(storeOp.getMemRef()) > 0; diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp --- a/mlir/lib/Transforms/Utils/LoopUtils.cpp +++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp @@ -1345,9 +1345,7 @@ } return WalkResult::advance(); }); - if (walkResult.wasInterrupted()) - return false; - return true; + return !walkResult.wasInterrupted(); } // Gathers all maximal sub-blocks of operations that do not themselves diff --git a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp --- a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp +++ b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp @@ -71,7 +71,7 @@ double typeRangeMax = double(outputElementType.getStorageTypeMax() - outputElementType.getZeroPoint()) * outputElementType.getScale(); - bool narrowRange = outputElementType.getStorageTypeMin() == 1 ?
true : false; + bool narrowRange = outputElementType.getStorageTypeMin() == 1; auto dstQConstType = RankedTensorType::get( outputType.getShape(), diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp --- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp +++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp @@ -391,9 +391,7 @@ type.getNumElements() % multiplicity != 0) return mlir::WalkResult::advance(); auto filterAlloc = [](Operation *op) { - if (isa<arith::ConstantOp, memref::AllocOp, CallOp>(op)) - return false; - return true; + return !isa<arith::ConstantOp, memref::AllocOp, CallOp>(op); }; auto dependentOps = getSlice(op, filterAlloc); // Create a loop and move instructions from the Op slice into the loop.