Index: lib/Target/AArch64/AArch64AddressTypePromotion.cpp
===================================================================
--- lib/Target/AArch64/AArch64AddressTypePromotion.cpp
+++ lib/Target/AArch64/AArch64AddressTypePromotion.cpp
@@ -207,9 +207,7 @@
 }
 
 static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
-  if (isa<SelectInst>(Inst) && OpIdx == 0)
-    return false;
-  return true;
+  return !(isa<SelectInst>(Inst) && OpIdx == 0);
 }
 
 bool
Index: lib/Target/AArch64/AArch64FastISel.cpp
===================================================================
--- lib/Target/AArch64/AArch64FastISel.cpp
+++ lib/Target/AArch64/AArch64FastISel.cpp
@@ -946,10 +946,7 @@
     return true;
 
   const auto *I = cast<Instruction>(V);
-  if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB)
-    return true;
-
-  return false;
+  return FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB;
 }
 
 bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
Index: lib/Target/AArch64/AArch64FrameLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64FrameLowering.cpp
+++ lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -130,9 +130,7 @@
   // Note: currently hasFP() is always true for hasCalls(), but that's an
   // implementation detail of the current code, not a strict requirement,
   // so stay safe here and check both.
-  if (MFI->hasCalls() || hasFP(MF) || NumBytes > 128)
-    return false;
-  return true;
+  return !(MFI->hasCalls() || hasFP(MF) || NumBytes > 128);
 }
 
 /// hasFP - Return true if the specified function should have a dedicated frame
Index: lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -328,9 +328,7 @@
 bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
   // it hurts if the value is used at least twice, unless we are optimizing
   // for code size.
-  if (ForCodeSize || V.hasOneUse())
-    return true;
-  return false;
+  return ForCodeSize || V.hasOneUse();
 }
 
 /// SelectShiftedRegister - Select a "shifted register" operand. If the value
@@ -797,10 +795,7 @@
   if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
     return false;
 
-  if (isWorthFolding(N))
-    return true;
-
-  return false;
+  return isWorthFolding(N);
 }
 
 bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2812,9 +2812,7 @@
     return false;
 
   if (getTargetMachine().Options.GuaranteedTailCallOpt) {
-    if (IsTailCallConvention(CalleeCC) && CCMatch)
-      return true;
-    return false;
+    return IsTailCallConvention(CalleeCC) && CCMatch;
   }
 
   // Externally-defined functions with weak linkage should not be
@@ -6974,12 +6972,10 @@
   const DataLayout &DL = I->getModule()->getDataLayout();
   EVT VT = getValueType(DL, User->getOperand(0)->getType());
 
-  if (isFMAFasterThanFMulAndFAdd(VT) &&
-      isOperationLegalOrCustom(ISD::FMA, VT) &&
-      (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath))
-    return false;
-
-  return true;
+  return !(isFMAFasterThanFMulAndFAdd(VT) &&
+           isOperationLegalOrCustom(ISD::FMA, VT) &&
+           (Options.AllowFPOpFusion == FPOpFusion::Fast ||
+            Options.UnsafeFPMath));
 }
 
 // All 32-bit GPR operations implicitly zero the high-half of the corresponding
@@ -7280,9 +7276,7 @@
 
 // 12-bit optionally shifted immediates are legal for adds.
 bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
-  if ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0))
-    return true;
-  return false;
+  return (Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0);
 }
 
 // Integer comparisons are implemented with ADDS/SUBS, so the range of valid
@@ -7341,10 +7335,8 @@
   }
 
   // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2
-  if (!AM.Scale || AM.Scale == 1 ||
-      (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes))
-    return true;
-  return false;
+  return !AM.Scale || AM.Scale == 1 ||
+         (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes);
 }
 
 int AArch64TargetLowering::getScalingFactorCost(const DataLayout &DL,
@@ -9309,10 +9301,8 @@
   }
   case ISD::Constant:
   case ISD::TargetConstant: {
-    if (std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
-        1LL << (width - 1))
-      return true;
-    return false;
+    return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
+           1LL << (width - 1);
   }
   }
 
@@ -9922,10 +9912,7 @@
 // return instructions to help enable tail call optimizations for this
 // instruction.
 bool AArch64TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
-  if (!CI->isTailCall())
-    return false;
-
-  return true;
+  return CI->isTailCall();
 }
 
 bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
Index: lib/Target/AArch64/AArch64PromoteConstant.cpp
===================================================================
--- lib/Target/AArch64/AArch64PromoteConstant.cpp
+++ lib/Target/AArch64/AArch64PromoteConstant.cpp
@@ -285,10 +285,7 @@
 
   // Do not mess with inline asm.
   const CallInst *CI = dyn_cast<const CallInst>(Instr);
-  if (CI && isa<const InlineAsm>(CI->getCalledValue()))
-    return false;
-
-  return true;
+  return !(CI && isa<const InlineAsm>(CI->getCalledValue()));
 }
 
 /// Check if the given Cst should be converted into
Index: lib/Target/AArch64/AArch64RegisterInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -190,9 +190,7 @@
     // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
     // load/store instructions, which have a 9-bit signed immediate.
-    if (MFI->getLocalFrameSize() < 256)
-      return false;
-    return true;
+    return MFI->getLocalFrameSize() >= 256;
   }
 
   return false;
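
A minimal standalone sketch (illustration only, not part of the patch): every
hunk above applies the same mechanical rewrite, collapsing the pattern
"if (Cond) return true; return false;" into "return Cond;", and the inverted
pattern "if (Cond) return false; return true;" into "return !Cond;". The pair
of functions below is modeled on the isLegalAddImmediate hunk; the Before/After
names are ours, chosen for illustration, and both forms are equivalent.

#include <cassert>
#include <cstdint>

// Before: branch on the condition, then return a boolean literal.
static bool isLegalAddImmediateBefore(int64_t Immed) {
  if ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0))
    return true;
  return false;
}

// After: return the boolean condition directly, as the patch does.
static bool isLegalAddImmediateAfter(int64_t Immed) {
  return (Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0);
}

int main() {
  // The two forms agree on every input: e.g. 4095 (a plain 12-bit
  // immediate) and 0xfff000 (a 12-bit immediate shifted left by 12) are
  // legal, while 0x1000001 is not.
  for (int64_t Immed : {0LL, 4095LL, 4096LL, 0xfff000LL, 0x1000001LL})
    assert(isLegalAddImmediateBefore(Immed) == isLegalAddImmediateAfter(Immed));
  return 0;
}

Beyond brevity, the rewrite removes a branch from the source (the compiler
emitted none either way) and keeps the function's contract visible in a single
expression, which is why this cleanup is applied uniformly across the target.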