Index: lib/Target/AArch64/AArch64AddressTypePromotion.cpp
===================================================================
--- lib/Target/AArch64/AArch64AddressTypePromotion.cpp
+++ lib/Target/AArch64/AArch64AddressTypePromotion.cpp
@@ -198,16 +198,11 @@
   // Current heuristic is: each step should be profitable.
   // Therefore we don't allow to increase the number of sext even if it may
   // be profitable later on.
-  if (isa(Inst) && isa(Inst->getOperand(1)))
-    return true;
-
-  return false;
+  return isa(Inst) && isa(Inst->getOperand(1));
 }
 
 static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
-  if (isa(Inst) && OpIdx == 0)
-    return false;
-  return true;
+  return !isa(Inst) || OpIdx != 0;
 }
 
 bool
Index: lib/Target/AArch64/AArch64FastISel.cpp
===================================================================
--- lib/Target/AArch64/AArch64FastISel.cpp
+++ lib/Target/AArch64/AArch64FastISel.cpp
@@ -934,10 +934,7 @@
 
   // If this is a type than can be sign or zero-extended to a basic operation
   // go ahead and accept it now.
-  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
-    return true;
-
-  return false;
+  return VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16;
 }
 
 bool AArch64FastISel::isValueAvailable(const Value *V) const {
@@ -945,10 +942,7 @@
     return true;
 
   const auto *I = cast<Instruction>(V);
-  if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB)
-    return true;
-
-  return false;
+  return FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB;
 }
 
 bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
@@ -2059,9 +2053,7 @@
   if (!computeAddress(I->getOperand(1), Addr, I->getOperand(0)->getType()))
     return false;
 
-  if (!emitStore(VT, SrcReg, Addr, createMachineMemOperandFor(I)))
-    return false;
-  return true;
+  return emitStore(VT, SrcReg, Addr, createMachineMemOperandFor(I));
 }
 
 static AArch64CC::CondCode getCompareCC(CmpInst::Predicate Pred) {
Index: lib/Target/AArch64/AArch64FrameLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64FrameLowering.cpp
+++ lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -130,9 +130,7 @@
   // Note: currently hasFP() is always true for hasCalls(), but that's an
   // implementation detail of the current code, not a strict requirement,
   // so stay safe here and check both.
-  if (MFI->hasCalls() || hasFP(MF) || NumBytes > 128)
-    return false;
-  return true;
+  return !MFI->hasCalls() && !hasFP(MF) && NumBytes <= 128;
 }
 
 /// hasFP - Return true if the specified function should have a dedicated frame
@@ -526,11 +524,9 @@
   if (MI->getOpcode() == AArch64::LDPXpost ||
       MI->getOpcode() == AArch64::LDPDpost ||
       MI->getOpcode() == AArch64::LDPXi || MI->getOpcode() == AArch64::LDPDi) {
-    if (!isCalleeSavedRegister(MI->getOperand(RtIdx).getReg(), CSRegs) ||
-        !isCalleeSavedRegister(MI->getOperand(RtIdx + 1).getReg(), CSRegs) ||
-        MI->getOperand(RtIdx + 2).getReg() != AArch64::SP)
-      return false;
-    return true;
+    return isCalleeSavedRegister(MI->getOperand(RtIdx).getReg(), CSRegs) &&
+           isCalleeSavedRegister(MI->getOperand(RtIdx + 1).getReg(), CSRegs) &&
+           MI->getOperand(RtIdx + 2).getReg() == AArch64::SP;
   }
 
   return false;
Index: lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -312,9 +312,7 @@
 bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
   // it hurts if the value is used at least twice, unless we are optimizing
   // for code size.
-  if (ForCodeSize || V.hasOneUse())
-    return true;
-  return false;
+  return ForCodeSize || V.hasOneUse();
 }
 
 /// SelectShiftedRegister - Select a "shifted register" operand. If the value
@@ -735,10 +733,7 @@
   if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
     return false;
 
-  if (isWorthFolding(N))
-    return true;
-
-  return false;
+  return isWorthFolding(N);
 }
 
 bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1825,17 +1825,13 @@
 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
   if (N->getOpcode() == ISD::SIGN_EXTEND)
     return true;
-  if (isExtendedBUILD_VECTOR(N, DAG, true))
-    return true;
-  return false;
+  return isExtendedBUILD_VECTOR(N, DAG, true);
 }
 
 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
   if (N->getOpcode() == ISD::ZERO_EXTEND)
     return true;
-  if (isExtendedBUILD_VECTOR(N, DAG, false))
-    return true;
-  return false;
+  return isExtendedBUILD_VECTOR(N, DAG, false);
 }
 
 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
@@ -2408,9 +2404,7 @@
     return false;
 
   if (getTargetMachine().Options.GuaranteedTailCallOpt) {
-    if (IsTailCallConvention(CalleeCC) && CCMatch)
-      return true;
-    return false;
+    return IsTailCallConvention(CalleeCC) && CCMatch;
   }
 
   // Externally-defined functions with weak linkage should not be
@@ -6574,12 +6568,10 @@
   const TargetOptions &Options = getTargetMachine().Options;
   EVT VT = getValueType(User->getOperand(0)->getType());
 
-  if (isFMAFasterThanFMulAndFAdd(VT) &&
-      isOperationLegalOrCustom(ISD::FMA, VT) &&
-      (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath))
-    return false;
-
-  return true;
+  return !(isFMAFasterThanFMulAndFAdd(VT) &&
+           isOperationLegalOrCustom(ISD::FMA, VT) &&
+           (Options.AllowFPOpFusion == FPOpFusion::Fast ||
+            Options.UnsafeFPMath));
 }
 
 // All 32-bit GPR operations implicitly zero the high-half of the corresponding
@@ -6725,9 +6717,7 @@
 
 // 12-bit optionally shifted immediates are legal for adds.
 bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
-  if ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0))
-    return true;
-  return false;
+  return (Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0);
 }
 
 // Integer comparisons are implemented with ADDS/SUBS, so the range of valid
@@ -6776,19 +6766,15 @@
 
     // 12-bit unsigned offset
     unsigned shift = Log2_64(NumBytes);
-    if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
-        // Must be a multiple of NumBytes (NumBytes is a power of 2)
-        (Offset >> shift) << shift == Offset)
-      return true;
-    return false;
+    return NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
+           // Must be a multiple of NumBytes (NumBytes is a power of 2)
+           (Offset >> shift) << shift == Offset;
   }
 
   // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2
-  if (!AM.Scale || AM.Scale == 1 ||
-      (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes))
-    return true;
-  return false;
+  return !AM.Scale || AM.Scale == 1 ||
+         (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes);
 }
 
 int AArch64TargetLowering::getScalingFactorCost(const AddrMode &AM,
@@ -8331,10 +8317,8 @@
   }
   case ISD::Constant:
   case ISD::TargetConstant: {
-    if (std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
-        1LL << (width - 1))
-      return true;
-    return false;
+    return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
+           1LL << (width - 1);
   }
   }
 
@@ -8891,10 +8875,7 @@
 // return instructions to help enable tail call optimizations for this
 // instruction.
 bool AArch64TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
-  if (!CI->isTailCall())
-    return false;
-
-  return true;
+  return CI->isTailCall();
 }
 
 bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
Index: lib/Target/AArch64/AArch64InstrInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.cpp
+++ lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2454,10 +2454,7 @@
     return false;
 
   // Must only used by the user we combine with.
-  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
-    return false;
-
-  return true;
+  return MRI.hasOneNonDBGUse(MI->getOperand(0).getReg());
 }
 
 /// hasPattern - return true when there is potentially a faster code sequence
Index: lib/Target/AArch64/AArch64PromoteConstant.cpp
===================================================================
--- lib/Target/AArch64/AArch64PromoteConstant.cpp
+++ lib/Target/AArch64/AArch64PromoteConstant.cpp
@@ -285,10 +285,7 @@
 
   // Do not mess with inline asm.
   const CallInst *CI = dyn_cast<CallInst>(Instr);
-  if (CI && isa<InlineAsm>(CI->getCalledValue()))
-    return false;
-
-  return true;
+  return !CI || !isa<InlineAsm>(CI->getCalledValue());
 }
 
 /// Check if the given Cst should be converted into
Index: lib/Target/AArch64/AArch64RegisterInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -178,9 +178,7 @@
     // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
-    if (MFI->getLocalFrameSize() < 256)
-      return false;
-    return true;
+    return MFI->getLocalFrameSize() >= 256;
   }
   return false;
 }
@@ -188,10 +186,7 @@
 bool AArch64RegisterInfo::canRealignStack(const MachineFunction &MF) const {
-  if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
-    return false;
-
-  return true;
+  return !MF.getFunction()->hasFnAttribute("no-realign-stack");
 }
 
 // FIXME: share this with other backends with identical implementation?