Index: lib/Target/ARM/A15SDOptimizer.cpp
===================================================================
--- lib/Target/ARM/A15SDOptimizer.cpp
+++ lib/Target/ARM/A15SDOptimizer.cpp
@@ -341,10 +341,7 @@
                                            &ARM::SPRRegClass))
     return true;
 
-  if (MI->isRegSequence() && usesRegClass(MI->getOperand(1), &ARM::SPRRegClass))
-    return true;
-
-  return false;
+  return MI->isRegSequence() && usesRegClass(MI->getOperand(1), &ARM::SPRRegClass);
 }
 
 // Looks through full copies to get the instruction that defines the input
Index: lib/Target/ARM/ARMBaseInstrInfo.cpp
===================================================================
--- lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1631,10 +1631,7 @@
     return false; // FIXME: overly conservative?
 
   // Four loads in a row should be sufficient.
-  if (NumLoads >= 3)
-    return false;
-
-  return true;
+  return NumLoads < 3;
 }
 
 bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
@@ -1674,10 +1671,7 @@
   // Calls don't actually change the stack pointer, even if they have imp-defs.
   // No ARM calling conventions change the stack pointer. (X86 calling
   // conventions sometimes do).
-  if (!MI->isCall() && MI->definesRegister(ARM::SP))
-    return true;
-
-  return false;
+  return !MI->isCall() && MI->definesRegister(ARM::SP);
 }
 
 bool ARMBaseInstrInfo::
@@ -2363,14 +2357,12 @@
         OI->getOperand(2).getReg() == SrcReg)))
     return true;
 
-  if ((CmpI->getOpcode() == ARM::CMPri ||
+  return (CmpI->getOpcode() == ARM::CMPri ||
        CmpI->getOpcode() == ARM::t2CMPri) &&
       (OI->getOpcode() == ARM::SUBri ||
       OI->getOpcode() == ARM::t2SUBri) &&
      OI->getOperand(1).getReg() == SrcReg &&
-      OI->getOperand(2).getImm() == ImmValue)
-    return true;
-  return false;
+      OI->getOperand(2).getImm() == ImmValue;
 }
 
 /// optimizeCompareInstr - Convert the instruction supplying the argument to the
@@ -4547,12 +4539,9 @@
   unsigned ShOpVal = MI->getOperand(3).getImm();
   unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal);
   // Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1.
-  if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) ||
+  return (ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) ||
       ((ShImm == 1 || ShImm == 2) &&
-       ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl))
-    return true;
-
-  return false;
+       ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl);
 }
 
 bool ARMBaseInstrInfo::getRegSequenceLikeInputs(
Index: lib/Target/ARM/ARMBaseRegisterInfo.cpp
===================================================================
--- lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -322,9 +322,7 @@
     // space, so it's all more likely to be within range of the frame pointer.
     // If it's wrong, the scavenger will still enable access to work, it just
     // won't be optimal.
-    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
-      return false;
-    return true;
+    return !(AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128);
   }
 
   return false;
@@ -679,10 +677,7 @@
     Offset = -Offset;
 
   unsigned Mask = (1 << NumBits) - 1;
-  if ((unsigned)Offset <= Mask * Scale)
-    return true;
-
-  return false;
+  return (unsigned)Offset <= Mask * Scale;
 }
 
 void
Index: lib/Target/ARM/ARMFastISel.cpp
===================================================================
--- lib/Target/ARM/ARMFastISel.cpp
+++ lib/Target/ARM/ARMFastISel.cpp
@@ -748,10 +748,7 @@
 
   // If this is a type than can be sign or zero-extended to a basic operation
   // go ahead and accept it now.
-  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
-    return true;
-
-  return false;
+  return VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16;
 }
 
 // Computes the address to get to an object.
@@ -1195,9 +1192,7 @@
   if (!ARMComputeAddress(I->getOperand(1), Addr))
     return false;
 
-  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
-    return false;
-  return true;
+  return ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment());
 }
 
 static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
Index: lib/Target/ARM/ARMFrameLowering.cpp
===================================================================
--- lib/Target/ARM/ARMFrameLowering.cpp
+++ lib/Target/ARM/ARMFrameLowering.cpp
@@ -109,14 +109,11 @@
         return false;
     return true;
   }
-  if ((MI->getOpcode() == ARM::LDR_POST_IMM ||
+  return (MI->getOpcode() == ARM::LDR_POST_IMM ||
        MI->getOpcode() == ARM::LDR_POST_REG ||
       MI->getOpcode() == ARM::t2LDR_POST) &&
      isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs) &&
-      MI->getOperand(1).getReg() == ARM::SP)
-    return true;
-
-  return false;
+      MI->getOperand(1).getReg() == ARM::SP;
 }
 
 static void emitRegPlusImmediate(bool isARM, MachineBasicBlock &MBB,
Index: lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- lib/Target/ARM/ARMISelLowering.cpp
+++ lib/Target/ARM/ARMISelLowering.cpp
@@ -4939,10 +4939,7 @@
   }
 
   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
-  if (VT.is64BitVector() && EltSz == 32)
-    return false;
-
-  return true;
+  return !(VT.is64BitVector() && EltSz == 32);
 }
 
 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
@@ -4966,10 +4963,7 @@
   }
 
   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
-  if (VT.is64BitVector() && EltSz == 32)
-    return false;
-
-  return true;
+  return !(VT.is64BitVector() && EltSz == 32);
 }
 
 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
@@ -4988,10 +4982,7 @@
   }
 
   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
-  if (VT.is64BitVector() && EltSz == 32)
-    return false;
-
-  return true;
+  return !(VT.is64BitVector() && EltSz == 32);
 }
 
 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
@@ -5013,10 +5004,7 @@
   }
 
   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
-  if (VT.is64BitVector() && EltSz == 32)
-    return false;
-
-  return true;
+  return !(VT.is64BitVector() && EltSz == 32);
 }
 
 /// \return true if this is a reverse operation on an vector.
@@ -5824,9 +5812,7 @@
 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
   if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
     return true;
-  if (isExtendedBUILD_VECTOR(N, DAG, true))
-    return true;
-  return false;
+  return isExtendedBUILD_VECTOR(N, DAG, true);
 }
 
 /// isZeroExtended - Check if a node is a vector value that is zero-extended
@@ -5834,9 +5820,7 @@
 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
   if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
     return true;
-  if (isExtendedBUILD_VECTOR(N, DAG, false))
-    return true;
-  return false;
+  return isExtendedBUILD_VECTOR(N, DAG, false);
 }
 
 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
@@ -10068,11 +10052,8 @@
     return true;
 
   SDNode *U = *ExtVal->use_begin();
-  if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
-       U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))
-    return false;
-
-  return true;
+  return !(U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
+           U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL);
 }
 
 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
@@ -11088,9 +11069,7 @@
   assert(Ty->isIntegerTy());
 
   unsigned Bits = Ty->getPrimitiveSizeInBits();
-  if (Bits == 0 || Bits > 32)
-    return false;
-  return true;
+  return !(Bits == 0 || Bits > 32);
 }
 
 bool ARMTargetLowering::hasLoadLinkedStoreConditional() const { return true; }
Index: lib/Target/ARM/ARMSubtarget.cpp
===================================================================
--- lib/Target/ARM/ARMSubtarget.cpp
+++ lib/Target/ARM/ARMSubtarget.cpp
@@ -291,9 +291,7 @@
 
   if (!isTargetMachO()) {
     // Extra load is needed for all externally visible.
-    if (GV->hasLocalLinkage() || GV->hasHiddenVisibility())
-      return false;
-    return true;
+    return !(GV->hasLocalLinkage() || GV->hasHiddenVisibility());
   } else {
     if (RelocM == Reloc::PIC_) {
       // If this is a strong reference to a definition, it is definitely not
@@ -308,11 +306,7 @@
 
       // If symbol visibility is hidden, we have a stub for common symbol
       // references and external declarations.
-      if (isDecl || GV->hasCommonLinkage())
-        // Hidden $non_lazy_ptr reference.
-        return true;
-
-      return false;
+      return isDecl || GV->hasCommonLinkage();
     } else {
       // If this is a strong reference to a definition, it is definitely not
       // through a stub.
Index: lib/Target/ARM/AsmParser/ARMAsmParser.cpp
===================================================================
--- lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -1260,22 +1260,16 @@
            Val == INT32_MIN;
   }
   bool isMemTBB() const {
-    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
-        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
-      return false;
-    return true;
+    return !(!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
+             Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0);
   }
   bool isMemTBH() const {
-    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
+    return !(!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
-        Memory.Alignment != 0 )
-      return false;
-    return true;
+        Memory.Alignment != 0);
   }
   bool isMemRegOffset() const {
-    if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
-      return false;
-    return true;
+    return !(!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0);
   }
   bool isT2MemRegOffset() const {
     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
@@ -1284,9 +1278,7 @@
     // Only lsl #{0, 1, 2, 3} allowed.
     if (Memory.ShiftType == ARM_AM::no_shift)
       return true;
-    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
-      return false;
-    return true;
+    return !(Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3);
   }
   bool isMemThumbRR() const {
     // Thumb reg+reg addressing is simple. Just two registers, a base and
@@ -5574,17 +5566,14 @@
   // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
   // right, this will result in better diagnostics (which operand is off)
   // anyway.
-  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
+  return isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
       (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
       (Operands.size() == 6 &&
-        static_cast<ARMOperand &>(*Operands[5]).isImm())))
-    return true;
-
-  return false;
+        static_cast<ARMOperand &>(*Operands[5]).isImm()));
 }
 
 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
Index: lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
===================================================================
--- lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -175,9 +175,7 @@
 }
 
 bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
-  if (getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode())
-    return true;
-  return false;
+  return getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode();
 }
 
 bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
Index: lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
===================================================================
--- lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -338,9 +338,7 @@
   Value -= Writer->getSectionAddress(Fragment.getParent());
   // If the resultant value would be out of range for an internal relocation,
   // use an external instead.
-  if (Value > Range || Value < -(Range + 1))
-    return true;
-  return false;
+  return Value > Range || Value < -(Range + 1);
 }
 
 void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
Index: lib/Target/ARM/Thumb2SizeReduction.cpp
===================================================================
--- lib/Target/ARM/Thumb2SizeReduction.cpp
+++ lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -902,11 +902,8 @@
     return true;
 
   // Try to transform to a 16-bit non-two-address instruction.
-  if (Entry.NarrowOpc1 &&
-      ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
-    return true;
-
-  return false;
+  return Entry.NarrowOpc1 &&
+         ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
 }
 
 bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
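Note for reviewers: every hunk above folds a trailing "if (Cond) return true; return false;" (or its negated twin "if (Cond) return false; return true;") into a single "return Cond;" / "return !(Cond);", with no change in behaviour. A minimal standalone sketch of the equivalence follows; the helper names BitsInRangeBefore/BitsInRangeAfter are invented for illustration and are not part of the patch, they merely mirror the shape of the ARMISelLowering.cpp hunk at old line 11088.

// bool_return_sketch.cpp - illustrative only, not part of the patch.
#include <cassert>

// Before: branch on the condition, then return a boolean literal.
static bool BitsInRangeBefore(unsigned Bits) {
  if (Bits == 0 || Bits > 32)
    return false;
  return true;
}

// After: return the negated condition directly.
static bool BitsInRangeAfter(unsigned Bits) {
  return !(Bits == 0 || Bits > 32);
}

int main() {
  // Exhaustively check a small range to show the two forms agree.
  for (unsigned Bits = 0; Bits <= 64; ++Bits)
    assert(BitsInRangeBefore(Bits) == BitsInRangeAfter(Bits));
  return 0;
}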