Index: lib/Target/ARM/ARMBaseInstrInfo.cpp
===================================================================
--- lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -4564,12 +4564,9 @@
   unsigned ShOpVal = MI->getOperand(3).getImm();
   unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal);
   // Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1.
-  if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) ||
-      ((ShImm == 1 || ShImm == 2) &&
-       ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl))
-    return true;
-
-  return false;
+  return (ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) ||
+         ((ShImm == 1 || ShImm == 2) &&
+          ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl);
 }
 
 bool ARMBaseInstrInfo::getRegSequenceLikeInputs(
Index: lib/Target/ARM/ARMBaseRegisterInfo.cpp
===================================================================
--- lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -323,9 +323,7 @@
     // space, so it's all more likely to be within range of the frame pointer.
     // If it's wrong, the scavenger will still enable access to work, it just
     // won't be optimal.
-    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
-      return false;
-    return true;
+    return !(AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128);
   }
 
   return false;
@@ -669,10 +667,7 @@
     Offset = -Offset;
 
   unsigned Mask = (1 << NumBits) - 1;
-  if ((unsigned)Offset <= Mask * Scale)
-    return true;
-
-  return false;
+  return (unsigned)Offset <= Mask * Scale;
 }
 
 void
Index: lib/Target/ARM/ARMFrameLowering.cpp
===================================================================
--- lib/Target/ARM/ARMFrameLowering.cpp
+++ lib/Target/ARM/ARMFrameLowering.cpp
@@ -109,14 +109,11 @@
         return false;
     return true;
   }
-  if ((MI->getOpcode() == ARM::LDR_POST_IMM ||
-       MI->getOpcode() == ARM::LDR_POST_REG ||
-       MI->getOpcode() == ARM::t2LDR_POST) &&
-      isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs) &&
-      MI->getOperand(1).getReg() == ARM::SP)
-    return true;
-
-  return false;
+  return (MI->getOpcode() == ARM::LDR_POST_IMM ||
+          MI->getOpcode() == ARM::LDR_POST_REG ||
+          MI->getOpcode() == ARM::t2LDR_POST) &&
+         isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs) &&
+         MI->getOperand(1).getReg() == ARM::SP;
 }
 
 static void emitRegPlusImmediate(bool isARM, MachineBasicBlock &MBB,
Index: lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- lib/Target/ARM/ARMISelLowering.cpp
+++ lib/Target/ARM/ARMISelLowering.cpp
@@ -2430,10 +2430,7 @@
 
   auto Attr =
       CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
-  if (!CI->isTailCall() || Attr.getValueAsString() == "true")
-    return false;
-
-  return true;
+  return !(!CI->isTailCall() || Attr.getValueAsString() == "true");
 }
 
 // Trying to write a 64 bit value so need to split into two 32 bit values first,
@@ -5127,10 +5124,7 @@
     WhichResult = 0;
 
   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
-  if (VT.is64BitVector() && EltSz == 32)
-    return false;
-
-  return true;
+  return !(VT.is64BitVector() && EltSz == 32);
 }
 
 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
@@ -5163,10 +5157,7 @@
     WhichResult = 0;
 
   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
-  if (VT.is64BitVector() && EltSz == 32)
-    return false;
-
-  return true;
+  return !(VT.is64BitVector() && EltSz == 32);
 }
 
 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking
@@ -5201,10 +5192,7 @@
     WhichResult = 0;
 
   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
-  if (VT.is64BitVector() && EltSz == 32)
-    return false;
-
-  return true;
+  return !(VT.is64BitVector() && EltSz == 32);
 }
 
 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
@@ -5234,10 +5222,7 @@
     WhichResult = 0;
 
   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
-  if (VT.is64BitVector() && EltSz == 32)
-    return false;
-
-  return true;
+  return !(VT.is64BitVector() && EltSz == 32);
 }
 
 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
@@ -10461,11 +10446,8 @@
     return true;
 
   SDNode *U = *ExtVal->use_begin();
-  if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
-       U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))
-    return false;
-
-  return true;
+  return !(U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
+           U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL);
 }
 
 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
@@ -11549,9 +11531,7 @@
   assert(Ty->isIntegerTy());
 
   unsigned Bits = Ty->getPrimitiveSizeInBits();
-  if (Bits == 0 || Bits > 32)
-    return false;
-  return true;
+  return !(Bits == 0 || Bits > 32);
 }
 
 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
Index: lib/Target/ARM/ARMSubtarget.cpp
===================================================================
--- lib/Target/ARM/ARMSubtarget.cpp
+++ lib/Target/ARM/ARMSubtarget.cpp
@@ -255,9 +255,7 @@
 
   if (!isTargetMachO()) {
     // Extra load is needed for all externally visible.
-    if (GV->hasLocalLinkage() || GV->hasHiddenVisibility())
-      return false;
-    return true;
+    return !(GV->hasLocalLinkage() || GV->hasHiddenVisibility());
   } else {
     // If this is a strong reference to a definition, it is definitely not
     // through a stub.