Index: lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
===================================================================
--- lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -64,12 +64,12 @@
     if (is16BitMode(STI) && BaseReg.getReg() == 0 && Disp.isImm() &&
         Disp.getImm() < 0x10000)
       return true;
-    if ((BaseReg.getReg() != 0 &&
-         X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
-        (IndexReg.getReg() != 0 &&
-         X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
-      return true;
-    return false;
+    return (BaseReg.getReg() != 0 &&
+            X86MCRegisterClasses[X86::GR16RegClassID].contains(
+                BaseReg.getReg())) ||
+           (IndexReg.getReg() != 0 &&
+            X86MCRegisterClasses[X86::GR16RegClassID].contains(
+                IndexReg.getReg()));
   }

   unsigned GetX86RegNum(const MCOperand &MO) const {
@@ -226,12 +226,12 @@
   const MCOperand &BaseReg  = MI.getOperand(Op+X86::AddrBaseReg);
   const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);

-  if ((BaseReg.getReg() != 0 &&
-       X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
-      (IndexReg.getReg() != 0 &&
-       X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
-    return true;
-  return false;
+  return (BaseReg.getReg() != 0 &&
+          X86MCRegisterClasses[X86::GR32RegClassID].contains(
+              BaseReg.getReg())) ||
+         (IndexReg.getReg() != 0 &&
+          X86MCRegisterClasses[X86::GR32RegClassID].contains(
+              IndexReg.getReg()));
 }

 /// Is64BitMemOperand - Return true if the specified instruction has
Index: lib/Target/X86/X86FastISel.cpp
===================================================================
--- lib/Target/X86/X86FastISel.cpp
+++ lib/Target/X86/X86FastISel.cpp
@@ -2075,10 +2075,7 @@

   // Fall-back to pseudo conditional move instructions, which will be later
   // converted to control-flow.
-  if (X86FastEmitPseudoSelect(RetVT, I))
-    return true;
-
-  return false;
+  return X86FastEmitPseudoSelect(RetVT, I);
 }

 bool X86FastISel::X86SelectSIToFP(const Instruction *I) {
@@ -2425,8 +2422,7 @@
     // Grab the frame index.
     X86AddressMode AM;
     if (!X86SelectAddress(Slot, AM)) return false;
-    if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
-    return true;
+    return X86FastEmitStore(PtrTy, Op1, AM);
   }
   case Intrinsic::dbg_declare: {
     const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
Index: lib/Target/X86/X86ISelDAGToDAG.cpp
===================================================================
--- lib/Target/X86/X86ISelDAGToDAG.cpp
+++ lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -443,11 +443,9 @@
     return false;
   if (Chain.getOperand(0).getNode() == Callee.getNode())
     return true;
-  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
-      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
-      Callee.getValue(1).hasOneUse())
-    return true;
-  return false;
+  return Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
+         Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
+         Callee.getValue(1).hasOneUse();
 }

 void X86DAGToDAGISel::PreprocessISelDAG() {
@@ -1425,9 +1423,7 @@
       IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
       IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
     LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
-    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
-      return false;
-    return true;
+    return SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment);
   }
 }

@@ -2059,10 +2055,7 @@
     InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
                                  MVT::Other, ChainOps);
   }
-  if (!ChainCheck)
-    return false;
-
-  return true;
+  return ChainCheck;
 }

 /// getFusedLdStOpcode - Get the appropriate X86 opcode for an in memory
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -2228,10 +2228,7 @@

   CallSite CS(CI);
   CallingConv::ID CalleeCC = CS.getCallingConv();
-  if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
-    return false;
-
-  return true;
+  return IsTailCallConvention(CalleeCC) || IsCCallConvention(CalleeCC);
 }

 /// Return true if the function is being made into
@@ -3357,9 +3354,7 @@
     return false;

   if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
-    if (IsTailCallConvention(CalleeCC) && CCMatch)
-      return true;
-    return false;
+    return IsTailCallConvention(CalleeCC) && CCMatch;
   }

   // Look for obvious safe cases to perform tail call optimization that do not
@@ -3649,10 +3644,7 @@
   // For kernel code model we know that all object resist in the negative half
   // of 32bits address space. We may not accept negative offsets, since they may
   // be just off and we may accept pretty large positive ones.
-  if (M == CodeModel::Kernel && Offset >= 0)
-    return true;
-
-  return false;
+  return M == CodeModel::Kernel && Offset >= 0;
 }

 /// isCalleePop - Determines whether the callee is required to pop its
@@ -3830,9 +3822,7 @@
   assert(Ty->isIntegerTy());

   unsigned BitSize = Ty->getPrimitiveSizeInBits();
-  if (BitSize == 0 || BitSize > 64)
-    return false;
-  return true;
+  return BitSize != 0 && BitSize <= 64;
 }

 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
@@ -7633,10 +7623,7 @@
   // each requiring a single input.
   if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
     return false;
-  if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
-    return false;
-
-  return true;
+  return Mask[2] == -1 || Mask[3] == -1 || (Mask[2] < 4) == (Mask[3] < 4);
 }

 /// \brief Lower a vector shuffle using the SHUFPS instruction.
@@ -13567,10 +13554,7 @@
       Opc == X86ISD::AND))
     return true;

-  if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
-    return true;
-
-  return false;
+  return Op.getResNo() == 2 && Opc == X86ISD::UMUL;
 }

 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
Index: lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- lib/Target/X86/X86InstrInfo.cpp
+++ lib/Target/X86/X86InstrInfo.cpp
@@ -2497,11 +2497,8 @@
   isKill = Src.isKill();
   isUndef = Src.isUndef();

-  if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
-      !MF.getRegInfo().constrainRegClass(NewSrc, RC))
-    return false;
-
-  return true;
+  return !TargetRegisterInfo::isVirtualRegister(NewSrc) ||
+         MF.getRegInfo().constrainRegClass(NewSrc, RC);
 }

 // This is for an LEA64_32r and incoming registers are 32-bit. One way or
@@ -4143,24 +4140,22 @@
       OI->getOperand(2).getReg() == SrcReg)))
     return true;

-  if (((FlagI->getOpcode() == X86::CMP64ri32 &&
-        OI->getOpcode() == X86::SUB64ri32) ||
-       (FlagI->getOpcode() == X86::CMP64ri8 &&
-        OI->getOpcode() == X86::SUB64ri8) ||
-       (FlagI->getOpcode() == X86::CMP32ri &&
-        OI->getOpcode() == X86::SUB32ri) ||
-       (FlagI->getOpcode() == X86::CMP32ri8 &&
-        OI->getOpcode() == X86::SUB32ri8) ||
-       (FlagI->getOpcode() == X86::CMP16ri &&
-        OI->getOpcode() == X86::SUB16ri) ||
-       (FlagI->getOpcode() == X86::CMP16ri8 &&
-        OI->getOpcode() == X86::SUB16ri8) ||
-       (FlagI->getOpcode() == X86::CMP8ri &&
-        OI->getOpcode() == X86::SUB8ri)) &&
-      OI->getOperand(1).getReg() == SrcReg &&
-      OI->getOperand(2).getImm() == ImmValue)
-    return true;
-  return false;
+  return ((FlagI->getOpcode() == X86::CMP64ri32 &&
+           OI->getOpcode() == X86::SUB64ri32) ||
+          (FlagI->getOpcode() == X86::CMP64ri8 &&
+           OI->getOpcode() == X86::SUB64ri8) ||
+          (FlagI->getOpcode() == X86::CMP32ri &&
+           OI->getOpcode() == X86::SUB32ri) ||
+          (FlagI->getOpcode() == X86::CMP32ri8 &&
+           OI->getOpcode() == X86::SUB32ri8) ||
+          (FlagI->getOpcode() == X86::CMP16ri &&
+           OI->getOpcode() == X86::SUB16ri) ||
+          (FlagI->getOpcode() == X86::CMP16ri8 &&
+           OI->getOpcode() == X86::SUB16ri8) ||
+          (FlagI->getOpcode() == X86::CMP8ri &&
+           OI->getOpcode() == X86::SUB8ri)) &&
+         OI->getOperand(1).getReg() == SrcReg &&
+         OI->getOperand(2).getImm() == ImmValue;
 }

 /// Check whether the definition can be converted
Index: lib/Target/X86/X86TargetTransformInfo.cpp
===================================================================
--- lib/Target/X86/X86TargetTransformInfo.cpp
+++ lib/Target/X86/X86TargetTransformInfo.cpp
@@ -1121,9 +1121,7 @@
   // Todo: AVX512 allows gather/scatter, works with strided and random as well
   if ((DataWidth < 32) || (Consecutive == 0))
     return false;
-  if (ST->hasAVX512() || ST->hasAVX2())
-    return true;
-  return false;
+  return ST->hasAVX512() || ST->hasAVX2();
 }

 bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
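
Every hunk above applies the same mechanical cleanup: an "if (Cond) return true; return false;" tail (or its negated twin) collapses into a single "return" of the boolean expression, flipping the condition with De Morgan's laws where the original guard tested the negation. As a standalone sketch, not part of the patch, the snippet below reproduces the two patterns using the BitSize and calling-convention conditions from the X86ISelLowering.cpp hunks; the function name fitsIn64Bits is hypothetical and exists only for illustration.

  // Minimal sketch of the rewrite patterns in this patch; only the
  // boolean logic mirrors the hunks above, the names are invented.
  #include <cassert>

  // Before (X86ISelLowering.cpp, @@ -3830):
  //   if (BitSize == 0 || BitSize > 64)
  //     return false;
  //   return true;
  // After: the guard is negated via De Morgan's laws and returned directly.
  static bool fitsIn64Bits(unsigned BitSize) {
    return BitSize != 0 && BitSize <= 64;
  }

  // Before (X86ISelLowering.cpp, @@ -2228):
  //   if (!IsTailCallConvention(CC) && !IsCCallConvention(CC))
  //     return false;
  //   return true;
  // After: "!A && !B" guarding a "return false" becomes "return A || B;".

  int main() {
    assert(fitsIn64Bits(1) && fitsIn64Bits(64));   // in range
    assert(!fitsIn64Bits(0) && !fitsIn64Bits(65)); // out of range
  }

Both directions of each rewrite are behavior-preserving, so the patch should be NFC (no functional change).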