Index: include/llvm/Target/TargetInstrInfo.h
===================================================================
--- include/llvm/Target/TargetInstrInfo.h
+++ include/llvm/Target/TargetInstrInfo.h
@@ -952,6 +952,7 @@
                                     const MachineRegisterInfo *MRI) const {
     return false;
   }
+  virtual bool optimizeCondBranch(MachineInstr *MI) const { return false; }
 
   /// optimizeLoadInstr - Try to remove the load by folding it to a register
   /// operand at the use. We fold the load instructions if and only if the
Index: lib/CodeGen/PeepholeOptimizer.cpp
===================================================================
--- lib/CodeGen/PeepholeOptimizer.cpp
+++ lib/CodeGen/PeepholeOptimizer.cpp
@@ -134,6 +134,7 @@
     bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                           SmallPtrSetImpl<MachineInstr *> &LocalMIs);
     bool optimizeSelect(MachineInstr *MI);
+    bool optimizeCondBranch(MachineInstr *MI);
     bool optimizeCopyOrBitcast(MachineInstr *MI);
     bool optimizeCoalescableCopy(MachineInstr *MI);
     bool optimizeUncoalescableCopy(MachineInstr *MI,
@@ -499,6 +500,14 @@
   return true;
 }
 
+/// \brief Check if a simpler conditional branch can be
+/// generated.
+bool PeepholeOptimizer::optimizeCondBranch(MachineInstr *MI) {
+  if (!TII->optimizeCondBranch(MI))
+    return false;
+  return true;
+}
+
 /// \brief Check if the registers defined by the pair (RegisterClass, SubReg)
 /// share the same register file.
 static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
@@ -1105,6 +1114,11 @@
         continue;
       }
 
+      if (MI->isConditionalBranch() && optimizeCondBranch(MI)) {
+        Changed = true;
+        continue;
+      }
+
       if (isCoalescableCopy(*MI) && optimizeCoalescableCopy(MI)) {
         // MI is just rewritten.
         Changed = true;
Index: lib/Target/AArch64/AArch64InstrInfo.h
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.h
+++ lib/Target/AArch64/AArch64InstrInfo.h
@@ -165,6 +165,7 @@
   bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
                             unsigned SrcReg2, int CmpMask, int CmpValue,
                             const MachineRegisterInfo *MRI) const override;
+  bool optimizeCondBranch(MachineInstr *MI) const override;
   /// hasPattern - return true when there is potentially a faster code sequence
   /// for an instruction chain ending in <Root>. All potential patterns are
   /// listed
Index: lib/Target/AArch64/AArch64InstrInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.cpp
+++ lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -768,6 +768,41 @@
   return NewOpc;
 }
 
+/// True when the condition code could be modified on the instruction
+/// trace starting at From and ending at To.
+static bool modifiesConditionCode(MachineInstr *From, MachineInstr *To,
+                                  const bool CheckOnlyCCWrites,
+                                  const TargetRegisterInfo *TRI) {
+  // We iterate backward, starting from the instruction before To, and
+  // stop when reaching From or the beginning of the basic block, to
+  // check whether NZCV is used or modified in between.
+  MachineBasicBlock::iterator I = To, E = From, B = To->getParent()->begin();
+
+  // Early exit if To is at the beginning of the BB.
+  if (I == B)
+    return true;
+
+  // Check whether From and To are in the same basic block. If not, assume
+  // the condition code gets modified on some path.
+  if (To->getParent() != From->getParent())
+    return true;
+
+  // Check that NZCV isn't set on the trace.
+  for (--I; I != E; --I) {
+    const MachineInstr &Instr = *I;
+
+    if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
+        (!CheckOnlyCCWrites && Instr.readsRegister(AArch64::NZCV, TRI)))
+      // This instruction modifies or uses NZCV after the one we want to
+      // change.
+      return true;
+    if (I == B)
+      // We currently don't allow the instruction trace to cross basic
+      // block boundaries.
+      return true;
+  }
+  return false;
+}
 /// optimizeCompareInstr - Convert the instruction supplying the argument to the
 /// comparison into one that sets the zero bit in the flags register.
 bool AArch64InstrInfo::optimizeCompareInstr(
@@ -806,36 +841,10 @@
   if (!MI)
     return false;
 
-  // We iterate backward, starting from the instruction before CmpInstr and
-  // stop when reaching the definition of the source register or done with the
-  // basic block, to check whether NZCV is used or modified in between.
-  MachineBasicBlock::iterator I = CmpInstr, E = MI,
-                              B = CmpInstr->getParent()->begin();
-
-  // Early exit if CmpInstr is at the beginning of the BB.
-  if (I == B)
-    return false;
-
-  // Check whether the definition of SrcReg is in the same basic block as
-  // Compare. If not, we can't optimize away the Compare.
-  if (MI->getParent() != CmpInstr->getParent())
-    return false;
-
-  // Check that NZCV isn't set between the comparison instruction and the one we
-  // want to change.
+  bool CheckOnlyCCWrites = false;
   const TargetRegisterInfo *TRI = &getRegisterInfo();
-  for (--I; I != E; --I) {
-    const MachineInstr &Instr = *I;
-
-    if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
-        Instr.readsRegister(AArch64::NZCV, TRI))
-      // This instruction modifies or uses NZCV after the one we want to
-      // change. We can't do this transformation.
-      return false;
-    if (I == B)
-      // The 'and' is below the comparison instruction.
-      return false;
-  }
+  if (modifiesConditionCode(MI, CmpInstr, CheckOnlyCCWrites, TRI))
+    return false;
 
   unsigned NewOpc = MI->getOpcode();
   switch (MI->getOpcode()) {
@@ -2831,3 +2840,116 @@
   return;
 }
+
+/// \brief Replace a csinc-branch sequence by a simple conditional branch
+///
+/// Examples:
+/// 1.
+///   csinc w9, wzr, wzr, <CC>
+///   tbnz  w9, #0, 0x44
+/// to
+///   b.<invCC>
+///
+/// 2.
+///   csinc w9, wzr, wzr, <CC>
+///   tbz   w9, #0, 0x44
+/// to
+///   b.<CC>
+///
+/// \param MI Conditional Branch
+/// \return True when the simple conditional branch is generated
+///
+bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
+  // Is this a negated branch (CBNZ/TBNZ)?
+  bool IsNegate = false;
+  // Is this a test-and-branch (TBZ/TBNZ)?
+  bool HasBitTest = false;
+  // Index of the target-BB operand in MI.
+  unsigned TBBOperand = 0;
+  switch (MI->getOpcode()) {
+  default:
+    llvm_unreachable("Unknown branch instruction?");
+  case AArch64::Bcc:
+    return false;
+  case AArch64::CBZW:
+  case AArch64::CBZX:
+    TBBOperand = 1;
+    break;
+  case AArch64::CBNZW:
+  case AArch64::CBNZX:
+    TBBOperand = 1;
+    IsNegate = true;
+    break;
+  case AArch64::TBZW:
+  case AArch64::TBZX:
+    TBBOperand = 2;
+    HasBitTest = true;
+    break;
+  case AArch64::TBNZW:
+  case AArch64::TBNZX:
+    TBBOperand = 2;
+    IsNegate = true;
+    HasBitTest = true;
+    break;
+  }
+  assert((TBBOperand == 1 || TBBOperand == 2) && "Invalid target BB operand");
+  // So we increment a zero register and test for bits other
+  // than bit 0? Conservatively bail out in case the verifier
+  // missed this case.
+  if (HasBitTest && MI->getOperand(1).getImm())
+    return false;
+
+  // Find the definition of the branched-on register.
+  assert(MI->getParent() && "Incomplete machine instruction");
+  MachineBasicBlock *MBB = MI->getParent();
+  MachineFunction *MF = MBB->getParent();
+  assert(MF && "MachineBasicBlock without a parent MachineFunction");
+  MachineRegisterInfo *MRI = &MF->getRegInfo();
+  unsigned VReg = MI->getOperand(0).getReg();
+  if (!TargetRegisterInfo::isVirtualRegister(VReg))
+    return false;
+
+  MachineInstr *DefMI = MRI->getVRegDef(VReg);
+
+  // Look for CSINC.
+  if (!(DefMI->getOpcode() == AArch64::CSINCWr &&
+        DefMI->getOperand(1).getReg() == AArch64::WZR &&
+        DefMI->getOperand(2).getReg() == AArch64::WZR) &&
+      !(DefMI->getOpcode() == AArch64::CSINCXr &&
+        DefMI->getOperand(1).getReg() == AArch64::XZR &&
+        DefMI->getOperand(2).getReg() == AArch64::XZR))
+    return false;
+
+  if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
+    return false;
+
+  // The condition code is always operand 3 of the CSINC.
+  AArch64CC::CondCode CC =
+      (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
+  bool CheckOnlyCCWrites = true;
+  // Convert only when the condition code is not modified between
+  // the CSINC and the branch. The CC may be used by other
+  // instructions in between.
+  if (!modifiesConditionCode(DefMI, MI, CheckOnlyCCWrites,
+                             &getRegisterInfo())) {
+    MachineBasicBlock *TBB = MI->getOperand(TBBOperand).getMBB();
+    DebugLoc DL = MI->getDebugLoc();
+    if (IsNegate)
+      CC = AArch64CC::getInvertedCondCode(CC);
+    BuildMI(*MBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
+    // Leave the now possibly dead CSINC for dead code elimination;
+    // it may still have other uses.
+    MI->eraseFromParent();
+    return true;
+  }
+  return false;
+}
Index: test/CodeGen/AArch64/arm64-bcc.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/arm64-bcc.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
+; Checks for conditional branch b.vs

+; Function Attrs: nounwind
+define i32 @add(i32, i32) {
+entry:
+  %2 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %0, i32 %1)
+  %3 = extractvalue { i32, i1 } %2, 1
+  br i1 %3, label %6, label %4
+
+;
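
For illustration only (not part of the patch): a concrete instance of
example 1 from the optimizeCondBranch comment above, assuming a preceding
signed compare; the register numbers and the label are hypothetical.
Before the peephole runs:

    cmp   w0, w1
    csinc w9, wzr, wzr, ge    ; w9 = (ge) ? 0 : 1, so w9 is 1 exactly when lt
    tbnz  w9, #0, LBB0_2      ; taken when bit 0 of w9 is set, i.e. when lt

TBNZ is a negated form, so the condition code is inverted (ge to lt) and
the branch is rewritten to test NZCV directly; the CSINC becomes dead and
is left for dead code elimination:

    cmp   w0, w1
    b.lt  LBB0_2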