Index: llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
===================================================================
--- llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -252,9 +252,10 @@
   /// Emit a CSet for an integer compare.
   ///
-  /// \p DefReg is expected to be a 32-bit scalar register.
+  /// \p DefReg and \p SrcReg are expected to be 32-bit scalar registers.
   MachineInstr *emitCSetForICMP(Register DefReg, unsigned Pred,
-                                MachineIRBuilder &MIRBuilder) const;
+                                MachineIRBuilder &MIRBuilder,
+                                Register SrcReg = AArch64::WZR) const;
   /// Emit a CSet for a FP compare.
   ///
   /// \p Dst is expected to be a 32-bit scalar register.
@@ -2153,6 +2154,34 @@
     I.setDesc(TII.get(TargetOpcode::COPY));
     return true;
   }
+
+  case TargetOpcode::G_ADD: {
+    // Check if this is being fed by a G_ICMP on either side.
+    //
+    // (cmp pred, x, y) + z
+    //
+    // In the above case, when the cmp is true, we increment z by 1. So, we can
+    // fold the add into the cset for the cmp by using cinc.
+    //
+    // FIXME: This would probably be a lot nicer in PostLegalizerLowering.
+    Register X = I.getOperand(1).getReg();
+    Register CmpReg = I.getOperand(2).getReg();
+    MachineInstr *Cmp = getOpcodeDef(TargetOpcode::G_ICMP, CmpReg, MRI);
+    if (!Cmp) {
+      std::swap(X, CmpReg);
+      Cmp = getOpcodeDef(TargetOpcode::G_ICMP, CmpReg, MRI);
+      if (!Cmp)
+        return false;
+    }
+    MachineIRBuilder MIRBuilder(I);
+    auto Pred =
+        static_cast<CmpInst::Predicate>(Cmp->getOperand(1).getPredicate());
+    emitIntegerCompare(Cmp->getOperand(2), Cmp->getOperand(3),
+                       Cmp->getOperand(1), MIRBuilder);
+    emitCSetForICMP(I.getOperand(0).getReg(), Pred, MIRBuilder, X);
+    I.eraseFromParent();
+    return true;
+  }
   default:
     return false;
   }
@@ -4365,14 +4394,13 @@
 MachineInstr *
 AArch64InstructionSelector::emitCSetForICMP(Register DefReg, unsigned Pred,
-                                            MachineIRBuilder &MIRBuilder) const {
+                                            MachineIRBuilder &MIRBuilder,
+                                            Register SrcReg) const {
   // CSINC increments the result when the predicate is false. Invert it.
   const AArch64CC::CondCode InvCC = changeICMPPredToAArch64CC(
       CmpInst::getInversePredicate((CmpInst::Predicate)Pred));
-  auto I =
-      MIRBuilder
-          .buildInstr(AArch64::CSINCWr, {DefReg},
-                      {Register(AArch64::WZR), Register(AArch64::WZR)})
-          .addImm(InvCC);
+  auto I = MIRBuilder.buildInstr(AArch64::CSINCWr, {DefReg}, {SrcReg, SrcReg})
+               .addImm(InvCC);
   constrainSelectedInstRegOperands(*I, TII, TRI, RBI);
   return &*I;
 }
Index: llvm/test/CodeGen/AArch64/GlobalISel/select-cmp.mir
===================================================================
--- llvm/test/CodeGen/AArch64/GlobalISel/select-cmp.mir
+++ llvm/test/CodeGen/AArch64/GlobalISel/select-cmp.mir
@@ -270,3 +270,61 @@
     RET_ReallyLR implicit $w0
 
 ...
+---
+name:            cmp_add_rhs
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $w2
+
+    ; The CSINC should use the add's RHS.
+
+    ; CHECK-LABEL: name: cmp_add_rhs
+    ; CHECK: liveins: $w0, $w1, $w2
+    ; CHECK: %cmp_lhs:gpr32 = COPY $w0
+    ; CHECK: %cmp_rhs:gpr32 = COPY $w1
+    ; CHECK: %add_rhs:gpr32 = COPY $w2
+    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+    ; CHECK: %add:gpr32 = CSINCWr %add_rhs, %add_rhs, 1, implicit $nzcv
+    ; CHECK: $w0 = COPY %add
+    ; CHECK: RET_ReallyLR implicit $w0
+    %cmp_lhs:gpr(s32) = COPY $w0
+    %cmp_rhs:gpr(s32) = COPY $w1
+    %add_rhs:gpr(s32) = COPY $w2
+    %cmp:gpr(s32) = G_ICMP intpred(eq), %cmp_lhs(s32), %cmp_rhs
+    %add:gpr(s32) = G_ADD %cmp, %add_rhs
+    $w0 = COPY %add(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            cmp_add_lhs
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $w2
+
+    ; The CSINC should use the add's LHS.
+
+    ; CHECK-LABEL: name: cmp_add_lhs
+    ; CHECK: liveins: $w0, $w1, $w2
+    ; CHECK: %cmp_lhs:gpr32 = COPY $w0
+    ; CHECK: %cmp_rhs:gpr32 = COPY $w1
+    ; CHECK: %add_lhs:gpr32 = COPY $w2
+    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+    ; CHECK: %add:gpr32 = CSINCWr %add_lhs, %add_lhs, 1, implicit $nzcv
+    ; CHECK: $w0 = COPY %add
+    ; CHECK: RET_ReallyLR implicit $w0
+    %cmp_lhs:gpr(s32) = COPY $w0
+    %cmp_rhs:gpr(s32) = COPY $w1
+    %add_lhs:gpr(s32) = COPY $w2
+    %cmp:gpr(s32) = G_ICMP intpred(eq), %cmp_lhs(s32), %cmp_rhs
+    %add:gpr(s32) = G_ADD %add_lhs, %cmp
+    $w0 = COPY %add(s32)
+    RET_ReallyLR implicit $w0
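
For illustration, this is the source-level pattern the new G_ADD case targets. The example below is not part of the patch: the function name and scratch register are hypothetical, and the before/after assembly is inferred from the change and the tests above rather than captured from a compiler run.

  int add_of_cmp(int x, int y, int z) {
    return (x == y) + z;  // boolean compare result added to an integer
  }

  // Before: the compare is materialized with CSET, then added.
  //   cmp  w0, w1
  //   cset w8, eq
  //   add  w0, w8, w2
  //
  // After: the add is folded into a conditional increment of z.
  //   cmp  w0, w1
  //   cinc w0, w2, eq
  //
  // CINC Wd, Wn, cc is an alias of CSINC Wd, Wn, Wn, invert(cc), and CSINC
  // yields Wn when the condition holds and Wm + 1 otherwise. That is why
  // emitCSetForICMP inverts the predicate and passes SrcReg for both source
  // operands.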