diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp --- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp @@ -225,6 +225,9 @@ MachineIRBuilder &MIRBuilder) const; MachineInstr *emitTST(MachineOperand &LHS, MachineOperand &RHS, MachineIRBuilder &MIRBuilder) const; + MachineInstr *emitSelect(Register Dst, Register LHS, Register RHS, + AArch64CC::CondCode CC, + MachineIRBuilder &MIRBuilder) const; MachineInstr *emitExtractVectorElt(Optional DstReg, const RegisterBank &DstRB, LLT ScalarTy, Register VecReg, unsigned LaneIdx, @@ -983,17 +986,107 @@ return GenericOpc; } -static unsigned selectSelectOpc(MachineInstr &I, MachineRegisterInfo &MRI, - const RegisterBankInfo &RBI) { - const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); - bool IsFP = (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() != - AArch64::GPRRegBankID); - LLT Ty = MRI.getType(I.getOperand(0).getReg()); - if (Ty == LLT::scalar(32)) - return IsFP ? AArch64::FCSELSrrr : AArch64::CSELWr; - else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) - return IsFP ? AArch64::FCSELDrrr : AArch64::CSELXr; - return 0; +MachineInstr * +AArch64InstructionSelector::emitSelect(Register Dst, Register True, + Register False, AArch64CC::CondCode CC, + MachineIRBuilder &MIB) const { + MachineRegisterInfo &MRI = *MIB.getMRI(); + assert(RBI.getRegBank(False, MRI, TRI)->getID() == + RBI.getRegBank(True, MRI, TRI)->getID() && + "Expected both select operands to have the same regbank?"); + LLT Ty = MRI.getType(True); + if (Ty.isVector()) + return nullptr; + const unsigned Size = Ty.getSizeInBits(); + assert((Size == 32 || Size == 64) && + "Expected 32 bit or 64 bit select only?"); + const bool Is32Bit = Size == 32; + if (RBI.getRegBank(True, MRI, TRI)->getID() != AArch64::GPRRegBankID) { + unsigned Opc = Is32Bit ? 
AArch64::FCSELSrrr : AArch64::FCSELDrrr; + auto FCSel = MIB.buildInstr(Opc, {Dst}, {True, False}).addImm(CC); + constrainSelectedInstRegOperands(*FCSel, TII, TRI, RBI); + return &*FCSel; + } + + // By default, we'll try and emit a CSEL. + unsigned Opc = Is32Bit ? AArch64::CSELWr : AArch64::CSELXr; + + // Helper lambda which tries to use CSINC/CSINV for the instruction when its + // true/false values are constants. + // FIXME: All of these patterns already exist in tablegen. We should be + // able to import these. + auto TryOptSelectCst = [&Opc, &True, &False, &CC, Is32Bit, &MRI]() { + auto TrueCst = getConstantVRegValWithLookThrough(True, MRI); + auto FalseCst = getConstantVRegValWithLookThrough(False, MRI); + if (!TrueCst && !FalseCst) + return false; + + Register ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR; + if (TrueCst && FalseCst) { + auto T = TrueCst->Value; + auto F = FalseCst->Value; + + if (T == 0 && F == 1) { + // G_SELECT cc, 0, 1 -> CSINC zreg, zreg, cc + Opc = Is32Bit ? AArch64::CSINCWr : AArch64::CSINCXr; + True = ZReg; + False = ZReg; + return true; + } + + if (T == 0 && F == -1) { + // G_SELECT cc 0, -1 -> CSINV zreg, zreg cc + Opc = Is32Bit ? AArch64::CSINVWr : AArch64::CSINVXr; + True = ZReg; + False = ZReg; + return true; + } + } + + if (TrueCst) { + auto T = TrueCst->Value; + if (T == 1) { + // G_SELECT cc, 1, f -> CSINC f, zreg, inv_cc + Opc = Is32Bit ? AArch64::CSINCWr : AArch64::CSINCXr; + True = False; + False = ZReg; + CC = AArch64CC::getInvertedCondCode(CC); + return true; + } + + if (T == -1) { + // G_SELECT cc, -1, f -> CSINV f, zreg, inv_cc + Opc = Is32Bit ? AArch64::CSINVWr : AArch64::CSINVXr; + True = False; + False = ZReg; + CC = AArch64CC::getInvertedCondCode(CC); + return true; + } + } + + if (FalseCst) { + auto F = FalseCst->Value; + if (F == 1) { + // G_SELECT cc, t, 1 -> CSINC t, zreg, cc + Opc = Is32Bit ? 
AArch64::CSINCWr : AArch64::CSINCXr; + False = ZReg; + return true; + } + + if (F == -1) { + // G_SELECT cc, t, -1 -> CSINV t, zreg, cc + Opc = Is32Bit ? AArch64::CSINVWr : AArch64::CSINVXr; + False = ZReg; + return true; + } + } + return false; + }; + + TryOptSelectCst(); + auto SelectInst = MIB.buildInstr(Opc, {Dst}, {True, False}).addImm(CC); + constrainSelectedInstRegOperands(*SelectInst, TII, TRI, RBI); + return &*SelectInst; } /// Returns true if \p P is an unsigned integer comparison predicate. @@ -2831,25 +2924,15 @@ if (tryOptSelect(I)) return true; - Register CSelOpc = selectSelectOpc(I, MRI, RBI); // Make sure to use an unused vreg instead of wzr, so that the peephole // optimizations will be able to optimize these. + MachineIRBuilder MIB(I); Register DeadVReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass); - MachineInstr &TstMI = - *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri)) - .addDef(DeadVReg) - .addUse(CondReg) - .addImm(AArch64_AM::encodeLogicalImmediate(1, 32)); - - MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc)) - .addDef(I.getOperand(0).getReg()) - .addUse(TReg) - .addUse(FReg) - .addImm(AArch64CC::NE); - - constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI); - constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI); - + auto TstMI = MIB.buildInstr(AArch64::ANDSWri, {DeadVReg}, {CondReg}) + .addImm(AArch64_AM::encodeLogicalImmediate(1, 32)); + constrainSelectedInstRegOperands(*TstMI, TII, TRI, RBI); + if (!emitSelect(I.getOperand(0).getReg(), TReg, FReg, AArch64CC::NE, MIB)) + return false; I.eraseFromParent(); return true; } @@ -4132,9 +4215,6 @@ bool AArch64InstructionSelector::tryOptSelect(MachineInstr &I) const { MachineIRBuilder MIB(I); MachineRegisterInfo &MRI = *MIB.getMRI(); - const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); - Register SrcReg1 = I.getOperand(2).getReg(); - Register SrcReg2 = I.getOperand(3).getReg(); // We want to recognize this pattern: // // $z = G_FCMP 
pred, $x, $y @@ -4218,37 +4298,8 @@ } // Emit the select. - // We may also be able to emit a CSINC if the RHS operand is a 1. - const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg1, MRI, TRI); - auto ValAndVReg = - getConstantVRegValWithLookThrough(SrcReg2, MRI); - - if (SrcRB.getID() == AArch64::GPRRegBankID && ValAndVReg && - ValAndVReg->Value == 1) { - unsigned Size = MRI.getType(SrcReg1).getSizeInBits(); - unsigned Opc = 0; - Register Zero; - if (Size == 64) { - Opc = AArch64::CSINCXr; - Zero = AArch64::XZR; - } else { - Opc = AArch64::CSINCWr; - Zero = AArch64::WZR; - } - auto CSINC = - MIB.buildInstr(Opc, {I.getOperand(0).getReg()}, {SrcReg1, Zero}) - .addImm(CondCode); - constrainSelectedInstRegOperands(*CSINC, TII, TRI, RBI); - I.eraseFromParent(); - return true; - } - - unsigned CSelOpc = selectSelectOpc(I, MRI, RBI); - auto CSel = - MIB.buildInstr(CSelOpc, {I.getOperand(0).getReg()}, - {I.getOperand(2).getReg(), I.getOperand(3).getReg()}) - .addImm(CondCode); - constrainSelectedInstRegOperands(*CSel, TII, TRI, RBI); + emitSelect(I.getOperand(0).getReg(), I.getOperand(2).getReg(), + I.getOperand(3).getReg(), CondCode, MIB); I.eraseFromParent(); return true; } diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir --- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir @@ -45,10 +45,9 @@ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: $wzr = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY2]], 1, implicit $nzcv - ; CHECK: $w0 = COPY [[CSELWr]] + ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 0, implicit $nzcv + ; CHECK: $w0 = COPY [[CSINCWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = 
COPY $w0 %1:gpr(s32) = COPY $w1 @@ -77,10 +76,9 @@ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: $wzr = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY2]], 1, implicit $nzcv - ; CHECK: $w0 = COPY [[CSELWr]] + ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 0, implicit $nzcv + ; CHECK: $w0 = COPY [[CSINCWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 %1:gpr(s32) = COPY $w1 @@ -109,11 +107,10 @@ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY2]], [[COPY1]], implicit-def $nzcv ; CHECK: [[SUBSWrr1:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[SUBSWrr]], implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY2]], 11, implicit $nzcv - ; CHECK: $w0 = COPY [[CSELWr]] + ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 10, implicit $nzcv + ; CHECK: $w0 = COPY [[CSINCWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 %1:gpr(s32) = COPY $w1 @@ -142,11 +139,10 @@ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY2]], [[COPY]], implicit-def $nzcv ; CHECK: [[SUBSWrr1:%[0-9]+]]:gpr32 = SUBSWrr [[SUBSWrr]], [[COPY1]], implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY2]], 11, implicit $nzcv - ; CHECK: $w0 = COPY [[CSELWr]] + ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 10, implicit $nzcv + ; CHECK: $w0 = COPY [[CSINCWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 
%1:gpr(s32) = COPY $w1 @@ -175,11 +171,9 @@ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 ; CHECK: $xzr = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv - ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[SUBREG_TO_REG]], [[COPY2]], 1, implicit $nzcv - ; CHECK: $x0 = COPY [[CSELXr]] + ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 0, implicit $nzcv + ; CHECK: $x0 = COPY [[CSINCXr]] ; CHECK: RET_ReallyLR implicit $x0 %0:gpr(s64) = COPY $x0 %1:gpr(s64) = COPY $x1 @@ -208,11 +202,9 @@ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 ; CHECK: $xzr = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv - ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[SUBREG_TO_REG]], [[COPY2]], 1, implicit $nzcv - ; CHECK: $x0 = COPY [[CSELXr]] + ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 0, implicit $nzcv + ; CHECK: $x0 = COPY [[CSINCXr]] ; CHECK: RET_ReallyLR implicit $x0 %0:gpr(s64) = COPY $x0 %1:gpr(s64) = COPY $x1 @@ -241,12 +233,10 @@ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY1]], implicit-def $nzcv ; CHECK: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[SUBSXrr]], implicit-def $nzcv - ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[SUBREG_TO_REG]], [[COPY2]], 11, implicit $nzcv - ; CHECK: $x0 = COPY [[CSELXr]] + ; 
CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 10, implicit $nzcv + ; CHECK: $x0 = COPY [[CSINCXr]] ; CHECK: RET_ReallyLR implicit $x0 %0:gpr(s64) = COPY $x0 %1:gpr(s64) = COPY $x1 @@ -275,12 +265,10 @@ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY]], implicit-def $nzcv ; CHECK: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[SUBSXrr]], [[COPY1]], implicit-def $nzcv - ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[SUBREG_TO_REG]], [[COPY2]], 11, implicit $nzcv - ; CHECK: $x0 = COPY [[CSELXr]] + ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 10, implicit $nzcv + ; CHECK: $x0 = COPY [[CSINCXr]] ; CHECK: RET_ReallyLR implicit $x0 %0:gpr(s64) = COPY $x0 %1:gpr(s64) = COPY $x1 @@ -307,10 +295,9 @@ ; CHECK: liveins: $w0, $w1 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[ANDSWrr:%[0-9]+]]:gpr32 = ANDSWrr [[COPY1]], [[COPY]], implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY1]], 0, implicit $nzcv - ; CHECK: $w0 = COPY [[CSELWr]] + ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 1, implicit $nzcv + ; CHECK: $w0 = COPY [[CSINCWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 %1:gpr(s32) = COPY $w1 @@ -338,11 +325,9 @@ ; CHECK: liveins: $x0, $x1 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x1 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $xzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 ; CHECK: [[ANDSXrr:%[0-9]+]]:gpr64 = ANDSXrr [[COPY1]], [[COPY]], implicit-def $nzcv - ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr 
[[SUBREG_TO_REG]], [[COPY1]], 0, implicit $nzcv - ; CHECK: $x0 = COPY [[CSELXr]] + ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY1]], $xzr, 1, implicit $nzcv + ; CHECK: $x0 = COPY [[CSINCXr]] ; CHECK: RET_ReallyLR implicit $x0 %0:gpr(s64) = COPY $x0 %1:gpr(s64) = COPY $x1 @@ -370,11 +355,10 @@ ; CHECK: liveins: $w0, $w1 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32common = ANDWrr [[COPY1]], [[COPY]] ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[ANDWrr]], 0, 0, implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY1]], 8, implicit $nzcv - ; CHECK: $w0 = COPY [[CSELWr]] + ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 9, implicit $nzcv + ; CHECK: $w0 = COPY [[CSINCWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 %1:gpr(s32) = COPY $w1 @@ -402,11 +386,10 @@ ; CHECK: liveins: $w0, $w1 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32common = ANDWrr [[COPY1]], [[COPY]] ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[ANDWrr]], 42, 0, implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY1]], 8, implicit $nzcv - ; CHECK: $w0 = COPY [[CSELWr]] + ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 9, implicit $nzcv + ; CHECK: $w0 = COPY [[CSINCWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 %1:gpr(s32) = COPY $w1 @@ -527,10 +510,8 @@ ; CHECK: liveins: $x0, $x1 ; CHECK: %copy:gpr64 = COPY $x1 ; CHECK: %zero:gpr64 = COPY $xzr - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: %one:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 ; CHECK: [[ANDSXrs:%[0-9]+]]:gpr64 = ANDSXrs %zero, %copy, 16, implicit-def $nzcv - ; CHECK: %select:gpr64 = CSELXr %one, %zero, 0, implicit $nzcv + ; CHECK: 
%select:gpr64 = CSINCXr %zero, $xzr, 1, implicit $nzcv ; CHECK: $x0 = COPY %select ; CHECK: RET_ReallyLR implicit $x0 %copy:gpr(s64) = COPY $x1 @@ -561,9 +542,8 @@ ; CHECK: liveins: $w0, $w1 ; CHECK: %copy:gpr32 = COPY $w1 ; CHECK: %zero:gpr32 = COPY $wzr - ; CHECK: %one:gpr32 = MOVi32imm 1 ; CHECK: [[ANDSWrs:%[0-9]+]]:gpr32 = ANDSWrs %zero, %copy, 16, implicit-def $nzcv - ; CHECK: %select:gpr32 = CSELWr %one, %zero, 0, implicit $nzcv + ; CHECK: %select:gpr32 = CSINCWr %zero, $wzr, 1, implicit $nzcv ; CHECK: $w0 = COPY %select ; CHECK: RET_ReallyLR implicit $w0 %copy:gpr(s32) = COPY $w1 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-select.mir --- a/llvm/test/CodeGen/AArch64/GlobalISel/select-select.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-select.mir @@ -64,3 +64,284 @@ %4:fpr(s64) = G_SELECT %5(s1), %1, %2 $d0 = COPY %4(s64) RET_ReallyLR implicit $d0 +... +--- +name: csel +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0, $w1, $w2, $w3 + ; CHECK-LABEL: name: csel + ; CHECK: liveins: $w0, $w1, $w2, $w3 + ; CHECK: %reg0:gpr32 = COPY $w0 + ; CHECK: %reg1:gpr32 = COPY $w1 + ; CHECK: %t:gpr32 = COPY $w2 + ; CHECK: %f:gpr32 = COPY $w3 + ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %reg0, %reg1, implicit-def $nzcv + ; CHECK: %select:gpr32 = CSELWr %t, %f, 1, implicit $nzcv + ; CHECK: $w0 = COPY %select + ; CHECK: RET_ReallyLR implicit $w0 + %reg0:gpr(s32) = COPY $w0 + %reg1:gpr(s32) = COPY $w1 + %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1 + %cond:gpr(s1) = G_TRUNC %cmp(s32) + %t:gpr(s32) = COPY $w2 + %f:gpr(s32) = COPY $w3 + %select:gpr(s32) = G_SELECT %cond(s1), %t, %f + $w0 = COPY %select(s32) + RET_ReallyLR implicit $w0 +... 
+--- +name: csinc_t_0_f_1 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0, $w1 + ; G_SELECT cc, 0, 1 -> CSINC zreg, zreg, cc + + ; CHECK-LABEL: name: csinc_t_0_f_1 + ; CHECK: liveins: $w0, $w1 + ; CHECK: %reg0:gpr32 = COPY $w0 + ; CHECK: %reg1:gpr32 = COPY $w1 + ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %reg0, %reg1, implicit-def $nzcv + ; CHECK: %select:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv + ; CHECK: $w0 = COPY %select + ; CHECK: RET_ReallyLR implicit $w0 + %reg0:gpr(s32) = COPY $w0 + %reg1:gpr(s32) = COPY $w1 + %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1 + %cond:gpr(s1) = G_TRUNC %cmp(s32) + %t:gpr(s32) = G_CONSTANT i32 0 + %f:gpr(s32) = G_CONSTANT i32 1 + %select:gpr(s32) = G_SELECT %cond(s1), %t, %f + $w0 = COPY %select(s32) + RET_ReallyLR implicit $w0 +... +--- +name: csinv_t_0_f_neg_1 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0, $w1 + ; G_SELECT cc 0, -1 -> CSINV zreg, zreg cc + + ; CHECK-LABEL: name: csinv_t_0_f_neg_1 + ; CHECK: liveins: $w0, $w1 + ; CHECK: %reg0:gpr32 = COPY $w0 + ; CHECK: %reg1:gpr32 = COPY $w1 + ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %reg0, %reg1, implicit-def $nzcv + ; CHECK: %select:gpr32 = CSINVWr $wzr, $wzr, 1, implicit $nzcv + ; CHECK: $w0 = COPY %select + ; CHECK: RET_ReallyLR implicit $w0 + %reg0:gpr(s32) = COPY $w0 + %reg1:gpr(s32) = COPY $w1 + %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1 + %cond:gpr(s1) = G_TRUNC %cmp(s32) + %t:gpr(s32) = G_CONSTANT i32 0 + %f:gpr(s32) = G_CONSTANT i32 -1 + %select:gpr(s32) = G_SELECT %cond(s1), %t, %f + $w0 = COPY %select(s32) + RET_ReallyLR implicit $w0 +... 
+--- +name: csinc_t_1 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0, $w1, $w2 + ; G_SELECT cc, 1, f -> CSINC f, zreg, inv_cc + + ; CHECK-LABEL: name: csinc_t_1 + ; CHECK: liveins: $w0, $w1, $w2 + ; CHECK: %reg0:gpr32 = COPY $w0 + ; CHECK: %reg1:gpr32 = COPY $w1 + ; CHECK: %f:gpr32 = COPY $w2 + ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %reg0, %reg1, implicit-def $nzcv + ; CHECK: %select:gpr32 = CSINCWr %f, $wzr, 0, implicit $nzcv + ; CHECK: $w0 = COPY %select + ; CHECK: RET_ReallyLR implicit $w0 + %reg0:gpr(s32) = COPY $w0 + %reg1:gpr(s32) = COPY $w1 + %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1 + %cond:gpr(s1) = G_TRUNC %cmp(s32) + %t:gpr(s32) = G_CONSTANT i32 1 + %f:gpr(s32) = COPY $w2 + %select:gpr(s32) = G_SELECT %cond(s1), %t, %f + $w0 = COPY %select(s32) + RET_ReallyLR implicit $w0 +... +--- +name: csinv_t_neg_1 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0, $w1, $w2 + ; G_SELECT cc, -1, f -> CSINV f, zreg, inv_cc + + ; CHECK-LABEL: name: csinv_t_neg_1 + ; CHECK: liveins: $w0, $w1, $w2 + ; CHECK: %reg0:gpr32 = COPY $w0 + ; CHECK: %reg1:gpr32 = COPY $w1 + ; CHECK: %f:gpr32 = COPY $w2 + ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %reg0, %reg1, implicit-def $nzcv + ; CHECK: %select:gpr32 = CSINVWr %f, $wzr, 0, implicit $nzcv + ; CHECK: $w0 = COPY %select + ; CHECK: RET_ReallyLR implicit $w0 + %reg0:gpr(s32) = COPY $w0 + %reg1:gpr(s32) = COPY $w1 + %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1 + %cond:gpr(s1) = G_TRUNC %cmp(s32) + %t:gpr(s32) = G_CONSTANT i32 -1 + %f:gpr(s32) = COPY $w2 + %select:gpr(s32) = G_SELECT %cond(s1), %t, %f + $w0 = COPY %select(s32) + RET_ReallyLR implicit $w0 +... 
+--- +name: csinc_f_1 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0, $w1, $w2 + ; G_SELECT cc, t, 1 -> CSINC t, zreg, cc + + ; CHECK-LABEL: name: csinc_f_1 + ; CHECK: liveins: $w0, $w1, $w2 + ; CHECK: %reg0:gpr32 = COPY $w0 + ; CHECK: %reg1:gpr32 = COPY $w1 + ; CHECK: %t:gpr32 = COPY $w2 + ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %reg0, %reg1, implicit-def $nzcv + ; CHECK: %select:gpr32 = CSINCWr %t, $wzr, 1, implicit $nzcv + ; CHECK: $w0 = COPY %select + ; CHECK: RET_ReallyLR implicit $w0 + %reg0:gpr(s32) = COPY $w0 + %reg1:gpr(s32) = COPY $w1 + %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1 + %cond:gpr(s1) = G_TRUNC %cmp(s32) + %t:gpr(s32) = COPY $w2 + %f:gpr(s32) = G_CONSTANT i32 1 + %select:gpr(s32) = G_SELECT %cond(s1), %t, %f + $w0 = COPY %select(s32) + RET_ReallyLR implicit $w0 +... +--- +name: csinc_f_neg_1 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0, $w1, $w2 + ; G_SELECT cc, t, -1 -> CSINV t, zreg, cc + + ; CHECK-LABEL: name: csinc_f_neg_1 + ; CHECK: liveins: $w0, $w1, $w2 + ; CHECK: %reg0:gpr32 = COPY $w0 + ; CHECK: %reg1:gpr32 = COPY $w1 + ; CHECK: %t:gpr32 = COPY $w2 + ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %reg0, %reg1, implicit-def $nzcv + ; CHECK: %select:gpr32 = CSINVWr %t, $wzr, 1, implicit $nzcv + ; CHECK: $w0 = COPY %select + ; CHECK: RET_ReallyLR implicit $w0 + %reg0:gpr(s32) = COPY $w0 + %reg1:gpr(s32) = COPY $w1 + %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1 + %cond:gpr(s1) = G_TRUNC %cmp(s32) + %t:gpr(s32) = COPY $w2 + %f:gpr(s32) = G_CONSTANT i32 -1 + %select:gpr(s32) = G_SELECT %cond(s1), %t, %f + $w0 = COPY %select(s32) + RET_ReallyLR implicit $w0 +... 
+--- +name: csinc_t_1_no_cmp +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0, $w1 + ; G_SELECT cc, 1, f -> CSINC f, zreg, inv_cc + + ; CHECK-LABEL: name: csinc_t_1_no_cmp + ; CHECK: liveins: $w0, $w1 + ; CHECK: %reg0:gpr32 = COPY $w0 + ; CHECK: %f:gpr32 = COPY $w1 + ; CHECK: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri %reg0, 0, implicit-def $nzcv + ; CHECK: %select:gpr32 = CSINCWr %f, $wzr, 0, implicit $nzcv + ; CHECK: $w0 = COPY %select + ; CHECK: RET_ReallyLR implicit $w0 + %reg0:gpr(s32) = COPY $w0 + %cond:gpr(s1) = G_TRUNC %reg0(s32) + %t:gpr(s32) = G_CONSTANT i32 1 + %f:gpr(s32) = COPY $w1 + %select:gpr(s32) = G_SELECT %cond(s1), %t, %f + $w0 = COPY %select(s32) + RET_ReallyLR implicit $w0 + +... +--- +name: csinc_f_1_no_cmp +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0, $w1 + ; G_SELECT cc, t, 1 -> CSINC t, zreg, cc + + ; CHECK-LABEL: name: csinc_f_1_no_cmp + ; CHECK: liveins: $w0, $w1 + ; CHECK: %reg0:gpr32 = COPY $w0 + ; CHECK: %t:gpr32 = COPY $w1 + ; CHECK: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri %reg0, 0, implicit-def $nzcv + ; CHECK: %select:gpr32 = CSINCWr %t, $wzr, 1, implicit $nzcv + ; CHECK: $w0 = COPY %select + ; CHECK: RET_ReallyLR implicit $w0 + %reg0:gpr(s32) = COPY $w0 + %cond:gpr(s1) = G_TRUNC %reg0(s32) + %t:gpr(s32) = COPY $w1 + %f:gpr(s32) = G_CONSTANT i32 1 + %select:gpr(s32) = G_SELECT %cond(s1), %t, %f + $w0 = COPY %select(s32) + RET_ReallyLR implicit $w0 + +... 
+--- +name: csinc_t_1_no_cmp_s64 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $x1 + ; G_SELECT cc, 1, f -> CSINC f, zreg, inv_cc + + ; CHECK-LABEL: name: csinc_t_1_no_cmp_s64 + ; CHECK: liveins: $x0, $x1 + ; CHECK: %reg0:gpr64 = COPY $x0 + ; CHECK: %cond:gpr32 = COPY %reg0.sub_32 + ; CHECK: %f:gpr64 = COPY $x1 + ; CHECK: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri %cond, 0, implicit-def $nzcv + ; CHECK: %select:gpr64 = CSINCXr %f, $xzr, 0, implicit $nzcv + ; CHECK: $x0 = COPY %select + ; CHECK: RET_ReallyLR implicit $x0 + %reg0:gpr(s64) = COPY $x0 + %cond:gpr(s1) = G_TRUNC %reg0(s64) + %t:gpr(s64) = G_CONSTANT i64 1 + %f:gpr(s64) = COPY $x1 + %select:gpr(s64) = G_SELECT %cond(s1), %t, %f + $x0 = COPY %select(s64) + RET_ReallyLR implicit $x0