Index: llvm/lib/Target/AArch64/AArch64Combine.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64Combine.td
+++ llvm/lib/Target/AArch64/AArch64Combine.td
@@ -83,12 +83,24 @@
   (apply [{ applyVAshrLshrImm(*${root}, MRI, ${matchinfo}); }])
 >;
 
+def adjust_icmp_imm_matchdata :
+  GIDefMatchData<"std::pair<uint64_t, CmpInst::Predicate>">;
+def adjust_icmp_imm : GICombineRule <
+  (defs root:$root, adjust_icmp_imm_matchdata:$matchinfo),
+  (match (wip_match_opcode G_ICMP):$root,
+         [{ return matchAdjustICmpImmAndPred(*${root}, MRI, ${matchinfo}); }]),
+  (apply [{ applyAdjustICmpImmAndPred(*${root}, ${matchinfo}, B, Observer); }])
+>;
+
+def icmp_lowering : GICombineGroup<[adjust_icmp_imm]>;
+
 // Post-legalization combines which should happen at all optimization levels.
 // (E.g. ones that facilitate matching for the selector) For example, matching
 // pseudos.
 def AArch64PostLegalizerLoweringHelper
     : GICombinerHelper<"AArch64GenPostLegalizerLoweringHelper",
-                       [shuffle_vector_pseudos, vashr_vlshr_imm]> {
+                       [shuffle_vector_pseudos, vashr_vlshr_imm,
+                        icmp_lowering]> {
   let DisableRuleOption = "aarch64postlegalizerlowering-disable-rule";
 }
Index: llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h
===================================================================
--- /dev/null
+++ llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h
@@ -0,0 +1,29 @@
+//===- AArch64GlobalISelUtils ------------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file APIs for AArch64-specific helper functions used in the GlobalISel
+/// pipeline.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_GISEL_AARCH64GLOBALISELUTILS_H
+#define LLVM_LIB_TARGET_AARCH64_GISEL_AARCH64GLOBALISELUTILS_H
+
+#include <cstdint>
+
+namespace llvm {
+namespace AArch64GISelUtils {
+
+/// \returns true if \p C is a legal immediate operand for an arithmetic
+/// instruction.
+constexpr bool isLegalArithImmed(const uint64_t C) {
+  return (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
+}
+
+} // namespace AArch64GISelUtils
+} // namespace llvm
+
+#endif
Index: llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
===================================================================
--- llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -165,14 +165,9 @@
                                MachineIRBuilder &MIRBuilder) const;
 
   // Emit an integer compare between LHS and RHS, which checks for Predicate.
-  //
-  // This returns the produced compare instruction, and the predicate which
-  // was ultimately used in the compare. The predicate may differ from what
-  // is passed in \p Predicate due to optimization.
-  std::pair<MachineInstr *, CmpInst::Predicate>
-  emitIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
-                     MachineOperand &Predicate,
-                     MachineIRBuilder &MIRBuilder) const;
+  MachineInstr *emitIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
+                                   MachineOperand &Predicate,
+                                   MachineIRBuilder &MIRBuilder) const;
 
   /// Emit a floating point comparison between \p LHS and \p RHS.
   MachineInstr *emitFPCompare(Register LHS, Register RHS,
@@ -229,6 +224,8 @@
                               MachineIRBuilder &MIRBuilder) const;
   MachineInstr *emitADDS(Register Dst, MachineOperand &LHS, MachineOperand &RHS,
                          MachineIRBuilder &MIRBuilder) const;
+  MachineInstr *emitSUBS(Register Dst, MachineOperand &LHS, MachineOperand &RHS,
+                         MachineIRBuilder &MIRBuilder) const;
   MachineInstr *emitCMN(MachineOperand &LHS, MachineOperand &RHS,
                         MachineIRBuilder &MIRBuilder) const;
   MachineInstr *emitTST(MachineOperand &LHS, MachineOperand &RHS,
@@ -382,13 +379,6 @@
   MachineInstr *tryFoldIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
                                       MachineOperand &Predicate,
                                       MachineIRBuilder &MIRBuilder) const;
-  MachineInstr *tryOptArithImmedIntegerCompare(MachineOperand &LHS,
-                                               MachineOperand &RHS,
-                                               CmpInst::Predicate &Predicate,
-                                               MachineIRBuilder &MIB) const;
-  MachineInstr *tryOptArithShiftedCompare(MachineOperand &LHS,
-                                          MachineOperand &RHS,
-                                          MachineIRBuilder &MIB) const;
 
   /// Return true if \p MI is a load or store of \p NumBytes bytes.
   bool isLoadStoreOfNumBytes(const MachineInstr &MI, unsigned NumBytes) const;
@@ -1385,9 +1375,10 @@
 
     if (!VRegAndVal || VRegAndVal->Value != 0) {
       // If we can't select a CBZ then emit a cmp + Bcc.
-      MachineInstr *Cmp;
-      std::tie(Cmp, Pred) = emitIntegerCompare(
-          CCMI->getOperand(2), CCMI->getOperand(3), CCMI->getOperand(1), MIB);
+      auto Pred =
+          static_cast<CmpInst::Predicate>(CCMI->getOperand(1).getPredicate());
+      emitIntegerCompare(CCMI->getOperand(2), CCMI->getOperand(3),
+                         CCMI->getOperand(1), MIB);
       const AArch64CC::CondCode CC = changeICMPPredToAArch64CC(Pred);
       MIB.buildInstr(AArch64::Bcc, {}, {}).addImm(CC).addMBB(DestMBB);
       I.eraseFromParent();
@@ -2867,9 +2858,8 @@
     }
 
     MachineIRBuilder MIRBuilder(I);
-    MachineInstr *Cmp;
-    CmpInst::Predicate Pred;
-    std::tie(Cmp, Pred) = emitIntegerCompare(I.getOperand(2), I.getOperand(3),
-                                             I.getOperand(1), MIRBuilder);
+    auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
+    MachineInstr *Cmp = emitIntegerCompare(I.getOperand(2), I.getOperand(3),
+                                           I.getOperand(1), MIRBuilder);
     emitCSetForICMP(I.getOperand(0).getReg(), Pred, MIRBuilder);
     I.eraseFromParent();
@@ -3845,7 +3835,7 @@
   MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
   assert(LHS.isReg() && RHS.isReg() && "Expected register operands?");
   auto Ty = MRI.getType(LHS.getReg());
-  assert(Ty.isScalar() && "Expected a scalar?");
+  assert(!Ty.isVector() && "Expected a scalar or pointer?");
   unsigned Size = Ty.getSizeInBits();
   assert((Size == 32 || Size == 64) && "Expected a 32-bit or 64-bit type only");
   bool Is32Bit = Size == 32;
@@ -3881,6 +3871,17 @@
   return emitBinOp(OpcTable, Dst, LHS, RHS, MIRBuilder);
 }
 
+MachineInstr *
+AArch64InstructionSelector::emitSUBS(Register Dst, MachineOperand &LHS,
+                                     MachineOperand &RHS,
+                                     MachineIRBuilder &MIRBuilder) const {
+  const std::array<std::array<unsigned, 2>, 3> OpcTable{
+      {{AArch64::SUBSXri, AArch64::SUBSWri},
+       {AArch64::SUBSXrs, AArch64::SUBSWrs},
+       {AArch64::SUBSXrr, AArch64::SUBSWrr}}};
+  return emitBinOp(OpcTable, Dst, LHS, RHS, MIRBuilder);
+}
+
 MachineInstr *
 AArch64InstructionSelector::emitCMN(MachineOperand &LHS, MachineOperand &RHS,
                                     MachineIRBuilder &MIRBuilder) const {
@@ -3917,8 +3918,7 @@
   return emitInstr(OpcTable[2][Is32Bit], {Ty}, {LHS, RHS}, MIRBuilder);
 }
 
-std::pair<MachineInstr *, CmpInst::Predicate>
-AArch64InstructionSelector::emitIntegerCompare(
+MachineInstr *AArch64InstructionSelector::emitIntegerCompare(
     MachineOperand &LHS, MachineOperand &RHS, MachineOperand &Predicate,
     MachineIRBuilder &MIRBuilder) const {
   assert(LHS.isReg() && RHS.isReg() && "Expected LHS and RHS to be registers!");
@@ -3928,24 +3928,11 @@
   assert(!CmpTy.isVector() && "Expected scalar or pointer");
   unsigned Size = CmpTy.getSizeInBits();
   assert((Size == 32 || Size == 64) && "Expected a 32-bit or 64-bit LHS/RHS?");
-  auto P = static_cast<CmpInst::Predicate>(Predicate.getPredicate());
-
   // Fold the compare into a cmn or tst if possible.
   if (auto FoldCmp = tryFoldIntegerCompare(LHS, RHS, Predicate, MIRBuilder))
-    return {FoldCmp, P};
-
-  // Compares need special handling for their shifted/immediate forms. We
-  // may be able to modify the predicate or an illegal constant to perform
-  // some folding.
-  if (auto ImmedCmp = tryOptArithImmedIntegerCompare(LHS, RHS, P, MIRBuilder))
-    return {ImmedCmp, P};
-  if (auto ShiftedCmp = tryOptArithShiftedCompare(LHS, RHS, MIRBuilder))
-    return {ShiftedCmp, P};
-  unsigned CmpOpc = Size == 32 ? AArch64::SUBSWrr : AArch64::SUBSXrr;
-  auto CmpMI = MIRBuilder.buildInstr(
-      CmpOpc, {MRI.cloneVirtualRegister(LHS.getReg())}, {LHS, RHS});
-  constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI);
-  return {&*CmpMI, P};
+    return FoldCmp;
+  auto Dst = MRI.cloneVirtualRegister(LHS.getReg());
+  return emitSUBS(Dst, LHS, RHS, MIRBuilder);
 }
 
 MachineInstr *AArch64InstructionSelector::emitCSetForFCmp(
@@ -4177,16 +4164,11 @@
 
   AArch64CC::CondCode CondCode;
   if (CondOpc == TargetOpcode::G_ICMP) {
-    MachineInstr *Cmp;
-    CmpInst::Predicate Pred;
-
-    std::tie(Cmp, Pred) =
-        emitIntegerCompare(CondDef->getOperand(2), CondDef->getOperand(3),
-                           CondDef->getOperand(1), MIB);
-
-    // Have to collect the CondCode after emitIntegerCompare, since it can
-    // update the predicate.
+    auto Pred =
+        static_cast<CmpInst::Predicate>(CondDef->getOperand(1).getPredicate());
     CondCode = changeICMPPredToAArch64CC(Pred);
+    emitIntegerCompare(CondDef->getOperand(2), CondDef->getOperand(3),
+                       CondDef->getOperand(1), MIB);
   } else {
     // Get the condition code for the select.
     CmpInst::Predicate Pred =
@@ -4336,143 +4318,6 @@
   return nullptr;
 }
 
-MachineInstr *AArch64InstructionSelector::tryOptArithImmedIntegerCompare(
-    MachineOperand &LHS, MachineOperand &RHS, CmpInst::Predicate &P,
-    MachineIRBuilder &MIB) const {
-  // Attempt to select the immediate form of an integer compare.
-  MachineRegisterInfo &MRI = *MIB.getMRI();
-  auto Ty = MRI.getType(LHS.getReg());
-  assert(!Ty.isVector() && "Expected scalar or pointer only?");
-  unsigned Size = Ty.getSizeInBits();
-  assert((Size == 32 || Size == 64) &&
-         "Expected 32 bit or 64 bit compare only?");
-
-  // Check if this is a case we can already handle.
-  InstructionSelector::ComplexRendererFns ImmFns;
-  ImmFns = selectArithImmed(RHS);
-
-  if (!ImmFns) {
-    // We didn't get a rendering function, but we may still have a constant.
-    auto MaybeImmed = getImmedFromMO(RHS);
-    if (!MaybeImmed)
-      return nullptr;
-
-    // We have a constant, but it doesn't fit. Try adjusting it by one and
-    // updating the predicate if possible.
-    uint64_t C = *MaybeImmed;
-    CmpInst::Predicate NewP;
-    switch (P) {
-    default:
-      return nullptr;
-    case CmpInst::ICMP_SLT:
-    case CmpInst::ICMP_SGE:
-      // Check for
-      //
-      // x slt c => x sle c - 1
-      // x sge c => x sgt c - 1
-      //
-      // When c is not the smallest possible negative number.
-      if ((Size == 64 && static_cast<int64_t>(C) == INT64_MIN) ||
-          (Size == 32 && static_cast<int32_t>(C) == INT32_MIN))
-        return nullptr;
-      NewP = (P == CmpInst::ICMP_SLT) ? CmpInst::ICMP_SLE : CmpInst::ICMP_SGT;
-      C -= 1;
-      break;
-    case CmpInst::ICMP_ULT:
-    case CmpInst::ICMP_UGE:
-      // Check for
-      //
-      // x ult c => x ule c - 1
-      // x uge c => x ugt c - 1
-      //
-      // When c is not zero.
-      if (C == 0)
-        return nullptr;
-      NewP = (P == CmpInst::ICMP_ULT) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
-      C -= 1;
-      break;
-    case CmpInst::ICMP_SLE:
-    case CmpInst::ICMP_SGT:
-      // Check for
-      //
-      // x sle c => x slt c + 1
-      // x sgt c => s sge c + 1
-      //
-      // When c is not the largest possible signed integer.
-      if ((Size == 32 && static_cast<int32_t>(C) == INT32_MAX) ||
-          (Size == 64 && static_cast<int64_t>(C) == INT64_MAX))
-        return nullptr;
-      NewP = (P == CmpInst::ICMP_SLE) ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE;
-      C += 1;
-      break;
-    case CmpInst::ICMP_ULE:
-    case CmpInst::ICMP_UGT:
-      // Check for
-      //
-      // x ule c => x ult c + 1
-      // x ugt c => s uge c + 1
-      //
-      // When c is not the largest possible unsigned integer.
-      if ((Size == 32 && static_cast<uint32_t>(C) == UINT32_MAX) ||
-          (Size == 64 && C == UINT64_MAX))
-        return nullptr;
-      NewP = (P == CmpInst::ICMP_ULE) ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
-      C += 1;
-      break;
-    }
-
-    // Check if the new constant is valid.
-    if (Size == 32)
-      C = static_cast<uint32_t>(C);
-    ImmFns = select12BitValueWithLeftShift(C);
-    if (!ImmFns)
-      return nullptr;
-    P = NewP;
-  }
-
-  // At this point, we know we can select an immediate form. Go ahead and do
-  // that.
-  unsigned Opc;
-  if (Size == 32) {
-    Opc = AArch64::SUBSWri;
-  } else {
-    Opc = AArch64::SUBSXri;
-  }
-
-  auto CmpMI = MIB.buildInstr(Opc, {Ty}, {LHS.getReg()});
-  for (auto &RenderFn : *ImmFns)
-    RenderFn(CmpMI);
-  constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI);
-  return &*CmpMI;
-}
-
-MachineInstr *AArch64InstructionSelector::tryOptArithShiftedCompare(
-    MachineOperand &LHS, MachineOperand &RHS, MachineIRBuilder &MIB) const {
-  // We are looking for the following pattern:
-  //
-  // shift = G_SHL/ASHR/LHSR y, c
-  // ...
-  // cmp = G_ICMP pred, something, shift
-  //
-  // Since we will select the G_ICMP to a SUBS, we can potentially fold the
-  // shift into the subtract.
-  static const unsigned OpcTable[2] = {AArch64::SUBSWrs, AArch64::SUBSXrs};
-  auto ImmFns = selectShiftedRegister(RHS);
-  if (!ImmFns)
-    return nullptr;
-  MachineRegisterInfo &MRI = *MIB.getMRI();
-  auto Ty = MRI.getType(LHS.getReg());
-  assert(!Ty.isVector() && "Expected scalar or pointer only?");
-  unsigned Size = Ty.getSizeInBits();
-  bool Idx = (Size == 64);
-  unsigned Opc = OpcTable[Idx];
-  auto CmpMI = MIB.buildInstr(Opc, {Ty}, {LHS.getReg()});
-  for (auto &RenderFn : *ImmFns)
-    RenderFn(CmpMI);
-  constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI);
-  return &*CmpMI;
-}
-
 bool AArch64InstructionSelector::tryOptShuffleDupLane(
     MachineInstr &I, LLT DstTy, LLT SrcTy, ArrayRef<int> Mask,
     MachineRegisterInfo &MRI) const {
Index: llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "AArch64TargetMachine.h"
+#include "AArch64GlobalISelUtils.h"
 #include "MCTargetDesc/AArch64MCTargetDesc.h"
 #include "llvm/CodeGen/GlobalISel/Combiner.h"
 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
@@ -31,6 +32,7 @@
 
 using namespace llvm;
 using namespace MIPatternMatch;
+using namespace AArch64GISelUtils;
 
 /// Represents a pseudo instruction which replaces a G_SHUFFLE_VECTOR.
 ///
@@ -408,6 +410,132 @@
   return true;
 }
 
+/// Determine if it is possible to modify the \p RHS and predicate \p P of a
+/// comparison instruction such that the right-hand side is an arithmetic
+/// immediate.
+///
+/// \returns A pair containing the updated immediate and predicate which may
+/// be used to optimize the instruction.
+///
+/// \note This assumes that the comparison has been legalized.
+Optional<std::pair<uint64_t, CmpInst::Predicate>>
+tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
+                        const MachineRegisterInfo &MRI) {
+  const auto &Ty = MRI.getType(RHS);
+  if (Ty.isVector())
+    return None;
+  unsigned Size = Ty.getSizeInBits();
+  assert((Size == 32 || Size == 64) && "Expected 32 or 64 bit compare only?");
+
+  // If the RHS is not a constant, or the RHS is already a valid arithmetic
+  // immediate, then there is nothing to change.
+  auto ValAndVReg = getConstantVRegValWithLookThrough(RHS, MRI);
+  if (!ValAndVReg)
+    return None;
+  uint64_t C = ValAndVReg->Value;
+  if (isLegalArithImmed(C))
+    return None;
+
+  // We have a non-arithmetic immediate. Check if adjusting the immediate and
+  // adjusting the predicate will result in a legal arithmetic immediate.
+  switch (P) {
+  default:
+    return None;
+  case CmpInst::ICMP_SLT:
+  case CmpInst::ICMP_SGE:
+    // Check for
+    //
+    // x slt c => x sle c - 1
+    // x sge c => x sgt c - 1
+    //
+    // When c is not the smallest possible negative number.
+    if ((Size == 64 && static_cast<int64_t>(C) == INT64_MIN) ||
+        (Size == 32 && static_cast<int32_t>(C) == INT32_MIN))
+      return None;
+    P = (P == CmpInst::ICMP_SLT) ? CmpInst::ICMP_SLE : CmpInst::ICMP_SGT;
+    C -= 1;
+    break;
+  case CmpInst::ICMP_ULT:
+  case CmpInst::ICMP_UGE:
+    // Check for
+    //
+    // x ult c => x ule c - 1
+    // x uge c => x ugt c - 1
+    //
+    // When c is not zero.
+    if (C == 0)
+      return None;
+    P = (P == CmpInst::ICMP_ULT) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
+    C -= 1;
+    break;
+  case CmpInst::ICMP_SLE:
+  case CmpInst::ICMP_SGT:
+    // Check for
+    //
+    // x sle c => x slt c + 1
+    // x sgt c => x sge c + 1
+    //
+    // When c is not the largest possible signed integer.
+    if ((Size == 32 && static_cast<int32_t>(C) == INT32_MAX) ||
+        (Size == 64 && static_cast<int64_t>(C) == INT64_MAX))
+      return None;
+    P = (P == CmpInst::ICMP_SLE) ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE;
+    C += 1;
+    break;
+  case CmpInst::ICMP_ULE:
+  case CmpInst::ICMP_UGT:
+    // Check for
+    //
+    // x ule c => x ult c + 1
+    // x ugt c => x uge c + 1
+    //
+    // When c is not the largest possible unsigned integer.
+    if ((Size == 32 && static_cast<uint32_t>(C) == UINT32_MAX) ||
+        (Size == 64 && C == UINT64_MAX))
+      return None;
+    P = (P == CmpInst::ICMP_ULE) ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
+    C += 1;
+    break;
+  }
+
+  // Check if the new constant is valid, and return the updated constant and
+  // predicate if it is.
+  if (Size == 32)
+    C = static_cast<uint32_t>(C);
+  if (!isLegalArithImmed(C))
+    return None;
+  return {{C, P}};
+}
+
+/// \returns true if it is possible to update the RHS + predicate of a compare
+/// such that the RHS will be selected as an arithmetic immediate.
+bool matchAdjustICmpImmAndPred(
+    MachineInstr &MI, const MachineRegisterInfo &MRI,
+    std::pair<uint64_t, CmpInst::Predicate> &MatchInfo) {
+  Register RHS = MI.getOperand(3).getReg();
+  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
+  if (auto MaybeNewImmAndPred = tryAdjustICmpImmAndPred(RHS, Pred, MRI)) {
+    MatchInfo = *MaybeNewImmAndPred;
+    return true;
+  }
+  return false;
+}
+
+bool applyAdjustICmpImmAndPred(
+    MachineInstr &MI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo,
+    MachineIRBuilder &MIB, GISelChangeObserver &Observer) {
+  MIB.setInstrAndDebugLoc(MI);
+  MachineOperand &RHS = MI.getOperand(3);
+  MachineRegisterInfo &MRI = *MIB.getMRI();
+  auto Cst = MIB.buildConstant(MRI.cloneVirtualRegister(RHS.getReg()),
+                               MatchInfo.first);
+  Observer.changingInstr(MI);
+  RHS.setReg(Cst->getOperand(0).getReg());
+  MI.getOperand(1).setPredicate(MatchInfo.second);
+  Observer.changedInstr(MI);
+  return true;
+}
+
 #define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_DEPS
 #include "AArch64GenPostLegalizeGILowering.inc"
 #undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_DEPS
Index: llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
===================================================================
--- llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
+++ llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
@@ -67,37 +67,6 @@
 
 ...
 ---
-name: check_update_predicate
-alignment: 4
-legalized: true
-regBankSelected: true
-tracksRegLiveness: true
-body: |
-  bb.0:
-    liveins: $w0, $w1
-    ; The G_ICMP is optimized here to be a slt comparison by adding 1 to the
-    ; constant. So, the CSELWr should use the predicate code 11, rather than
-    ; 13.
-
-    ; CHECK-LABEL: name: check_update_predicate
-    ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy1:gpr32sp = COPY $w0
-    ; CHECK: %copy2:gpr32 = COPY $w1
-    ; CHECK: %cst:gpr32 = MOVi32imm -1
-    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %copy1, 0, 0, implicit-def $nzcv
-    ; CHECK: %select:gpr32 = CSELWr %cst, %copy2, 11, implicit $nzcv
-    ; CHECK: $w0 = COPY %select
-    ; CHECK: RET_ReallyLR implicit $w0
-    %copy1:gpr(s32) = COPY $w0
-    %copy2:gpr(s32) = COPY $w1
-    %cst:gpr(s32) = G_CONSTANT i32 -1
-    %cmp:gpr(s32) = G_ICMP intpred(sle), %copy1(s32), %cst
-    %trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    %select:gpr(s32) = G_SELECT %trunc(s1), %cst, %copy2
-    $w0 = COPY %select(s32)
-    RET_ReallyLR implicit $w0
-...
----
 name: csinc
 alignment: 4
 legalized: true
Index: llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
@@ -0,0 +1,515 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s
+
+...
+---
+name: slt_to_sle_s32
+alignment: 4
+legalized: true
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $w0
+
+    ; x slt c => x sle c - 1
+    ;
+    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
+    ; left shift.
+ + ; CHECK-LABEL: name: slt_to_sle_s32 + ; CHECK: liveins: $w0 + ; CHECK: %reg:_(s32) = COPY $w0 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4096 + ; CHECK: %cmp:_(s32) = G_ICMP intpred(sle), %reg(s32), [[C]] + ; CHECK: $w0 = COPY %cmp(s32) + ; CHECK: RET_ReallyLR implicit $w0 + %reg:_(s32) = COPY $w0 + %cst:_(s32) = G_CONSTANT i32 4097 + %cmp:_(s32) = G_ICMP intpred(slt), %reg(s32), %cst + $w0 = COPY %cmp(s32) + RET_ReallyLR implicit $w0 + +... +--- +name: slt_to_sle_s64 +alignment: 4 +legalized: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0 + + ; x slt c => x sle c - 1 + ; + ; log_2(4096) == 12, so we can represent this as a 12 bit value with a + ; left shift. + + ; CHECK-LABEL: name: slt_to_sle_s64 + ; CHECK: liveins: $x0 + ; CHECK: %reg:_(s64) = COPY $x0 + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4096 + ; CHECK: %cmp:_(s32) = G_ICMP intpred(sle), %reg(s64), [[C]] + ; CHECK: %ext:_(s64) = G_ZEXT %cmp(s32) + ; CHECK: $x0 = COPY %ext(s64) + ; CHECK: RET_ReallyLR implicit $x0 + %reg:_(s64) = COPY $x0 + %cst:_(s64) = G_CONSTANT i64 4097 + %cmp:_(s32) = G_ICMP intpred(slt), %reg(s64), %cst + %ext:_(s64) = G_ZEXT %cmp(s32) + $x0 = COPY %ext(s64) + RET_ReallyLR implicit $x0 + +... +--- +name: sge_to_sgt_s32 +alignment: 4 +legalized: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0 + + ; x sge c => x sgt c - 1 + ; + ; log_2(4096) == 12, so we can represent this as a 12 bit value with a + ; left shift. + + ; CHECK-LABEL: name: sge_to_sgt_s32 + ; CHECK: liveins: $w0 + ; CHECK: %reg:_(s32) = COPY $w0 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4096 + ; CHECK: %cmp:_(s32) = G_ICMP intpred(sgt), %reg(s32), [[C]] + ; CHECK: $w0 = COPY %cmp(s32) + ; CHECK: RET_ReallyLR implicit $w0 + %reg:_(s32) = COPY $w0 + %cst:_(s32) = G_CONSTANT i32 4097 + %cmp:_(s32) = G_ICMP intpred(sge), %reg(s32), %cst + $w0 = COPY %cmp(s32) + RET_ReallyLR implicit $w0 + +... +--- +name: sge_to_sgt_s64 +alignment: 4 +legalized: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0 + + ; x sge c => x sgt c - 1 + ; + ; log_2(4096) == 12, so we can represent this as a 12 bit value with a + ; left shift. + + ; CHECK-LABEL: name: sge_to_sgt_s64 + ; CHECK: liveins: $x0 + ; CHECK: %reg:_(s64) = COPY $x0 + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4096 + ; CHECK: %cmp:_(s32) = G_ICMP intpred(sgt), %reg(s64), [[C]] + ; CHECK: %ext:_(s64) = G_ZEXT %cmp(s32) + ; CHECK: $x0 = COPY %ext(s64) + ; CHECK: RET_ReallyLR implicit $x0 + %reg:_(s64) = COPY $x0 + %cst:_(s64) = G_CONSTANT i64 4097 + %cmp:_(s32) = G_ICMP intpred(sge), %reg(s64), %cst + %ext:_(s64) = G_ZEXT %cmp(s32) + $x0 = COPY %ext(s64) + RET_ReallyLR implicit $x0 + +... +--- +name: ult_to_ule_s32 +alignment: 4 +legalized: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0 + + ; x ult c => x ule c - 1 + ; + ; log_2(4096) == 12, so we can represent this as a 12 bit value with a + ; left shift. + + ; CHECK-LABEL: name: ult_to_ule_s32 + ; CHECK: liveins: $w0 + ; CHECK: %reg:_(s32) = COPY $w0 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4096 + ; CHECK: %cmp:_(s32) = G_ICMP intpred(ule), %reg(s32), [[C]] + ; CHECK: $w0 = COPY %cmp(s32) + ; CHECK: RET_ReallyLR implicit $w0 + %reg:_(s32) = COPY $w0 + %cst:_(s32) = G_CONSTANT i32 4097 + %cmp:_(s32) = G_ICMP intpred(ult), %reg(s32), %cst + $w0 = COPY %cmp(s32) + RET_ReallyLR implicit $w0 + +... 
+--- +name: ult_to_ule_s64 +alignment: 4 +legalized: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0 + + ; x ult c => x ule c - 1 + ; + ; log_2(4096) == 12, so we can represent this as a 12 bit value with a + ; left shift. + + ; CHECK-LABEL: name: ult_to_ule_s64 + ; CHECK: liveins: $x0 + ; CHECK: %reg:_(s64) = COPY $x0 + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4096 + ; CHECK: %cmp:_(s32) = G_ICMP intpred(ule), %reg(s64), [[C]] + ; CHECK: %ext:_(s64) = G_ANYEXT %cmp(s32) + ; CHECK: $x0 = COPY %ext(s64) + ; CHECK: RET_ReallyLR implicit $x0 + %reg:_(s64) = COPY $x0 + %cst:_(s64) = G_CONSTANT i64 4097 + %cmp:_(s32) = G_ICMP intpred(ult), %reg(s64), %cst + %ext:_(s64) = G_ANYEXT %cmp(s32) + $x0 = COPY %ext(s64) + RET_ReallyLR implicit $x0 + +... +--- +name: uge_to_ugt_s32 +alignment: 4 +legalized: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0 + + ; x uge c => x ugt c - 1 + ; + ; log_2(4096) == 12, so we can represent this as a 12 bit value with a + ; left shift. + + ; CHECK-LABEL: name: uge_to_ugt_s32 + ; CHECK: liveins: $w0 + ; CHECK: %reg:_(s32) = COPY $w0 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4096 + ; CHECK: %cmp:_(s32) = G_ICMP intpred(ugt), %reg(s32), [[C]] + ; CHECK: $w0 = COPY %cmp(s32) + ; CHECK: RET_ReallyLR implicit $w0 + %reg:_(s32) = COPY $w0 + %cst:_(s32) = G_CONSTANT i32 4097 + %cmp:_(s32) = G_ICMP intpred(uge), %reg(s32), %cst + $w0 = COPY %cmp(s32) + RET_ReallyLR implicit $w0 + +... +--- +name: uge_to_ugt_s64 +alignment: 4 +legalized: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0 + + ; x uge c => x ugt c - 1 + ; + ; log_2(4096) == 12, so we can represent this as a 12 bit value with a + ; left shift. + + ; CHECK-LABEL: name: uge_to_ugt_s64 + ; CHECK: liveins: $x0 + ; CHECK: %reg:_(s64) = COPY $x0 + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4096 + ; CHECK: %cmp:_(s32) = G_ICMP intpred(ugt), %reg(s64), [[C]] + ; CHECK: %ext:_(s64) = G_ZEXT %cmp(s32) + ; CHECK: $x0 = COPY %ext(s64) + ; CHECK: RET_ReallyLR implicit $x0 + %reg:_(s64) = COPY $x0 + %cst:_(s64) = G_CONSTANT i64 4097 + %cmp:_(s32) = G_ICMP intpred(uge), %reg(s64), %cst + %ext:_(s64) = G_ZEXT %cmp(s32) + $x0 = COPY %ext(s64) + RET_ReallyLR implicit $x0 + +... +--- +name: sle_to_slt_s32 +alignment: 4 +legalized: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0 + + ; x sle c => x slt c + 1 + ; + ; log_2(8192) == 13, so we can represent this as a 12 bit value with a + ; left shift. + ; + ; (We can't use 4095 here, because that's a legal arithmetic immediate.) + + ; CHECK-LABEL: name: sle_to_slt_s32 + ; CHECK: liveins: $w0 + ; CHECK: %reg:_(s32) = COPY $w0 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8192 + ; CHECK: %cmp:_(s32) = G_ICMP intpred(slt), %reg(s32), [[C]] + ; CHECK: $w0 = COPY %cmp(s32) + ; CHECK: RET_ReallyLR implicit $w0 + %reg:_(s32) = COPY $w0 + %cst:_(s32) = G_CONSTANT i32 8191 + %cmp:_(s32) = G_ICMP intpred(sle), %reg(s32), %cst + $w0 = COPY %cmp(s32) + RET_ReallyLR implicit $w0 + +... +--- +name: sle_to_slt_s64 +alignment: 4 +legalized: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0 + + ; x sle c => x slt c + 1 + ; + ; log_2(8192) == 13, so we can represent this as a 12 bit value with a + ; left shift. 
+
+    ; CHECK-LABEL: name: sle_to_slt_s64
+    ; CHECK: liveins: $x0
+    ; CHECK: %reg:_(s64) = COPY $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8192
+    ; CHECK: %cmp:_(s32) = G_ICMP intpred(slt), %reg(s64), [[C]]
+    ; CHECK: %ext:_(s64) = G_ZEXT %cmp(s32)
+    ; CHECK: $x0 = COPY %ext(s64)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 8191
+    %cmp:_(s32) = G_ICMP intpred(sle), %reg(s64), %cst
+    %ext:_(s64) = G_ZEXT %cmp(s32)
+    $x0 = COPY %ext(s64)
+    RET_ReallyLR implicit $x0
+
+...
+---
+name: sgt_to_sge_s32
+alignment: 4
+legalized: true
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $w0
+
+    ; x sgt c => x sge c + 1
+    ;
+    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; CHECK-LABEL: name: sgt_to_sge_s32
+    ; CHECK: liveins: $w0
+    ; CHECK: %reg:_(s32) = COPY $w0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8192
+    ; CHECK: %cmp:_(s32) = G_ICMP intpred(sge), %reg(s32), [[C]]
+    ; CHECK: $w0 = COPY %cmp(s32)
+    ; CHECK: RET_ReallyLR implicit $w0
+    %reg:_(s32) = COPY $w0
+    %cst:_(s32) = G_CONSTANT i32 8191
+    %cmp:_(s32) = G_ICMP intpred(sgt), %reg(s32), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name: sgt_to_sge_s64
+alignment: 4
+legalized: true
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; x sgt c => x sge c + 1
+    ;
+    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; CHECK-LABEL: name: sgt_to_sge_s64
+    ; CHECK: liveins: $x0
+    ; CHECK: %reg:_(s64) = COPY $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8192
+    ; CHECK: %cmp:_(s32) = G_ICMP intpred(sge), %reg(s64), [[C]]
+    ; CHECK: %ext:_(s64) = G_ANYEXT %cmp(s32)
+    ; CHECK: $x0 = COPY %ext(s64)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 8191
+    %cmp:_(s32) = G_ICMP intpred(sgt), %reg(s64), %cst
+    %ext:_(s64) = G_ANYEXT %cmp(s32)
+    $x0 = COPY %ext(s64)
+    RET_ReallyLR implicit $x0
+
+...
+---
+name: no_opt_int32_min
+alignment: 4
+legalized: true
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $w0
+
+    ; The cmp should not change.
+    ;
+    ; If we subtract 1 from the constant, it will wrap around, and so it's not
+    ; true that
+    ;
+    ; x slt c => x sle c - 1
+    ; x sge c => x sgt c - 1
+
+    ; CHECK-LABEL: name: no_opt_int32_min
+    ; CHECK: liveins: $w0
+    ; CHECK: %reg:_(s32) = COPY $w0
+    ; CHECK: %cst:_(s32) = G_CONSTANT i32 -2147483648
+    ; CHECK: %cmp:_(s32) = G_ICMP intpred(slt), %reg(s32), %cst
+    ; CHECK: $w0 = COPY %cmp(s32)
+    ; CHECK: RET_ReallyLR implicit $w0
+    %reg:_(s32) = COPY $w0
+    %cst:_(s32) = G_CONSTANT i32 -2147483648
+    %cmp:_(s32) = G_ICMP intpred(slt), %reg(s32), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name: no_opt_int64_min
+alignment: 4
+legalized: true
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; The cmp should not change.
+    ;
+    ; If we subtract 1 from the constant, it will wrap around, and so it's not
+    ; true that
+    ;
+    ; x slt c => x sle c - 1
+    ; x sge c => x sgt c - 1
+
+    ; CHECK-LABEL: name: no_opt_int64_min
+    ; CHECK: liveins: $x0
+    ; CHECK: %reg:_(s64) = COPY $x0
+    ; CHECK: %cst:_(s64) = G_CONSTANT i64 -9223372036854775808
+    ; CHECK: %cmp:_(s32) = G_ICMP intpred(slt), %reg(s64), %cst
+    ; CHECK: %ext:_(s64) = G_ANYEXT %cmp(s32)
+    ; CHECK: $x0 = COPY %ext(s64)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 -9223372036854775808
+    %cmp:_(s32) = G_ICMP intpred(slt), %reg(s64), %cst
+    %ext:_(s64) = G_ANYEXT %cmp(s32)
+    $x0 = COPY %ext(s64)
+    RET_ReallyLR implicit $x0
+
+...
+---
+name: no_opt_int32_max
+alignment: 4
+legalized: true
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $w0
+
+    ; The cmp should not change.
+    ;
+    ; If we add 1 to the constant, it will wrap around, and so it's not true
+    ; that
+    ;
+    ; x sle c => x slt c + 1
+    ; x sgt c => x sge c + 1
+
+    ; CHECK-LABEL: name: no_opt_int32_max
+    ; CHECK: liveins: $w0
+    ; CHECK: %reg:_(s32) = COPY $w0
+    ; CHECK: %cst:_(s32) = G_CONSTANT i32 2147483647
+    ; CHECK: %cmp:_(s32) = G_ICMP intpred(sle), %reg(s32), %cst
+    ; CHECK: $w0 = COPY %cmp(s32)
+    ; CHECK: RET_ReallyLR implicit $w0
+    %reg:_(s32) = COPY $w0
+    %cst:_(s32) = G_CONSTANT i32 2147483647
+    %cmp:_(s32) = G_ICMP intpred(sle), %reg(s32), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name: no_opt_int64_max
+alignment: 4
+legalized: true
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; The cmp should not change.
+    ;
+    ; If we add 1 to the constant, it will wrap around, and so it's not true
+    ; that
+    ;
+    ; x sle c => x slt c + 1
+    ; x sgt c => x sge c + 1
+
+    ; CHECK-LABEL: name: no_opt_int64_max
+    ; CHECK: liveins: $x0
+    ; CHECK: %reg:_(s64) = COPY $x0
+    ; CHECK: %cst:_(s64) = G_CONSTANT i64 9223372036854775807
+    ; CHECK: %cmp:_(s32) = G_ICMP intpred(sle), %reg(s64), %cst
+    ; CHECK: %ext:_(s64) = G_ZEXT %cmp(s32)
+    ; CHECK: $x0 = COPY %ext(s64)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 9223372036854775807
+    %cmp:_(s32) = G_ICMP intpred(sle), %reg(s64), %cst
+    %ext:_(s64) = G_ZEXT %cmp(s32)
+    $x0 = COPY %ext(s64)
+    RET_ReallyLR implicit $x0
+
+...
+---
+name: no_opt_zero
+alignment: 4
+legalized: true
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; The cmp should not change.
+    ;
+    ; This is an unsigned comparison, so when the constant is 0, the following
+    ; does not hold:
+    ;
+    ; x ult c => x ule c - 1
+    ; x uge c => x ugt c - 1
+
+    ; CHECK-LABEL: name: no_opt_zero
+    ; CHECK: liveins: $x0
+    ; CHECK: %reg:_(s64) = COPY $x0
+    ; CHECK: %cst:_(s64) = G_CONSTANT i64 0
+    ; CHECK: %cmp:_(s32) = G_ICMP intpred(ult), %reg(s64), %cst
+    ; CHECK: %ext:_(s64) = G_ZEXT %cmp(s32)
+    ; CHECK: $x0 = COPY %ext(s64)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 0
+    %cmp:_(s32) = G_ICMP intpred(ult), %reg(s64), %cst
+    %ext:_(s64) = G_ZEXT %cmp(s32)
+    $x0 = COPY %ext(s64)
+    RET_ReallyLR implicit $x0
+
Index: llvm/test/CodeGen/AArch64/GlobalISel/select-arith-immed-compare.mir
===================================================================
--- llvm/test/CodeGen/AArch64/GlobalISel/select-arith-immed-compare.mir
+++ /dev/null
@@ -1,708 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
-
-name: slt_to_sle_s32
-alignment: 4
-legalized: true
-regBankSelected: true
-tracksRegLiveness: true
-body: |
-  bb.0:
-    liveins: $w0
-
-    ; x slt c => x sle c - 1
-    ;
-    ; We should not have a MOV here. We can subtract 1 from the constant and
-    ; change the condition code.
-    ;
-    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: slt_to_sle_s32
-    ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
-    ; CHECK: SUBSWri [[COPY]], 1, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
-    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
-    ; CHECK: $w0 = COPY [[ANDWri]]
-    ; CHECK: RET_ReallyLR implicit $w0
-    %0:gpr(s32) = COPY $w0
-    %1:gpr(s32) = G_CONSTANT i32 4097
-    %4:gpr(s32) = G_ICMP intpred(slt), %0(s32), %1
-    %5:gpr(s32) = G_CONSTANT i32 1
-    %3:gpr(s32) = G_AND %4, %5
-    $w0 = COPY %3(s32)
-    RET_ReallyLR implicit $w0
-
-...
----
-name: slt_to_sle_s64
-alignment: 4
-legalized: true
-regBankSelected: true
-tracksRegLiveness: true
-body: |
-  bb.0:
-    liveins: $x0
-
-    ; x slt c => x sle c - 1
-    ;
-    ; We should not have a MOV here. We can subtract 1 from the constant and
-    ; change the condition code.
-    ;
-    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: slt_to_sle_s64
-    ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 1, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
-    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
-    ; CHECK: $x0 = COPY [[ANDXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
-    %0:gpr(s64) = COPY $x0
-    %1:gpr(s64) = G_CONSTANT i64 4097
-    %4:gpr(s32) = G_ICMP intpred(slt), %0(s64), %1
-    %6:gpr(s64) = G_ANYEXT %4(s32)
-    %5:gpr(s64) = G_CONSTANT i64 1
-    %3:gpr(s64) = G_AND %6, %5
-    $x0 = COPY %3(s64)
-    RET_ReallyLR implicit $x0
-
-...
----
-name: sge_to_sgt_s32
-alignment: 4
-legalized: true
-regBankSelected: true
-tracksRegLiveness: true
-body: |
-  bb.0:
-    liveins: $w0
-
-    ; x sge c => x sgt c - 1
-    ;
-    ; We should not have a MOV here. We can subtract 1 from the constant and
-    ; change the condition code.
- ; - ; log_2(4096) == 12, so we can represent this as a 12 bit value with a - ; left shift. - - ; CHECK-LABEL: name: sge_to_sgt_s32 - ; CHECK: liveins: $w0 - ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0 - ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 12, implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv - ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0 - ; CHECK: $w0 = COPY [[ANDWri]] - ; CHECK: RET_ReallyLR implicit $w0 - %0:gpr(s32) = COPY $w0 - %1:gpr(s32) = G_CONSTANT i32 4097 - %4:gpr(s32) = G_ICMP intpred(sge), %0(s32), %1 - %5:gpr(s32) = G_CONSTANT i32 1 - %3:gpr(s32) = G_AND %4, %5 - $w0 = COPY %3(s32) - RET_ReallyLR implicit $w0 - -... ---- -name: sge_to_sgt_s64 -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $x0 - - ; x sge c => x sgt c - 1 - ; - ; We should not have a MOV here. We can subtract 1 from the constant and - ; change the condition code. - ; - ; log_2(4096) == 12, so we can represent this as a 12 bit value with a - ; left shift. - - ; CHECK-LABEL: name: sge_to_sgt_s64 - ; CHECK: liveins: $x0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 1, 12, implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv - ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF - ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32 - ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096 - ; CHECK: $x0 = COPY [[ANDXri]] - ; CHECK: RET_ReallyLR implicit $x0 - %0:gpr(s64) = COPY $x0 - %1:gpr(s64) = G_CONSTANT i64 4097 - %4:gpr(s32) = G_ICMP intpred(sge), %0(s64), %1 - %6:gpr(s64) = G_ANYEXT %4(s32) - %5:gpr(s64) = G_CONSTANT i64 1 - %3:gpr(s64) = G_AND %6, %5 - $x0 = COPY %3(s64) - RET_ReallyLR implicit $x0 - -... ---- -name: ult_to_ule_s32 -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $w0 - - ; x ult c => x ule c - 1 - ; - ; We should not have a MOV here. We can subtract 1 from the constant and - ; change the condition code. - ; - ; log_2(4096) == 12, so we can represent this as a 12 bit value with a - ; left shift. - - ; CHECK-LABEL: name: ult_to_ule_s32 - ; CHECK: liveins: $w0 - ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0 - ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 12, implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv - ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0 - ; CHECK: $w0 = COPY [[ANDWri]] - ; CHECK: RET_ReallyLR implicit $w0 - %0:gpr(s32) = COPY $w0 - %1:gpr(s32) = G_CONSTANT i32 4097 - %4:gpr(s32) = G_ICMP intpred(ult), %0(s32), %1 - %5:gpr(s32) = G_CONSTANT i32 1 - %3:gpr(s32) = G_AND %4, %5 - $w0 = COPY %3(s32) - RET_ReallyLR implicit $w0 - -... ---- -name: ult_to_ule_s64 -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $x0 - - ; x ult c => x ule c - 1 - ; - ; We should not have a MOV here. We can subtract 1 from the constant and - ; change the condition code. - ; - ; log_2(4096) == 12, so we can represent this as a 12 bit value with a - ; left shift. 
- - ; CHECK-LABEL: name: ult_to_ule_s64 - ; CHECK: liveins: $x0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 1, 12, implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv - ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF - ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32 - ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096 - ; CHECK: $x0 = COPY [[ANDXri]] - ; CHECK: RET_ReallyLR implicit $x0 - %0:gpr(s64) = COPY $x0 - %1:gpr(s64) = G_CONSTANT i64 4097 - %4:gpr(s32) = G_ICMP intpred(ult), %0(s64), %1 - %6:gpr(s64) = G_ANYEXT %4(s32) - %5:gpr(s64) = G_CONSTANT i64 1 - %3:gpr(s64) = G_AND %6, %5 - $x0 = COPY %3(s64) - RET_ReallyLR implicit $x0 - -... ---- -name: uge_to_ugt_s32 -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $w0 - - ; x uge c => x ugt c - 1 - ; - ; We should not have a MOV here. We can subtract 1 from the constant and - ; change the condition code. - ; - ; log_2(4096) == 12, so we can represent this as a 12 bit value with a - ; left shift. - - ; CHECK-LABEL: name: uge_to_ugt_s32 - ; CHECK: liveins: $w0 - ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0 - ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 12, implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv - ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0 - ; CHECK: $w0 = COPY [[ANDWri]] - ; CHECK: RET_ReallyLR implicit $w0 - %0:gpr(s32) = COPY $w0 - %1:gpr(s32) = G_CONSTANT i32 4097 - %4:gpr(s32) = G_ICMP intpred(uge), %0(s32), %1 - %5:gpr(s32) = G_CONSTANT i32 1 - %3:gpr(s32) = G_AND %4, %5 - $w0 = COPY %3(s32) - RET_ReallyLR implicit $w0 - -... ---- -name: uge_to_ugt_s64 -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $x0 - - ; x uge c => x ugt c - 1 - ; - ; We should not have a MOV here. We can subtract 1 from the constant and - ; change the condition code. - ; - ; log_2(4096) == 12, so we can represent this as a 12 bit value with a - ; left shift. - - ; CHECK-LABEL: name: uge_to_ugt_s64 - ; CHECK: liveins: $x0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 1, 12, implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv - ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF - ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32 - ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096 - ; CHECK: $x0 = COPY [[ANDXri]] - ; CHECK: RET_ReallyLR implicit $x0 - %0:gpr(s64) = COPY $x0 - %1:gpr(s64) = G_CONSTANT i64 4097 - %4:gpr(s32) = G_ICMP intpred(uge), %0(s64), %1 - %6:gpr(s64) = G_ANYEXT %4(s32) - %5:gpr(s64) = G_CONSTANT i64 1 - %3:gpr(s64) = G_AND %6, %5 - $x0 = COPY %3(s64) - RET_ReallyLR implicit $x0 - -... ---- -name: sle_to_slt_s32 -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $w0 - - ; x sle c => x slt c + 1 - ; - ; We should not have a MOV here. We can add 1 to the constant and change - ; the condition code. - ; - ; log_2(8192) == 13, so we can represent this as a 12 bit value with a - ; left shift. - ; - ; (We can't use 4095 here, because that's a legal arithmetic immediate.) 
- - ; CHECK-LABEL: name: sle_to_slt_s32 - ; CHECK: liveins: $w0 - ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0 - ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 2, 12, implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv - ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0 - ; CHECK: $w0 = COPY [[ANDWri]] - ; CHECK: RET_ReallyLR implicit $w0 - %0:gpr(s32) = COPY $w0 - %1:gpr(s32) = G_CONSTANT i32 8191 - %4:gpr(s32) = G_ICMP intpred(sle), %0(s32), %1 - %5:gpr(s32) = G_CONSTANT i32 1 - %3:gpr(s32) = G_AND %4, %5 - $w0 = COPY %3(s32) - RET_ReallyLR implicit $w0 - -... ---- -name: sle_to_slt_s64 -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $x0 - - ; x sle c => x slt c + 1 - ; - ; We should not have a MOV here. We can add 1 to the constant and change - ; the condition code. - ; - ; log_2(8192) == 13, so we can represent this as a 12 bit value with a - ; left shift. - - ; CHECK-LABEL: name: sle_to_slt_s64 - ; CHECK: liveins: $x0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 2, 12, implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv - ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF - ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32 - ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096 - ; CHECK: $x0 = COPY [[ANDXri]] - ; CHECK: RET_ReallyLR implicit $x0 - %0:gpr(s64) = COPY $x0 - %1:gpr(s64) = G_CONSTANT i64 8191 - %4:gpr(s32) = G_ICMP intpred(sle), %0(s64), %1 - %6:gpr(s64) = G_ANYEXT %4(s32) - %5:gpr(s64) = G_CONSTANT i64 1 - %3:gpr(s64) = G_AND %6, %5 - $x0 = COPY %3(s64) - RET_ReallyLR implicit $x0 - -... ---- -name: sgt_to_sge_s32 -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $w0 - - ; x sgt c => s sge c + 1 - ; - ; We should not have a MOV here. We can add 1 to the constant and change - ; the condition code. - ; - ; log_2(8192) == 13, so we can represent this as a 12 bit value with a - ; left shift. - - ; CHECK-LABEL: name: sgt_to_sge_s32 - ; CHECK: liveins: $w0 - ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0 - ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 2, 12, implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv - ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0 - ; CHECK: $w0 = COPY [[ANDWri]] - ; CHECK: RET_ReallyLR implicit $w0 - %0:gpr(s32) = COPY $w0 - %1:gpr(s32) = G_CONSTANT i32 8191 - %4:gpr(s32) = G_ICMP intpred(sgt), %0(s32), %1 - %5:gpr(s32) = G_CONSTANT i32 1 - %3:gpr(s32) = G_AND %4, %5 - $w0 = COPY %3(s32) - RET_ReallyLR implicit $w0 - -... ---- -name: sgt_to_sge_s64 -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $x0 - - ; x sgt c => s sge c + 1 - ; - ; We should not have a MOV here. We can add 1 to the constant and change - ; the condition code. - ; - ; log_2(8192) == 13, so we can represent this as a 12 bit value with a - ; left shift. 
- - ; CHECK-LABEL: name: sgt_to_sge_s64 - ; CHECK: liveins: $x0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 2, 12, implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv - ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF - ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32 - ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096 - ; CHECK: $x0 = COPY [[ANDXri]] - ; CHECK: RET_ReallyLR implicit $x0 - %0:gpr(s64) = COPY $x0 - %1:gpr(s64) = G_CONSTANT i64 8191 - %4:gpr(s32) = G_ICMP intpred(sgt), %0(s64), %1 - %6:gpr(s64) = G_ANYEXT %4(s32) - %5:gpr(s64) = G_CONSTANT i64 1 - %3:gpr(s64) = G_AND %6, %5 - $x0 = COPY %3(s64) - RET_ReallyLR implicit $x0 - -... ---- -name: no_opt_int32_min -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $w0 - - ; This one should contain a MOV. - ; - ; If we subtract 1 from the constant, it will wrap around, and so it's not - ; true that - ; - ; x slt c => x sle c - 1 - ; x sge c => x sgt c - 1 - - ; CHECK-LABEL: name: no_opt_int32_min - ; CHECK: liveins: $w0 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm -2147483648 - ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv - ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0 - ; CHECK: $w0 = COPY [[ANDWri]] - ; CHECK: RET_ReallyLR implicit $w0 - %0:gpr(s32) = COPY $w0 - %1:gpr(s32) = G_CONSTANT i32 -2147483648 - %4:gpr(s32) = G_ICMP intpred(slt), %0(s32), %1 - %5:gpr(s32) = G_CONSTANT i32 1 - %3:gpr(s32) = G_AND %4, %5 - $w0 = COPY %3(s32) - RET_ReallyLR implicit $w0 - -... ---- -name: no_opt_int64_min -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $x0 - - ; This one should contain a MOV. - ; - ; If we subtract 1 from the constant, it will wrap around, and so it's not - ; true that - ; - ; x slt c => x sle c - 1 - ; x sge c => x sgt c - 1 - - ; CHECK-LABEL: name: no_opt_int64_min - ; CHECK: liveins: $x0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm -9223372036854775808 - ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[MOVi64imm]], implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv - ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF - ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32 - ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096 - ; CHECK: $x0 = COPY [[ANDXri]] - ; CHECK: RET_ReallyLR implicit $x0 - %0:gpr(s64) = COPY $x0 - %1:gpr(s64) = G_CONSTANT i64 -9223372036854775808 - %4:gpr(s32) = G_ICMP intpred(slt), %0(s64), %1 - %6:gpr(s64) = G_ANYEXT %4(s32) - %5:gpr(s64) = G_CONSTANT i64 1 - %3:gpr(s64) = G_AND %6, %5 - $x0 = COPY %3(s64) - RET_ReallyLR implicit $x0 - -... ---- -name: no_opt_int32_max -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $w0 - - ; This one should contain a MOV. 
- ; - ; If we add 1 to the constant, it will wrap around, and so it's not true - ; that - ; - ; x slt c => x sle c - 1 - ; x sge c => x sgt c - 1 - - ; CHECK-LABEL: name: no_opt_int32_max - ; CHECK: liveins: $w0 - ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 2147483647 - ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv - ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0 - ; CHECK: $w0 = COPY [[ANDWri]] - ; CHECK: RET_ReallyLR implicit $w0 - %0:gpr(s32) = COPY $w0 - %1:gpr(s32) = G_CONSTANT i32 2147483647 - %4:gpr(s32) = G_ICMP intpred(sle), %0(s32), %1 - %5:gpr(s32) = G_CONSTANT i32 1 - %3:gpr(s32) = G_AND %4, %5 - $w0 = COPY %3(s32) - RET_ReallyLR implicit $w0 - -... ---- -name: no_opt_int64_max -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $x0 - - ; This one should contain a MOV. - ; - ; If we add 1 to the constant, it will wrap around, and so it's not true - ; that - ; - ; x slt c => x sle c - 1 - ; x sge c => x sgt c - 1 - - - ; CHECK-LABEL: name: no_opt_int64_max - ; CHECK: liveins: $x0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 9223372036854775807 - ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[MOVi64imm]], implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv - ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF - ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32 - ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096 - ; CHECK: $x0 = COPY [[ANDXri]] - ; CHECK: RET_ReallyLR implicit $x0 - %0:gpr(s64) = COPY $x0 - %1:gpr(s64) = G_CONSTANT i64 9223372036854775807 - %4:gpr(s32) = G_ICMP intpred(sle), %0(s64), %1 - %6:gpr(s64) = G_ANYEXT %4(s32) - %5:gpr(s64) = G_CONSTANT i64 1 - %3:gpr(s64) = G_AND %6, %5 - $x0 = COPY %3(s64) - RET_ReallyLR implicit $x0 - -... ---- -name: no_opt_zero -alignment: 4 -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $x0 - - ; This one should contain a MOV. - ; - ; This is an unsigned comparison, so when the constant is 0, the following - ; does not hold: - ; - ; x slt c => x sle c - 1 - ; x sge c => x sgt c - 1 - - ; CHECK-LABEL: name: no_opt_zero - ; CHECK: liveins: $x0 - ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 0, 0, implicit-def $nzcv - ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv - ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF - ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32 - ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096 - ; CHECK: $x0 = COPY [[ANDXri]] - ; CHECK: RET_ReallyLR implicit $x0 - %0:gpr(s64) = COPY $x0 - %1:gpr(s64) = G_CONSTANT i64 0 - %4:gpr(s32) = G_ICMP intpred(ult), %0(s64), %1 - %6:gpr(s64) = G_ANYEXT %4(s32) - %5:gpr(s64) = G_CONSTANT i64 1 - %3:gpr(s64) = G_AND %6, %5 - $x0 = COPY %3(s64) - RET_ReallyLR implicit $x0 - -... ---- -name: more_than_one_use_select -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $x0, $x1, $x2 - - ; Both of these selects use the same compare. - ; - ; They should both be optimized in the same way, so the SUBS produced for - ; each CSEL should be the same. 
- - ; CHECK-LABEL: name: more_than_one_use_select - ; CHECK: liveins: $x0, $x1, $x2 - ; CHECK: %a:gpr64common = COPY $x0 - ; CHECK: %b:gpr64 = COPY $x1 - ; CHECK: %c:gpr64 = COPY $x2 - ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %a, 0, 0, implicit-def $nzcv - ; CHECK: %select1:gpr64 = CSELXr %a, %b, 11, implicit $nzcv - ; CHECK: [[SUBSXri1:%[0-9]+]]:gpr64 = SUBSXri %a, 0, 0, implicit-def $nzcv - ; CHECK: %select2:gpr64 = CSELXr %b, %c, 11, implicit $nzcv - ; CHECK: %add:gpr64 = ADDXrr %select1, %select2 - ; CHECK: $x0 = COPY %add - ; CHECK: RET_ReallyLR implicit $x0 - %a:gpr(s64) = COPY $x0 - %b:gpr(s64) = COPY $x1 - %c:gpr(s64) = COPY $x2 - %cst:gpr(s64) = G_CONSTANT i64 -1 - %cmp:gpr(s32) = G_ICMP intpred(sle), %a(s64), %cst - %trunc_cmp:gpr(s1) = G_TRUNC %cmp(s32) - %select1:gpr(s64) = G_SELECT %trunc_cmp(s1), %a, %b - %select2:gpr(s64) = G_SELECT %trunc_cmp(s1), %b, %c - %add:gpr(s64) = G_ADD %select1, %select2 - $x0 = COPY %add(s64) - RET_ReallyLR implicit $x0 -... ---- -name: more_than_one_use_select_no_opt -legalized: true -regBankSelected: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $x0, $x1, $x2 - - ; When we don't end up doing the optimization, we should not change the - ; predicate. - ; - ; In this case, the CSELXrs should both have predicate code 13. - - ; CHECK-LABEL: name: more_than_one_use_select_no_opt - ; CHECK: liveins: $x0, $x1, $x2 - ; CHECK: %a:gpr64 = COPY $x0 - ; CHECK: %b:gpr64 = COPY $x1 - ; CHECK: %c:gpr64 = COPY $x2 - ; CHECK: %cst:gpr64 = MOVi64imm 922337203685477580 - ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %a, %cst, implicit-def $nzcv - ; CHECK: %select1:gpr64 = CSELXr %a, %b, 13, implicit $nzcv - ; CHECK: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr %a, %cst, implicit-def $nzcv - ; CHECK: %select2:gpr64 = CSELXr %b, %c, 13, implicit $nzcv - ; CHECK: %add:gpr64 = ADDXrr %select1, %select2 - ; CHECK: $x0 = COPY %add - ; CHECK: RET_ReallyLR implicit $x0 - %a:gpr(s64) = COPY $x0 - %b:gpr(s64) = COPY $x1 - %c:gpr(s64) = COPY $x2 - %cst:gpr(s64) = G_CONSTANT i64 922337203685477580 - %cmp:gpr(s32) = G_ICMP intpred(sle), %a(s64), %cst - %trunc_cmp:gpr(s1) = G_TRUNC %cmp(s32) - %select1:gpr(s64) = G_SELECT %trunc_cmp(s1), %a, %b - %select2:gpr(s64) = G_SELECT %trunc_cmp(s1), %b, %c - %add:gpr(s64) = G_ADD %select1, %select2 - $x0 = COPY %add(s64) - RET_ReallyLR implicit $x0 -... Index: llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir =================================================================== --- llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir +++ llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir @@ -184,35 +184,4 @@ bb.3: RET_ReallyLR -... ---- -name: update_pred_minus_one -legalized: true -regBankSelected: true -body: | - ; CHECK-LABEL: name: update_pred_minus_one - ; CHECK: bb.0: - ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000) - ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0 - ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 0, 0, implicit-def $nzcv - ; CHECK: Bcc 11, %bb.1, implicit $nzcv - ; CHECK: B %bb.0 - ; CHECK: bb.1: - ; The G_ICMP here will be optimized into a slt against 0. - ; The branch should inherit this change, so we should have Bcc 11 rather than - ; Bcc 13. - - bb.0: - liveins: $w0 - successors: %bb.0, %bb.1 - - %0:gpr(s32) = COPY $w0 - %1:gpr(s32) = G_CONSTANT i32 -1 - %2:gpr(s32) = G_ICMP intpred(sle), %0, %1 - %3:gpr(s1) = G_TRUNC %2(s32) - G_BRCOND %3(s1), %bb.1 - G_BR %bb.0 - - bb.1: -... 
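Reviewer's reference, not part of the patch: the bare integers in the CSELWr, CSINCWr, and Bcc operands of the CHECK lines in these tests are AArch64CC::CondCode values. The encoding, from llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h, is:

// AArch64CC::CondCode encoding used in the CHECK lines above and below.
enum CondCode {
  EQ = 0x0, NE = 0x1, HS = 0x2, LO = 0x3, MI = 0x4, PL = 0x5, VS = 0x6, VC = 0x7,
  HI = 0x8, LS = 0x9, GE = 0xa, LT = 0xb, GT = 0xc, LE = 0xd, AL = 0xe, NV = 0xf
};

So CSELWr %cst, %copy2, 11 selects on LT, and a CSET is encoded as CSINCWr $wzr, $wzr, <inverted condition>, which is why an effective sle test appears in the deleted checks as CSINCWr with code 12 (GT).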
Index: llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir =================================================================== --- llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir +++ llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir @@ -99,9 +99,10 @@ ; CHECK: bb.0: ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000) ; CHECK: %copy:gpr64 = COPY $x0 - ; CHECK: %and:gpr64sp = ANDXri %copy, 8000 - ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %and, 0, 0, implicit-def $nzcv - ; CHECK: Bcc 10, %bb.1, implicit $nzcv + ; CHECK: %negative_one:gpr64 = MOVi64imm -1 + ; CHECK: %and:gpr64common = ANDXri %copy, 8000 + ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %and, %negative_one, implicit-def $nzcv + ; CHECK: Bcc 12, %bb.1, implicit $nzcv ; CHECK: B %bb.0 ; CHECK: bb.1: ; CHECK: RET_ReallyLR
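To see the combine's arithmetic in isolation, here is a minimal, self-contained C++ sketch of the same immediate/predicate rewrite that tryAdjustICmpImmAndPred implements above, with the same wrap-around guards. It assumes only the C++ standard library; adjustImmAndPred and the Pred enum are illustrative names, not part of the patch or of LLVM:

#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>

// Same legality test as AArch64GISelUtils::isLegalArithImmed: a 12-bit
// unsigned immediate, optionally shifted left by 12 bits.
constexpr bool isLegalArithImmed(uint64_t C) {
  return (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
}

enum class Pred { SLT, SGE, ULT, UGE, SLE, SGT, ULE, UGT };

// Rewrite (x pred C) as an equivalent (x pred' C') where C' is a legal
// arithmetic immediate, or return std::nullopt when the +-1 adjustment
// would wrap around and change the comparison's meaning.
std::optional<std::pair<uint64_t, Pred>> adjustImmAndPred(uint64_t C, Pred P,
                                                          unsigned Size) {
  if (isLegalArithImmed(C))
    return std::nullopt; // Already selectable as SUBS(W|X)ri.
  switch (P) {
  case Pred::SLT:
  case Pred::SGE:
    // x slt c <=> x sle c - 1 and x sge c <=> x sgt c - 1, unless c is the
    // smallest signed value (c - 1 would wrap to the largest).
    if ((Size == 64 && (int64_t)C == INT64_MIN) ||
        (Size == 32 && (int32_t)C == INT32_MIN))
      return std::nullopt;
    P = (P == Pred::SLT) ? Pred::SLE : Pred::SGT;
    C -= 1;
    break;
  case Pred::ULT:
  case Pred::UGE:
    // x ult c <=> x ule c - 1 and x uge c <=> x ugt c - 1, unless c == 0.
    if (C == 0)
      return std::nullopt;
    P = (P == Pred::ULT) ? Pred::ULE : Pred::UGT;
    C -= 1;
    break;
  case Pred::SLE:
  case Pred::SGT:
    // x sle c <=> x slt c + 1 and x sgt c <=> x sge c + 1, unless c is the
    // largest signed value.
    if ((Size == 32 && (int32_t)C == INT32_MAX) ||
        (Size == 64 && (int64_t)C == INT64_MAX))
      return std::nullopt;
    P = (P == Pred::SLE) ? Pred::SLT : Pred::SGE;
    C += 1;
    break;
  case Pred::ULE:
  case Pred::UGT:
    // x ule c <=> x ult c + 1 and x ugt c <=> x uge c + 1, unless c is the
    // largest unsigned value.
    if ((Size == 32 && (uint32_t)C == UINT32_MAX) ||
        (Size == 64 && C == UINT64_MAX))
      return std::nullopt;
    P = (P == Pred::ULE) ? Pred::ULT : Pred::UGE;
    C += 1;
    break;
  }
  if (Size == 32)
    C = (uint32_t)C; // Re-truncate after the +-1 adjustment.
  if (!isLegalArithImmed(C))
    return std::nullopt;
  return std::make_pair(C, P);
}

int main() {
  // 4097 is not a legal arithmetic immediate, but slt 4097 becomes
  // sle 4096 == 1 << 12, which SUBSWri encodes as imm 1 with shift 12.
  auto R = adjustImmAndPred(4097, Pred::SLT, 32);
  assert(R && R->first == 4096 && R->second == Pred::SLE);

  // INT32_MIN cannot be adjusted downwards; the combine must bail out,
  // matching the no_opt_int32_min test above.
  assert(!adjustImmAndPred((uint64_t)(uint32_t)INT32_MIN, Pred::SLT, 32));
  return 0;
}

The 4097 case in main corresponds to the slt_to_sle_s32 test: 4097 sets bits both inside and outside the low 12, so it cannot be encoded directly, while the adjusted constant 4096 == 1 << 12 fits the shifted-immediate form.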