diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h --- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h +++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h @@ -1537,7 +1537,8 @@ /// compares against in CmpValue. Return true if the comparison instruction /// can be analyzed. virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &Mask, int &Value) const { + Register &SrcReg2, int64_t &Mask, + int64_t &Value) const { return false; } @@ -1545,7 +1546,8 @@ /// into something more efficient. E.g., on ARM most instructions can set the /// flags register, obviating the need for a separate CMP. virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, - Register SrcReg2, int Mask, int Value, + Register SrcReg2, int64_t Mask, + int64_t Value, const MachineRegisterInfo *MRI) const { return false; } diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp --- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp +++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp @@ -626,7 +626,7 @@ // If this instruction is a comparison against zero and isn't comparing a // physical register, we can try to optimize it. Register SrcReg, SrcReg2; - int CmpMask, CmpValue; + int64_t CmpMask, CmpValue; if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) || SrcReg.isPhysical() || SrcReg2.isPhysical()) return false; diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h @@ -227,12 +227,12 @@ /// in SrcReg and SrcReg2, and the value it compares against in CmpValue. /// Return true if the comparison instruction can be analyzed. 
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &CmpMask, - int &CmpValue) const override; + Register &SrcReg2, int64_t &CmpMask, + int64_t &CmpValue) const override; /// optimizeCompareInstr - Convert the instruction supplying the argument to /// the comparison into one that sets the zero bit in the flags register. bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, - Register SrcReg2, int CmpMask, int CmpValue, + Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override; bool optimizeCondBranch(MachineInstr &MI) const override; diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -1112,24 +1112,14 @@ /// in SrcReg and SrcReg2, and the value it compares against in CmpValue. /// Return true if the comparison instruction can be analyzed. bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &CmpMask, - int &CmpValue) const { + Register &SrcReg2, int64_t &CmpMask, + int64_t &CmpValue) const { // The first operand can be a frame index where we'd normally expect a // register. assert(MI.getNumOperands() >= 2 && "All AArch64 cmps should have 2 operands"); if (!MI.getOperand(1).isReg()) return false; - auto NormalizeCmpValue = [](int64_t Value) -> int { - // Comparison immediates may be 64-bit, but CmpValue is only an int. - // Normalize to 0/1/2 return value, where 2 indicates any value apart from - // 0 or 1. - // TODO: Switch CmpValue to int64_t in the API to avoid this. 
- if (Value == 0 || Value == 1) - return Value; - return 2; - }; - switch (MI.getOpcode()) { default: break; @@ -1165,7 +1155,7 @@ SrcReg = MI.getOperand(1).getReg(); SrcReg2 = 0; CmpMask = ~0; - CmpValue = NormalizeCmpValue(MI.getOperand(2).getImm()); + CmpValue = MI.getOperand(2).getImm(); return true; case AArch64::ANDSWri: case AArch64::ANDSXri: @@ -1174,9 +1164,9 @@ SrcReg = MI.getOperand(1).getReg(); SrcReg2 = 0; CmpMask = ~0; - CmpValue = NormalizeCmpValue(AArch64_AM::decodeLogicalImmediate( + CmpValue = AArch64_AM::decodeLogicalImmediate( MI.getOperand(2).getImm(), - MI.getOpcode() == AArch64::ANDSWri ? 32 : 64)); + MI.getOpcode() == AArch64::ANDSWri ? 32 : 64); return true; } @@ -1437,8 +1427,8 @@ /// instruction. /// Only comparison with zero is supported. bool AArch64InstrInfo::optimizeCompareInstr( - MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int CmpMask, - int CmpValue, const MachineRegisterInfo *MRI) const { + MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, + int64_t CmpValue, const MachineRegisterInfo *MRI) const { assert(CmpInstr.getParent()); assert(MRI); @@ -1466,9 +1456,6 @@ if (CmpInstr.getOpcode() == AArch64::PTEST_PP) return optimizePTestInstr(&CmpInstr, SrcReg, SrcReg2, MRI); - // Warning: CmpValue == 2 indicates *any* value apart from 0 or 1. - assert((CmpValue == 0 || CmpValue == 1 || CmpValue == 2) && - "CmpValue must be 0, 1, or 2!"); if (SrcReg2 != 0) return false; diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h @@ -289,15 +289,15 @@ /// compares against in CmpValue. Return true if the comparison instruction /// can be analyzed. 
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &CmpMask, - int &CmpValue) const override; + Register &SrcReg2, int64_t &CmpMask, + int64_t &CmpValue) const override; /// optimizeCompareInstr - Convert the instruction to set the zero flag so /// that we can remove a "comparison with zero"; Remove a redundant CMP /// instruction if the flags can be updated in the same way by an earlier /// instruction such as SUB. bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, - Register SrcReg2, int CmpMask, int CmpValue, + Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override; bool analyzeSelect(const MachineInstr &MI, diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -2798,8 +2798,8 @@ /// compares against in CmpValue. Return true if the comparison instruction /// can be analyzed. bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &CmpMask, - int &CmpValue) const { + Register &SrcReg2, int64_t &CmpMask, + int64_t &CmpValue) const { switch (MI.getOpcode()) { default: break; case ARM::CMPri: @@ -2870,7 +2870,8 @@ /// This function can be extended later on. inline static bool isRedundantFlagInstr(const MachineInstr *CmpI, Register SrcReg, Register SrcReg2, - int ImmValue, const MachineInstr *OI, + int64_t ImmValue, + const MachineInstr *OI, bool &IsThumb1) { if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) && (OI->getOpcode() == ARM::SUBrr || OI->getOpcode() == ARM::t2SUBrr) && @@ -3005,8 +3006,8 @@ /// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the /// condition code of instructions which use the flags. 
bool ARMBaseInstrInfo::optimizeCompareInstr( - MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int CmpMask, - int CmpValue, const MachineRegisterInfo *MRI) const { + MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, + int64_t CmpValue, const MachineRegisterInfo *MRI) const { // Get the unique definition of SrcReg. MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); if (!MI) return false; @@ -3293,7 +3294,7 @@ MachineBasicBlock::const_iterator Next = &MI; ++Next; Register SrcReg, SrcReg2; - int CmpMask, CmpValue; + int64_t CmpMask, CmpValue; bool IsThumb1; if (Next != MI.getParent()->end() && analyzeCompare(*Next, SrcReg, SrcReg2, CmpMask, CmpValue) && diff --git a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp --- a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -468,7 +468,7 @@ return false; Register CmpReg1, CmpReg2; - int CmpImm = 0, CmpMask = 0; + int64_t CmpImm = 0, CmpMask = 0; bool CmpAnalyzed = TII->analyzeCompare(*PredI, CmpReg1, CmpReg2, CmpMask, CmpImm); // Fail if the compare was not analyzed, or it's not comparing a register @@ -652,7 +652,7 @@ unsigned CondOpc = CondI->getOpcode(); Register CmpReg1, CmpReg2; - int Mask = 0, ImmValue = 0; + int64_t Mask = 0, ImmValue = 0; bool AnalyzedCmp = TII->analyzeCompare(*CondI, CmpReg1, CmpReg2, Mask, ImmValue); if (!AnalyzedCmp) @@ -1453,7 +1453,7 @@ E = MRI->use_instr_nodbg_end(); I != E; ++I) { MachineInstr *MI = &*I; Register CmpReg1, CmpReg2; - int CmpMask = 0, CmpValue = 0; + int64_t CmpMask = 0, CmpValue = 0; if (!TII->analyzeCompare(*MI, CmpReg1, CmpReg2, CmpMask, CmpValue)) continue; diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h --- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h +++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h @@ -270,7 +270,8 @@ /// compares against in CmpValue. 
Return true if the comparison instruction /// can be analyzed. bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &Mask, int &Value) const override; + Register &SrcReg2, int64_t &Mask, + int64_t &Value) const override; /// Compute the instruction latency of a given instruction. /// If the instruction has higher cost when predicated, it's returned via diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp --- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -1791,8 +1791,8 @@ /// compares against in CmpValue. Return true if the comparison instruction /// can be analyzed. bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &Mask, - int &Value) const { + Register &SrcReg2, int64_t &Mask, + int64_t &Value) const { unsigned Opc = MI.getOpcode(); // Set mask and the first source register. diff --git a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp --- a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp +++ b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp @@ -508,7 +508,7 @@ while (CmpI->getOpcode() == Hexagon::C2_not) CmpI = MRI->getVRegDef(CmpI->getOperand(1).getReg()); - int Mask = 0, Val = 0; + int64_t Mask = 0, Val = 0; bool OkCI = TII->analyzeCompare(*CmpI, CmpR1, CmpR2, Mask, Val); if (!OkCI) return; diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.h b/llvm/lib/Target/Lanai/LanaiInstrInfo.h --- a/llvm/lib/Target/Lanai/LanaiInstrInfo.h +++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.h @@ -96,14 +96,14 @@ // SrcReg2 if having two register operands, and the value it compares against // in CmpValue. Return true if the comparison instruction can be analyzed. 
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &CmpMask, - int &CmpValue) const override; + Register &SrcReg2, int64_t &CmpMask, + int64_t &CmpValue) const override; // See if the comparison instruction can be converted into something more // efficient. E.g., on Lanai register-register instructions can set the flag // register, obviating the need for a separate compare. bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, - Register SrcReg2, int CmpMask, int CmpValue, + Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override; // Analyze the given select instruction, returning true if it cannot be diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp --- a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp +++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp @@ -175,8 +175,8 @@ } bool LanaiInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &CmpMask, - int &CmpValue) const { + Register &SrcReg2, int64_t &CmpMask, + int64_t &CmpValue) const { switch (MI.getOpcode()) { default: break; @@ -203,7 +203,7 @@ // * SFSUB_F_RR can be made redundant by SUB_RI if the operands are the same. // * SFSUB_F_RI can be made redundant by SUB_I if the operands are the same. inline static bool isRedundantFlagInstr(MachineInstr *CmpI, unsigned SrcReg, - unsigned SrcReg2, int ImmValue, + unsigned SrcReg2, int64_t ImmValue, MachineInstr *OI) { if (CmpI->getOpcode() == Lanai::SFSUB_F_RR && OI->getOpcode() == Lanai::SUB_R && @@ -281,8 +281,9 @@ } bool LanaiInstrInfo::optimizeCompareInstr( - MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int /*CmpMask*/, - int CmpValue, const MachineRegisterInfo *MRI) const { + MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, + int64_t /*CmpMask*/, int64_t CmpValue, + const MachineRegisterInfo *MRI) const { // Get the unique definition of SrcReg. 
MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); if (!MI) diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/llvm/lib/Target/PowerPC/PPCInstrInfo.h --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.h +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.h @@ -524,10 +524,11 @@ // Comparison optimization. bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &Mask, int &Value) const override; + Register &SrcReg2, int64_t &Mask, + int64_t &Value) const override; bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, - Register SrcReg2, int Mask, int Value, + Register SrcReg2, int64_t Mask, int64_t Value, const MachineRegisterInfo *MRI) const override; diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -2343,8 +2343,8 @@ } bool PPCInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &Mask, - int &Value) const { + Register &SrcReg2, int64_t &Mask, + int64_t &Value) const { unsigned Opc = MI.getOpcode(); switch (Opc) { @@ -2373,7 +2373,8 @@ } bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, - Register SrcReg2, int Mask, int Value, + Register SrcReg2, int64_t Mask, + int64_t Value, const MachineRegisterInfo *MRI) const { if (DisableCmpOpt) return false; diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h --- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h +++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h @@ -234,7 +234,8 @@ const DebugLoc &DL, int *BytesAdded = nullptr) const override; bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &Mask, int &Value) const override; + Register &SrcReg2, int64_t &Mask, + int64_t &Value) const override; bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond, Register, Register, Register, int &, int &, int &)
const override; diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp --- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -514,8 +514,8 @@ } bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &Mask, - int &Value) const { + Register &SrcReg2, int64_t &Mask, + int64_t &Value) const { assert(MI.isCompare() && "Caller should have checked for a comparison"); if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() && diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h --- a/llvm/lib/Target/X86/X86InstrInfo.h +++ b/llvm/lib/Target/X86/X86InstrInfo.h @@ -510,14 +510,14 @@ /// compares against in CmpValue. Return true if the comparison instruction /// can be analyzed. bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &CmpMask, - int &CmpValue) const override; + Register &SrcReg2, int64_t &CmpMask, + int64_t &CmpValue) const override; /// optimizeCompareInstr - Check if there exists an earlier instruction that /// operates on the same source operands and sets flags in the same way as /// Compare; remove Compare if possible. 
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, - Register SrcReg2, int CmpMask, int CmpValue, + Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override; /// optimizeLoadInstr - Try to remove the load by folding it to a register diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -3922,8 +3922,8 @@ } bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, - Register &SrcReg2, int &CmpMask, - int &CmpValue) const { + Register &SrcReg2, int64_t &CmpMask, + int64_t &CmpValue) const { switch (MI.getOpcode()) { default: break; case X86::CMP64ri32: @@ -4010,7 +4010,7 @@ /// ImmValue: immediate for FlagI if it takes an immediate. inline static bool isRedundantFlagInstr(const MachineInstr &FlagI, Register SrcReg, Register SrcReg2, - int ImmMask, int ImmValue, + int64_t ImmMask, int64_t ImmValue, const MachineInstr &OI) { if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) || (FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) || @@ -4207,8 +4207,8 @@ /// operates on the same source operands and sets flags in the same way as /// Compare; remove Compare if possible. bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, - Register SrcReg2, int CmpMask, - int CmpValue, + Register SrcReg2, int64_t CmpMask, + int64_t CmpValue, const MachineRegisterInfo *MRI) const { // Check whether we can replace SUB with CMP. switch (CmpInstr.getOpcode()) {