diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
--- a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -63,7 +63,7 @@
   int64_t &CR;
   ConstantMatch(int64_t &C) : CR(C) {}
   bool match(const MachineRegisterInfo &MRI, Register Reg) {
-    if (auto MaybeCst = getConstantVRegSExtVal(Reg, MRI)) {
+    if (auto MaybeCst = getIConstantVRegSExtVal(Reg, MRI)) {
      CR = *MaybeCst;
      return true;
    }
@@ -73,21 +73,31 @@
 
 inline ConstantMatch m_ICst(int64_t &Cst) { return ConstantMatch(Cst); }
 
-struct ICstRegMatch {
-  Register &CR;
-  ICstRegMatch(Register &C) : CR(C) {}
+struct GCstAndRegMatch {
+  Optional<ValueAndVReg> &ValReg;
+  GCstAndRegMatch(Optional<ValueAndVReg> &ValReg) : ValReg(ValReg) {}
   bool match(const MachineRegisterInfo &MRI, Register Reg) {
-    if (auto MaybeCst = getConstantVRegValWithLookThrough(
-            Reg, MRI, /*LookThroughInstrs*/ true,
-            /*HandleFConstants*/ false)) {
-      CR = MaybeCst->VReg;
-      return true;
-    }
-    return false;
+    ValReg = getIConstantVRegValWithLookThrough(Reg, MRI);
+    return ValReg ? true : false;
+  }
+};
+
+inline GCstAndRegMatch m_GCst(Optional<ValueAndVReg> &ValReg) {
+  return GCstAndRegMatch(ValReg);
+}
+
+struct GFCstAndRegMatch {
+  Optional<FPValueAndVReg> &FPValReg;
+  GFCstAndRegMatch(Optional<FPValueAndVReg> &FPValReg) : FPValReg(FPValReg) {}
+  bool match(const MachineRegisterInfo &MRI, Register Reg) {
+    FPValReg = getFConstantVRegValWithLookThrough(Reg, MRI);
+    return FPValReg ? true : false;
   }
 };
 
-inline ICstRegMatch m_ICst(Register &Reg) { return ICstRegMatch(Reg); }
+inline GFCstAndRegMatch m_GFCst(Optional<FPValueAndVReg> &FPValReg) {
+  return GFCstAndRegMatch(FPValReg);
+}
 
 /// Matcher for a specific constant value.
 struct SpecificConstantMatch {
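
Note: a minimal usage sketch of the two new matchers above (illustrative only,
not part of the patch; assumes a Register Reg and a MachineRegisterInfo MRI in
scope, and use() stands in for combine-specific code):

  Optional<ValueAndVReg> IntCst;
  Optional<FPValueAndVReg> FPCst;
  if (mi_match(Reg, MRI, m_GCst(IntCst))) {
    // Root was a G_CONSTANT (possibly behind copies/extensions):
    // IntCst->Value is the APInt, IntCst->VReg the G_CONSTANT's def vreg.
    use(IntCst->Value, IntCst->VReg);
  } else if (mi_match(Reg, MRI, m_GFCst(FPCst))) {
    // Root was a G_FCONSTANT: FPCst->Value is the APFloat.
    use(FPCst->Value, FPCst->VReg);
  }
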
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
--- a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -162,13 +162,12 @@
                               MachineOptimizationRemarkMissed &R);
 
 /// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
-Optional<APInt> getConstantVRegVal(Register VReg,
-                                   const MachineRegisterInfo &MRI);
+Optional<APInt> getIConstantVRegVal(Register VReg,
+                                    const MachineRegisterInfo &MRI);
 
-/// If \p VReg is defined by a G_CONSTANT fits in int64_t
-/// returns it.
-Optional<int64_t> getConstantVRegSExtVal(Register VReg,
-                                         const MachineRegisterInfo &MRI);
+/// If \p VReg is defined by a G_CONSTANT whose value fits in int64_t, returns
+/// that value.
+Optional<int64_t> getIConstantVRegSExtVal(Register VReg,
+                                          const MachineRegisterInfo &MRI);
 
 /// Simple struct used to hold a constant integer value and a virtual
 /// register.
@@ -176,22 +175,32 @@
   APInt Value;
   Register VReg;
 };
-/// If \p VReg is defined by a statically evaluable chain of
-/// instructions rooted on a G_F/CONSTANT (\p LookThroughInstrs == true)
-/// and that constant fits in int64_t, returns its value as well as the
-/// virtual register defined by this G_F/CONSTANT.
-/// When \p LookThroughInstrs == false this function behaves like
-/// getConstantVRegVal.
-/// When \p HandleFConstants == false the function bails on G_FCONSTANTs.
-/// When \p LookThroughAnyExt == true the function treats G_ANYEXT same as
-/// G_SEXT.
+
+/// If \p VReg is defined by a statically evaluable chain of instructions
+/// rooted on a G_CONSTANT, returns its APInt value and def register.
 Optional<ValueAndVReg>
-getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
-                                  bool LookThroughInstrs = true,
-                                  bool HandleFConstants = true,
-                                  bool LookThroughAnyExt = false);
-const ConstantInt *getConstantIntVRegVal(Register VReg,
-                                         const MachineRegisterInfo &MRI);
+getIConstantVRegValWithLookThrough(Register VReg,
+                                   const MachineRegisterInfo &MRI,
+                                   bool LookThroughInstrs = true);
+
+/// If \p VReg is defined by a statically evaluable chain of instructions
+/// rooted on a G_CONSTANT or G_FCONSTANT, returns its value as an APInt and
+/// the def register.
+Optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
+    Register VReg, const MachineRegisterInfo &MRI,
+    bool LookThroughInstrs = true, bool LookThroughAnyExt = false);
+
+struct FPValueAndVReg {
+  APFloat Value;
+  Register VReg;
+};
+
+/// If \p VReg is defined by a statically evaluable chain of instructions
+/// rooted on a G_FCONSTANT, returns its APFloat value and def register.
+Optional<FPValueAndVReg>
+getFConstantVRegValWithLookThrough(Register VReg,
+                                   const MachineRegisterInfo &MRI,
+                                   bool LookThroughInstrs = true);
+
 const ConstantFP* getConstantFPVRegVal(Register VReg,
                                        const MachineRegisterInfo &MRI);
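
Note: the Utils.h changes above split one lookup into a family of three. A
sketch of the expected results (illustrative, not from a test in this patch;
assumes FReg is defined by G_FCONSTANT double 1.0):

  // None: a G_FCONSTANT is not a G_CONSTANT.
  Optional<ValueAndVReg> I = getIConstantVRegValWithLookThrough(FReg, MRI);
  // Succeeds: Value holds the IEEE bit pattern of 1.0 as an APInt.
  Optional<ValueAndVReg> A = getAnyConstantVRegValWithLookThrough(FReg, MRI);
  // Succeeds: Value holds the APFloat 1.0, VReg the defining G_FCONSTANT.
  Optional<FPValueAndVReg> F = getFConstantVRegValWithLookThrough(FReg, MRI);
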
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -650,7 +650,7 @@
     return false;
 
   auto MaybeMask =
-      getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
+      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
   if (!MaybeMask)
     return false;
 
@@ -1281,7 +1281,7 @@
   Register Add2 = MI.getOperand(1).getReg();
   Register Imm1 = MI.getOperand(2).getReg();
-  auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI);
+  auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
   if (!MaybeImmVal)
     return false;
 
@@ -1291,7 +1291,7 @@
   Register Base = Add2Def->getOperand(1).getReg();
   Register Imm2 = Add2Def->getOperand(2).getReg();
-  auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI);
+  auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
   if (!MaybeImm2Val)
     return false;
 
@@ -1360,7 +1360,7 @@
   Register Shl2 = MI.getOperand(1).getReg();
   Register Imm1 = MI.getOperand(2).getReg();
-  auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI);
+  auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
   if (!MaybeImmVal)
     return false;
 
@@ -1370,7 +1370,7 @@
   Register Base = Shl2Def->getOperand(1).getReg();
   Register Imm2 = Shl2Def->getOperand(2).getReg();
-  auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI);
+  auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
   if (!MaybeImm2Val)
     return false;
 
@@ -1454,7 +1454,7 @@
 
   // Find a matching one-use shift by constant.
   const Register C1 = MI.getOperand(2).getReg();
-  auto MaybeImmVal = getConstantVRegValWithLookThrough(C1, MRI);
+  auto MaybeImmVal = getIConstantVRegValWithLookThrough(C1, MRI);
   if (!MaybeImmVal)
     return false;
 
@@ -1468,7 +1468,7 @@
     // Must be a constant.
     auto MaybeImmVal =
-        getConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
+        getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
     if (!MaybeImmVal)
       return false;
 
@@ -1540,7 +1540,7 @@
                                           unsigned &ShiftVal) {
   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
   auto MaybeImmVal =
-      getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
+      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
   if (!MaybeImmVal)
     return false;
 
@@ -1575,7 +1575,7 @@
 
   // TODO: Should handle vector splat.
   Register RHS = MI.getOperand(2).getReg();
-  auto MaybeShiftAmtVal = getConstantVRegValWithLookThrough(RHS, MRI);
+  auto MaybeShiftAmtVal = getIConstantVRegValWithLookThrough(RHS, MRI);
   if (!MaybeShiftAmtVal)
     return false;
 
@@ -1836,7 +1836,7 @@
     return false;
 
   auto MaybeImmVal =
-      getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
+      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
   if (!MaybeImmVal)
     return false;
 
@@ -2010,7 +2010,7 @@
   Register RHS = PtrAdd.getOffsetReg();
   MachineRegisterInfo &MRI = Builder.getMF().getRegInfo();
 
-  if (auto RHSCst = getConstantVRegSExtVal(RHS, MRI)) {
+  if (auto RHSCst = getIConstantVRegSExtVal(RHS, MRI)) {
     int64_t Cst;
     if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
       NewCst = Cst + *RHSCst;
@@ -2241,7 +2241,7 @@
 bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
   if (auto MaybeCstCmp =
-          getConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) {
+          getIConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) {
     OpIdx = MaybeCstCmp->Value.isNullValue() ? 3 : 2;
     return true;
   }
@@ -2341,7 +2341,7 @@
   if (!MOP.isReg())
     return false;
   // MIPatternMatch doesn't let us look through G_ZEXT etc.
-  auto ValAndVReg = getConstantVRegValWithLookThrough(MOP.getReg(), MRI);
+  auto ValAndVReg = getIConstantVRegValWithLookThrough(MOP.getReg(), MRI);
   return ValAndVReg && ValAndVReg->Value == C;
 }
 
@@ -2962,7 +2962,7 @@
     return false;
 
   if (Ty.isPointer()) {
-    auto ConstVal = getConstantVRegVal(PtrAdd.getBaseReg(), MRI);
+    auto ConstVal = getIConstantVRegVal(PtrAdd.getBaseReg(), MRI);
     return ConstVal && *ConstVal == 0;
   }
 
@@ -3715,7 +3715,7 @@
           {TargetOpcode::G_BUILD_VECTOR, {SrcTy, SrcTy.getElementType()}}))
     return false;
 
-  auto Cst = getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
+  auto Cst = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
   if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements())
     return false;
 
@@ -3788,7 +3788,7 @@
                                MRI.use_instr_nodbg_end())) {
     if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
       return false;
-    auto Cst = getConstantVRegVal(II.getOperand(2).getReg(), MRI);
+    auto Cst = getIConstantVRegVal(II.getOperand(2).getReg(), MRI);
     if (!Cst)
       return false;
     unsigned Idx = Cst.getValue().getZExtValue();
@@ -4106,10 +4106,10 @@
   if (MRI.hasOneNonDBGUse(Src1Reg))
     return false;
 
-  auto C1 = getConstantVRegVal(Src1Def->getOperand(2).getReg(), MRI);
+  auto C1 = getIConstantVRegVal(Src1Def->getOperand(2).getReg(), MRI);
   if (!C1)
     return false;
-  auto C2 = getConstantVRegVal(Src2Reg, MRI);
+  auto C2 = getIConstantVRegVal(Src2Reg, MRI);
   if (!C2)
     return false;
 
@@ -4167,7 +4167,7 @@
   Register Src1Reg = MI.getOperand(1).getReg();
   if (RHS->getOpcode() != TargetOpcode::G_ADD)
     return false;
-  auto C2 = getConstantVRegVal(RHS->getOperand(2).getReg(), MRI);
+  auto C2 = getIConstantVRegVal(RHS->getOperand(2).getReg(), MRI);
   if (!C2)
     return false;
 
@@ -4191,9 +4191,9 @@
   // G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD(X, Y), C)
   // if and only if (G_PTR_ADD X, C) has one use.
   Register LHSBase;
-  Register LHSCstOff;
+  Optional<ValueAndVReg> LHSCstOff;
   if (!mi_match(MI.getBaseReg(), MRI,
-                m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_ICst(LHSCstOff)))))
+                m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff)))))
     return false;
 
   auto *LHSPtrAdd = cast<GPtrAdd>(LHS);
@@ -4204,7 +4204,7 @@
     LHSPtrAdd->moveBefore(&MI);
     Register RHSReg = MI.getOffsetReg();
     Observer.changingInstr(MI);
-    MI.getOperand(2).setReg(LHSCstOff);
+    MI.getOperand(2).setReg(LHSCstOff->VReg);
     Observer.changedInstr(MI);
     Observer.changingInstr(*LHSPtrAdd);
     LHSPtrAdd->getOperand(2).setReg(RHSReg);
@@ -4225,10 +4225,10 @@
   Register Src2Reg = MI.getOperand(2).getReg();
   Register LHSSrc1 = LHSPtrAdd->getBaseReg();
   Register LHSSrc2 = LHSPtrAdd->getOffsetReg();
-  auto C1 = getConstantVRegVal(LHSSrc2, MRI);
+  auto C1 = getIConstantVRegVal(LHSSrc2, MRI);
   if (!C1)
     return false;
-  auto C2 = getConstantVRegVal(Src2Reg, MRI);
+  auto C2 = getIConstantVRegVal(Src2Reg, MRI);
   if (!C2)
     return false;
 
@@ -4337,7 +4337,7 @@
   }
 
   // Find the mask on the RHS.
-  auto Cst = getConstantVRegValWithLookThrough(AndRHS, MRI);
+  auto Cst = getIConstantVRegValWithLookThrough(AndRHS, MRI);
   if (!Cst)
     return false;
   auto Mask = Cst->Value;
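
Note: the G_PTR_ADD reassociation above is why m_GCst yields a ValueAndVReg
rather than a bare value: the combine reuses the constant's existing def via
LHSCstOff->VReg. Hand-written pseudo-MIR of the rewrite (illustrative, not
from a test):

  %c:_(s64) = G_CONSTANT i64 16
  %t:_(p0) = G_PTR_ADD %x, %c      ; single non-debug use
  %r:_(p0) = G_PTR_ADD %t, %y
  ; becomes
  %t:_(p0) = G_PTR_ADD %x, %y
  %r:_(p0) = G_PTR_ADD %t, %c      ; %c reused, no constant rebuilt
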
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
--- a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
@@ -245,7 +245,7 @@
     break;
   }
   case TargetOpcode::G_CONSTANT: {
-    auto CstVal = getConstantVRegVal(R, MRI);
+    auto CstVal = getIConstantVRegVal(R, MRI);
     if (!CstVal)
       break;
     Known = KnownBits::makeConstant(*CstVal);
diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
--- a/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
@@ -37,7 +37,7 @@
     const MachineOperand &MO, int64_t Value,
     const MachineRegisterInfo &MRI) const {
   if (MO.isReg() && MO.getReg())
-    if (auto VRegVal = getConstantVRegValWithLookThrough(MO.getReg(), MRI))
+    if (auto VRegVal = getIConstantVRegValWithLookThrough(MO.getReg(), MRI))
       return VRegVal->Value.getSExtValue() == Value;
   return false;
 }
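
Note: for the getMemsetValue hunk in the LegalizerHelper changes below, a
hand-computed example of the splat logic (illustrative): storing constant byte
0xAB with a 32-bit store type, truncOrSelf(8) yields the 8-bit value 0xAB and
APInt::getSplat(32, 0xAB) yields 0xABABABAB, which is then materialized as a
single wide constant for the store.
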
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -4075,9 +4075,7 @@
   // If the index is a constant, we can really break this down as you would
   // expect, and index into the target size pieces.
   int64_t IdxVal;
-  auto MaybeCst =
-      getConstantVRegValWithLookThrough(Idx, MRI, /*LookThroughInstrs*/ true,
-                                        /*HandleFConstants*/ false);
+  auto MaybeCst = getIConstantVRegValWithLookThrough(Idx, MRI);
   if (MaybeCst) {
     IdxVal = MaybeCst->Value.getSExtValue();
     // Avoid out of bounds indexing the pieces.
@@ -4931,8 +4929,7 @@
   const LLT HalfTy = LLT::scalar(NewBitSize);
   const LLT CondTy = LLT::scalar(1);
 
-  if (auto VRegAndVal =
-          getConstantVRegValWithLookThrough(Amt, MRI, true, false)) {
+  if (auto VRegAndVal = getIConstantVRegValWithLookThrough(Amt, MRI)) {
     return narrowScalarShiftByConstant(MI, VRegAndVal->Value, HalfTy,
                                        ShiftAmtTy);
   }
@@ -7536,7 +7533,7 @@
 static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
   MachineRegisterInfo &MRI = *MIB.getMRI();
   unsigned NumBits = Ty.getScalarSizeInBits();
-  auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
+  auto ValVRegAndVal = getIConstantVRegValWithLookThrough(Val, MRI);
   if (!Ty.isVector() && ValVRegAndVal) {
     APInt Scalar = ValVRegAndVal->Value.truncOrSelf(8);
     APInt SplatVal = APInt::getSplat(NumBits, Scalar);
@@ -7590,7 +7587,7 @@
   const auto &DstMMO = **MI.memoperands_begin();
   MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
 
-  auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
+  auto ValVRegAndVal = getIConstantVRegValWithLookThrough(Val, MRI);
   bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;
 
   if (!findGISelOptimalMemOpLowering(MemOps, Limit,
@@ -7691,7 +7688,7 @@
   bool IsVolatile = MemOp->isVolatile();
 
   // See if this is a constant length copy
-  auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
+  auto LenVRegAndVal = getIConstantVRegValWithLookThrough(Len, MRI);
   // FIXME: support dynamically sized G_MEMCPY_INLINE
   assert(LenVRegAndVal.hasValue() &&
          "inline memcpy with dynamic size is not yet supported");
@@ -7954,7 +7951,7 @@
   }
 
   // See if this is a constant length copy
-  auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
+  auto LenVRegAndVal = getIConstantVRegValWithLookThrough(Len, MRI);
   if (!LenVRegAndVal)
     return UnableToLegalize;
   uint64_t KnownLen = LenVRegAndVal->Value.getZExtValue();
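
Note: before the Utils.cpp changes below, a worked example of what the
lookthrough walk does (hand-written, illustrative):

  %c:_(s32) = G_CONSTANT i32 -1
  %z:_(s64) = G_ZEXT %c

  // getIConstantVRegValWithLookThrough(%z, MRI) peels the G_ZEXT (recording
  // it in SeenOpcodes), reads the APInt at the root, re-applies the
  // extension, and returns {Value = 0x00000000FFFFFFFF (64 bits), VReg = %c}.
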
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -267,10 +267,10 @@
   reportGISelFailure(MF, TPC, MORE, R);
 }
 
-Optional<APInt> llvm::getConstantVRegVal(Register VReg,
-                                         const MachineRegisterInfo &MRI) {
-  Optional<ValueAndVReg> ValAndVReg =
-      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
+Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
+                                          const MachineRegisterInfo &MRI) {
+  Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
+      VReg, MRI, /*LookThroughInstrs*/ false);
   assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
          "Value found while looking through instrs");
   if (!ValAndVReg)
@@ -278,41 +278,27 @@
   return ValAndVReg->Value;
 }
 
-Optional<int64_t> llvm::getConstantVRegSExtVal(Register VReg,
-                                               const MachineRegisterInfo &MRI) {
-  Optional<APInt> Val = getConstantVRegVal(VReg, MRI);
+Optional<int64_t>
+llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
+  Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
   if (Val && Val->getBitWidth() <= 64)
     return Val->getSExtValue();
   return None;
 }
 
-Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
-    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
-    bool HandleFConstant, bool LookThroughAnyExt) {
+namespace {
+
+typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
+typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
+
+Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
+    Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
+    GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
+    bool LookThroughAnyExt = false) {
   SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
   MachineInstr *MI;
-  auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
-    return Opcode == TargetOpcode::G_CONSTANT ||
-           (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
-  };
-  auto GetImmediateValue = [HandleFConstant,
-                            &MRI](const MachineInstr &MI) -> Optional<APInt> {
-    const MachineOperand &CstVal = MI.getOperand(1);
-    if (!CstVal.isImm() && !CstVal.isCImm() &&
-        (!HandleFConstant || !CstVal.isFPImm()))
-      return None;
-    if (!CstVal.isFPImm()) {
-      unsigned BitWidth =
-          MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
-      APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
-                                 : CstVal.getCImm()->getValue();
-      assert(Val.getBitWidth() == BitWidth &&
-             "Value bitwidth doesn't match definition type");
-      return Val;
-    }
-    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
-  };
-  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
+
+  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
          LookThroughInstrs) {
     switch (MI->getOpcode()) {
     case TargetOpcode::G_ANYEXT:
@@ -339,10 +325,10 @@
       return None;
     }
   }
-  if (!MI || !IsConstantOpcode(MI->getOpcode()))
+  if (!MI || !IsConstantOpcode(MI))
     return None;
 
-  Optional<APInt> MaybeVal = GetImmediateValue(*MI);
+  Optional<APInt> MaybeVal = getAPCstValue(MI);
   if (!MaybeVal)
     return None;
   APInt &Val = *MaybeVal;
@@ -365,12 +351,65 @@
   return ValueAndVReg{Val, VReg};
 }
 
-const ConstantInt *llvm::getConstantIntVRegVal(Register VReg,
-                                               const MachineRegisterInfo &MRI) {
-  MachineInstr *MI = MRI.getVRegDef(VReg);
-  if (MI->getOpcode() != TargetOpcode::G_CONSTANT)
-    return nullptr;
-  return MI->getOperand(1).getCImm();
+bool isIConstant(const MachineInstr *MI) {
+  if (!MI)
+    return false;
+  return MI->getOpcode() == TargetOpcode::G_CONSTANT;
+}
+
+bool isFConstant(const MachineInstr *MI) {
+  if (!MI)
+    return false;
+  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
+}
+
+bool isAnyConstant(const MachineInstr *MI) {
+  if (!MI)
+    return false;
+  unsigned Opc = MI->getOpcode();
+  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
+}
+
+Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
+  const MachineOperand &CstVal = MI->getOperand(1);
+  if (CstVal.isCImm())
+    return CstVal.getCImm()->getValue();
+  return None;
+}
+
+Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
+  const MachineOperand &CstVal = MI->getOperand(1);
+  if (CstVal.isCImm())
+    return CstVal.getCImm()->getValue();
+  if (CstVal.isFPImm())
+    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
+  return None;
+}
+
+} // end anonymous namespace
+
+Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
+    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
+  return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
+                                           getCImmAsAPInt, LookThroughInstrs);
+}
+
+Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
+    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
+    bool LookThroughAnyExt) {
+  return getConstantVRegValWithLookThrough(
+      VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
+      LookThroughAnyExt);
+}
+
+Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
+    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
+  auto Reg = getConstantVRegValWithLookThrough(
+      VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
+  if (!Reg)
+    return None;
+  return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
+                        Reg->VReg};
 }
 
 const ConstantFP *
@@ -437,16 +476,16 @@
 Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                         const Register Op2,
                                         const MachineRegisterInfo &MRI) {
-  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
+  auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
   if (!MaybeOp2Cst)
     return None;
 
-  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
+  auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
   if (!MaybeOp1Cst)
     return None;
 
-  const APInt &C1 = *MaybeOp1Cst;
-  const APInt &C2 = *MaybeOp2Cst;
+  const APInt &C1 = MaybeOp1Cst->Value;
+  const APInt &C2 = MaybeOp2Cst->Value;
   switch (Opcode) {
   default:
     break;
@@ -659,7 +698,7 @@
 Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                         uint64_t Imm,
                                         const MachineRegisterInfo &MRI) {
-  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
+  auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
   if (MaybeOp1Cst) {
     switch (Opcode) {
     default:
@@ -677,7 +716,7 @@
                                                Register Src,
                                                const MachineRegisterInfo &MRI) {
   assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
-  if (auto MaybeSrcVal = getConstantVRegVal(Src, MRI)) {
+  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
     APFloat DstVal(getFltSemanticForLLT(DstTy));
     DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
                             APFloat::rmNearestTiesToEven);
@@ -707,7 +746,7 @@
     // shifting the bit off the end is undefined.
 
     // TODO: Constant splat
-    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
+    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
       if (*ConstLHS == 1)
         return true;
     }
@@ -715,7 +754,7 @@
     break;
   }
   case TargetOpcode::G_LSHR: {
-    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
+    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
       if (ConstLHS->isSignMask())
         return true;
     }
@@ -737,7 +776,7 @@
   // zeros is greater than the truncation amount.
   const unsigned BitWidth = Ty.getScalarSizeInBits();
   for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
-    auto Const = getConstantVRegVal(MI.getOperand(I).getReg(), MRI);
+    auto Const = getIConstantVRegVal(MI.getOperand(I).getReg(), MRI);
     if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
       return false;
   }
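
Note: one payoff of the predicate/callback split above is that new lookups
compose without touching the core walk. A hypothetical variant (illustrative
only, not in the patch; isZeroIConstant is made up here):

  static bool isZeroIConstant(const MachineInstr *MI) {
    return isIConstant(MI) && MI->getOperand(1).getCImm()->isZero();
  }
  // Finds a zero G_CONSTANT through copies/casts:
  //   getConstantVRegValWithLookThrough(VReg, MRI, isZeroIConstant,
  //                                     getCImmAsAPInt);
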
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
@@ -26,7 +26,7 @@
     return None;
   Register Src = MI.getOperand(1).getReg();
   if (auto ValAndVReg =
-          getConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI))
+          getAnyConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI))
     return RegOrConstant(ValAndVReg->Value.getSExtValue());
   return RegOrConstant(Src);
 }
@@ -56,7 +56,7 @@
       !CmpInst::isEquality(Pred))
     return false;
   auto MaybeZero =
-      getConstantVRegValWithLookThrough(MaybeSub->getOperand(1).getReg(), MRI);
+      getIConstantVRegValWithLookThrough(MaybeSub->getOperand(1).getReg(), MRI);
   return MaybeZero && MaybeZero->Value.getZExtValue() == 0;
 }
 
@@ -68,7 +68,8 @@
   auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
   if (!TLI.getLibcallName(RTLIB::BZERO))
     return false;
-  auto Zero = getConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI);
+  auto Zero =
+      getIConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI);
   if (!Zero || Zero->Value.getSExtValue() != 0)
     return false;
 
@@ -78,8 +79,8 @@
   if (!MinSize) {
     // If the size is known, check it. If it is not known, assume using bzero is
     // better.
-    if (auto Size =
-            getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI)) {
+    if (auto Size = getIConstantVRegValWithLookThrough(
+            MI.getOperand(2).getReg(), MRI)) {
       if (Size->Value.getSExtValue() <= 256)
         return false;
     }
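
Note: a hand-computed illustration of the bzero heuristic above (thresholds
follow the code; the C-level calls are only shorthand):

  memset(p, 0, 512)  ->  bzero(p, 512)     ; known size > 256
  memset(p, 0, 64)   ->  stays G_MEMSET    ; known size <= 256
  memset(p, 0, n)    ->  bzero(p, n)       ; unknown size, assumed profitable
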
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -666,7 +666,7 @@
     Immed = Root.getCImm()->getZExtValue();
   else if (Root.isReg()) {
     auto ValAndVReg =
-        getConstantVRegValWithLookThrough(Root.getReg(), MRI, true);
+        getIConstantVRegValWithLookThrough(Root.getReg(), MRI, true);
     if (!ValAndVReg)
       return None;
     Immed = ValAndVReg->Value.getSExtValue();
@@ -1209,8 +1209,8 @@
                          &Optimized]() {
     if (Optimized)
       return false;
-    auto TrueCst = getConstantVRegValWithLookThrough(True, MRI);
-    auto FalseCst = getConstantVRegValWithLookThrough(False, MRI);
+    auto TrueCst = getIConstantVRegValWithLookThrough(True, MRI);
+    auto FalseCst = getIConstantVRegValWithLookThrough(False, MRI);
     if (!TrueCst && !FalseCst)
       return false;
@@ -1352,13 +1352,13 @@
   case TargetOpcode::G_XOR: {
     TestReg = MI->getOperand(1).getReg();
     Register ConstantReg = MI->getOperand(2).getReg();
-    auto VRegAndVal = getConstantVRegValWithLookThrough(ConstantReg, MRI);
+    auto VRegAndVal = getIConstantVRegValWithLookThrough(ConstantReg, MRI);
     if (!VRegAndVal) {
       // AND commutes, check the other side for a constant.
       // FIXME: Can we canonicalize the constant so that it's always on the
       // same side at some point earlier?
       std::swap(ConstantReg, TestReg);
-      VRegAndVal = getConstantVRegValWithLookThrough(ConstantReg, MRI);
+      VRegAndVal = getIConstantVRegValWithLookThrough(ConstantReg, MRI);
     }
     if (VRegAndVal) {
       if (HasZext)
@@ -1373,7 +1373,7 @@
   case TargetOpcode::G_SHL: {
     TestReg = MI->getOperand(1).getReg();
     auto VRegAndVal =
-        getConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
+        getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
     if (VRegAndVal)
       C = VRegAndVal->Value.getSExtValue();
     break;
@@ -1501,7 +1501,7 @@
   // Check if the AND has a constant on its RHS which we can use as a mask.
   // If it's a power of 2, then it's the same as checking a specific bit.
   // (e.g, ANDing with 8 == ANDing with 000...100 == testing if bit 3 is set)
-  auto MaybeBit = getConstantVRegValWithLookThrough(
+  auto MaybeBit = getIConstantVRegValWithLookThrough(
       AndInst.getOperand(2).getReg(), *MIB.getMRI());
   if (!MaybeBit)
     return false;
@@ -1577,7 +1577,7 @@
   Register RHS = ICmp.getOperand(3).getReg();
 
   // We're allowed to emit a TB(N)Z/CB(N)Z. Try to do that.
-  auto VRegAndVal = getConstantVRegValWithLookThrough(RHS, MRI);
+  auto VRegAndVal = getIConstantVRegValWithLookThrough(RHS, MRI);
   MachineInstr *AndInst = getOpcodeDef(TargetOpcode::G_AND, LHS, MRI);
 
   // When we can emit a TB(N)Z, prefer that.
@@ -1612,7 +1612,7 @@
   if (ICmpInst::isEquality(Pred)) {
     if (!VRegAndVal) {
       std::swap(RHS, LHS);
-      VRegAndVal = getConstantVRegValWithLookThrough(RHS, MRI);
+      VRegAndVal = getIConstantVRegValWithLookThrough(RHS, MRI);
       AndInst = getOpcodeDef(TargetOpcode::G_AND, LHS, MRI);
     }
 
@@ -2071,7 +2071,7 @@
   // selector which will match the register variant.
   assert(I.getOpcode() == TargetOpcode::G_SHL && "unexpected op");
   const auto &MO = I.getOperand(2);
-  auto VRegAndVal = getConstantVRegVal(MO.getReg(), MRI);
+  auto VRegAndVal = getIConstantVRegVal(MO.getReg(), MRI);
   if (!VRegAndVal)
     return false;
 
@@ -2153,7 +2153,7 @@
   // Before selecting a DUP instruction, check if it is better selected as a
   // MOV or load from a constant pool.
   Register Src = I.getOperand(1).getReg();
-  auto ValAndVReg = getConstantVRegValWithLookThrough(Src, MRI);
+  auto ValAndVReg = getIConstantVRegValWithLookThrough(Src, MRI);
   if (!ValAndVReg)
     return false;
   LLVMContext &Ctx = MF.getFunction().getContext();
@@ -2371,10 +2371,10 @@
   unsigned Size = Ty.getSizeInBits();
   unsigned Opc = OpcTable[IsSigned][Size == 64];
   auto Cst1 =
-      getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), MRI);
+      getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), MRI);
   assert(Cst1 && "Should have gotten a constant for src 1?");
   auto Cst2 =
-      getConstantVRegValWithLookThrough(I.getOperand(3).getReg(), MRI);
+      getIConstantVRegValWithLookThrough(I.getOperand(3).getReg(), MRI);
   assert(Cst2 && "Should have gotten a constant for src 2?");
   auto LSB = Cst1->Value.getZExtValue();
   auto Width = Cst2->Value.getZExtValue();
@@ -2840,9 +2840,8 @@
 
   // If we're storing a 0, use WZR/XZR.
   if (Opcode == TargetOpcode::G_STORE) {
-    auto CVal = getConstantVRegValWithLookThrough(
-        LoadStore->getOperand(0).getReg(), MRI, /*LookThroughInstrs = */ true,
-        /*HandleFConstants = */ false);
+    auto CVal = getIConstantVRegValWithLookThrough(
+        LoadStore->getOperand(0).getReg(), MRI);
     if (CVal && CVal->Value == 0) {
       switch (LoadStore->getOpcode()) {
       case AArch64::STRWui:
@@ -2972,7 +2971,7 @@
 
   case TargetOpcode::G_PTRMASK: {
     Register MaskReg = I.getOperand(2).getReg();
-    Optional<int64_t> MaskVal = getConstantVRegSExtVal(MaskReg, MRI);
+    Optional<int64_t> MaskVal = getIConstantVRegSExtVal(MaskReg, MRI);
     // TODO: Implement arbitrary cases
     if (!MaskVal || !isShiftedMask_64(*MaskVal))
       return false;
@@ -4017,7 +4016,7 @@
   }
 
   // Find the index to extract from.
-  auto VRegAndVal = getConstantVRegValWithLookThrough(LaneIdxOp.getReg(), MRI);
+  auto VRegAndVal = getIConstantVRegValWithLookThrough(LaneIdxOp.getReg(), MRI);
   if (!VRegAndVal)
     return false;
   unsigned LaneIdx = VRegAndVal->Value.getSExtValue();
@@ -4408,7 +4407,7 @@
                                      {AArch64::ANDSXrr, AArch64::ANDSWrr}};
   // ANDS needs a logical immediate for its immediate form. Check if we can
   // fold one in.
-  if (auto ValAndVReg = getConstantVRegValWithLookThrough(RHS.getReg(), MRI)) {
+  if (auto ValAndVReg = getIConstantVRegValWithLookThrough(RHS.getReg(), MRI)) {
     int64_t Imm = ValAndVReg->Value.getSExtValue();
 
     if (AArch64_AM::isLogicalImmediate(Imm, RegSize)) {
@@ -4753,7 +4752,7 @@
   if (!CmpInst::isUnsigned(P) && LHSDef &&
       LHSDef->getOpcode() == TargetOpcode::G_AND) {
     // Make sure that the RHS is 0.
-    auto ValAndVReg = getConstantVRegValWithLookThrough(RHS.getReg(), MRI);
+    auto ValAndVReg = getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
     if (!ValAndVReg || ValAndVReg->Value != 0)
       return nullptr;
 
@@ -4955,7 +4954,7 @@
   // Find the definition of the index. Bail out if it's not defined by a
   // G_CONSTANT.
   Register IdxReg = I.getOperand(3).getReg();
-  auto VRegAndVal = getConstantVRegValWithLookThrough(IdxReg, MRI);
+  auto VRegAndVal = getIConstantVRegValWithLookThrough(IdxReg, MRI);
   if (!VRegAndVal)
     return false;
   unsigned LaneIdx = VRegAndVal->Value.getSExtValue();
@@ -5653,7 +5652,7 @@
   // constant is the RHS.
   Register OffsetReg = OffsetInst->getOperand(1).getReg();
   Register ConstantReg = OffsetInst->getOperand(2).getReg();
-  auto ValAndVReg = getConstantVRegValWithLookThrough(ConstantReg, MRI);
+  auto ValAndVReg = getIConstantVRegValWithLookThrough(ConstantReg, MRI);
   if (!ValAndVReg) {
     // We didn't get a constant on the RHS. If the opcode is a shift, then
     // we're done.
@@ -5662,7 +5661,7 @@
 
     // If we have a G_MUL, we can use either register. Try looking at the RHS.
     std::swap(OffsetReg, ConstantReg);
-    ValAndVReg = getConstantVRegValWithLookThrough(ConstantReg, MRI);
+    ValAndVReg = getIConstantVRegValWithLookThrough(ConstantReg, MRI);
     if (!ValAndVReg)
       return None;
   }
@@ -5830,7 +5829,7 @@
   //   mov x0, wide
   //   ldr x2, [base, x0]
   auto ValAndVReg =
-      getConstantVRegValWithLookThrough(PtrAdd->getOperand(2).getReg(), MRI);
+      getIConstantVRegValWithLookThrough(PtrAdd->getOperand(2).getReg(), MRI);
   if (ValAndVReg) {
     unsigned Scale = Log2_32(SizeInBytes);
     int64_t ImmOff = ValAndVReg->Value.getSExtValue();
@@ -6295,7 +6294,7 @@
   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
          "Expected G_CONSTANT");
   Optional<int64_t> CstVal =
-      getConstantVRegSExtVal(MI.getOperand(0).getReg(), MRI);
+      getIConstantVRegSExtVal(MI.getOperand(0).getReg(), MRI);
   assert(CstVal && "Expected constant value");
   MIB.addImm(CstVal.getValue());
 }
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -978,7 +978,7 @@
   // If the shift amount is a G_CONSTANT, promote it to a 64 bit type so the
   // imported patterns can select it later. Either way, it will be legal.
   Register AmtReg = MI.getOperand(2).getReg();
-  auto VRegAndVal = getConstantVRegValWithLookThrough(AmtReg, MRI);
+  auto VRegAndVal = getIConstantVRegValWithLookThrough(AmtReg, MRI);
   if (!VRegAndVal)
     return true;
   // Check the shift amount is in range for an immediate form.
@@ -1085,8 +1085,8 @@
     MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const {
   // Only legal if we can select immediate forms.
   // TODO: Lower this otherwise.
-  return getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI) &&
-         getConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
+  return getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI) &&
+         getIConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
 }
 
 bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI,
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
@@ -55,7 +55,7 @@
   Register Src2 = MI.getOperand(2).getReg();
   LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
-  auto Cst = getConstantVRegValWithLookThrough(Src2, MRI);
+  auto Cst = getIConstantVRegValWithLookThrough(Src2, MRI);
   if (!Cst || Cst->Value != 0)
     return false;
   // SDAG also checks for FullFP16, but this looks to be beneficial anyway.
@@ -129,7 +129,7 @@
   const LLT Ty = MRI.getType(LHS);
 
   // The below optimizations require a constant RHS.
-  auto Const = getConstantVRegValWithLookThrough(RHS, MRI);
+  auto Const = getIConstantVRegValWithLookThrough(RHS, MRI);
   if (!Const)
     return false;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -527,7 +527,7 @@
 
   // If the RHS is not a constant, or the RHS is already a valid arithmetic
   // immediate, then there is nothing to change.
-  auto ValAndVReg = getConstantVRegValWithLookThrough(RHS, MRI);
+  auto ValAndVReg = getIConstantVRegValWithLookThrough(RHS, MRI);
   if (!ValAndVReg)
     return None;
   uint64_t C = ValAndVReg->Value.getZExtValue();
@@ -757,7 +757,7 @@
   if (MI.getOpcode() != TargetOpcode::G_AND)
     return false;
   auto ValAndVReg =
-      getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
+      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
   if (!ValAndVReg)
     return false;
   uint64_t Mask = ValAndVReg->Value.getZExtValue();
@@ -774,7 +774,7 @@
     return 0;
 
   auto MaybeShiftAmt =
-      getConstantVRegValWithLookThrough(Def->getOperand(2).getReg(), MRI);
+      getIConstantVRegValWithLookThrough(Def->getOperand(2).getReg(), MRI);
   if (!MaybeShiftAmt)
     return 0;
   uint64_t ShiftAmt = MaybeShiftAmt->Value.getZExtValue();
@@ -814,7 +814,7 @@
   // Don't swap if there's a constant on the RHS, because we know we can fold
   // that.
   Register RHS = MI.getOperand(3).getReg();
-  auto RHSCst = getConstantVRegValWithLookThrough(RHS, MRI);
+  auto RHSCst = getIConstantVRegValWithLookThrough(RHS, MRI);
   if (RHSCst && isLegalArithImmed(RHSCst->Value.getSExtValue()))
     return false;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
@@ -146,8 +146,8 @@
   for (auto &UseInstr : MRI.use_nodbg_instructions(Dst)) {
     if (UseInstr.getOpcode() != TargetOpcode::G_PTR_ADD)
       return false;
-    auto Cst =
-        getConstantVRegValWithLookThrough(UseInstr.getOperand(2).getReg(), MRI);
+    auto Cst = getIConstantVRegValWithLookThrough(
+        UseInstr.getOperand(2).getReg(), MRI);
     if (!Cst)
       return false;
     MinOffset = std::min(MinOffset, Cst->Value.getZExtValue());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -141,7 +141,7 @@
       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
 
   Optional<ValueAndVReg> ConstVal =
-      getConstantVRegValWithLookThrough(SrcReg, *MRI, true, true);
+      getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
   if (ConstVal) {
    unsigned MovOpc = STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
@@ -609,11 +609,10 @@
   const DebugLoc &DL = MI.getDebugLoc();
   MachineBasicBlock *BB = MI.getParent();
 
-  auto ConstSrc1 =
-      getConstantVRegValWithLookThrough(Src1, *MRI, true, true, true);
+  auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
   if (ConstSrc1) {
     auto ConstSrc0 =
-        getConstantVRegValWithLookThrough(Src0, *MRI, true, true, true);
+        getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
     if (ConstSrc0) {
       const int64_t K0 = ConstSrc0->Value.getSExtValue();
       const int64_t K1 = ConstSrc1->Value.getSExtValue();
@@ -845,7 +844,7 @@
   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
 
   Optional<ValueAndVReg> ConstSelect =
-      getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true);
+      getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
   if (ConstSelect) {
     // The selector has to be an inline immediate, so we can use whatever for
     // the other operands.
@@ -854,7 +853,7 @@
                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
   } else {
     Optional<ValueAndVReg> ConstVal =
-        getConstantVRegValWithLookThrough(Val, *MRI, true, true);
+        getIConstantVRegValWithLookThrough(Val, *MRI);
 
     // If the value written is an inline immediate, we can get away without a
     // copy to m0.
@@ -1131,7 +1130,7 @@
     return false;
 
   Optional<ValueAndVReg> Arg =
-      getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true);
+      getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
 
   if (Arg.hasValue()) {
     const int64_t Value = Arg.getValue().Value.getSExtValue();
@@ -4010,8 +4009,8 @@
     return {Root, 0};
 
   MachineOperand &RHS = RootI->getOperand(2);
-  Optional<ValueAndVReg> MaybeOffset
-      = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
+  Optional<ValueAndVReg> MaybeOffset =
+      getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
   if (!MaybeOffset)
     return {Root, 0};
   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
@@ -4339,8 +4338,8 @@
 /// Get an immediate that must be 32-bits, and treated as zero extended.
 static Optional<uint64_t> getConstantZext32Val(Register Reg,
                                                const MachineRegisterInfo &MRI) {
-  // getConstantVRegVal sexts any values, so see if that matters.
-  Optional<int64_t> OffsetVal = getConstantVRegSExtVal(Reg, MRI);
+  // getIConstantVRegVal sexts any values, so see if that matters.
+  Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
   if (!OffsetVal || !isInt<32>(*OffsetVal))
     return None;
   return Lo_32(*OffsetVal);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2224,9 +2224,9 @@
 
   // FIXME: Artifact combiner probably should have replaced the truncated
   // constant before this, so we shouldn't need
-  // getConstantVRegValWithLookThrough.
+  // getIConstantVRegValWithLookThrough.
   Optional<ValueAndVReg> MaybeIdxVal =
-      getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
+      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
   if (!MaybeIdxVal) // Dynamic case will be selected to register indexing.
     return true;
   const int64_t IdxVal = MaybeIdxVal->Value.getSExtValue();
@@ -2256,9 +2256,9 @@
 
   // FIXME: Artifact combiner probably should have replaced the truncated
   // constant before this, so we shouldn't need
-  // getConstantVRegValWithLookThrough.
+  // getIConstantVRegValWithLookThrough.
   Optional<ValueAndVReg> MaybeIdxVal =
-      getConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
+      getIConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
   if (!MaybeIdxVal) // Dynamic case will be selected to register indexing.
     return true;
 
@@ -2811,7 +2811,7 @@
 static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI) {
   if (MI.getOpcode() != TargetOpcode::G_XOR)
     return false;
-  auto ConstVal = getConstantVRegSExtVal(MI.getOperand(2).getReg(), MRI);
+  auto ConstVal = getIConstantVRegSExtVal(MI.getOperand(2).getReg(), MRI);
   return ConstVal && *ConstVal == -1;
 }
 
@@ -3777,11 +3777,11 @@
     unsigned ImmOffset, Register VIndex,
     MachineRegisterInfo &MRI) const {
   Optional<ValueAndVReg> MaybeVOffsetVal =
-      getConstantVRegValWithLookThrough(VOffset, MRI);
+      getIConstantVRegValWithLookThrough(VOffset, MRI);
   Optional<ValueAndVReg> MaybeSOffsetVal =
-      getConstantVRegValWithLookThrough(SOffset, MRI);
+      getIConstantVRegValWithLookThrough(SOffset, MRI);
   Optional<ValueAndVReg> MaybeVIndexVal =
-      getConstantVRegValWithLookThrough(VIndex, MRI);
+      getIConstantVRegValWithLookThrough(VIndex, MRI);
   // If the combined VOffset + SOffset + ImmOffset + strided VIndex is constant,
   // update the MMO with that offset. The stride is unknown so we can only do
   // this if VIndex is constant 0.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
@@ -57,9 +57,9 @@
 
   MinMaxMedOpc getMinMaxPair(unsigned Opc);
 
-  template <class m_Cst>
+  template <class m_Cst, typename CstTy>
   bool matchMed(MachineInstr &MI, MachineRegisterInfo &MRI, MinMaxMedOpc MMMOpc,
-                Register &Val, Register &K0, Register &K1);
+                Register &Val, CstTy &K0, CstTy &K1);
 
   bool matchIntMinMaxToMed3(MachineInstr &MI, Med3MatchInfo &MatchInfo);
   void applyMed3(MachineInstr &MI, Med3MatchInfo &MatchInfo);
@@ -83,11 +83,11 @@
   }
 }
 
-template <class m_Cst>
+template <class m_Cst, typename CstTy>
 bool AMDGPURegBankCombinerHelper::matchMed(MachineInstr &MI,
                                            MachineRegisterInfo &MRI,
                                            MinMaxMedOpc MMMOpc, Register &Val,
-                                           Register &K0, Register &K1) {
+                                           CstTy &K0, CstTy &K1) {
   // 4 operand commutes of: min(max(Val, K0), K1).
   // Find K1 from outer instr: min(max(...), K1) or min(K1, max(...)).
   // Find K0 and Val from inner instr: max(K0, Val) or max(Val, K0).
@@ -115,19 +115,18 @@
     return false;
 
   MinMaxMedOpc OpcodeTriple = getMinMaxPair(MI.getOpcode());
-  Register Val, K0, K1;
+  Register Val;
+  Optional<ValueAndVReg> K0, K1;
   // Match min(max(Val, K0), K1) or max(min(Val, K1), K0). Then see if K0 <= K1.
-  if (!matchMed<ICstRegMatch>(MI, MRI, OpcodeTriple, Val, K0, K1))
+  if (!matchMed<GCstAndRegMatch>(MI, MRI, OpcodeTriple, Val, K0, K1))
    return false;
 
-  const APInt &K0_Imm = getConstantIntVRegVal(K0, MRI)->getValue();
-  const APInt &K1_Imm = getConstantIntVRegVal(K1, MRI)->getValue();
-  if (OpcodeTriple.Med == AMDGPU::G_AMDGPU_SMED3 && K0_Imm.sgt(K1_Imm))
+  if (OpcodeTriple.Med == AMDGPU::G_AMDGPU_SMED3 && K0->Value.sgt(K1->Value))
     return false;
-  if (OpcodeTriple.Med == AMDGPU::G_AMDGPU_UMED3 && K0_Imm.ugt(K1_Imm))
+  if (OpcodeTriple.Med == AMDGPU::G_AMDGPU_UMED3 && K0->Value.ugt(K1->Value))
     return false;
 
-  MatchInfo = {OpcodeTriple.Med, Val, K0, K1};
+  MatchInfo = {OpcodeTriple.Med, Val, K0->VReg, K1->VReg};
   return true;
 }
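
Note: a concrete instance of the clamp pattern the rewritten
matchIntMinMaxToMed3 accepts (hand-written pseudo-MIR, illustrative; the
signed check passes because K0 = -5 <= K1 = 7):

  %k0:_(s32) = G_CONSTANT i32 -5
  %k1:_(s32) = G_CONSTANT i32 7
  %max:_(s32) = G_SMAX %x, %k0
  %min:_(s32) = G_SMIN %max, %k1
  ; MatchInfo becomes {G_AMDGPU_SMED3, %x, K0->VReg, K1->VReg}
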
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -1336,7 +1336,7 @@
   const LLT S32 = LLT::scalar(32);
   MachineRegisterInfo *MRI = B.getMRI();
 
-  if (Optional<int64_t> Imm = getConstantVRegSExtVal(CombinedOffset, *MRI)) {
+  if (Optional<int64_t> Imm = getIConstantVRegSExtVal(CombinedOffset, *MRI)) {
     uint32_t SOffset, ImmOffset;
     if (AMDGPU::splitMUBUFOffset(*Imm, SOffset, ImmOffset, &RBI.Subtarget,
                                  Alignment)) {
@@ -1569,7 +1569,7 @@
 
   // A 64-bit bitfield extract uses the 32-bit bitfield extract instructions
   // if the width is a constant.
-  if (auto ConstWidth = getConstantVRegValWithLookThrough(WidthReg, MRI)) {
+  if (auto ConstWidth = getIConstantVRegValWithLookThrough(WidthReg, MRI)) {
     // Use the 32-bit bitfield extract instruction if the width is a constant.
     // Depending on the width size, use either the low or high 32-bits.
     auto Zero = B.buildConstant(S32, 0);
diff --git a/llvm/lib/Target/X86/X86InstructionSelector.cpp b/llvm/lib/Target/X86/X86InstructionSelector.cpp
--- a/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -479,7 +479,7 @@
          "unsupported type.");
 
   if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
-    if (auto COff = getConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
+    if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
@@ -1065,7 +1065,7 @@
       return false;
 
     Opcode = X86::ADC32rr;
-  } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) {
+  } else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
     // carry is constant, support only 0.
     if (*val != 0)
       return false;
diff --git a/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp b/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
--- a/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
@@ -45,10 +45,10 @@
   if (!TM)
     return;
   auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
-  Register Src0;
-  bool match = mi_match(MIBCst.getReg(0), *MRI, m_ICst(Src0));
+  Optional<ValueAndVReg> Src0;
+  bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0));
   EXPECT_TRUE(match);
-  EXPECT_EQ(Src0, MIBCst.getReg(0));
+  EXPECT_EQ(Src0->VReg, MIBCst.getReg(0));
 }
 
 TEST_F(AArch64GISelMITest, MachineInstrPtrBind) {
@@ -555,6 +555,25 @@
   EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_AllOnesInt()));
 }
 
+TEST_F(AArch64GISelMITest, MatchFPOrIntConst) {
+  setUp();
+  if (!TM)
+    return;
+
+  Register IntOne = B.buildConstant(LLT::scalar(64), 1).getReg(0);
+  Register FPOne = B.buildFConstant(LLT::scalar(64), 1.0).getReg(0);
+  Optional<ValueAndVReg> ValReg;
+  Optional<FPValueAndVReg> FValReg;
+
+  EXPECT_TRUE(mi_match(IntOne, *MRI, m_GCst(ValReg)));
+  EXPECT_EQ(IntOne, ValReg->VReg);
+  EXPECT_FALSE(mi_match(IntOne, *MRI, m_GFCst(FValReg)));
+
+  EXPECT_FALSE(mi_match(FPOne, *MRI, m_GCst(ValReg)));
+  EXPECT_TRUE(mi_match(FPOne, *MRI, m_GFCst(FValReg)));
+  EXPECT_EQ(FPOne, FValReg->VReg);
+}
+
 TEST_F(AArch64GISelMITest, MatchNeg) {
   setUp();
   if (!TM)