Index: llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h =================================================================== --- llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h +++ llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h @@ -63,7 +63,7 @@ int64_t &CR; ConstantMatch(int64_t &C) : CR(C) {} bool match(const MachineRegisterInfo &MRI, Register Reg) { - if (auto MaybeCst = getConstantVRegSExtVal(Reg, MRI)) { + if (auto MaybeCst = getGConstantVRegSExtVal(Reg, MRI)) { CR = *MaybeCst; return true; } @@ -73,21 +73,31 @@ inline ConstantMatch m_ICst(int64_t &Cst) { return ConstantMatch(Cst); } -struct ICstRegMatch { - Register &CR; - ICstRegMatch(Register &C) : CR(C) {} +struct GCstAndRegMatch { + Optional &ValReg; + GCstAndRegMatch(Optional &ValReg) : ValReg(ValReg) {} bool match(const MachineRegisterInfo &MRI, Register Reg) { - if (auto MaybeCst = getConstantVRegValWithLookThrough( - Reg, MRI, /*LookThroughInstrs*/ true, - /*HandleFConstants*/ false)) { - CR = MaybeCst->VReg; - return true; - } - return false; + ValReg = getGCstVRegValWithLookThrough(Reg, MRI); + return ValReg ? true : false; + } +}; + +inline GCstAndRegMatch m_GCst(Optional &ValReg) { + return GCstAndRegMatch(ValReg); +} + +struct GFCstAndRegMatch { + Optional &FPValReg; + GFCstAndRegMatch(Optional &FPValReg) : FPValReg(FPValReg) {} + bool match(const MachineRegisterInfo &MRI, Register Reg) { + FPValReg = getGFCstVRegValWithLookThrough(Reg, MRI); + return FPValReg ? true : false; } }; -inline ICstRegMatch m_ICst(Register &Reg) { return ICstRegMatch(Reg); } +inline GFCstAndRegMatch m_GFCst(Optional &FPValReg) { + return GFCstAndRegMatch(FPValReg); +} /// Matcher for a specific constant value. 
struct SpecificConstantMatch { Index: llvm/include/llvm/CodeGen/GlobalISel/Utils.h =================================================================== --- llvm/include/llvm/CodeGen/GlobalISel/Utils.h +++ llvm/include/llvm/CodeGen/GlobalISel/Utils.h @@ -162,13 +162,12 @@ MachineOptimizationRemarkMissed &R); /// If \p VReg is defined by a G_CONSTANT, return the corresponding value. -Optional getConstantVRegVal(Register VReg, - const MachineRegisterInfo &MRI); +Optional getGConstantVRegVal(Register VReg, + const MachineRegisterInfo &MRI); -/// If \p VReg is defined by a G_CONSTANT fits in int64_t -/// returns it. -Optional getConstantVRegSExtVal(Register VReg, - const MachineRegisterInfo &MRI); +/// If \p VReg is defined by a G_CONSTANT fits in int64_t returns it. +Optional getGConstantVRegSExtVal(Register VReg, + const MachineRegisterInfo &MRI); /// Simple struct used to hold a constant integer value and a virtual /// register. @@ -176,22 +175,31 @@ APInt Value; Register VReg; }; -/// If \p VReg is defined by a statically evaluable chain of -/// instructions rooted on a G_F/CONSTANT (\p LookThroughInstrs == true) -/// and that constant fits in int64_t, returns its value as well as the -/// virtual register defined by this G_F/CONSTANT. -/// When \p LookThroughInstrs == false this function behaves like -/// getConstantVRegVal. -/// When \p HandleFConstants == false the function bails on G_FCONSTANTs. -/// When \p LookThroughAnyExt == true the function treats G_ANYEXT same as -/// G_SEXT. + +/// If \p VReg is defined by a statically evaluable chain of instructions rooted +/// on a G_CONSTANT returns its APInt value and def register. 
Optional -getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, +getGCstVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, + bool LookThroughInstrs = true); + +/// If \p VReg is defined by a statically evaluable chain of instructions rooted +/// on a G_CONSTANT or G_FCONSTANT returns its value as APInt and def register. +Optional +getGOrGFCstVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs = true, - bool HandleFConstants = true, bool LookThroughAnyExt = false); -const ConstantInt *getConstantIntVRegVal(Register VReg, - const MachineRegisterInfo &MRI); + +struct FPValueAndVReg { + APFloat Value; + Register VReg; +}; + +/// If \p VReg is defined by a statically evaluable chain of instructions rooted +/// on a G_FCONSTANT returns its APFloat value and def register. +Optional +getGFCstVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, + bool LookThroughInstrs = true); + const ConstantFP* getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI); Index: llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp =================================================================== --- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -1186,7 +1186,7 @@ static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) { MachineRegisterInfo &MRI = *MIB.getMRI(); unsigned NumBits = Ty.getScalarSizeInBits(); - auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI); + auto ValVRegAndVal = getGCstVRegValWithLookThrough(Val, MRI); if (!Ty.isVector() && ValVRegAndVal) { APInt Scalar = ValVRegAndVal->Value.truncOrSelf(8); APInt SplatVal = APInt::getSplat(NumBits, Scalar); @@ -1239,7 +1239,7 @@ const auto &DstMMO = **MI.memoperands_begin(); MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo(); - auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI); + auto ValVRegAndVal = 
getGCstVRegValWithLookThrough(Val, MRI); bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0; if (!findGISelOptimalMemOpLowering(MemOps, Limit, @@ -1340,7 +1340,7 @@ bool IsVolatile = MemOp->isVolatile(); // See if this is a constant length copy - auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI); + auto LenVRegAndVal = getGCstVRegValWithLookThrough(Len, MRI); // FIXME: support dynamically sized G_MEMCPY_INLINE assert(LenVRegAndVal.hasValue() && "inline memcpy with dynamic size is not yet supported"); @@ -1601,7 +1601,7 @@ } // See if this is a constant length copy - auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI); + auto LenVRegAndVal = getGCstVRegValWithLookThrough(Len, MRI); if (!LenVRegAndVal) return false; // Leave it to the legalizer to lower it to a libcall. uint64_t KnownLen = LenVRegAndVal->Value.getZExtValue(); @@ -1713,7 +1713,7 @@ Register Add2 = MI.getOperand(1).getReg(); Register Imm1 = MI.getOperand(2).getReg(); - auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI); + auto MaybeImmVal = getGCstVRegValWithLookThrough(Imm1, MRI); if (!MaybeImmVal) return false; @@ -1730,7 +1730,7 @@ Register Base = Add2Def->getOperand(1).getReg(); Register Imm2 = Add2Def->getOperand(2).getReg(); - auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI); + auto MaybeImm2Val = getGCstVRegValWithLookThrough(Imm2, MRI); if (!MaybeImm2Val) return false; @@ -1769,7 +1769,7 @@ Register Shl2 = MI.getOperand(1).getReg(); Register Imm1 = MI.getOperand(2).getReg(); - auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI); + auto MaybeImmVal = getGCstVRegValWithLookThrough(Imm1, MRI); if (!MaybeImmVal) return false; @@ -1779,7 +1779,7 @@ Register Base = Shl2Def->getOperand(1).getReg(); Register Imm2 = Shl2Def->getOperand(2).getReg(); - auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI); + auto MaybeImm2Val = getGCstVRegValWithLookThrough(Imm2, MRI); if (!MaybeImm2Val) return false; @@ -1863,7 
+1863,7 @@ // Find a matching one-use shift by constant. const Register C1 = MI.getOperand(2).getReg(); - auto MaybeImmVal = getConstantVRegValWithLookThrough(C1, MRI); + auto MaybeImmVal = getGCstVRegValWithLookThrough(C1, MRI); if (!MaybeImmVal) return false; @@ -1877,7 +1877,7 @@ // Must be a constant. auto MaybeImmVal = - getConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI); + getGCstVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI); if (!MaybeImmVal) return false; @@ -1949,7 +1949,7 @@ unsigned &ShiftVal) { assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL"); auto MaybeImmVal = - getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); + getGCstVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); if (!MaybeImmVal) return false; @@ -1984,7 +1984,7 @@ // TODO: Should handle vector splat. Register RHS = MI.getOperand(2).getReg(); - auto MaybeShiftAmtVal = getConstantVRegValWithLookThrough(RHS, MRI); + auto MaybeShiftAmtVal = getGCstVRegValWithLookThrough(RHS, MRI); if (!MaybeShiftAmtVal) return false; @@ -2229,7 +2229,7 @@ return false; auto MaybeImmVal = - getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); + getGCstVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); if (!MaybeImmVal) return false; @@ -2403,7 +2403,7 @@ Register RHS = MI.getOperand(2).getReg(); MachineRegisterInfo &MRI = Builder.getMF().getRegInfo(); - if (auto RHSCst = getConstantVRegSExtVal(RHS, MRI)) { + if (auto RHSCst = getGConstantVRegSExtVal(RHS, MRI)) { int64_t Cst; if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) { NewCst = Cst + *RHSCst; @@ -2634,7 +2634,7 @@ bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) { assert(MI.getOpcode() == TargetOpcode::G_SELECT); if (auto MaybeCstCmp = - getConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) { + getGCstVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) { OpIdx = MaybeCstCmp->Value.isNullValue() ? 
3 : 2; return true; } @@ -2722,7 +2722,7 @@ if (!MOP.isReg()) return false; // MIPatternMatch doesn't let us look through G_ZEXT etc. - auto ValAndVReg = getConstantVRegValWithLookThrough(MOP.getReg(), MRI); + auto ValAndVReg = getGCstVRegValWithLookThrough(MOP.getReg(), MRI); return ValAndVReg && ValAndVReg->Value == C; } @@ -3334,7 +3334,7 @@ return false; if (Ty.isPointer()) { - auto ConstVal = getConstantVRegVal(MI.getOperand(1).getReg(), MRI); + auto ConstVal = getGConstantVRegVal(MI.getOperand(1).getReg(), MRI); return ConstVal && *ConstVal == 0; } @@ -3824,7 +3824,7 @@ {TargetOpcode::G_BUILD_VECTOR, {SrcTy, SrcTy.getElementType()}})) return false; - auto Cst = getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); + auto Cst = getGCstVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements()) return false; @@ -3897,7 +3897,7 @@ MRI.use_instr_nodbg_end())) { if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT) return false; - auto Cst = getConstantVRegVal(II.getOperand(2).getReg(), MRI); + auto Cst = getGConstantVRegVal(II.getOperand(2).getReg(), MRI); if (!Cst) return false; unsigned Idx = Cst.getValue().getZExtValue(); @@ -4124,10 +4124,10 @@ if (MRI.hasOneNonDBGUse(Src1Reg)) return false; - auto C1 = getConstantVRegVal(Src1Def->getOperand(2).getReg(), MRI); + auto C1 = getGConstantVRegVal(Src1Def->getOperand(2).getReg(), MRI); if (!C1) return false; - auto C2 = getConstantVRegVal(Src2Reg, MRI); + auto C2 = getGConstantVRegVal(Src2Reg, MRI); if (!C2) return false; @@ -4197,7 +4197,7 @@ // Try to match example 1). if (RHS->getOpcode() != TargetOpcode::G_ADD) return false; - auto C2 = getConstantVRegVal(RHS->getOperand(2).getReg(), MRI); + auto C2 = getGConstantVRegVal(RHS->getOperand(2).getReg(), MRI); if (!C2) return false; @@ -4215,10 +4215,10 @@ // Try to match example 2. 
Register LHSSrc1 = LHS->getOperand(1).getReg(); Register LHSSrc2 = LHS->getOperand(2).getReg(); - auto C1 = getConstantVRegVal(LHSSrc2, MRI); + auto C1 = getGConstantVRegVal(LHSSrc2, MRI); if (!C1) return false; - auto C2 = getConstantVRegVal(Src2Reg, MRI); + auto C2 = getGConstantVRegVal(Src2Reg, MRI); if (!C2) return false; Index: llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp =================================================================== --- llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp +++ llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp @@ -245,7 +245,7 @@ break; } case TargetOpcode::G_CONSTANT: { - auto CstVal = getConstantVRegVal(R, MRI); + auto CstVal = getGConstantVRegVal(R, MRI); if (!CstVal) break; Known = KnownBits::makeConstant(*CstVal); Index: llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp =================================================================== --- llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp +++ llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp @@ -37,7 +37,7 @@ const MachineOperand &MO, int64_t Value, const MachineRegisterInfo &MRI) const { if (MO.isReg() && MO.getReg()) - if (auto VRegVal = getConstantVRegValWithLookThrough(MO.getReg(), MRI)) + if (auto VRegVal = getGCstVRegValWithLookThrough(MO.getReg(), MRI)) return VRegVal->Value.getSExtValue() == Value; return false; } Index: llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp =================================================================== --- llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -3869,9 +3869,7 @@ // If the index is a constant, we can really break this down as you would // expect, and index into the target size pieces. 
int64_t IdxVal; - auto MaybeCst = - getConstantVRegValWithLookThrough(Idx, MRI, /*LookThroughInstrs*/ true, - /*HandleFConstants*/ false); + auto MaybeCst = getGCstVRegValWithLookThrough(Idx, MRI); if (MaybeCst) { IdxVal = MaybeCst->Value.getSExtValue(); // Avoid out of bounds indexing the pieces. Index: llvm/lib/CodeGen/GlobalISel/Utils.cpp =================================================================== --- llvm/lib/CodeGen/GlobalISel/Utils.cpp +++ llvm/lib/CodeGen/GlobalISel/Utils.cpp @@ -267,10 +267,10 @@ reportGISelFailure(MF, TPC, MORE, R); } -Optional llvm::getConstantVRegVal(Register VReg, - const MachineRegisterInfo &MRI) { +Optional llvm::getGConstantVRegVal(Register VReg, + const MachineRegisterInfo &MRI) { Optional ValAndVReg = - getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false); + getGCstVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false); assert((!ValAndVReg || ValAndVReg->VReg == VReg) && "Value found while looking through instrs"); if (!ValAndVReg) @@ -278,41 +278,27 @@ return ValAndVReg->Value; } -Optional llvm::getConstantVRegSExtVal(Register VReg, - const MachineRegisterInfo &MRI) { - Optional Val = getConstantVRegVal(VReg, MRI); +Optional +llvm::getGConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) { + Optional Val = getGConstantVRegVal(VReg, MRI); if (Val && Val->getBitWidth() <= 64) return Val->getSExtValue(); return None; } -Optional llvm::getConstantVRegValWithLookThrough( - Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs, - bool HandleFConstant, bool LookThroughAnyExt) { +namespace { + +typedef std::function isOpcodeFn; +typedef std::function(const MachineInstr *MI)> getAPCstFn; + +Optional getConstantVRegValWithLookThrough( + Register VReg, const MachineRegisterInfo &MRI, isOpcodeFn isConstantOpcode, + getAPCstFn getAPCstValue, bool LookThroughInstrs = true, + bool LookThroughAnyExt = false) { SmallVector, 4> SeenOpcodes; MachineInstr *MI; - auto IsConstantOpcode 
= [HandleFConstant](unsigned Opcode) { - return Opcode == TargetOpcode::G_CONSTANT || - (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT); - }; - auto GetImmediateValue = [HandleFConstant, - &MRI](const MachineInstr &MI) -> Optional { - const MachineOperand &CstVal = MI.getOperand(1); - if (!CstVal.isImm() && !CstVal.isCImm() && - (!HandleFConstant || !CstVal.isFPImm())) - return None; - if (!CstVal.isFPImm()) { - unsigned BitWidth = - MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); - APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm()) - : CstVal.getCImm()->getValue(); - assert(Val.getBitWidth() == BitWidth && - "Value bitwidth doesn't match definition type"); - return Val; - } - return CstVal.getFPImm()->getValueAPF().bitcastToAPInt(); - }; - while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) && + + while ((MI = MRI.getVRegDef(VReg)) && !isConstantOpcode(MI) && LookThroughInstrs) { switch (MI->getOpcode()) { case TargetOpcode::G_ANYEXT: @@ -339,10 +325,10 @@ return None; } } - if (!MI || !IsConstantOpcode(MI->getOpcode())) + if (!MI || !isConstantOpcode(MI)) return None; - Optional MaybeVal = GetImmediateValue(*MI); + Optional MaybeVal = getAPCstValue(MI); if (!MaybeVal) return None; APInt &Val = *MaybeVal; @@ -365,12 +351,59 @@ return ValueAndVReg{Val, VReg}; } -const ConstantInt *llvm::getConstantIntVRegVal(Register VReg, - const MachineRegisterInfo &MRI) { - MachineInstr *MI = MRI.getVRegDef(VReg); - if (MI->getOpcode() != TargetOpcode::G_CONSTANT) - return nullptr; - return MI->getOperand(1).getCImm(); +bool isGConstant(const MachineInstr *MI) { + return MI->getOpcode() == TargetOpcode::G_CONSTANT; +} + +bool isGFConstant(const MachineInstr *MI) { + return MI->getOpcode() == TargetOpcode::G_FCONSTANT; +} + +bool isGOrGFConstant(const MachineInstr *MI) { + unsigned Opc = MI->getOpcode(); + return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT; +} + +Optional getCImmAsAPInt(const MachineInstr 
*MI) { + const MachineOperand &CstVal = MI->getOperand(1); + if (CstVal.isCImm()) + return CstVal.getCImm()->getValue(); + return None; +} + +Optional getCImmOrFPImmAsAPInt(const MachineInstr *MI) { + const MachineOperand &CstVal = MI->getOperand(1); + if (CstVal.isCImm()) + return CstVal.getCImm()->getValue(); + if (CstVal.isFPImm()) + return CstVal.getFPImm()->getValueAPF().bitcastToAPInt(); + return None; +} + +} // end anonymous namespace + +Optional llvm::getGCstVRegValWithLookThrough( + Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) { + return getConstantVRegValWithLookThrough(VReg, MRI, isGConstant, + getCImmAsAPInt, LookThroughInstrs); +} + +Optional llvm::getGOrGFCstVRegValWithLookThrough( + Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs, + bool LookThroughAnyExt) { + return getConstantVRegValWithLookThrough( + VReg, MRI, isGOrGFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs, + LookThroughAnyExt); +} + +Optional llvm::getGFCstVRegValWithLookThrough( + Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) { + auto Reg = getConstantVRegValWithLookThrough( + VReg, MRI, isGFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs); + if (!Reg) + return None; + return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(), + Reg->VReg}; } const ConstantFP * @@ -437,16 +470,16 @@ Optional llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI) { - auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI); + auto MaybeOp2Cst = getGOrGFCstVRegValWithLookThrough(Op2, MRI, false); if (!MaybeOp2Cst) return None; - auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI); + auto MaybeOp1Cst = getGOrGFCstVRegValWithLookThrough(Op1, MRI, false); if (!MaybeOp1Cst) return None; - const APInt &C1 = *MaybeOp1Cst; - const APInt &C2 = *MaybeOp2Cst; + const APInt &C1 = MaybeOp1Cst->Value; + const APInt &C2 = MaybeOp2Cst->Value; switch (Opcode) { default: break; @@ 
-659,7 +692,7 @@ Optional llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1, uint64_t Imm, const MachineRegisterInfo &MRI) { - auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI); + auto MaybeOp1Cst = getGConstantVRegVal(Op1, MRI); if (MaybeOp1Cst) { switch (Opcode) { default: @@ -694,7 +727,7 @@ // shifting the bit off the end is undefined. // TODO: Constant splat - if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) { + if (auto ConstLHS = getGConstantVRegVal(MI.getOperand(1).getReg(), MRI)) { if (*ConstLHS == 1) return true; } @@ -702,7 +735,7 @@ break; } case TargetOpcode::G_LSHR: { - if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) { + if (auto ConstLHS = getGConstantVRegVal(MI.getOperand(1).getReg(), MRI)) { if (ConstLHS->isSignMask()) return true; } @@ -724,7 +757,7 @@ // zeros is greater than the truncation amount. const unsigned BitWidth = Ty.getScalarSizeInBits(); for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) { - auto Const = getConstantVRegVal(MI.getOperand(I).getReg(), MRI); + auto Const = getGConstantVRegVal(MI.getOperand(I).getReg(), MRI); if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2()) return false; } Index: llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp =================================================================== --- llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp +++ llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp @@ -26,7 +26,7 @@ return None; Register Src = MI.getOperand(1).getReg(); if (auto ValAndVReg = - getConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) + getGOrGFCstVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) return RegOrConstant(ValAndVReg->Value.getSExtValue()); return RegOrConstant(Src); } @@ -56,7 +56,7 @@ !CmpInst::isEquality(Pred)) return false; auto MaybeZero = - getConstantVRegValWithLookThrough(MaybeSub->getOperand(1).getReg(), MRI); + getGCstVRegValWithLookThrough(MaybeSub->getOperand(1).getReg(), 
MRI); return MaybeZero && MaybeZero->Value.getZExtValue() == 0; } @@ -68,7 +68,7 @@ auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering(); if (!TLI.getLibcallName(RTLIB::BZERO)) return false; - auto Zero = getConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI); + auto Zero = getGCstVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI); if (!Zero || Zero->Value.getSExtValue() != 0) return false; @@ -79,7 +79,7 @@ // If the size is known, check it. If it is not known, assume using bzero is // better. if (auto Size = - getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI)) { + getGCstVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI)) { if (Size->Value.getSExtValue() <= 256) return false; } Index: llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp =================================================================== --- llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp +++ llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp @@ -599,8 +599,7 @@ else if (Root.isCImm()) Immed = Root.getCImm()->getZExtValue(); else if (Root.isReg()) { - auto ValAndVReg = - getConstantVRegValWithLookThrough(Root.getReg(), MRI, true); + auto ValAndVReg = getGCstVRegValWithLookThrough(Root.getReg(), MRI, true); if (!ValAndVReg) return None; Immed = ValAndVReg->Value.getSExtValue(); @@ -1143,8 +1142,8 @@ &Optimized]() { if (Optimized) return false; - auto TrueCst = getConstantVRegValWithLookThrough(True, MRI); - auto FalseCst = getConstantVRegValWithLookThrough(False, MRI); + auto TrueCst = getGCstVRegValWithLookThrough(True, MRI); + auto FalseCst = getGCstVRegValWithLookThrough(False, MRI); if (!TrueCst && !FalseCst) return false; @@ -1282,13 +1281,13 @@ case TargetOpcode::G_XOR: { TestReg = MI->getOperand(1).getReg(); Register ConstantReg = MI->getOperand(2).getReg(); - auto VRegAndVal = getConstantVRegValWithLookThrough(ConstantReg, MRI); + auto VRegAndVal = getGCstVRegValWithLookThrough(ConstantReg, MRI); if 
(!VRegAndVal) { // AND commutes, check the other side for a constant. // FIXME: Can we canonicalize the constant so that it's always on the // same side at some point earlier? std::swap(ConstantReg, TestReg); - VRegAndVal = getConstantVRegValWithLookThrough(ConstantReg, MRI); + VRegAndVal = getGCstVRegValWithLookThrough(ConstantReg, MRI); } if (VRegAndVal) C = VRegAndVal->Value.getSExtValue(); @@ -1299,7 +1298,7 @@ case TargetOpcode::G_SHL: { TestReg = MI->getOperand(1).getReg(); auto VRegAndVal = - getConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI); + getGCstVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI); if (VRegAndVal) C = VRegAndVal->Value.getSExtValue(); break; @@ -1427,8 +1426,8 @@ // Check if the AND has a constant on its RHS which we can use as a mask. // If it's a power of 2, then it's the same as checking a specific bit. // (e.g, ANDing with 8 == ANDing with 000...100 == testing if bit 3 is set) - auto MaybeBit = getConstantVRegValWithLookThrough( - AndInst.getOperand(2).getReg(), *MIB.getMRI()); + auto MaybeBit = getGCstVRegValWithLookThrough(AndInst.getOperand(2).getReg(), + *MIB.getMRI()); if (!MaybeBit) return false; @@ -1503,7 +1502,7 @@ Register RHS = ICmp.getOperand(3).getReg(); // We're allowed to emit a TB(N)Z/CB(N)Z. Try to do that. - auto VRegAndVal = getConstantVRegValWithLookThrough(RHS, MRI); + auto VRegAndVal = getGCstVRegValWithLookThrough(RHS, MRI); MachineInstr *AndInst = getOpcodeDef(TargetOpcode::G_AND, LHS, MRI); // When we can emit a TB(N)Z, prefer that. @@ -1538,7 +1537,7 @@ if (ICmpInst::isEquality(Pred)) { if (!VRegAndVal) { std::swap(RHS, LHS); - VRegAndVal = getConstantVRegValWithLookThrough(RHS, MRI); + VRegAndVal = getGCstVRegValWithLookThrough(RHS, MRI); AndInst = getOpcodeDef(TargetOpcode::G_AND, LHS, MRI); } @@ -1997,7 +1996,7 @@ // selector which will match the register variant. 
assert(I.getOpcode() == TargetOpcode::G_SHL && "unexpected op"); const auto &MO = I.getOperand(2); - auto VRegAndVal = getConstantVRegVal(MO.getReg(), MRI); + auto VRegAndVal = getGConstantVRegVal(MO.getReg(), MRI); if (!VRegAndVal) return false; @@ -2079,7 +2078,7 @@ // Before selecting a DUP instruction, check if it is better selected as a // MOV or load from a constant pool. Register Src = I.getOperand(1).getReg(); - auto ValAndVReg = getConstantVRegValWithLookThrough(Src, MRI); + auto ValAndVReg = getGCstVRegValWithLookThrough(Src, MRI); if (!ValAndVReg) return false; LLVMContext &Ctx = MF.getFunction().getContext(); @@ -2299,11 +2298,9 @@ bool IsSigned = Opcode == TargetOpcode::G_SBFX; unsigned Size = Ty.getSizeInBits(); unsigned Opc = OpcTable[IsSigned][Size == 64]; - auto Cst1 = - getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), MRI); + auto Cst1 = getGCstVRegValWithLookThrough(I.getOperand(2).getReg(), MRI); assert(Cst1 && "Should have gotten a constant for src 1?"); - auto Cst2 = - getConstantVRegValWithLookThrough(I.getOperand(3).getReg(), MRI); + auto Cst2 = getGCstVRegValWithLookThrough(I.getOperand(3).getReg(), MRI); assert(Cst2 && "Should have gotten a constant for src 2?"); auto LSB = Cst1->Value.getZExtValue(); auto Width = Cst2->Value.getZExtValue(); @@ -2723,9 +2720,8 @@ // If we're storing a 0, use WZR/XZR. 
if (Opcode == TargetOpcode::G_STORE) { - auto CVal = getConstantVRegValWithLookThrough( - LoadStore->getOperand(0).getReg(), MRI, /*LookThroughInstrs = */ true, - /*HandleFConstants = */ false); + auto CVal = + getGCstVRegValWithLookThrough(LoadStore->getOperand(0).getReg(), MRI); if (CVal && CVal->Value == 0) { switch (LoadStore->getOpcode()) { case AArch64::STRWui: @@ -2855,7 +2851,7 @@ case TargetOpcode::G_PTRMASK: { Register MaskReg = I.getOperand(2).getReg(); - Optional MaskVal = getConstantVRegSExtVal(MaskReg, MRI); + Optional MaskVal = getGConstantVRegSExtVal(MaskReg, MRI); // TODO: Implement arbitrary cases if (!MaskVal || !isShiftedMask_64(*MaskVal)) return false; @@ -3890,7 +3886,7 @@ } // Find the index to extract from. - auto VRegAndVal = getConstantVRegValWithLookThrough(LaneIdxOp.getReg(), MRI); + auto VRegAndVal = getGCstVRegValWithLookThrough(LaneIdxOp.getReg(), MRI); if (!VRegAndVal) return false; unsigned LaneIdx = VRegAndVal->Value.getSExtValue(); @@ -4267,7 +4263,7 @@ {AArch64::ANDSXrr, AArch64::ANDSWrr}}; // ANDS needs a logical immediate for its immediate form. Check if we can // fold one in. - if (auto ValAndVReg = getConstantVRegValWithLookThrough(RHS.getReg(), MRI)) { + if (auto ValAndVReg = getGCstVRegValWithLookThrough(RHS.getReg(), MRI)) { int64_t Imm = ValAndVReg->Value.getSExtValue(); if (AArch64_AM::isLogicalImmediate(Imm, RegSize)) { @@ -4612,7 +4608,7 @@ if (!CmpInst::isUnsigned(P) && LHSDef && LHSDef->getOpcode() == TargetOpcode::G_AND) { // Make sure that the RHS is 0. - auto ValAndVReg = getConstantVRegValWithLookThrough(RHS.getReg(), MRI); + auto ValAndVReg = getGCstVRegValWithLookThrough(RHS.getReg(), MRI); if (!ValAndVReg || ValAndVReg->Value != 0) return nullptr; @@ -4757,7 +4753,7 @@ // Find the definition of the index. Bail out if it's not defined by a // G_CONSTANT. 
Register IdxReg = I.getOperand(3).getReg(); - auto VRegAndVal = getConstantVRegValWithLookThrough(IdxReg, MRI); + auto VRegAndVal = getGCstVRegValWithLookThrough(IdxReg, MRI); if (!VRegAndVal) return false; unsigned LaneIdx = VRegAndVal->Value.getSExtValue(); @@ -5311,7 +5307,7 @@ // constant is the RHS. Register OffsetReg = OffsetInst->getOperand(1).getReg(); Register ConstantReg = OffsetInst->getOperand(2).getReg(); - auto ValAndVReg = getConstantVRegValWithLookThrough(ConstantReg, MRI); + auto ValAndVReg = getGCstVRegValWithLookThrough(ConstantReg, MRI); if (!ValAndVReg) { // We didn't get a constant on the RHS. If the opcode is a shift, then // we're done. @@ -5320,7 +5316,7 @@ // If we have a G_MUL, we can use either register. Try looking at the RHS. std::swap(OffsetReg, ConstantReg); - ValAndVReg = getConstantVRegValWithLookThrough(ConstantReg, MRI); + ValAndVReg = getGCstVRegValWithLookThrough(ConstantReg, MRI); if (!ValAndVReg) return None; } @@ -5488,7 +5484,7 @@ // mov x0, wide // ldr x2, [base, x0] auto ValAndVReg = - getConstantVRegValWithLookThrough(PtrAdd->getOperand(2).getReg(), MRI); + getGCstVRegValWithLookThrough(PtrAdd->getOperand(2).getReg(), MRI); if (ValAndVReg) { unsigned Scale = Log2_32(SizeInBytes); int64_t ImmOff = ValAndVReg->Value.getSExtValue(); @@ -5953,7 +5949,7 @@ assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && "Expected G_CONSTANT"); Optional CstVal = - getConstantVRegSExtVal(MI.getOperand(0).getReg(), MRI); + getGConstantVRegSExtVal(MI.getOperand(0).getReg(), MRI); assert(CstVal && "Expected constant value"); MIB.addImm(CstVal.getValue()); } Index: llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp =================================================================== --- llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp +++ llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp @@ -941,7 +941,7 @@ // If the shift amount is a G_CONSTANT, promote it to a 64 bit type so the // imported patterns can select it 
later. Either way, it will be legal. Register AmtReg = MI.getOperand(2).getReg(); - auto VRegAndVal = getConstantVRegValWithLookThrough(AmtReg, MRI); + auto VRegAndVal = getGCstVRegValWithLookThrough(AmtReg, MRI); if (!VRegAndVal) return true; // Check the shift amount is in range for an immediate form. @@ -1044,8 +1044,8 @@ MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const { // Only legal if we can select immediate forms. // TODO: Lower this otherwise. - return getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI) && - getConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI); + return getGCstVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI) && + getGCstVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI); } bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI, Index: llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp =================================================================== --- llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp +++ llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp @@ -54,7 +54,7 @@ Register Src2 = MI.getOperand(2).getReg(); LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); - auto Cst = getConstantVRegValWithLookThrough(Src2, MRI); + auto Cst = getGCstVRegValWithLookThrough(Src2, MRI); if (!Cst || Cst->Value != 0) return false; // SDAG also checks for FullFP16, but this looks to be beneficial anyway. @@ -128,7 +128,7 @@ const LLT Ty = MRI.getType(LHS); // The below optimizations require a constant RHS. 
- auto Const = getConstantVRegValWithLookThrough(RHS, MRI); + auto Const = getGCstVRegValWithLookThrough(RHS, MRI); if (!Const) return false; Index: llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp =================================================================== --- llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp +++ llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp @@ -527,7 +527,7 @@ // If the RHS is not a constant, or the RHS is already a valid arithmetic // immediate, then there is nothing to change. - auto ValAndVReg = getConstantVRegValWithLookThrough(RHS, MRI); + auto ValAndVReg = getGCstVRegValWithLookThrough(RHS, MRI); if (!ValAndVReg) return None; uint64_t C = ValAndVReg->Value.getZExtValue(); @@ -757,7 +757,7 @@ if (MI.getOpcode() != TargetOpcode::G_AND) return false; auto ValAndVReg = - getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); + getGCstVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); if (!ValAndVReg) return false; uint64_t Mask = ValAndVReg->Value.getZExtValue(); @@ -774,7 +774,7 @@ return 0; auto MaybeShiftAmt = - getConstantVRegValWithLookThrough(Def->getOperand(2).getReg(), MRI); + getGCstVRegValWithLookThrough(Def->getOperand(2).getReg(), MRI); if (!MaybeShiftAmt) return 0; uint64_t ShiftAmt = MaybeShiftAmt->Value.getZExtValue(); @@ -814,7 +814,7 @@ // Don't swap if there's a constant on the RHS, because we know we can fold // that. 
Register RHS = MI.getOperand(3).getReg(); - auto RHSCst = getConstantVRegValWithLookThrough(RHS, MRI); + auto RHSCst = getGCstVRegValWithLookThrough(RHS, MRI); if (RHSCst && isLegalArithImmed(RHSCst->Value.getSExtValue())) return false; Index: llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp =================================================================== --- llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp +++ llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp @@ -147,7 +147,7 @@ if (UseInstr.getOpcode() != TargetOpcode::G_PTR_ADD) return false; auto Cst = - getConstantVRegValWithLookThrough(UseInstr.getOperand(2).getReg(), MRI); + getGCstVRegValWithLookThrough(UseInstr.getOperand(2).getReg(), MRI); if (!Cst) return false; MinOffset = std::min(MinOffset, Cst->Value.getZExtValue()); Index: llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -140,7 +140,7 @@ = TRI.getConstrainedRegClassForOperand(Src, *MRI); Optional ConstVal = - getConstantVRegValWithLookThrough(SrcReg, *MRI, true, true); + getGCstVRegValWithLookThrough(SrcReg, *MRI, true); if (ConstVal) { unsigned MovOpc = STI.isWave64() ? 
AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; @@ -608,11 +608,9 @@ const DebugLoc &DL = MI.getDebugLoc(); MachineBasicBlock *BB = MI.getParent(); - auto ConstSrc1 = - getConstantVRegValWithLookThrough(Src1, *MRI, true, true, true); + auto ConstSrc1 = getGOrGFCstVRegValWithLookThrough(Src1, *MRI, true, true); if (ConstSrc1) { - auto ConstSrc0 = - getConstantVRegValWithLookThrough(Src0, *MRI, true, true, true); + auto ConstSrc0 = getGOrGFCstVRegValWithLookThrough(Src0, *MRI, true, true); if (ConstSrc0) { const int64_t K0 = ConstSrc0->Value.getSExtValue(); const int64_t K1 = ConstSrc1->Value.getSExtValue(); @@ -844,7 +842,7 @@ auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst); Optional ConstSelect = - getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true); + getGCstVRegValWithLookThrough(LaneSelect, *MRI); if (ConstSelect) { // The selector has to be an inline immediate, so we can use whatever for // the other operands. @@ -852,8 +850,7 @@ MIB.addImm(ConstSelect->Value.getSExtValue() & maskTrailingOnes(STI.getWavefrontSizeLog2())); } else { - Optional ConstVal = - getConstantVRegValWithLookThrough(Val, *MRI, true, true); + Optional ConstVal = getGCstVRegValWithLookThrough(Val, *MRI); // If the value written is an inline immediate, we can get away without a // copy to m0. 
@@ -1130,7 +1127,7 @@ return false; Optional Arg = - getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true); + getGCstVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI); if (Arg.hasValue()) { const int64_t Value = Arg.getValue().Value.getSExtValue(); @@ -3977,8 +3974,8 @@ return {Root, 0}; MachineOperand &RHS = RootI->getOperand(2); - Optional MaybeOffset - = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true); + Optional MaybeOffset = + getGCstVRegValWithLookThrough(RHS.getReg(), MRI); if (!MaybeOffset) return {Root, 0}; return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()}; @@ -4306,8 +4303,8 @@ /// Get an immediate that must be 32-bits, and treated as zero extended. static Optional getConstantZext32Val(Register Reg, const MachineRegisterInfo &MRI) { - // getConstantVRegVal sexts any values, so see if that matters. - Optional OffsetVal = getConstantVRegSExtVal(Reg, MRI); + // getGConstantVRegVal sexts any values, so see if that matters. + Optional OffsetVal = getGConstantVRegSExtVal(Reg, MRI); if (!OffsetVal || !isInt<32>(*OffsetVal)) return None; return Lo_32(*OffsetVal); Index: llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -2183,9 +2183,9 @@ // FIXME: Artifact combiner probably should have replaced the truncated // constant before this, so we shouldn't need - // getConstantVRegValWithLookThrough. + // getGCstVRegValWithLookThrough. Optional MaybeIdxVal = - getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); + getGCstVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); if (!MaybeIdxVal) // Dynamic case will be selected to register indexing. 
return true; const int64_t IdxVal = MaybeIdxVal->Value.getSExtValue(); @@ -2215,9 +2215,9 @@ // FIXME: Artifact combiner probably should have replaced the truncated // constant before this, so we shouldn't need - // getConstantVRegValWithLookThrough. + // getGCstVRegValWithLookThrough. Optional MaybeIdxVal = - getConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI); + getGCstVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI); if (!MaybeIdxVal) // Dynamic case will be selected to register indexing. return true; @@ -2748,7 +2748,7 @@ static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI) { if (MI.getOpcode() != TargetOpcode::G_XOR) return false; - auto ConstVal = getConstantVRegSExtVal(MI.getOperand(2).getReg(), MRI); + auto ConstVal = getGConstantVRegSExtVal(MI.getOperand(2).getReg(), MRI); return ConstVal && *ConstVal == -1; } Index: llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp +++ llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp @@ -57,9 +57,9 @@ MinMaxMedOpc getMinMaxPair(unsigned Opc); - template + template bool matchMed(MachineInstr &MI, MachineRegisterInfo &MRI, MinMaxMedOpc MMMOpc, - Register &Val, Register &K0, Register &K1); + Register &Val, CstTy &K0, CstTy &K1); bool matchIntMinMaxToMed3(MachineInstr &MI, Med3MatchInfo &MatchInfo); void applyMed3(MachineInstr &MI, Med3MatchInfo &MatchInfo); @@ -83,11 +83,11 @@ } } -template +template bool AMDGPURegBankCombinerHelper::matchMed(MachineInstr &MI, MachineRegisterInfo &MRI, MinMaxMedOpc MMMOpc, Register &Val, - Register &K0, Register &K1) { + CstTy &K0, CstTy &K1) { // 4 operand commutes of: min(max(Val, K0), K1). // Find K1 from outer instr: min(max(...), K1) or min(K1, max(...)). // Find K0 and Val from inner instr: max(K0, Val) or max(Val, K0). 
@@ -115,19 +115,18 @@ return false; MinMaxMedOpc OpcodeTriple = getMinMaxPair(MI.getOpcode()); - Register Val, K0, K1; + Register Val; + Optional K0, K1; // Match min(max(Val, K0), K1) or max(min(Val, K1), K0). Then see if K0 <= K1. - if (!matchMed(MI, MRI, OpcodeTriple, Val, K0, K1)) + if (!matchMed(MI, MRI, OpcodeTriple, Val, K0, K1)) return false; - const APInt &K0_Imm = getConstantIntVRegVal(K0, MRI)->getValue(); - const APInt &K1_Imm = getConstantIntVRegVal(K1, MRI)->getValue(); - if (OpcodeTriple.Med == AMDGPU::G_AMDGPU_SMED3 && K0_Imm.sgt(K1_Imm)) + if (OpcodeTriple.Med == AMDGPU::G_AMDGPU_SMED3 && K0->Value.sgt(K1->Value)) return false; - if (OpcodeTriple.Med == AMDGPU::G_AMDGPU_UMED3 && K0_Imm.ugt(K1_Imm)) + if (OpcodeTriple.Med == AMDGPU::G_AMDGPU_UMED3 && K0->Value.ugt(K1->Value)) return false; - MatchInfo = {OpcodeTriple.Med, Val, K0, K1}; + MatchInfo = {OpcodeTriple.Med, Val, K0->VReg, K1->VReg}; return true; } Index: llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -1336,7 +1336,7 @@ const LLT S32 = LLT::scalar(32); MachineRegisterInfo *MRI = B.getMRI(); - if (Optional Imm = getConstantVRegSExtVal(CombinedOffset, *MRI)) { + if (Optional Imm = getGConstantVRegSExtVal(CombinedOffset, *MRI)) { uint32_t SOffset, ImmOffset; if (AMDGPU::splitMUBUFOffset(*Imm, SOffset, ImmOffset, &RBI.Subtarget, Alignment)) { @@ -1569,7 +1569,7 @@ // A 64-bit bitfield extract uses the 32-bit bitfield extract instructions // if the width is a constant. - if (auto ConstWidth = getConstantVRegValWithLookThrough(WidthReg, MRI)) { + if (auto ConstWidth = getGCstVRegValWithLookThrough(WidthReg, MRI)) { // Use the 32-bit bitfield extract instruction if the width is a constant. // Depending on the width size, use either the low or high 32-bits. 
auto Zero = B.buildConstant(S32, 0); Index: llvm/lib/Target/X86/X86InstructionSelector.cpp =================================================================== --- llvm/lib/Target/X86/X86InstructionSelector.cpp +++ llvm/lib/Target/X86/X86InstructionSelector.cpp @@ -479,7 +479,7 @@ "unsupported type."); if (I.getOpcode() == TargetOpcode::G_PTR_ADD) { - if (auto COff = getConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) { + if (auto COff = getGConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) { int64_t Imm = *COff; if (isInt<32>(Imm)) { // Check for displacement overflow. AM.Disp = static_cast<int32_t>(Imm); @@ -1065,7 +1065,7 @@ return false; Opcode = X86::ADC32rr; - } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) { + } else if (auto val = getGConstantVRegVal(CarryInReg, MRI)) { // carry is constant, support only 0. if (*val != 0) return false; Index: llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp =================================================================== --- llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp +++ llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp @@ -45,10 +45,10 @@ if (!TM) return; auto MIBCst = B.buildConstant(LLT::scalar(64), 42); - Register Src0; - bool match = mi_match(MIBCst.getReg(0), *MRI, m_ICst(Src0)); + Optional<ValueAndVReg> Src0; + bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0)); EXPECT_TRUE(match); - EXPECT_EQ(Src0, MIBCst.getReg(0)); + EXPECT_EQ(Src0->VReg, MIBCst.getReg(0)); } TEST_F(AArch64GISelMITest, MachineInstrPtrBind) { @@ -555,6 +555,25 @@ EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_AllOnesInt())); } +TEST_F(AArch64GISelMITest, MatchFPOrIntConst) { + setUp(); + if (!TM) + return; + + Register IntOne = B.buildConstant(LLT::scalar(64), 1).getReg(0); + Register FPOne = B.buildFConstant(LLT::scalar(64), 1.0).getReg(0); + Optional<ValueAndVReg> ValReg; + Optional<FPValueAndVReg> FValReg; + + EXPECT_TRUE(mi_match(IntOne, *MRI, m_GCst(ValReg))); + EXPECT_EQ(IntOne, ValReg->VReg); + EXPECT_FALSE(mi_match(IntOne, *MRI, 
m_GFCst(FValReg))); + + EXPECT_FALSE(mi_match(FPOne, *MRI, m_GCst(ValReg))); + EXPECT_TRUE(mi_match(FPOne, *MRI, m_GFCst(FValReg))); + EXPECT_EQ(FPOne, FValReg->VReg); +} + TEST_F(AArch64GISelMITest, MatchNeg) { setUp(); if (!TM)