Index: llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -43,7 +43,7 @@
   int64_t &CR;
   ConstantMatch(int64_t &C) : CR(C) {}
   bool match(const MachineRegisterInfo &MRI, Register Reg) {
-    if (auto MaybeCst = getConstantVRegVal(Reg, MRI)) {
+    if (auto MaybeCst = getConstantVRegSExtVal(Reg, MRI)) {
       CR = *MaybeCst;
       return true;
     }
Index: llvm/include/llvm/CodeGen/GlobalISel/Utils.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -121,14 +121,19 @@
                               MachineOptimizationRemarkEmitter &MORE,
                               MachineOptimizationRemarkMissed &R);

+/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
+Optional<APInt> getConstantVRegVal(Register VReg,
+                                   const MachineRegisterInfo &MRI);
+
 /// If \p VReg is defined by a G_CONSTANT whose value fits in an int64_t,
 /// returns it.
-Optional<int64_t> getConstantVRegVal(Register VReg,
-                                     const MachineRegisterInfo &MRI);
+Optional<int64_t> getConstantVRegSExtVal(Register VReg,
+                                         const MachineRegisterInfo &MRI);
+
 /// Simple struct used to hold a constant integer value and a virtual
 /// register.
 struct ValueAndVReg {
-  int64_t Value;
+  APInt Value;
   Register VReg;
 };
 /// If \p VReg is defined by a statically evaluable chain of
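This pair of declarations is the crux of the patch: getConstantVRegVal now returns the G_CONSTANT's value as an APInt at its natural width, while getConstantVRegSExtVal keeps the old int64_t interface and refuses constants wider than 64 bits. A minimal standalone sketch of that width guard, using only LLVM's ADT headers (the helper name is illustrative, not part of the patch):

    // Standalone sketch of the new contract (compiled/linked against
    // LLVMSupport); sextValIfNarrow is a hypothetical stand-in for
    // getConstantVRegSExtVal's guard.
    #include "llvm/ADT/APInt.h"
    #include "llvm/ADT/Optional.h"
    #include <cstdio>

    using llvm::APInt;
    using llvm::None;
    using llvm::Optional;

    // Values wider than 64 bits have no faithful int64_t form.
    static Optional<int64_t> sextValIfNarrow(const APInt &Val) {
      if (Val.getBitWidth() > 64)
        return None;
      return Val.getSExtValue();
    }

    int main() {
      APInt Narrow(32, -7, /*isSigned=*/true);    // fits in int64_t
      APInt Wide = APInt::getOneBitSet(128, 100); // s128 constant

      if (Optional<int64_t> V = sextValIfNarrow(Narrow))
        std::printf("narrow: %lld\n", (long long)*V); // -7
      if (!sextValIfNarrow(Wide))
        std::printf("wide: keep it as an APInt\n");
      return 0;
    }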
Index: llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1029,8 +1029,7 @@
   unsigned NumBits = Ty.getScalarSizeInBits();
   auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
   if (!Ty.isVector() && ValVRegAndVal) {
-    unsigned KnownVal = ValVRegAndVal->Value;
-    APInt Scalar = APInt(8, KnownVal);
+    APInt Scalar = ValVRegAndVal->Value.truncOrSelf(8);
     APInt SplatVal = APInt::getSplat(NumBits, Scalar);
     return MIB.buildConstant(Ty, SplatVal).getReg(0);
   }
@@ -1411,7 +1410,7 @@
   auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
   if (!LenVRegAndVal)
     return false; // Leave it to the legalizer to lower it to a libcall.
-  unsigned KnownLen = LenVRegAndVal->Value;
+  unsigned KnownLen = LenVRegAndVal->Value.getZExtValue();

   if (KnownLen == 0) {
     MI.eraseFromParent();
@@ -1521,7 +1520,7 @@
     return false;

   // Pass the combined immediate to the apply function.
-  MatchInfo.Imm = MaybeImmVal->Value + MaybeImm2Val->Value;
+  MatchInfo.Imm = (MaybeImmVal->Value + MaybeImm2Val->Value).getSExtValue();
   MatchInfo.Base = Base;
   return true;
 }
@@ -1571,7 +1570,7 @@
     return false;

   // Pass the combined immediate to the apply function.
-  MatchInfo.Imm = MaybeImmVal->Value + MaybeImm2Val->Value;
+  MatchInfo.Imm = (MaybeImmVal->Value + MaybeImm2Val->Value).getSExtValue();
   MatchInfo.Reg = Base;

   // There is no simple replacement for a saturating unsigned left shift that
@@ -1654,7 +1653,7 @@
   if (!MaybeImmVal)
     return false;

-  const uint64_t C1Val = MaybeImmVal->Value;
+  const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();

   auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {
     // Shift should match previous one and should be a one-use.
@@ -1668,7 +1667,7 @@
     if (!MaybeImmVal)
       return false;
-    ShiftVal = MaybeImmVal->Value;
+    ShiftVal = MaybeImmVal->Value.getSExtValue();
     return true;
   };
@@ -1738,10 +1737,11 @@
   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
   auto MaybeImmVal =
       getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
-  if (!MaybeImmVal || !isPowerOf2_64(MaybeImmVal->Value))
+  if (!MaybeImmVal)
     return false;
-  ShiftVal = Log2_64(MaybeImmVal->Value);
-  return true;
+
+  ShiftVal = MaybeImmVal->Value.exactLogBase2();
+  return (static_cast<int32_t>(ShiftVal) != -1);
 }

 bool CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
@@ -1787,7 +1787,7 @@
     return false;
   }

-  int64_t ShiftAmt = MaybeShiftAmtVal->Value;
+  int64_t ShiftAmt = MaybeShiftAmtVal->Value.getSExtValue();
   MatchData.Reg = ExtSrc;
   MatchData.Imm = ShiftAmt;
@@ -2026,7 +2026,7 @@
   if (!MaybeImmVal)
     return false;

-  ShiftVal = MaybeImmVal->Value;
+  ShiftVal = MaybeImmVal->Value.getSExtValue();
   return ShiftVal >= Size / 2 && ShiftVal < Size;
 }
@@ -2200,7 +2200,7 @@
   Register RHS = MI.getOperand(2).getReg();
   MachineRegisterInfo &MRI = Builder.getMF().getRegInfo();

-  if (auto RHSCst = getConstantVRegVal(RHS, MRI)) {
+  if (auto RHSCst = getConstantVRegSExtVal(RHS, MRI)) {
     int64_t Cst;
     if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
       NewCst = Cst + *RHSCst;
@@ -2441,7 +2441,7 @@
   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
   if (auto MaybeCstCmp =
           getConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) {
-    OpIdx = MaybeCstCmp->Value ? 2 : 3;
+    OpIdx = MaybeCstCmp->Value.isNullValue() ? 3 : 2;
     return true;
   }
   return false;
Index: llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
+++ llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
@@ -50,7 +50,7 @@
     const MachineRegisterInfo &MRI) const {
   if (MO.isReg() && MO.getReg())
     if (auto VRegVal = getConstantVRegValWithLookThrough(MO.getReg(), MRI))
-      return VRegVal->Value == Value;
+      return VRegVal->Value.getSExtValue() == Value;
   return false;
 }
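Most of these hunks are mechanical, but matchCombineMulToShl changes shape: APInt::exactLogBase2() folds the old isPowerOf2_64 + Log2_64 pair into one query that returns -1 unless exactly one bit is set. A compilable sketch of the idiom (standalone, linked against LLVM's Support library):

    #include "llvm/ADT/APInt.h"
    #include <cstdio>

    using llvm::APInt;

    int main() {
      const APInt Vals[] = {
          APInt(32, 16),                      // power of two: log2 == 4
          APInt(32, 12),                      // not a power of two
          APInt(32, -8, /*isSigned=*/true)};  // many bits set: rejected
      for (const APInt &V : Vals) {
        int32_t Shift = V.exactLogBase2(); // -1 unless exactly one bit set
        if (Shift != -1)
          std::printf("G_MUL by %lld -> G_SHL by %d\n",
                      (long long)V.getSExtValue(), (int)Shift);
        else
          std::printf("%lld: no shl combine\n", (long long)V.getSExtValue());
      }
      return 0;
    }

The G_SELECT hunk is the other non-mechanical rewrite: a known-zero condition now selects operand 3 and anything nonzero selects operand 2, the same decision as before expressed without converting the condition to a 64-bit integer first.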
Index: llvm/lib/CodeGen/GlobalISel/Utils.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -255,8 +255,8 @@
   reportGISelFailure(MF, TPC, MORE, R);
 }

-Optional<int64_t> llvm::getConstantVRegVal(Register VReg,
-                                           const MachineRegisterInfo &MRI) {
+Optional<APInt> llvm::getConstantVRegVal(Register VReg,
+                                         const MachineRegisterInfo &MRI) {
   Optional<ValueAndVReg> ValAndVReg =
       getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
   assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
@@ -266,6 +266,17 @@
   return ValAndVReg->Value;
 }

+Optional<int64_t> llvm::getConstantVRegSExtVal(Register VReg,
+                                               const MachineRegisterInfo &MRI) {
+  Optional<ValueAndVReg> ValAndVReg =
+      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
+  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
+         "Value found while looking through instrs");
+  if (!ValAndVReg || ValAndVReg->Value.getBitWidth() > 64)
+    return None;
+  return ValAndVReg->Value.getSExtValue();
+}
+
 Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
     bool HandleFConstant) {
@@ -337,10 +348,7 @@
     }
   }

-  if (Val.getBitWidth() > 64)
-    return None;
-
-  return ValueAndVReg{Val.getSExtValue(), VReg};
+  return ValueAndVReg{Val, VReg};
 }

 const ConstantFP *
@@ -413,9 +421,8 @@
   if (!MaybeOp1Cst)
     return None;

-  LLT Ty = MRI.getType(Op1);
-  APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
-  APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
+  const APInt &C1 = *MaybeOp1Cst;
+  const APInt &C2 = *MaybeOp2Cst;
   switch (Opcode) {
   default:
     break;
@@ -535,13 +542,13 @@
                                        const MachineRegisterInfo &MRI) {
   auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
   if (MaybeOp1Cst) {
-    LLT Ty = MRI.getType(Op1);
-    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
     switch (Opcode) {
     default:
       break;
-    case TargetOpcode::G_SEXT_INREG:
-      return C1.trunc(Imm).sext(C1.getBitWidth());
+    case TargetOpcode::G_SEXT_INREG: {
+      LLT Ty = MRI.getType(Op1);
+      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
+    }
     }
   }
   return None;
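With ValueAndVReg carrying an APInt, ConstantFoldBinOp can use the constants at their own width instead of rebuilding them from sign-extended int64_t values, and the G_SEXT_INREG fold becomes a truncate to the immediate width followed by a sign-extend back out. A standalone sketch of that fold, using only LLVM's ADT headers:

    #include "llvm/ADT/APInt.h"
    #include <cstdio>

    using llvm::APInt;

    int main() {
      // G_SEXT_INREG %x(s32), 8 with %x = G_CONSTANT i32 0xFF:
      // truncate to the 8-bit field, then sign-extend to the full width.
      APInt C(32, 0xFF);
      APInt Folded = C.trunc(8).sext(32);
      std::printf("0x%llx -> %lld\n",
                  (unsigned long long)C.getZExtValue(),
                  (long long)Folded.getSExtValue()); // 0xff -> -1
      return 0;
    }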
Index: llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
===================================================================
--- llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -574,7 +574,7 @@
         getConstantVRegValWithLookThrough(Root.getReg(), MRI, true);
     if (!ValAndVReg)
       return None;
-    Immed = ValAndVReg->Value;
+    Immed = ValAndVReg->Value.getSExtValue();
   } else
     return None;
   return Immed;
@@ -1109,8 +1109,8 @@
   Register ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR;

   if (TrueCst && FalseCst) {
-    auto T = TrueCst->Value;
-    auto F = FalseCst->Value;
+    int64_t T = TrueCst->Value.getSExtValue();
+    int64_t F = FalseCst->Value.getSExtValue();

     if (T == 0 && F == 1) {
       // G_SELECT cc, 0, 1 -> CSINC zreg, zreg, cc
@@ -1130,7 +1130,7 @@
   }

   if (TrueCst) {
-    auto T = TrueCst->Value;
+    int64_t T = TrueCst->Value.getSExtValue();
     if (T == 1) {
       // G_SELECT cc, 1, f -> CSINC f, zreg, inv_cc
       Opc = Is32Bit ? AArch64::CSINCWr : AArch64::CSINCXr;
@@ -1151,7 +1151,7 @@
   }

   if (FalseCst) {
-    auto F = FalseCst->Value;
+    int64_t F = FalseCst->Value.getSExtValue();
     if (F == 1) {
       // G_SELECT cc, t, 1 -> CSINC t, zreg, cc
       Opc = Is32Bit ? AArch64::CSINCWr : AArch64::CSINCXr;
@@ -1304,7 +1304,7 @@
       VRegAndVal = getConstantVRegValWithLookThrough(ConstantReg, MRI);
     }
     if (VRegAndVal)
-      C = VRegAndVal->Value;
+      C = VRegAndVal->Value.getSExtValue();
     break;
   }
   case TargetOpcode::G_ASHR:
@@ -1314,7 +1314,7 @@
     auto VRegAndVal =
         getConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
     if (VRegAndVal)
-      C = VRegAndVal->Value;
+      C = VRegAndVal->Value.getSExtValue();
     break;
   }
   }
@@ -1442,10 +1442,13 @@
   // (e.g, ANDing with 8 == ANDing with 000...100 == testing if bit 3 is set)
   auto MaybeBit = getConstantVRegValWithLookThrough(
       AndInst.getOperand(2).getReg(), *MIB.getMRI());
-  if (!MaybeBit || !isPowerOf2_64(MaybeBit->Value))
+  if (!MaybeBit)
+    return false;
+
+  int32_t Bit = MaybeBit->Value.exactLogBase2();
+  if (Bit < 0)
     return false;

-  uint64_t Bit = Log2_64(static_cast<uint64_t>(MaybeBit->Value));
   Register TestReg = AndInst.getOperand(1).getReg();

   // Emit a TB(N)Z.
@@ -1522,7 +1525,7 @@
   // Note that we don't want to do this when we have a G_AND because it can
   // become a tst. The tst will make the test bit in the TB(N)Z redundant.
   if (VRegAndVal && !AndInst) {
-    int64_t C = VRegAndVal->Value;
+    int64_t C = VRegAndVal->Value.getSExtValue();

     // When we have a greater-than comparison, we can just test if the msb is
     // zero.
@@ -1654,8 +1657,8 @@
       return None;

     if (Idx == 1)
-      ImmVal = VRegAndVal->Value;
-    if (ImmVal != VRegAndVal->Value)
+      ImmVal = VRegAndVal->Value.getSExtValue();
+    if (ImmVal != VRegAndVal->Value.getSExtValue())
       return None;
   }
@@ -2735,7 +2738,7 @@
   case TargetOpcode::G_PTRMASK: {
     Register MaskReg = I.getOperand(2).getReg();
-    Optional<int64_t> MaskVal = getConstantVRegVal(MaskReg, MRI);
+    Optional<int64_t> MaskVal = getConstantVRegSExtVal(MaskReg, MRI);
     // TODO: Implement arbitrary cases
     if (!MaskVal || !isShiftedMask_64(*MaskVal))
       return false;
@@ -3749,7 +3752,7 @@
   auto VRegAndVal = getConstantVRegValWithLookThrough(LaneIdxOp.getReg(), MRI);
   if (!VRegAndVal)
     return false;
-  unsigned LaneIdx = VRegAndVal->Value;
+  unsigned LaneIdx = VRegAndVal->Value.getSExtValue();

   MachineIRBuilder MIRBuilder(I);
@@ -4116,10 +4119,11 @@
   // ANDS needs a logical immediate for its immediate form. Check if we can
   // fold one in.
   if (auto ValAndVReg = getConstantVRegValWithLookThrough(RHS.getReg(), MRI)) {
-    if (AArch64_AM::isLogicalImmediate(ValAndVReg->Value, RegSize)) {
+    int64_t Imm = ValAndVReg->Value.getSExtValue();
+
+    if (AArch64_AM::isLogicalImmediate(Imm, RegSize)) {
       auto TstMI = MIRBuilder.buildInstr(OpcTable[0][Is32Bit], {Ty}, {LHS});
-      TstMI.addImm(
-          AArch64_AM::encodeLogicalImmediate(ValAndVReg->Value, RegSize));
+      TstMI.addImm(AArch64_AM::encodeLogicalImmediate(Imm, RegSize));
       constrainSelectedInstRegOperands(*TstMI, TII, TRI, RBI);
       return &*TstMI;
     }
@@ -4658,7 +4662,7 @@
   auto VRegAndVal = getConstantVRegValWithLookThrough(IdxReg, MRI);
   if (!VRegAndVal)
     return false;
-  unsigned LaneIdx = VRegAndVal->Value;
+  unsigned LaneIdx = VRegAndVal->Value.getSExtValue();

   // Perform the lane insert.
   Register SrcReg = I.getOperand(1).getReg();
@@ -5198,7 +5202,7 @@
   // The value must fit into 3 bits, and must be positive. Make sure that is
   // true.
-  int64_t ImmVal = ValAndVReg->Value;
+  int64_t ImmVal = ValAndVReg->Value.getSExtValue();

   // Since we're going to pull this into a shift, the constant value must be
   // a power of 2. If we got a multiply, then we need to check this.
@@ -5362,7 +5366,7 @@
       getConstantVRegValWithLookThrough(PtrAdd->getOperand(2).getReg(), MRI);
   if (ValAndVReg) {
     unsigned Scale = Log2_32(SizeInBytes);
-    int64_t ImmOff = ValAndVReg->Value;
+    int64_t ImmOff = ValAndVReg->Value.getSExtValue();

     // Skip immediates that can be selected in the load/store addressing
     // mode.
@@ -5821,7 +5825,8 @@
   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
          "Expected G_CONSTANT");
-  Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), MRI);
+  Optional<int64_t> CstVal =
+      getConstantVRegSExtVal(MI.getOperand(0).getReg(), MRI);
   assert(CstVal && "Expected constant value");
   MIB.addImm(CstVal.getValue());
 }
Index: llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
===================================================================
--- llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -837,7 +837,7 @@
   if (!VRegAndVal)
     return true;
   // Check the shift amount is in range for an immediate form.
-  int64_t Amount = VRegAndVal->Value;
+  int64_t Amount = VRegAndVal->Value.getSExtValue();
   if (Amount > 31)
     return true; // This will have to remain a register variant.
   auto ExtCst = MIRBuilder.buildConstant(LLT::scalar(64), Amount);
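Every former ->Value read in the AArch64 selector now has to pick an extension explicitly; these hunks consistently choose getSExtValue(), so range checks such as Amount > 31 and the logical-immediate tests see the signed value. For negative constants the choice is not cosmetic, as this small standalone demo shows:

    #include "llvm/ADT/APInt.h"
    #include <cstdio>

    using llvm::APInt;

    int main() {
      APInt C(32, -1, /*isSigned=*/true); // e.g. G_CONSTANT i32 -1
      std::printf("sext: %lld\n", (long long)C.getSExtValue());  // -1
      std::printf("zext: %llu\n",
                  (unsigned long long)C.getZExtValue()); // 4294967295
      return 0;
    }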
Index: llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
===================================================================
--- llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
+++ llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
@@ -130,7 +130,7 @@
   if (!Const)
     return false;

-  const APInt &ConstValue = APInt(Ty.getSizeInBits(), Const->Value, true);
+  const APInt ConstValue = Const->Value.sextOrSelf(Ty.getSizeInBits());
   // The following code is ported from AArch64ISelLowering.
   // Multiplication of a power of two plus/minus one can be done more
   // cheaply as a shift+add/sub. For now, this is true unilaterally. If
Index: llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -438,7 +438,7 @@
   auto ValAndVReg = getConstantVRegValWithLookThrough(RHS, MRI);
   if (!ValAndVReg)
     return None;
-  uint64_t C = ValAndVReg->Value;
+  uint64_t C = ValAndVReg->Value.getZExtValue();
   if (isLegalArithImmed(C))
     return None;
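sextOrSelf, used in the combiner hunk, widens the constant to the multiply's type but, unlike plain sext, is a no-op when the width already matches (truncOrSelf, used earlier in CombinerHelper, is the narrowing counterpart in the LLVM of this patch). A standalone illustration:

    #include "llvm/ADT/APInt.h"
    #include <cstdio>

    using llvm::APInt;

    int main() {
      APInt Narrow(16, -3, /*isSigned=*/true);
      APInt Widened = Narrow.sextOrSelf(64); // widen, value preserved
      APInt Wide(64, 5);
      APInt Same = Wide.sextOrSelf(64);      // already 64 bits: no-op

      std::printf("%lld %lld\n", (long long)Widened.getSExtValue(),
                  (long long)Same.getSExtValue()); // -3 5
      return 0;
    }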
Index: llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -611,8 +611,10 @@
   if (ConstSrc1) {
     auto ConstSrc0 = getConstantVRegValWithLookThrough(Src0, *MRI, true, true);
     if (ConstSrc0) {
-      uint32_t Lo16 = static_cast<uint32_t>(ConstSrc0->Value) & 0xffff;
-      uint32_t Hi16 = static_cast<uint32_t>(ConstSrc1->Value) & 0xffff;
+      const int64_t K0 = ConstSrc0->Value.getSExtValue();
+      const int64_t K1 = ConstSrc1->Value.getSExtValue();
+      uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
+      uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;

       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
           .addImm(Lo16 | (Hi16 << 16));
@@ -820,7 +822,7 @@
     // The selector has to be an inline immediate, so we can use whatever for
     // the other operands.
     MIB.addReg(Val);
-    MIB.addImm(ConstSelect->Value &
+    MIB.addImm(ConstSelect->Value.getSExtValue() &
                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
   } else {
     Optional<ValueAndVReg> ConstVal =
@@ -828,9 +830,9 @@

     // If the value written is an inline immediate, we can get away without a
     // copy to m0.
-    if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value,
+    if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
                                                  STI.hasInv2PiInlineImm())) {
-      MIB.addImm(ConstVal->Value);
+      MIB.addImm(ConstVal->Value.getSExtValue());
       MIB.addReg(LaneSelect);
     } else {
       MIB.addReg(Val);
@@ -1101,7 +1103,7 @@
       getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true);

   if (Arg.hasValue()) {
-    const int64_t Value = Arg.getValue().Value;
+    const int64_t Value = Arg.getValue().Value.getSExtValue();
     if (Value == 0) {
       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
@@ -3430,7 +3432,7 @@
     return Default;

   Optional<int64_t> Offset =
-      getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
+      getConstantVRegSExtVal(OpDef->getOperand(2).getReg(), *MRI);
   if (!Offset.hasValue())
     return Default;
@@ -3858,7 +3860,7 @@
       = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
   if (!MaybeOffset)
     return {Root, 0};
-  return {RootI->getOperand(1).getReg(), MaybeOffset->Value};
+  return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
 }

 static void addZeroImm(MachineInstrBuilder &MIB) {
@@ -4186,7 +4188,7 @@
 static Optional<uint32_t> getConstantZext32Val(Register Reg,
                                                const MachineRegisterInfo &MRI) {
-  // getConstantVRegVal sexts any values, so see if that matters.
-  Optional<int64_t> OffsetVal = getConstantVRegVal(Reg, MRI);
+  // getConstantVRegSExtVal sexts any values, so see if that matters.
+  Optional<int64_t> OffsetVal = getConstantVRegSExtVal(Reg, MRI);
   if (!OffsetVal || !isInt<32>(*OffsetVal))
     return None;
   return Lo_32(*OffsetVal);
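In the V2S16 case above, the selector folds two constant halves into one S_MOV_B32 by packing the low 16 bits of each. The same arithmetic extracted into a standalone snippet, with made-up input values:

    #include "llvm/ADT/APInt.h"
    #include <cstdint>
    #include <cstdio>

    using llvm::APInt;

    int main() {
      APInt Src0(16, -2, /*isSigned=*/true); // 0xfffe
      APInt Src1(16, 7);                     // 0x0007

      const int64_t K0 = Src0.getSExtValue();
      const int64_t K1 = Src1.getSExtValue();
      uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
      uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
      // Packed immediate for the single 32-bit move: 0x0007fffe.
      std::printf("S_MOV_B32 imm = 0x%08x\n", Lo16 | (Hi16 << 16));
      return 0;
    }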
Index: llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2087,10 +2087,11 @@
   // FIXME: Artifact combiner probably should have replaced the truncated
   // constant before this, so we shouldn't need
   // getConstantVRegValWithLookThrough.
-  Optional<ValueAndVReg> IdxVal = getConstantVRegValWithLookThrough(
-      MI.getOperand(2).getReg(), MRI);
-  if (!IdxVal) // Dynamic case will be selected to register indexing.
+  Optional<ValueAndVReg> MaybeIdxVal =
+      getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
+  if (!MaybeIdxVal) // Dynamic case will be selected to register indexing.
     return true;
+  const int64_t IdxVal = MaybeIdxVal->Value.getSExtValue();

   Register Dst = MI.getOperand(0).getReg();
   Register Vec = MI.getOperand(1).getReg();
@@ -2099,8 +2100,8 @@
   LLT EltTy = VecTy.getElementType();
   assert(EltTy == MRI.getType(Dst));

-  if (IdxVal->Value < VecTy.getNumElements())
-    B.buildExtract(Dst, Vec, IdxVal->Value * EltTy.getSizeInBits());
+  if (IdxVal < VecTy.getNumElements())
+    B.buildExtract(Dst, Vec, IdxVal * EltTy.getSizeInBits());
   else
     B.buildUndef(Dst);
@@ -2118,11 +2119,12 @@
   // FIXME: Artifact combiner probably should have replaced the truncated
   // constant before this, so we shouldn't need
   // getConstantVRegValWithLookThrough.
-  Optional<ValueAndVReg> IdxVal = getConstantVRegValWithLookThrough(
-      MI.getOperand(3).getReg(), MRI);
-  if (!IdxVal) // Dynamic case will be selected to register indexing.
+  Optional<ValueAndVReg> MaybeIdxVal =
+      getConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
+  if (!MaybeIdxVal) // Dynamic case will be selected to register indexing.
     return true;
+  int64_t IdxVal = MaybeIdxVal->Value.getSExtValue();

   Register Dst = MI.getOperand(0).getReg();
   Register Vec = MI.getOperand(1).getReg();
   Register Ins = MI.getOperand(2).getReg();
@@ -2131,8 +2133,8 @@
   LLT EltTy = VecTy.getElementType();
   assert(EltTy == MRI.getType(Ins));

-  if (IdxVal->Value < VecTy.getNumElements())
-    B.buildInsert(Dst, Vec, Ins, IdxVal->Value * EltTy.getSizeInBits());
+  if (IdxVal < VecTy.getNumElements())
+    B.buildInsert(Dst, Vec, Ins, IdxVal * EltTy.getSizeInBits());
   else
     B.buildUndef(Dst);
@@ -2643,7 +2645,7 @@
 static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI) {
   if (MI.getOpcode() != TargetOpcode::G_XOR)
     return false;
-  auto ConstVal = getConstantVRegVal(MI.getOperand(2).getReg(), MRI);
+  auto ConstVal = getConstantVRegSExtVal(MI.getOperand(2).getReg(), MRI);
   return ConstVal && *ConstVal == -1;
 }
Index: llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -1331,7 +1331,7 @@
   const LLT S32 = LLT::scalar(32);
   MachineRegisterInfo *MRI = B.getMRI();

-  if (Optional<int64_t> Imm = getConstantVRegVal(CombinedOffset, *MRI)) {
+  if (Optional<int64_t> Imm = getConstantVRegSExtVal(CombinedOffset, *MRI)) {
     uint32_t SOffset, ImmOffset;
     if (AMDGPU::splitMUBUFOffset(*Imm, SOffset, ImmOffset, &RBI.Subtarget,
                                  Alignment)) {
Index: llvm/lib/Target/X86/X86InstructionSelector.cpp
===================================================================
--- llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -479,7 +479,7 @@
          "unsupported type.");

   if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
-    if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
+    if (auto COff = getConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
       int64_t Imm = *COff;
       if (isInt<32>(Imm)) { // Check for displacement overflow.
         AM.Disp = static_cast<int32_t>(Imm);
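On X86 the sign-extended constant feeds the addressing mode's 32-bit displacement field, so the isInt<32> guard is what keeps the fold sound. A standalone illustration with made-up offsets:

    #include "llvm/Support/MathExtras.h"
    #include <cstdio>

    int main() {
      const int64_t Offsets[] = {-4096, (int64_t)1 << 40};
      for (int64_t Imm : Offsets) {
        if (llvm::isInt<32>(Imm)) // fits the signed 32-bit displacement
          std::printf("%lld: fold into AM.Disp\n", (long long)Imm);
        else
          std::printf("%lld: overflows disp32, keep the G_PTR_ADD\n",
                      (long long)Imm);
      }
      return 0;
    }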