diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
@@ -466,7 +466,7 @@
   virtual ~GIMatchTableExecutor() = default;
 
   CodeGenCoverage *CoverageInfo = nullptr;
-  GISelKnownBits *KnownBits = nullptr;
+  GISelKnownBits *KB = nullptr;
   MachineFunction *MF = nullptr;
   ProfileSummaryInfo *PSI = nullptr;
   BlockFrequencyInfo *BFI = nullptr;
@@ -478,12 +478,12 @@
   }
 
   /// Setup per-MF executor state.
-  virtual void setupMF(MachineFunction &mf, GISelKnownBits *KB,
+  virtual void setupMF(MachineFunction &mf, GISelKnownBits *kb,
                        CodeGenCoverage *covinfo = nullptr,
                        ProfileSummaryInfo *psi = nullptr,
                        BlockFrequencyInfo *bfi = nullptr) {
     CoverageInfo = covinfo;
-    KnownBits = KB;
+    KB = kb;
     MF = &mf;
     PSI = psi;
     BFI = bfi;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -1593,7 +1593,7 @@
         .addImm(0);
   } else {
     std::tie(BaseOffset, ImmOffset) =
-        AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset, KnownBits);
+        AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset, KB);
 
     if (Readfirstlane) {
       // We have the constant offset now, so put the readfirstlane back on the
@@ -2765,7 +2765,7 @@
 
   // Try to avoid emitting a bit operation when we only need to touch half of
   // the 64-bit pointer.
-  APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64);
+  APInt MaskOnes = KB->getKnownOnes(MaskReg).zext(64);
   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
 
@@ -2917,7 +2917,7 @@
 
   unsigned SubReg;
   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(
-      *MRI, TRI, SrcRC, IdxReg, DstTy.getSizeInBits() / 8, *KnownBits);
+      *MRI, TRI, SrcRC, IdxReg, DstTy.getSizeInBits() / 8, *KB);
 
   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
     if (DstTy.getSizeInBits() != 32 && !Is64)
@@ -2997,8 +2997,8 @@
     return false;
 
   unsigned SubReg;
-  std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
-                                                     ValSize / 8, *KnownBits);
+  std::tie(IdxReg, SubReg) =
+      computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg, ValSize / 8, *KB);
 
   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
                          STI.useVGPRIndexMode();
@@ -4085,9 +4085,9 @@
   // The bug affects the swizzling of SVS accesses if there is any carry out
   // from the two low order bits (i.e. from bit 1 into bit 2) when adding
   // voffset to (soffset + inst_offset).
-  auto VKnown = KnownBits->getKnownBits(VAddr);
+  auto VKnown = KB->getKnownBits(VAddr);
   auto SKnown = KnownBits::computeForAddSub(
-      true, false, KnownBits->getKnownBits(SAddr),
+      true, false, KB->getKnownBits(SAddr),
       KnownBits::makeConstant(APInt(32, ImmOffset)));
   uint64_t VMax = VKnown.getMaxValue().getZExtValue();
   uint64_t SMax = SKnown.getMaxValue().getZExtValue();
@@ -4195,7 +4195,7 @@
   if (ConstOffset != 0) {
     if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
         (!STI.privateMemoryResourceIsRangeChecked() ||
-         KnownBits->signBitIsZero(PtrBase))) {
+         KB->signBitIsZero(PtrBase))) {
       const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
       if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
         FI = PtrBaseDef->getOperand(1).getIndex();
@@ -4237,7 +4237,7 @@
 
   // On Southern Islands instruction with a negative base value and an offset
   // don't seem to work.
-  return KnownBits->signBitIsZero(Base);
+  return KB->signBitIsZero(Base);
 }
 
 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
@@ -4253,7 +4253,7 @@
 
   // On Southern Islands instruction with a negative base value and an offset
   // don't seem to work.
-  return KnownBits->signBitIsZero(Base);
+  return KB->signBitIsZero(Base);
 }
 
 bool AMDGPUInstructionSelector::isFlatScratchBaseLegal(
@@ -4263,7 +4263,7 @@
 
   // When value in 32-bit Base can be negative calculate scratch offset using
   // 32-bit add instruction, otherwise use Base(unsigned) + offset.
-  return KnownBits->signBitIsZero(Base);
+  return KB->signBitIsZero(Base);
 }
 
 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
@@ -4278,8 +4278,7 @@
   if (RHS->countr_one() >= ShAmtBits)
     return true;
 
-  const APInt &LHSKnownZeros =
-      KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
+  const APInt &LHSKnownZeros = KB->getKnownZeroes(MI.getOperand(1).getReg());
   return (LHSKnownZeros | *RHS).countr_one() >= ShAmtBits;
 }
 
@@ -4770,7 +4769,7 @@
   Register SOffset;
   unsigned Offset;
   std::tie(SOffset, Offset) =
-      AMDGPU::getBaseWithConstantOffset(*MRI, Root.getReg(), KnownBits);
+      AMDGPU::getBaseWithConstantOffset(*MRI, Root.getReg(), KB);
 
   if (!SOffset)
     return std::nullopt;
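
Note (not part of the patch): the rename appears motivated by the collision between the GISelKnownBits member, previously named KnownBits, and the llvm::KnownBits value class; the hunk at 4085 mixes KnownBits::computeForAddSub(...) with calls through the member in the same expression, which read ambiguously before the change. The standalone C++ sketch below illustrates that readability hazard with stand-in types; Selector, query, and the simplified struct shapes are assumptions for illustration, not LLVM's API.

// Standalone sketch, not LLVM code: stand-ins for llvm::KnownBits and
// GISelKnownBits, reduced to the minimum needed to show the naming clash.
#include <cstdint>
#include <iostream>

struct KnownBits {
  std::uint64_t Zero, One;
  // Bits known zero / known one of a value known to equal V exactly.
  static KnownBits makeConstant(std::uint64_t V) { return {~V, V}; }
};

struct GISelKnownBits {
  // Pretend analysis result: treat the register id itself as a known constant.
  KnownBits getKnownBits(std::uint64_t Reg) const {
    return KnownBits::makeConstant(Reg);
  }
};

class Selector {
  // Before the patch this member was spelled "KnownBits", so expressions below
  // juggled "KnownBits::makeConstant(...)" (the class) and
  // "KnownBits->getKnownBits(...)" (the member) in the same statement.
  GISelKnownBits *KB = nullptr;

public:
  void setup(GISelKnownBits *kb) { KB = kb; }

  KnownBits query(std::uint64_t Reg) const {
    // With the member renamed to KB, "KnownBits" can only mean the class here.
    KnownBits Const = KnownBits::makeConstant(0xff);
    KnownBits FromAnalysis = KB->getKnownBits(Reg);
    // Intersect the two facts: keep only bits both sources agree on.
    return {Const.Zero & FromAnalysis.Zero, Const.One & FromAnalysis.One};
  }
};

int main() {
  GISelKnownBits GKB;
  Selector S;
  S.setup(&GKB);
  std::cout << std::hex << S.query(0x0f).One << '\n'; // prints "f"
  return 0;
}

In the patch itself nothing behavioral changes: it is a pure rename of the member (and the setupMF parameter to lowercase kb), with a couple of call sites reflowed to fit the shorter name.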