diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -6044,7 +6044,7 @@
 
     // Generate a new GEP to replace the current one.
     LLVMContext &Ctx = GEP->getContext();
-    Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
+    Type *PtrIdxTy = DL->getPtrIndexType(GEP->getType());
     Type *I8PtrTy =
         Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace());
     Type *I8Ty = Type::getInt8Ty(Ctx);
@@ -6074,7 +6074,7 @@
       }
       IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
       // Create a new base.
-      Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset);
+      Value *BaseIndex = ConstantInt::get(PtrIdxTy, BaseOffset);
       NewBaseGEP = OldBase;
       if (NewBaseGEP->getType() != I8PtrTy)
         NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
@@ -6090,7 +6090,7 @@
         NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
     } else {
       // Calculate the new offset for the new GEP.
-      Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset);
+      Value *Index = ConstantInt::get(PtrIdxTy, Offset - BaseOffset);
       NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index);
 
       if (GEP->getType() != I8PtrTy)
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -846,7 +846,7 @@
   unsigned NumValues = SplitVTs.size();
   Align BaseAlign = DL.getPrefTypeAlign(RetTy);
   Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
-  LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);
+  LLT OffsetLLTy = getLLTForType(*DL.getPtrIndexType(RetPtrTy), DL);
 
   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
 
@@ -877,7 +877,7 @@
   Align BaseAlign = DL.getPrefTypeAlign(RetTy);
   unsigned AS = DL.getAllocaAddrSpace();
   LLT OffsetLLTy =
-      getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);
+      getLLTForType(*DL.getPtrIndexType(RetTy->getPointerTo(AS)), DL);
 
   MachinePointerInfo PtrInfo(AS);
 
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1294,7 +1294,7 @@
   AAMDNodes AAInfo = LI.getAAMetadata();
 
   const Value *Ptr = LI.getPointerOperand();
-  Type *OffsetIRTy = DL->getIntPtrType(Ptr->getType());
+  Type *OffsetIRTy = DL->getPtrIndexType(Ptr->getType());
   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
 
   if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
@@ -1342,7 +1342,7 @@
   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
   Register Base = getOrCreateVReg(*SI.getPointerOperand());
 
-  Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
+  Type *OffsetIRTy = DL->getPtrIndexType(SI.getPointerOperandType());
   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
 
   if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
@@ -1488,7 +1488,7 @@
   Register BaseReg = getOrCreateVReg(Op0);
   Type *PtrIRTy = Op0.getType();
   LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
-  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
+  Type *OffsetIRTy = DL->getPtrIndexType(PtrIRTy);
   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
 
   // Normalize Vector GEP - all scalar operands should be converted to the
@@ -1513,7 +1513,7 @@
             .getReg(0);
     PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
     PtrTy = getLLTForType(*PtrIRTy, *DL);
-    OffsetIRTy = DL->getIntPtrType(PtrIRTy);
+    OffsetIRTy = DL->getPtrIndexType(PtrIRTy);
     OffsetTy = getLLTForType(*OffsetIRTy, *DL);
   }
 
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll
@@ -6,8 +6,8 @@
   ; CHECK-LABEL: name: no_auto_constfold_gep
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT: [[C:%[0-9]+]]:_(p7) = G_CONSTANT i128 0
-  ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 123
-  ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p7) = G_PTR_ADD [[C]], [[C1]](s128)
+  ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 123
+  ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p7) = G_PTR_ADD [[C]], [[C1]](s32)
   ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[PTR_ADD]](p7)
   ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
@@ -26,8 +26,7 @@
   ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p7>) = G_BUILD_VECTOR [[C]](p7), [[C]](p7)
   ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 123
   ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32)
-  ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<2 x s128>) = G_SEXT [[BUILD_VECTOR1]](<2 x s32>)
-  ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(<2 x p7>) = G_PTR_ADD [[BUILD_VECTOR]], [[SEXT]](<2 x s128>)
+  ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(<2 x p7>) = G_PTR_ADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]](<2 x s32>)
   ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x p7>) = COPY [[PTR_ADD]](<2 x p7>)
   ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x p7>)
   ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
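
Why the new call matters: getIntPtrType returns an integer as wide as the pointer, while GEP/G_PTR_ADD offsets are defined in terms of the datalayout's index width, which can be narrower, as with AMDGPU's p7 in the test above (128-bit pointer, 32-bit offsets). Below is a minimal standalone sketch of that distinction; it is not part of the patch. It uses the long-standing DataLayout::getIndexType, which for pointer types should agree with the getPtrIndexType called in this patch, and a hypothetical datalayout string in which addrspace(1) pointers are 64 bits wide but indexed with 32-bit offsets.

// Standalone sketch, not part of the patch. The datalayout string is
// hypothetical: p1:<size>:<abi>:<pref>:<idx> gives addrspace(1) 64-bit
// pointers whose GEP offsets are 32 bits wide.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  DataLayout DL("p1:64:64:64:32");
  Type *PtrTy = PointerType::get(Ctx, /*AddressSpace=*/1);

  // Pointer-width integer: i64. Sizing offsets with this over-widens them;
  // for p7 it is what produced the spurious sign-extension to <2 x s128>
  // that the updated CHECK lines above no longer expect.
  Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  // Index-width integer: i32. This is the width offsets should have.
  Type *IdxTy = DL.getIndexType(PtrTy);

  outs() << *IntPtrTy << " vs " << *IdxTy << "\n"; // prints "i64 vs i32"
  return 0;
}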