diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -553,8 +553,6 @@
   bool NeedBitcast = OrigTy->isFloatingPointTy();
   if (NeedBitcast) {
     IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
-    unsigned AS = Addr->getType()->getPointerAddressSpace();
-    Addr = Builder.CreateBitCast(Addr, IntTy->getPointerTo(AS));
     NewVal = Builder.CreateBitCast(NewVal, IntTy);
     Loaded = Builder.CreateBitCast(Loaded, IntTy);
   }
@@ -727,7 +725,7 @@
   assert(ValueSize < MinWordSize);
 
   PointerType *PtrTy = cast<PointerType>(Addr->getType());
-  Type *WordPtrType = PMV.WordType->getPointerTo(PtrTy->getAddressSpace());
+  Type *WordPtrType = PointerType::get(PMV.WordType->getContext(), PtrTy->getAddressSpace());
   IntegerType *IntTy = DL.getIntPtrType(Ctx, PtrTy->getAddressSpace());
 
   Value *PtrLSB;
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -7849,9 +7849,7 @@
   bool IsLE = SI.getModule()->getDataLayout().isLittleEndian();
   auto CreateSplitStore = [&](Value *V, bool Upper) {
     V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
-    Value *Addr = Builder.CreateBitCast(
-        SI.getOperand(1),
-        SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
+    Value *Addr = SI.getOperand(1);
     Align Alignment = SI.getAlign();
     const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper);
     if (IsOffsetStore) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2250,7 +2250,7 @@
   // Also pass the return address of the remainder.
   SDValue FIPtr = DAG.CreateStackTemporary(RetVT);
   Entry.Node = FIPtr;
-  Entry.Ty = RetTy->getPointerTo();
+  Entry.Ty = PointerType::getUnqual(RetTy->getContext());
   Entry.IsSExt = isSigned;
   Entry.IsZExt = !isSigned;
   Args.push_back(Entry);
@@ -2341,7 +2341,7 @@
   // Pass the return address of sin.
   SDValue SinPtr = DAG.CreateStackTemporary(RetVT);
   Entry.Node = SinPtr;
-  Entry.Ty = RetTy->getPointerTo();
+  Entry.Ty = PointerType::getUnqual(RetTy->getContext());
   Entry.IsSExt = false;
   Entry.IsZExt = false;
   Args.push_back(Entry);
@@ -2349,7 +2349,7 @@
   // Also pass the return address of the cos.
   SDValue CosPtr = DAG.CreateStackTemporary(RetVT);
   Entry.Node = CosPtr;
-  Entry.Ty = RetTy->getPointerTo();
+  Entry.Ty = PointerType::getUnqual(RetTy->getContext());
   Entry.IsSExt = false;
   Entry.IsZExt = false;
   Args.push_back(Entry);
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -2890,9 +2890,7 @@
     auto *VecTy = cast<FixedVectorType>(CI->getType());
     Type *EltTy = VecTy->getElementType();
     unsigned EltNum = VecTy->getNumElements();
-    Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
-                                        EltTy->getPointerTo());
-    Value *Load = Builder.CreateLoad(EltTy, Cast);
+    Value *Load = Builder.CreateLoad(EltTy, CI->getArgOperand(0));
     Type *I32Ty = Type::getInt32Ty(C);
     Rep = PoisonValue::get(VecTy);
     for (unsigned I = 0; I < EltNum; ++I)
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3847,11 +3847,6 @@
                       : Intrinsic::hexagon_L4_loadd_locked;
   Function *Fn = Intrinsic::getDeclaration(M, IntID);
 
-  auto PtrTy = cast<PointerType>(Addr->getType());
-  PointerType *NewPtrTy =
-      Builder.getIntNTy(SZ)->getPointerTo(PtrTy->getAddressSpace());
-  Addr = Builder.CreateBitCast(Addr, NewPtrTy);
-
   Value *Call = Builder.CreateCall(Fn, Addr, "larx");
 
   return Builder.CreateBitCast(Call, ValueTy);
diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
--- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -701,7 +701,7 @@
     if (Adjust % ElemSize == 0 && Adjust != 0) {
       Value *Tmp0 = Builder.CreateGEP(
           ElemTy, Ptr, HVC.getConstInt(Adjust / ElemSize), "gep");
-      return Builder.CreatePointerCast(remap(Tmp0), ValTy->getPointerTo(),
+      return Builder.CreatePointerCast(remap(Tmp0), PointerType::getUnqual(ValTy->getContext()),
                                        "cst");
     }
   }
@@ -710,7 +710,7 @@
   Value *Tmp0 = Builder.CreatePointerCast(Ptr, CharPtrTy, "cst");
   Value *Tmp1 = Builder.CreateGEP(Type::getInt8Ty(HVC.F.getContext()),
                                   remap(Tmp0), HVC.getConstInt(Adjust), "gep");
-  return Builder.CreatePointerCast(remap(Tmp1), ValTy->getPointerTo(), "cst");
+  return Builder.CreatePointerCast(remap(Tmp1), PointerType::getUnqual(ValTy->getContext()), "cst");
 }
 
 auto AlignVectors::createAlignedPointer(IRBuilderBase &Builder, Value *Ptr,
@@ -728,7 +728,7 @@
   Value *AsInt = Builder.CreatePtrToInt(Ptr, HVC.getIntTy(), "pti");
   Value *Mask = HVC.getConstInt(-Alignment);
   Value *And = Builder.CreateAnd(remap(AsInt), Mask, "and");
-  return Builder.CreateIntToPtr(And, ValTy->getPointerTo(), "itp");
+  return Builder.CreateIntToPtr(And, PointerType::getUnqual(ValTy->getContext()), "itp");
 }
 
 auto AlignVectors::createLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr,
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -5746,7 +5746,7 @@
   }
 
   InstructionCost AddressUnpackCost = getScalarizationOverhead(
-      FixedVectorType::get(ScalarTy->getPointerTo(), VF), DemandedElts,
+      FixedVectorType::get(PointerType::getUnqual(ScalarTy->getContext()), VF), DemandedElts,
       /*Insert=*/false, /*Extract=*/true, CostKind);
 
   // The cost of the scalar loads/stores.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -585,14 +585,8 @@
 
   Value *Ptr = LI.getPointerOperand();
-  unsigned AS = LI.getPointerAddressSpace();
-  Type *NewPtrTy = NewTy->getPointerTo(AS);
-  Value *NewPtr = nullptr;
-  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
-        NewPtr->getType() == NewPtrTy))
-    NewPtr = Builder.CreateBitCast(Ptr, NewPtrTy);
   LoadInst *NewLoad = Builder.CreateAlignedLoad(
-      NewTy, NewPtr, LI.getAlign(), LI.isVolatile(), LI.getName() + Suffix);
+      NewTy, Ptr, LI.getAlign(), LI.isVolatile(), LI.getName() + Suffix);
   NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
   copyMetadataForLoad(*NewLoad, LI);
   return NewLoad;
 }
@@ -607,12 +601,11 @@
          "can't fold an atomic store of requested type");
 
   Value *Ptr = SI.getPointerOperand();
-  unsigned AS = SI.getPointerAddressSpace();
   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
   SI.getAllMetadata(MD);
 
   StoreInst *NewStore = IC.Builder.CreateAlignedStore(
-      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
+      V, Ptr,
       SI.getAlign(), SI.isVolatile());
   NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
   for (const auto &MDPair : MD) {
diff --git a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
--- a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -190,7 +190,7 @@
     auto *OrigBiasInst = dyn_cast<BinaryOperator>(AddrInst->getOperand(0));
     assert(OrigBiasInst->getOpcode() == Instruction::BinaryOps::Add);
     Value *BiasInst = Builder.Insert(OrigBiasInst->clone());
-    Addr = Builder.CreateIntToPtr(BiasInst, Ty->getPointerTo());
+    Addr = Builder.CreateIntToPtr(BiasInst, PointerType::getUnqual(Ty->getContext()));
   }
   if (AtomicCounterUpdatePromoted)
     // automic update currently can only be promoted across the current
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1676,7 +1676,7 @@
           VectTy->getElementCount());
     }
     assert(IntPtrTy == MS.IntptrTy);
-    return ShadowTy->getPointerTo();
+    return PointerType::getUnqual(ShadowTy->getContext());
   }
 
   Constant *constToIntPtr(Type *IntPtrTy, uint64_t C) const {
@@ -1800,11 +1800,11 @@
     // TODO: Support callbacs with vectors of addresses.
     unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
     Value *ShadowPtrs = ConstantInt::getNullValue(
-        FixedVectorType::get(ShadowTy->getPointerTo(), NumElements));
+        FixedVectorType::get(PointerType::getUnqual(ShadowTy->getContext()), NumElements));
     Value *OriginPtrs = nullptr;
     if (MS.TrackOrigins)
       OriginPtrs = ConstantInt::getNullValue(
-          FixedVectorType::get(MS.OriginTy->getPointerTo(), NumElements));
+          FixedVectorType::get(PointerType::getUnqual(MS.OriginTy->getContext()), NumElements));
     for (unsigned i = 0; i < NumElements; ++i) {
       Value *OneAddr =
           IRB.CreateExtractElement(Addr, ConstantInt::get(IRB.getInt32Ty(), i));
@@ -3386,7 +3386,7 @@
         getShadowOriginPtr(Addr, IRB, Ty, Align(1), /*isStore*/ true).first;
 
     IRB.CreateStore(getCleanShadow(Ty),
-                    IRB.CreatePointerCast(ShadowPtr, Ty->getPointerTo()));
+                    IRB.CreatePointerCast(ShadowPtr, PointerType::getUnqual(Ty->getContext())));
 
     if (ClCheckAccessAddress)
       insertShadowCheck(Addr, &I);
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -262,7 +262,7 @@
         UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
 
     Type *Ty = Type::getIntNTy(Ctx, BitSize);
-    Type *PtrTy = Ty->getPointerTo();
+    Type *PtrTy = PointerType::getUnqual(Ty->getContext());
    SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
    TsanAtomicLoad[i] =
        M.getOrInsertFunction(AtomicLoadName,
@@ -727,7 +727,7 @@
     const unsigned ByteSize = 1U << Idx;
     const unsigned BitSize = ByteSize * 8;
     Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
-    Type *PtrTy = Ty->getPointerTo();
+    Type *PtrTy = PointerType::getUnqual(Ty->getContext());
     Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                      createOrdering(&IRB, LI->getOrdering())};
     Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
@@ -742,7 +742,7 @@
     const unsigned ByteSize = 1U << Idx;
     const unsigned BitSize = ByteSize * 8;
     Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
-    Type *PtrTy = Ty->getPointerTo();
+    Type *PtrTy = PointerType::getUnqual(Ty->getContext());
     Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                      IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
                      createOrdering(&IRB, SI->getOrdering())};
@@ -760,7 +760,7 @@
     const unsigned ByteSize = 1U << Idx;
     const unsigned BitSize = ByteSize * 8;
     Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
-    Type *PtrTy = Ty->getPointerTo();
+    Type *PtrTy = PointerType::getUnqual(Ty->getContext());
     Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                      IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                      createOrdering(&IRB, RMWI->getOrdering())};
@@ -775,7 +775,7 @@
     const unsigned ByteSize = 1U << Idx;
     const unsigned BitSize = ByteSize * 8;
     Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
-    Type *PtrTy = Ty->getPointerTo();
+    Type *PtrTy = PointerType::getUnqual(Ty->getContext());
     Value *CmpOperand =
         IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
     Value *NewOperand =
diff --git a/llvm/lib/Transforms/Utils/EntryExitInstrumenter.cpp b/llvm/lib/Transforms/Utils/EntryExitInstrumenter.cpp
--- a/llvm/lib/Transforms/Utils/EntryExitInstrumenter.cpp
+++ b/llvm/lib/Transforms/Utils/EntryExitInstrumenter.cpp
@@ -35,7 +35,7 @@
   Triple TargetTriple(M.getTargetTriple());
   if (TargetTriple.isOSAIX() && Func == "__mcount") {
     Type *SizeTy = M.getDataLayout().getIntPtrType(C);
-    Type *SizePtrTy = SizeTy->getPointerTo();
+    Type *SizePtrTy = PointerType::getUnqual(SizeTy->getContext());
     GlobalVariable *GV = new GlobalVariable(M, SizeTy, /*isConstant=*/false,
                                             GlobalValue::InternalLinkage,
                                             ConstantInt::get(SizeTy, 0));
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -2244,7 +2244,7 @@
   // instruction.
   Type *ToTy;
   if (DefI->getType()->isIntegerTy())
-    ToTy = DefI->getType()->getPointerTo();
+    ToTy = PointerType::getUnqual(DefI->getContext());
   else
     ToTy = Type::getInt32Ty(DefI->getContext());
   Instruction *User =
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2630,11 +2630,7 @@
     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = gep->isInBounds();
     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Idx, "", InBounds);
-
-    // Cast to the vector pointer type.
-    unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
-    Type *PtrTy = VecTy->getPointerTo(AddressSpace);
-    AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
+    AddrParts.push_back(AddrPart);
   }
 
   State.setDebugLocFromInst(Instr);
@@ -9697,7 +9693,7 @@
       const DataLayout &DL =
          Builder.GetInsertBlock()->getModule()->getDataLayout();
       Type *IndexTy = State.VF.isScalable() && (isReverse() || Part > 0)
-                          ? DL.getIndexType(ScalarDataTy->getPointerTo())
+                          ? DL.getIndexType(PointerType::getUnqual(ScalarDataTy->getContext()))
                           : Builder.getInt32Ty();
       bool InBounds = false;
       if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
@@ -9725,8 +9721,7 @@
         PartPtr = Builder.CreateGEP(ScalarDataTy, Ptr, Increment, "", InBounds);
       }
 
-      unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
-      return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
+      return PartPtr;
     };
 
     // Handle Stores:
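
Reviewer note: every hunk above applies the same mechanical rewrite. Under opaque pointers (the default since LLVM 15), a pointer type carries only an address space and no pointee type, so Ty->getPointerTo(AS) is equivalent to PointerType::get(Ctx, AS) (or PointerType::getUnqual(Ctx) for address space 0), and a bitcast between two pointer types in the same address space folds to a no-op. That is why several hunks delete the cast together with its address-space plumbing instead of updating it. Below is a minimal standalone sketch of the pattern, not part of the patch; loadAsWord is a hypothetical helper used only for illustration:

  // Sketch only; assumes an LLVM build with opaque pointers (LLVM >= 15).
  #include "llvm/IR/IRBuilder.h"

  using namespace llvm;

  static LoadInst *loadAsWord(IRBuilder<> &Builder, Value *Addr,
                              IntegerType *WordTy) {
    // Typed-pointer era: the address was first rewritten to point at WordTy:
    //   unsigned AS = Addr->getType()->getPointerAddressSpace();
    //   Addr = Builder.CreateBitCast(Addr, WordTy->getPointerTo(AS));
    // Opaque-pointer era: Addr already has type 'ptr addrspace(AS)', which is
    // exactly what PointerType::get(WordTy->getContext(), AS) denotes, so the
    // bitcast is a no-op and the load can use the original pointer directly.
    return Builder.CreateLoad(WordTy, Addr);
  }

The same reasoning explains the hunks that drop whole blocks (CodeGenPrepare, HexagonISelLowering, LoopVectorize): once the bitcast is gone, nothing else consumes the pointee type or the address space, so the surrounding setup code is dead.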