diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -6047,9 +6047,8 @@
   if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
     return nullptr;
 
-  Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
+  Type *UnqualPtrTy = PointerType::getUnqual(Ptr->getContext());
   Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
-  Type *Int32PtrTy = Int32Ty->getPointerTo();
   Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());
 
   auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
@@ -6061,8 +6060,7 @@
     return nullptr;
 
   Constant *C = ConstantExpr::getGetElementPtr(
-      Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
-      ConstantInt::get(Int64Ty, OffsetInt / 4));
+      Int32Ty, Ptr, ConstantInt::get(Int64Ty, OffsetInt / 4));
   Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
   if (!Loaded)
     return nullptr;
@@ -6093,7 +6091,7 @@
       PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
     return nullptr;
 
-  return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
+  return ConstantExpr::getBitCast(LoadedLHSPtr, UnqualPtrTy);
 }
 
 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -4585,10 +4585,11 @@
                                               bool ForEndCall) {
   assert((!ForEndCall || Info.separateBeginEndCalls()) &&
          "expected region end call to runtime only when end call is separate");
-  auto VoidPtrTy = Type::getInt8PtrTy(M.getContext());
-  auto VoidPtrPtrTy = VoidPtrTy->getPointerTo(0);
+  auto UnqualPtrTy = PointerType::getUnqual(M.getContext());
+  auto VoidPtrTy = UnqualPtrTy;
+  auto VoidPtrPtrTy = UnqualPtrTy;
   auto Int64Ty = Type::getInt64Ty(M.getContext());
-  auto Int64PtrTy = Type::getInt64PtrTy(M.getContext());
+  auto Int64PtrTy = UnqualPtrTy;
 
   if (!Info.NumberOfPtrs) {
     RTArgs.BasePointersArray = ConstantPointerNull::get(VoidPtrPtrTy);
@@ -4737,7 +4738,7 @@
   // need to fill up the arrays as we do for the pointers.
   Type *Int64Ty = Builder.getInt64Ty();
   SmallVector<Constant *> ConstSizes(CombinedInfo.Sizes.size(),
-                                     ConstantInt::get(Builder.getInt64Ty(), 0));
+                                     ConstantInt::get(Int64Ty, 0));
   SmallBitVector RuntimeSizes(CombinedInfo.Sizes.size());
   for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
     if (auto *CI = dyn_cast<ConstantInt>(CombinedInfo.Sizes[I])) {
@@ -4746,7 +4747,7 @@
           static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
               CombinedInfo.Types[I] &
              OpenMPOffloadMappingFlags::OMP_MAP_NON_CONTIG))
-        ConstSizes[I] = ConstantInt::get(Builder.getInt64Ty(),
+        ConstSizes[I] = ConstantInt::get(Int64Ty,
                                          CombinedInfo.NonContigInfo.Dims[I]);
       else
         ConstSizes[I] = CI;
@@ -4780,11 +4781,9 @@
             SizeArrayType, /* ArraySize = */ nullptr, ".offload_sizes");
         Buffer->setAlignment(OffloadSizeAlign);
         Builder.restoreIP(CodeGenIP);
-        Value *GblConstPtr = Builder.CreatePointerBitCastOrAddrSpaceCast(
-            SizesArrayGbl, Int64Ty->getPointerTo());
         Builder.CreateMemCpy(
             Buffer, M.getDataLayout().getPrefTypeAlign(Buffer->getType()),
-            GblConstPtr, OffloadSizeAlign,
+            SizesArrayGbl, OffloadSizeAlign,
             Builder.getIntN(
                 IndexSize,
                 Buffer->getAllocationSize(M.getDataLayout())->getFixedValue()));
@@ -4813,7 +4812,7 @@
       Info.RTArgs.MapNamesArray = MapNamesArrayGbl;
     } else {
       Info.RTArgs.MapNamesArray = Constant::getNullValue(
-          Type::getInt8Ty(Builder.getContext())->getPointerTo());
+          PointerType::getUnqual(Builder.getContext()));
     }
 
     // If there's a present map type modifier, it must not be applied to the end
@@ -4834,22 +4833,20 @@
     }
   }
 
+  PointerType *PtrTy = Builder.getPtrTy();
   for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
     Value *BPVal = CombinedInfo.BasePointers[I];
     Value *BP = Builder.CreateConstInBoundsGEP2_32(
-        ArrayType::get(Builder.getInt8PtrTy(), Info.NumberOfPtrs),
+        ArrayType::get(PtrTy, Info.NumberOfPtrs),
         Info.RTArgs.BasePointersArray, 0, I);
-    BP = Builder.CreatePointerBitCastOrAddrSpaceCast(
-        BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
     Builder.CreateAlignedStore(
-        BPVal, BP, M.getDataLayout().getPrefTypeAlign(Builder.getInt8PtrTy()));
+        BPVal, BP, M.getDataLayout().getPrefTypeAlign(PtrTy));
 
     if (Info.requiresDevicePointerInfo()) {
       if (CombinedInfo.DevicePointers[I] == DeviceInfoTy::Pointer) {
         CodeGenIP = Builder.saveIP();
         Builder.restoreIP(AllocaIP);
-        Info.DevicePtrInfoMap[BPVal] = {
-            BP, Builder.CreateAlloca(Builder.getPtrTy())};
+        Info.DevicePtrInfoMap[BPVal] = {BP, Builder.CreateAlloca(PtrTy)};
         Builder.restoreIP(CodeGenIP);
         assert(DeviceAddrCB &&
                "DeviceAddrCB missing for DevicePtr code generation");
@@ -4864,13 +4861,11 @@
 
     Value *PVal = CombinedInfo.Pointers[I];
     Value *P = Builder.CreateConstInBoundsGEP2_32(
-        ArrayType::get(Builder.getInt8PtrTy(), Info.NumberOfPtrs),
+        ArrayType::get(PtrTy, Info.NumberOfPtrs),
         Info.RTArgs.PointersArray, 0, I);
-    P = Builder.CreatePointerBitCastOrAddrSpaceCast(
-        P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
     // TODO: Check alignment correct.
     Builder.CreateAlignedStore(
-        PVal, P, M.getDataLayout().getPrefTypeAlign(Builder.getInt8PtrTy()));
+        PVal, P, M.getDataLayout().getPrefTypeAlign(PtrTy));
 
     if (RuntimeSizes.test(I)) {
       Value *S = Builder.CreateConstInBoundsGEP2_32(
@@ -4880,14 +4875,14 @@
       Builder.CreateAlignedStore(
           Builder.CreateIntCast(CombinedInfo.Sizes[I], Int64Ty,
                                 /*isSigned=*/true),
-          S, M.getDataLayout().getPrefTypeAlign(Builder.getInt8PtrTy()));
+          S, M.getDataLayout().getPrefTypeAlign(PtrTy));
     }
     // Fill up the mapper array.
     unsigned IndexSize = M.getDataLayout().getIndexSizeInBits(0);
-    Value *MFunc = ConstantPointerNull::get(Builder.getInt8PtrTy());
+    Value *MFunc = ConstantPointerNull::get(PtrTy);
     if (CustomMapperCB)
       if (Value *CustomMFunc = CustomMapperCB(I))
-        MFunc = Builder.CreatePointerCast(CustomMFunc, Builder.getInt8PtrTy());
+        MFunc = Builder.CreatePointerCast(CustomMFunc, PtrTy);
     Value *MAddr = Builder.CreateInBoundsGEP(
         MappersArray->getAllocatedType(), MappersArray,
         {Builder.getIntN(IndexSize, 0), Builder.getIntN(IndexSize, I)});
@@ -5483,7 +5478,7 @@
                                        std::string VarName) {
   llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get(
       llvm::ArrayType::get(
-          llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()),
+          llvm::PointerType::getUnqual(M.getContext()), Names.size()),
       Names);
   auto *MapNamesArrayGlobal = new llvm::GlobalVariable(
       M, MapNamesArrayInit->getType(),
diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
--- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -2263,11 +2263,11 @@
 
   if (DestVolatile) {
     Type *Int32Ty = Type::getInt32Ty(Ctx);
-    Type *Int32PtrTy = Type::getInt32PtrTy(Ctx);
+    Type *PtrTy = PointerType::get(Ctx, 0);
    Type *VoidTy = Type::getVoidTy(Ctx);
     Module *M = Func->getParent();
     FunctionCallee Fn = M->getOrInsertFunction(
-        HexagonVolatileMemcpyName, VoidTy, Int32PtrTy, Int32PtrTy, Int32Ty);
+        HexagonVolatileMemcpyName, VoidTy, PtrTy, PtrTy, Int32Ty);
 
     const SCEV *OneS = SE->getConstant(Int32Ty, 1);
     const SCEV *BECount32 = SE->getTruncateOrZeroExtend(BECount, Int32Ty);
@@ -2278,13 +2278,8 @@
     if (Value *Simp = simplifyInstruction(In, {*DL, TLI, DT}))
       NumWords = Simp;
 
-    Value *Op0 = (StoreBasePtr->getType() == Int32PtrTy)
-                     ? StoreBasePtr
-                     : CondBuilder.CreateBitCast(StoreBasePtr, Int32PtrTy);
-    Value *Op1 = (LoadBasePtr->getType() == Int32PtrTy)
-                     ? LoadBasePtr
-                     : CondBuilder.CreateBitCast(LoadBasePtr, Int32PtrTy);
-    NewCall = CondBuilder.CreateCall(Fn, {Op0, Op1, NumWords});
+    NewCall = CondBuilder.CreateCall(Fn,
+                                     {StoreBasePtr, LoadBasePtr, NumWords});
   } else {
     NewCall = CondBuilder.CreateMemMove(
         StoreBasePtr, SI->getAlign(), LoadBasePtr, LI->getAlign(), NumBytes);
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -313,7 +313,6 @@
 
   // If an offset is left we use byte-wise adjustment.
   if (IntOffset != 0) {
-    Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
     Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                         GEPName + ".b" + Twine(IntOffset.getZExtValue()));
   }
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1905,10 +1905,8 @@
         Builder.SetInsertPoint(InsertBefore);
         auto *Alloca =
             Builder.CreateAlloca(ArgType, AddressSpace, nullptr, "paarg");
-        auto *BitCast = Builder.CreateBitCast(
-            Alloca, Type::getInt8PtrTy(M->getContext()), UseCall->getName());
-        ArgAllocas[AllocArgIndex] = BitCast;
-        AllocaReplacement = BitCast;
+        ArgAllocas[AllocArgIndex] = Alloca;
+        AllocaReplacement = Alloca;
       }
 
       UseCall->replaceAllUsesWith(AllocaReplacement);
@@ -2117,19 +2115,18 @@
   const auto *VEPT = cast<PointerType>(VAT->getArrayElementType());
 
   // Type of pointer to the array of pointers.
-  PointerType *Int8PtrTy =
-      Type::getInt8PtrTy(V.getContext(), VEPT->getAddressSpace());
+  PointerType *PtrTy =
+      PointerType::get(V.getContext(), VEPT->getAddressSpace());
 
   SmallVector<Constant *, 8> UsedArray;
   for (GlobalValue *GV : Init) {
-    Constant *Cast =
-        ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy);
+    Constant *Cast = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, PtrTy);
     UsedArray.push_back(Cast);
   }
 
   // Sort to get deterministic order.
   array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames);
-  ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size());
+  ArrayType *ATy = ArrayType::get(PtrTy, UsedArray.size());
 
   Module *M = V.getParent();
   V.removeFromParent();
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -205,7 +205,7 @@
   Attr = Attr.addFnAttribute(Ctx, Attribute::NoUnwind);
   // Initialize the callbacks.
   TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
-                                        IRB.getVoidTy(), IRB.getInt8PtrTy());
+                                        IRB.getVoidTy(), IRB.getPtrTy());
   TsanFuncExit =
       M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
   TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
@@ -220,49 +220,49 @@
     std::string BitSizeStr = utostr(BitSize);
     SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
     TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
-                                        IRB.getInt8PtrTy());
+                                        IRB.getPtrTy());
 
     SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
     TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
-                                         IRB.getInt8PtrTy());
+                                         IRB.getPtrTy());
 
     SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
     TsanUnalignedRead[i] = M.getOrInsertFunction(
-        UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+        UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
 
     SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
     TsanUnalignedWrite[i] = M.getOrInsertFunction(
-        UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+        UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
 
     SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
     TsanVolatileRead[i] = M.getOrInsertFunction(
-        VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+        VolatileReadName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
 
     SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
     TsanVolatileWrite[i] = M.getOrInsertFunction(
-        VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+        VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
 
     SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
                                               ByteSizeStr);
     TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
-        UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+        UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
 
     SmallString<64> UnalignedVolatileWriteName(
         "__tsan_unaligned_volatile_write" + ByteSizeStr);
     TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
-        UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+        UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
 
     SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
     TsanCompoundRW[i] = M.getOrInsertFunction(
-        CompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+        CompoundRWName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
 
     SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
                                             ByteSizeStr);
     TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
-        UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+        UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
 
     Type *Ty = Type::getIntNTy(Ctx, BitSize);
-    Type *PtrTy = Ty->getPointerTo();
+    Type *PtrTy = PointerType::get(Ctx, 0);
     SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
     TsanAtomicLoad[i] =
         M.getOrInsertFunction(AtomicLoadName,
@@ -318,9 +318,9 @@
   }
   TsanVptrUpdate =
       M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
-                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
+                            IRB.getPtrTy(), IRB.getPtrTy());
   TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
-                                       IRB.getVoidTy(), IRB.getInt8PtrTy());
+                                       IRB.getVoidTy(), IRB.getPtrTy());
   TsanAtomicThreadFence = M.getOrInsertFunction(
       "__tsan_atomic_thread_fence",
       TLI.getAttrList(&Ctx, {0}, /*Signed=*/true, /*Ret=*/false, Attr),
@@ -332,15 +332,15 @@
                             IRB.getVoidTy(), OrdTy);
 
   MemmoveFn =
-      M.getOrInsertFunction("__tsan_memmove", Attr, IRB.getInt8PtrTy(),
-                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
+      M.getOrInsertFunction("__tsan_memmove", Attr, IRB.getPtrTy(),
+                            IRB.getPtrTy(), IRB.getPtrTy(), IntptrTy);
   MemcpyFn =
-      M.getOrInsertFunction("__tsan_memcpy", Attr, IRB.getInt8PtrTy(),
-                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
+      M.getOrInsertFunction("__tsan_memcpy", Attr, IRB.getPtrTy(),
+                            IRB.getPtrTy(), IRB.getPtrTy(), IntptrTy);
   MemsetFn = M.getOrInsertFunction(
       "__tsan_memset",
       TLI.getAttrList(&Ctx, {1}, /*Signed=*/true, /*Ret=*/false, Attr),
-      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
+      IRB.getPtrTy(), IRB.getPtrTy(), IRB.getInt32Ty(), IntptrTy);
 }
 
 static bool isVtableAccess(Instruction *I) {
@@ -613,17 +613,14 @@
       StoredValue = IRB.CreateExtractElement(
           StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
     if (StoredValue->getType()->isIntegerTy())
-      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
+      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getPtrTy());
     // Call TsanVptrUpdate.
-    IRB.CreateCall(TsanVptrUpdate,
-                   {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
-                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
+    IRB.CreateCall(TsanVptrUpdate, {Addr, StoredValue});
     NumInstrumentedVtableWrites++;
     return true;
   }
   if (!IsWrite && isVtableAccess(II.Inst)) {
-    IRB.CreateCall(TsanVptrLoad,
-                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
+    IRB.CreateCall(TsanVptrLoad, Addr);
     NumInstrumentedVtableReads++;
     return true;
   }
@@ -655,7 +652,7 @@
     else
       OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
   }
-  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
+  IRB.CreateCall(OnAccessFunc, Addr);
   if (IsCompoundRW || IsWrite)
     NumInstrumentedWrites++;
   if (IsCompoundRW || !IsWrite)
@@ -691,17 +688,19 @@
 bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
   InstrumentationIRBuilder IRB(I);
   if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
+    Value *Cast1 = IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false);
+    Value *Cast2 = IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false);
     IRB.CreateCall(
         MemsetFn,
-        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
-         IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
-         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
+        {M->getArgOperand(0),
+         Cast1,
+         Cast2});
     I->eraseFromParent();
   } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
     IRB.CreateCall(
         isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
-        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
-         IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
+        {M->getArgOperand(0),
+         M->getArgOperand(1),
          IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
     I->eraseFromParent();
   }
@@ -724,11 +723,7 @@
   int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
   if (Idx < 0)
     return false;
-  const unsigned ByteSize = 1U << Idx;
-  const unsigned BitSize = ByteSize * 8;
-  Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
-  Type *PtrTy = Ty->getPointerTo();
-  Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+  Value *Args[] = {Addr,
                    createOrdering(&IRB, LI->getOrdering())};
   Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
   Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
@@ -742,8 +737,7 @@
   const unsigned ByteSize = 1U << Idx;
   const unsigned BitSize = ByteSize * 8;
   Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
-  Type *PtrTy = Ty->getPointerTo();
-  Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+  Value *Args[] = {Addr,
                    IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
                    createOrdering(&IRB, SI->getOrdering())};
   CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
@@ -760,8 +754,7 @@
   const unsigned ByteSize = 1U << Idx;
   const unsigned BitSize = ByteSize * 8;
   Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
-  Type *PtrTy = Ty->getPointerTo();
-  Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+  Value *Args[] = {Addr,
                    IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                    createOrdering(&IRB, RMWI->getOrdering())};
   CallInst *C = CallInst::Create(F, Args);
@@ -775,12 +768,11 @@
   const unsigned ByteSize = 1U << Idx;
   const unsigned BitSize = ByteSize * 8;
   Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
-  Type *PtrTy = Ty->getPointerTo();
   Value *CmpOperand = IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
   Value *NewOperand = IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
-  Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+  Value *Args[] = {Addr,
                    CmpOperand,
                    NewOperand,
                    createOrdering(&IRB, CASI->getSuccessOrdering()),
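
Note (not part of the patch): every hunk above applies the same opaque-pointer cleanup. With opaque pointers there is only one pointer type per address space, so Type::getInt8PtrTy, Type::getInt32PtrTy, Type::getInt64PtrTy, and Ty->getPointerTo() all collapse to PointerType::getUnqual / IRBuilder::getPtrTy, and the CreateBitCast / CreatePointerCast calls that only changed the pointee type become no-ops and can be dropped. A minimal sketch of the idiom, assuming an addrspace(0) pointer and an opaque-pointer build of LLVM; emitRuntimeCall and "some_runtime_fn" are hypothetical names, not taken from the files above:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Hypothetical helper illustrating the migration; not from the patch above.
static CallInst *emitRuntimeCall(Module &M, IRBuilder<> &Builder, Value *Ptr) {
  LLVMContext &Ctx = M.getContext();
  // One opaque pointer type per address space replaces i8*, i32*, T*, ...
  PointerType *PtrTy = PointerType::getUnqual(Ctx); // same as Builder.getPtrTy()
  FunctionCallee Callee = M.getOrInsertFunction(
      "some_runtime_fn", Builder.getVoidTy(), PtrTy); // assumed runtime symbol
  // Before opaque pointers this call site needed a cast first:
  //   Value *Cast = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Ctx));
  // Now the pointer is passed through unchanged.
  return Builder.CreateCall(Callee, {Ptr});
}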