diff --git a/llvm/include/llvm/IR/Argument.h b/llvm/include/llvm/IR/Argument.h
--- a/llvm/include/llvm/IR/Argument.h
+++ b/llvm/include/llvm/IR/Argument.h
@@ -76,8 +76,13 @@
   bool hasByValOrInAllocaAttr() const;
 
   /// If this is a byval or inalloca argument, return its alignment.
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use getParamAlign() instead.
   unsigned getParamAlignment() const;
 
+  /// If this is a byval or inalloca argument, return its alignment.
+  MaybeAlign getParamAlign() const;
+
   /// If this is a byval argument, return its type.
   Type *getParamByValType() const;
 
diff --git a/llvm/include/llvm/IR/Function.h b/llvm/include/llvm/IR/Function.h
--- a/llvm/include/llvm/IR/Function.h
+++ b/llvm/include/llvm/IR/Function.h
@@ -435,12 +435,18 @@
   void addDereferenceableOrNullParamAttr(unsigned ArgNo, uint64_t Bytes);
 
   /// Extract the alignment for a call or parameter (0=unknown).
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use getParamAlign() instead.
   unsigned getParamAlignment(unsigned ArgNo) const {
-    if (const auto MA = AttributeSets.getParamAlignment(ArgNo))
+    if (const auto MA = getParamAlign(ArgNo))
       return MA->value();
     return 0;
   }
 
+  MaybeAlign getParamAlign(unsigned ArgNo) const {
+    return AttributeSets.getParamAlignment(ArgNo);
+  }
+
   /// Extract the byval type for a parameter.
   Type *getParamByValType(unsigned ArgNo) const {
     Type *Ty = AttributeSets.getParamByValType(ArgNo);
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -493,12 +493,14 @@
   /// and noalias tags.
   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
-  CallInst *CreateMemCpy(Value *Dst, unsigned DstAlign, Value *Src,
-                         unsigned SrcAlign, uint64_t Size,
-                         bool isVolatile = false, MDNode *TBAATag = nullptr,
-                         MDNode *TBAAStructTag = nullptr,
-                         MDNode *ScopeTag = nullptr,
-                         MDNode *NoAliasTag = nullptr) {
+  LLVM_ATTRIBUTE_DEPRECATED(
+      CallInst *CreateMemCpy(Value *Dst, unsigned DstAlign, Value *Src,
+                             unsigned SrcAlign, uint64_t Size,
+                             bool isVolatile = false, MDNode *TBAATag = nullptr,
+                             MDNode *TBAAStructTag = nullptr,
+                             MDNode *ScopeTag = nullptr,
+                             MDNode *NoAliasTag = nullptr),
+      "Use the version that takes MaybeAlign instead") {
     return CreateMemCpy(Dst, MaybeAlign(DstAlign), Src, MaybeAlign(SrcAlign),
                         getInt64(Size), isVolatile, TBAATag, TBAAStructTag,
                         ScopeTag, NoAliasTag);
@@ -517,12 +519,14 @@
 
   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
-  CallInst *CreateMemCpy(Value *Dst, unsigned DstAlign, Value *Src,
-                         unsigned SrcAlign, Value *Size,
-                         bool isVolatile = false, MDNode *TBAATag = nullptr,
-                         MDNode *TBAAStructTag = nullptr,
-                         MDNode *ScopeTag = nullptr,
-                         MDNode *NoAliasTag = nullptr);
+  LLVM_ATTRIBUTE_DEPRECATED(
+      CallInst *CreateMemCpy(Value *Dst, unsigned DstAlign, Value *Src,
+                             unsigned SrcAlign, Value *Size,
+                             bool isVolatile = false, MDNode *TBAATag = nullptr,
+                             MDNode *TBAAStructTag = nullptr,
+                             MDNode *ScopeTag = nullptr,
+                             MDNode *NoAliasTag = nullptr),
+      "Use the version that takes MaybeAlign instead");
   CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
                          MaybeAlign SrcAlign, Value *Size,
                          bool isVolatile = false, MDNode *TBAATag = nullptr,
@@ -562,12 +566,15 @@
   /// and noalias tags.
   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
-  CallInst *CreateMemMove(Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
-                          uint64_t Size, bool isVolatile = false,
-                          MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
-                          MDNode *NoAliasTag = nullptr) {
-    return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size), isVolatile,
-                         TBAATag, ScopeTag, NoAliasTag);
+  LLVM_ATTRIBUTE_DEPRECATED(
+      CallInst *CreateMemMove(
+          Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
+          uint64_t Size, bool isVolatile = false, MDNode *TBAATag = nullptr,
+          MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr),
+      "Use the version that takes MaybeAlign") {
+    return CreateMemMove(Dst, MaybeAlign(DstAlign), Src, MaybeAlign(SrcAlign),
+                         getInt64(Size), isVolatile, TBAATag, ScopeTag,
+                         NoAliasTag);
   }
   CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
                           MaybeAlign SrcAlign, uint64_t Size,
@@ -579,11 +586,12 @@
   }
   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
-  CallInst *CreateMemMove(Value *Dst, unsigned DstAlign, Value *Src,
-                          unsigned SrcAlign, Value *Size,
-                          bool isVolatile = false, MDNode *TBAATag = nullptr,
-                          MDNode *ScopeTag = nullptr,
-                          MDNode *NoAliasTag = nullptr) {
+  LLVM_ATTRIBUTE_DEPRECATED(
+      CallInst *CreateMemMove(
+          Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
+          Value *Size, bool isVolatile = false, MDNode *TBAATag = nullptr,
+          MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr),
+      "Use the version that takes MaybeAlign") {
     return CreateMemMove(Dst, MaybeAlign(DstAlign), Src, MaybeAlign(SrcAlign),
                          Size, isVolatile, TBAATag, ScopeTag, NoAliasTag);
   }
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -1585,19 +1585,31 @@
   }
 
   /// Extract the alignment of the return value.
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use getRetAlign() instead.
   unsigned getRetAlignment() const {
     if (const auto MA = Attrs.getRetAlignment())
      return MA->value();
     return 0;
   }
 
+  /// Extract the alignment of the return value.
+  MaybeAlign getRetAlign() const { return Attrs.getRetAlignment(); }
+
   /// Extract the alignment for a call or parameter (0=unknown).
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use getParamAlign() instead.
   unsigned getParamAlignment(unsigned ArgNo) const {
     if (const auto MA = Attrs.getParamAlignment(ArgNo))
       return MA->value();
     return 0;
   }
 
+  /// Extract the alignment for a call or parameter (0=unknown).
+  MaybeAlign getParamAlign(unsigned ArgNo) const {
+    return Attrs.getParamAlignment(ArgNo);
+  }
+
   /// Extract the byval type for a call or parameter.
   Type *getParamByValType(unsigned ArgNo) const {
     Type *Ty = Attrs.getParamByValType(ArgNo);
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -239,14 +239,20 @@
   }
 
   /// Return the alignment of the access that is being performed.
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use getAlign() instead.
   unsigned getAlignment() const {
-    if (const auto MA =
-            decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31))
+    if (const auto MA = getAlign())
       return MA->value();
     return 0;
   }
 
-  void setAlignment(MaybeAlign Align);
+  /// Return the alignment of the access that is being performed.
+  MaybeAlign getAlign() const {
+    return decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
+  }
+
+  void setAlignment(MaybeAlign Alignment);
 
   /// Returns the ordering constraint of this load instruction.
   AtomicOrdering getOrdering() const {
@@ -365,14 +371,19 @@
   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
 
   /// Return the alignment of the access that is being performed
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use getAlign() instead.
   unsigned getAlignment() const {
-    if (const auto MA =
-            decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31))
+    if (const auto MA = getAlign())
       return MA->value();
     return 0;
   }
 
-  void setAlignment(MaybeAlign Align);
+  MaybeAlign getAlign() const {
+    return decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
+  }
+
+  void setAlignment(MaybeAlign Alignment);
 
   /// Returns the ordering constraint of this store instruction.
   AtomicOrdering getOrdering() const {
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -348,7 +348,10 @@
     return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
   }
 
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use getDestAlign() instead.
   unsigned getDestAlignment() const { return getParamAlignment(ARG_DEST); }
+  MaybeAlign getDestAlign() const { return getParamAlign(ARG_DEST); }
 
   /// Set the specified arguments of the instruction.
   void setDest(Value *Ptr) {
@@ -406,10 +409,16 @@
     return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
   }
 
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use getSourceAlign() instead.
   unsigned getSourceAlignment() const {
     return BaseCL::getParamAlignment(ARG_SOURCE);
   }
 
+  MaybeAlign getSourceAlign() const {
+    return BaseCL::getParamAlign(ARG_SOURCE);
+  }
+
   void setSource(Value *Ptr) {
     assert(getRawSource()->getType() == Ptr->getType() &&
            "setSource called with pointer of wrong type!");
diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp
--- a/llvm/lib/CodeGen/SafeStack.cpp
+++ b/llvm/lib/CodeGen/SafeStack.cpp
@@ -563,7 +563,7 @@
   for (Argument *Arg : ByValArguments) {
     unsigned Offset = SSL.getObjectOffset(Arg);
-    unsigned Align = SSL.getObjectAlignment(Arg);
+    MaybeAlign Align(SSL.getObjectAlignment(Arg));
     Type *Ty = Arg->getType()->getPointerElementType();
 
     uint64_t Size = DL.getTypeStoreSize(Ty);
@@ -580,7 +580,7 @@
                            DIExpression::ApplyOffset, -Offset);
     Arg->replaceAllUsesWith(NewArg);
     IRB.SetInsertPoint(cast<Instruction>(NewArg)->getNextNode());
-    IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlignment(), Size);
+    IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlign(), Size);
   }
 
   // Allocate space for every unsafe static AllocaInst on the unsafe stack.
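
Reviewer note, not part of the patch: a minimal caller-side sketch of the MaybeAlign accessors introduced above (LoadInst::getAlign(), Argument::getParamAlign()), assuming they land as declared in these hunks. The helper name copyToStackSlot and its setup are illustrative only; the ABI fallback mirrors the DataLayout::getValueOrABITypeAlignment() calls used elsewhere in this patch.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper: spill the value behind a load into a stack slot,
// carrying the alignment as MaybeAlign end to end.
static AllocaInst *copyToStackSlot(IRBuilder<> &IRB, const DataLayout &DL,
                                   LoadInst *LI, uint64_t Size) {
  // getAlign() may be empty (unknown alignment); resolve a definite Align
  // through the ABI fallback before forcing it onto the new slot.
  Align SlotAlign =
      DL.getValueOrABITypeAlignment(LI->getAlign(), LI->getType());
  AllocaInst *Slot = IRB.CreateAlloca(LI->getType());
  Slot->setAlignment(SlotAlign);
  // MaybeAlign flows straight into CreateMemCpy; the unsigned overloads are
  // the ones being deprecated by this patch.
  IRB.CreateMemCpy(Slot, MaybeAlign(SlotAlign), LI->getPointerOperand(),
                   LI->getAlign(), IRB.getInt64(Size));
  return Slot;
}

The point of returning MaybeAlign is that "no alignment attribute" stays distinguishable from "alignment 1" until the caller explicitly picks a fallback, as the ABI-alignment pattern above does.
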
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -3450,8 +3450,8 @@
                              LLVMValueRef Dst, unsigned DstAlign,
                              LLVMValueRef Src, unsigned SrcAlign,
                              LLVMValueRef Size) {
-  return wrap(unwrap(B)->CreateMemCpy(unwrap(Dst), DstAlign,
-                                      unwrap(Src), SrcAlign,
+  return wrap(unwrap(B)->CreateMemCpy(unwrap(Dst), MaybeAlign(DstAlign),
+                                      unwrap(Src), MaybeAlign(SrcAlign),
                                       unwrap(Size)));
 }
 
@@ -3459,8 +3459,8 @@
                               LLVMValueRef Dst, unsigned DstAlign,
                               LLVMValueRef Src, unsigned SrcAlign,
                               LLVMValueRef Size) {
-  return wrap(unwrap(B)->CreateMemMove(unwrap(Dst), DstAlign,
-                                       unwrap(Src), SrcAlign,
+  return wrap(unwrap(B)->CreateMemMove(unwrap(Dst), MaybeAlign(DstAlign),
+                                       unwrap(Src), MaybeAlign(SrcAlign),
                                        unwrap(Size)));
 }
diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp
--- a/llvm/lib/IR/Function.cpp
+++ b/llvm/lib/IR/Function.cpp
@@ -126,6 +126,11 @@
   return getParent()->getParamAlignment(getArgNo());
 }
 
+MaybeAlign Argument::getParamAlign() const {
+  assert(getType()->isPointerTy() && "Only pointers have alignments");
+  return getParent()->getParamAlign(getArgNo());
+}
+
 Type *Argument::getParamByValType() const {
   assert(getType()->isPointerTy() && "Only pointers have byval types");
   return getParent()->getParamByValType(getArgNo());
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1342,11 +1342,7 @@
          "Alignment is greater than MaximumAlignment!");
   setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                              (encode(Align) << 1));
-  if (Align)
-    assert(getAlignment() == Align->value() &&
-           "Alignment representation error!");
-  else
-    assert(getAlignment() == 0 && "Alignment representation error!");
+  assert(getAlign() == Align && "Alignment representation error!");
 }
 
 //===----------------------------------------------------------------------===//
@@ -1416,16 +1412,12 @@
   AssertOK();
 }
 
-void StoreInst::setAlignment(MaybeAlign Align) {
-  assert((!Align || *Align <= MaximumAlignment) &&
+void StoreInst::setAlignment(MaybeAlign Alignment) {
+  assert((!Alignment || *Alignment <= MaximumAlignment) &&
          "Alignment is greater than MaximumAlignment!");
   setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
-                             (encode(Align) << 1));
-  if (Align)
-    assert(getAlignment() == Align->value() &&
-           "Alignment representation error!");
-  else
-    assert(getAlignment() == 0 && "Alignment representation error!");
+                             (encode(Alignment) << 1));
+  assert(getAlign() == Alignment && "Alignment representation error!");
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -884,16 +884,16 @@
       continue;
     case Intrinsic::memcpy: {
       MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
-      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlignment(),
-                           MemCpy->getRawSource(), MemCpy->getSourceAlignment(),
+      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlign(),
+                           MemCpy->getRawSource(), MemCpy->getSourceAlign(),
                            MemCpy->getLength(), MemCpy->isVolatile());
       Intr->eraseFromParent();
       continue;
     }
     case Intrinsic::memmove: {
       MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
-      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlignment(),
-                            MemMove->getRawSource(), MemMove->getSourceAlignment(),
+      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlign(),
+                            MemMove->getRawSource(), MemMove->getSourceAlign(),
                             MemMove->getLength(), MemMove->isVolatile());
       Intr->eraseFromParent();
       continue;
diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
--- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -2275,14 +2275,12 @@
                : CondBuilder.CreateBitCast(LoadBasePtr, Int32PtrTy);
       NewCall = CondBuilder.CreateCall(Fn, {Op0, Op1, NumWords});
     } else {
-      NewCall = CondBuilder.CreateMemMove(StoreBasePtr, SI->getAlignment(),
-                                          LoadBasePtr, LI->getAlignment(),
-                                          NumBytes);
+      NewCall = CondBuilder.CreateMemMove(
+          StoreBasePtr, SI->getAlign(), LoadBasePtr, LI->getAlign(), NumBytes);
     }
   } else {
-    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlignment(),
-                                   LoadBasePtr, LI->getAlignment(),
-                                   NumBytes);
+    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlign(), LoadBasePtr,
+                                   LI->getAlign(), NumBytes);
     // Okay, the memcpy has been formed. Zap the original store and
     // anything that feeds into it.
     RecursivelyDeleteTriviallyDeadInstructions(SI, TLI);
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -2901,15 +2901,14 @@
   for (Argument &Arg : F.args()) {
     if (Arg.hasByValAttr()) {
       Type *Ty = Arg.getType()->getPointerElementType();
-      unsigned Alignment = Arg.getParamAlignment();
-      if (Alignment == 0)
-        Alignment = DL.getABITypeAlignment(Ty);
+      const Align Alignment =
+          DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
 
       AllocaInst *AI = IRB.CreateAlloca(
           Ty, nullptr,
           (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
               ".byval");
-      AI->setAlignment(Align(Alignment));
+      AI->setAlignment(Alignment);
       Arg.replaceAllUsesWith(AI);
 
       uint64_t AllocSize = DL.getTypeAllocSize(Ty);
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1634,8 +1634,8 @@
                          Size, ArgAlign);
       } else {
         const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
-        Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign.value(),
-                                           Base, CopyAlign.value(), Size);
+        Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
+                                           CopyAlign, Size);
         LLVM_DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
         (void)Cpy;
       }
@@ -3327,9 +3327,8 @@
                                    /*isStore*/ false)
                 .first;
 
-          Store = IRB.CreateMemCpy(ArgShadowBase,
-                                   Alignment ? Alignment->value() : 0, AShadowPtr,
-                                   Alignment ? Alignment->value() : 0, Size);
+          Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
+                                   Alignment, Size);
           // TODO(glider): need to copy origins.
         } else {
           Size = DL.getTypeAllocSize(A->getType());
@@ -3826,11 +3825,11 @@
             MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
                                    /*isStore*/ false);
 
-        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment.value(), ShadowPtr,
-                         kShadowTLSAlignment.value(), ArgSize);
+        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
+                         kShadowTLSAlignment, ArgSize);
         if (MS.TrackOrigins)
-          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment.value(), OriginPtr,
-                           kShadowTLSAlignment.value(), ArgSize);
+          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
+                           kShadowTLSAlignment, ArgSize);
       } else {
         ArgKind AK = classifyArgument(A);
         if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
@@ -3953,10 +3952,11 @@
           IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                         VAArgOverflowSize);
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
+      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
       if (MS.TrackOrigins) {
         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-        IRB.CreateMemCpy(VAArgTLSOriginCopy, 8, MS.VAArgOriginTLS, 8, CopySize);
+        IRB.CreateMemCpy(VAArgTLSOriginCopy, Align(8), MS.VAArgOriginTLS,
+                         Align(8), CopySize);
       }
     }
 
@@ -3979,12 +3979,11 @@
       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                  Alignment, /*isStore*/ true);
-      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment.value(), VAArgTLSCopy,
-                       Alignment.value(), AMD64FpEndOffset);
+      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
+                       AMD64FpEndOffset);
       if (MS.TrackOrigins)
-        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment.value(),
-                         VAArgTLSOriginCopy, Alignment.value(),
-                         AMD64FpEndOffset);
+        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
+                         Alignment, AMD64FpEndOffset);
       Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
       Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
@@ -3998,13 +3997,13 @@
                                  Alignment, /*isStore*/ true);
       Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                              AMD64FpEndOffset);
-      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment.value(), SrcPtr,
-                       Alignment.value(), VAArgOverflowSize);
+      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
+                       VAArgOverflowSize);
       if (MS.TrackOrigins) {
         SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                         AMD64FpEndOffset);
-        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment.value(), SrcPtr,
-                         Alignment.value(), VAArgOverflowSize);
+        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
+                         VAArgOverflowSize);
       }
     }
   }
@@ -4102,7 +4101,7 @@
       // If there is a va_start in this function, make a backup copy of
       // va_arg_tls somewhere in the function entry block.
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
+      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
     }
 
     // Instrument va_start.
@@ -4122,8 +4121,8 @@
       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                  Alignment, /*isStore*/ true);
-      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment.value(), VAArgTLSCopy,
-                       Alignment.value(), CopySize);
+      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
+                       CopySize);
     }
   }
 };
@@ -4294,7 +4293,7 @@
           IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
                         VAArgOverflowSize);
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
+      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
     }
 
     Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
@@ -4352,7 +4351,8 @@
                                GrRegSaveAreaShadowPtrOff);
     Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
 
-    IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, 8, GrSrcPtr, 8, GrCopySize);
+    IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
+                     GrCopySize);
 
     // Again, but for FP/SIMD values.
     Value *VrRegSaveAreaShadowPtrOff =
@@ -4370,7 +4370,8 @@
                                VrRegSaveAreaShadowPtrOff);
     Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
 
-    IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, 8, VrSrcPtr, 8, VrCopySize);
+    IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
+                     VrCopySize);
 
     // And finally for remaining arguments.
     Value *StackSaveAreaShadowPtr =
@@ -4382,8 +4383,8 @@
         IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                               IRB.getInt32(AArch64VAEndOffset));
 
-    IRB.CreateMemCpy(StackSaveAreaShadowPtr, 16, StackSrcPtr, 16,
-                     VAArgOverflowSize);
+    IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
+                     Align(16), VAArgOverflowSize);
     }
   }
 };
@@ -4443,8 +4444,8 @@
             MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
                                    /*isStore*/ false);
 
-        IRB.CreateMemCpy(Base, kShadowTLSAlignment.value(), AShadowPtr,
-                         kShadowTLSAlignment.value(), ArgSize);
+        IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
+                         kShadowTLSAlignment, ArgSize);
       }
     }
     VAArgOffset += alignTo(ArgSize, 8);
@@ -4541,7 +4542,7 @@
       // If there is a va_start in this function, make a backup copy of
       // va_arg_tls somewhere in the function entry block.
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
+      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
     }
 
     // Instrument va_start.
@@ -4561,8 +4562,8 @@
       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                  Alignment, /*isStore*/ true);
-      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment.value(), VAArgTLSCopy,
-                       Alignment.value(), CopySize);
+      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
+                       CopySize);
     }
   }
 };
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -808,15 +808,13 @@
     if (isa<MemCpyInst>(MTI)) {
       MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
-      B.CreateMemCpy(Dest, MTI->getDestAlignment(),
-                     Src, MTI->getSourceAlignment(),
+      B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                      MTI->getLength(),
                      false, // isVolatile
                      TBAA, TBAAStruct, ScopeMD, NoAliasMD);
     } else {
       assert(isa<MemMoveInst>(MTI));
-      B.CreateMemMove(Dest, MTI->getDestAlignment(),
-                      Src, MTI->getSourceAlignment(),
+      B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                       MTI->getLength(),
                       false, // isVolatile
                       TBAA, ScopeMD, NoAliasMD);
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1084,8 +1084,8 @@
   // If the load or store are atomic, then they must necessarily be unordered
   // by previous checks.
   if (!SI->isAtomic() && !LI->isAtomic())
-    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlignment(),
-                                   LoadBasePtr, LI->getAlignment(), NumBytes);
+    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlign(), LoadBasePtr,
+                                   LI->getAlign(), NumBytes);
   else {
     // We cannot allow unaligned ops for unordered load/store, so reject
     // anything where the alignment isn't at least the element size.
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -413,25 +413,21 @@
   return AMemSet;
 }
 
-static unsigned findStoreAlignment(const DataLayout &DL, const StoreInst *SI) {
-  unsigned StoreAlign = SI->getAlignment();
-  if (!StoreAlign)
-    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
-  return StoreAlign;
+static Align findStoreAlignment(const DataLayout &DL, const StoreInst *SI) {
+  return DL.getValueOrABITypeAlignment(MaybeAlign(SI->getAlignment()),
+                                       SI->getOperand(0)->getType());
 }
 
-static unsigned findLoadAlignment(const DataLayout &DL, const LoadInst *LI) {
-  unsigned LoadAlign = LI->getAlignment();
-  if (!LoadAlign)
-    LoadAlign = DL.getABITypeAlignment(LI->getType());
-  return LoadAlign;
+static Align findLoadAlignment(const DataLayout &DL, const LoadInst *LI) {
+  return DL.getValueOrABITypeAlignment(MaybeAlign(LI->getAlignment()),
+                                       LI->getType());
 }
 
-static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
-                                    const LoadInst *LI) {
-  unsigned StoreAlign = findStoreAlignment(DL, SI);
-  unsigned LoadAlign = findLoadAlignment(DL, LI);
-  return MinAlign(StoreAlign, LoadAlign);
+static Align findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
+                                 const LoadInst *LI) {
+  Align StoreAlign = findStoreAlignment(DL, SI);
+  Align LoadAlign = findLoadAlignment(DL, LI);
+  return commonAlignment(StoreAlign, LoadAlign);
 }
 
 // This method try to lift a store instruction before position P.
@@ -646,7 +642,7 @@
         LI, SI->getPointerOperand()->stripPointerCasts(),
         LI->getPointerOperand()->stripPointerCasts(),
         DL.getTypeStoreSize(SI->getOperand(0)->getType()),
-        findCommonAlignment(DL, SI, LI), C);
+        findCommonAlignment(DL, SI, LI).value(), C);
     if (changed) {
       MD->removeInstruction(SI);
       SI->eraseFromParent();
@@ -978,12 +974,12 @@
   // example we could be moving from movaps -> movq on x86.
   IRBuilder<> Builder(M);
   if (UseMemMove)
-    Builder.CreateMemMove(M->getRawDest(), M->getDestAlignment(),
-                          MDep->getRawSource(), MDep->getSourceAlignment(),
+    Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
+                          MDep->getRawSource(), MDep->getSourceAlign(),
                           M->getLength(), M->isVolatile());
   else
-    Builder.CreateMemCpy(M->getRawDest(), M->getDestAlignment(),
-                         MDep->getRawSource(), MDep->getSourceAlignment(),
+    Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
+                         MDep->getRawSource(), MDep->getSourceAlign(),
                          M->getLength(), M->isVolatile());
 
   // Remove the instruction we're replacing.
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -1254,7 +1254,8 @@
   // Always generate a memcpy of alignment 1 here because we don't know
   // the alignment of the src pointer. Other optimizations can infer
   // better alignment.
-  Builder.CreateMemCpy(Dst, /*DstAlign*/1, Src, /*SrcAlign*/1, Size);
+  Builder.CreateMemCpy(Dst, /*DstAlign*/ Align::None(), Src,
+                       /*SrcAlign*/ Align::None(), Size);
 }
 
 /// When inlining a call site that has a byval argument,
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -288,8 +288,9 @@
 
   // We have enough information to now generate the memcpy call to do the
   // concatenation for us. Make a memcpy to copy the nul byte with align = 1.
-  B.CreateMemCpy(CpyDst, 1, Src, 1,
-                 ConstantInt::get(DL.getIntPtrType(Src->getContext()), Len + 1));
+  B.CreateMemCpy(
+      CpyDst, Align::None(), Src, Align::None(),
+      ConstantInt::get(DL.getIntPtrType(Src->getContext()), Len + 1));
   return Dst;
 }
 
@@ -561,7 +562,7 @@
   // We have enough information to now generate the memcpy call to do the
   // copy for us. Make a memcpy to copy the nul byte with align = 1.
   CallInst *NewCI =
-      B.CreateMemCpy(Dst, 1, Src, 1,
+      B.CreateMemCpy(Dst, Align::None(), Src, Align::None(),
                      ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len));
   NewCI->setAttributes(CI->getAttributes());
   return Dst;
 }
 
@@ -589,7 +590,8 @@
   // We have enough information to now generate the memcpy call to do the
   // copy for us. Make a memcpy to copy the nul byte with align = 1.
-  CallInst *NewCI = B.CreateMemCpy(Dst, 1, Src, 1, LenV);
+  CallInst *NewCI =
+      B.CreateMemCpy(Dst, Align::None(), Src, Align::None(), LenV);
   NewCI->setAttributes(CI->getAttributes());
   return DstEnd;
 }
 
@@ -637,7 +639,8 @@
   Type *PT = Callee->getFunctionType()->getParamType(0);
   // strncpy(x, s, c) -> memcpy(align 1 x, align 1 s, c) [s and c are constant]
-  CallInst *NewCI = B.CreateMemCpy(Dst, 1, Src, 1, ConstantInt::get(DL.getIntPtrType(PT), Len));
+  CallInst *NewCI = B.CreateMemCpy(Dst, Align::None(), Src, Align::None(),
+                                   ConstantInt::get(DL.getIntPtrType(PT), Len));
   NewCI->setAttributes(CI->getAttributes());
   return Dst;
 }
 
@@ -1113,8 +1116,8 @@
     return nullptr;
 
   // memcpy(x, y, n) -> llvm.memcpy(align 1 x, align 1 y, n)
-  CallInst *NewCI =
-      B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1, Size);
+  CallInst *NewCI = B.CreateMemCpy(CI->getArgOperand(0), Align::None(),
+                                   CI->getArgOperand(1), Align::None(), Size);
   NewCI->setAttributes(CI->getAttributes());
   return CI->getArgOperand(0);
 }
 
@@ -1143,7 +1146,8 @@
   size_t Pos = SrcStr.find(StopChar->getSExtValue() & 0xFF);
   if (Pos == StringRef::npos) {
     if (N->getZExtValue() <= SrcStr.size()) {
-      B.CreateMemCpy(Dst, 1, Src, 1, CI->getArgOperand(3));
+      B.CreateMemCpy(Dst, Align::None(), Src, Align::None(),
+                     CI->getArgOperand(3));
       return Constant::getNullValue(CI->getType());
     }
     return nullptr;
@@ -1152,7 +1156,7 @@
   Value *NewN = ConstantInt::get(N->getType(),
                                  std::min(uint64_t(Pos + 1), N->getZExtValue()));
   // memccpy -> llvm.memcpy
-  B.CreateMemCpy(Dst, 1, Src, 1, NewN);
+  B.CreateMemCpy(Dst, Align::None(), Src, Align::None(), NewN);
   return Pos + 1 <= N->getZExtValue()
              ? B.CreateInBoundsGEP(B.getInt8Ty(), Dst, NewN)
              : Constant::getNullValue(CI->getType());
@@ -1162,7 +1166,8 @@
   Value *Dst = CI->getArgOperand(0);
   Value *N = CI->getArgOperand(2);
   // mempcpy(x, y, n) -> llvm.memcpy(align 1 x, align 1 y, n), x + n
-  CallInst *NewCI = B.CreateMemCpy(Dst, 1, CI->getArgOperand(1), 1, N);
+  CallInst *NewCI = B.CreateMemCpy(Dst, Align::None(), CI->getArgOperand(1),
+                                   Align::None(), N);
   NewCI->setAttributes(CI->getAttributes());
   return B.CreateInBoundsGEP(B.getInt8Ty(), Dst, N);
 }
 
@@ -1174,8 +1179,8 @@
     return nullptr;
 
   // memmove(x, y, n) -> llvm.memmove(align 1 x, align 1 y, n)
-  CallInst *NewCI =
-      B.CreateMemMove(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1, Size);
+  CallInst *NewCI = B.CreateMemMove(CI->getArgOperand(0), Align::None(),
+                                    CI->getArgOperand(1), Align::None(), Size);
   NewCI->setAttributes(CI->getAttributes());
   return CI->getArgOperand(0);
 }
 
@@ -2466,9 +2471,11 @@
     return nullptr; // we found a format specifier, bail out.
 
   // sprintf(str, fmt) -> llvm.memcpy(align 1 str, align 1 fmt, strlen(fmt)+1)
-  B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1,
-                 ConstantInt::get(DL.getIntPtrType(CI->getContext()),
-                                  FormatStr.size() + 1)); // Copy the null byte.
+  B.CreateMemCpy(
+      CI->getArgOperand(0), Align::None(), CI->getArgOperand(1),
+      Align::None(),
+      ConstantInt::get(DL.getIntPtrType(CI->getContext()),
+                       FormatStr.size() + 1)); // Copy the null byte.
   return ConstantInt::get(CI->getType(), FormatStr.size());
 }
 
@@ -2503,7 +2510,8 @@
     return nullptr;
 
   Value *IncLen =
      B.CreateAdd(Len, ConstantInt::get(Len->getType(), 1), "leninc");
-  B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(2), 1, IncLen);
+  B.CreateMemCpy(CI->getArgOperand(0), Align::None(), CI->getArgOperand(2),
+                 Align::None(), IncLen);
 
   // The sprintf result is the unincremented number of bytes in the string.
   return B.CreateIntCast(Len, CI->getType(), false);
@@ -2574,7 +2582,8 @@
     // snprintf(dst, size, fmt) -> llvm.memcpy(align 1 dst, align 1 fmt,
     // strlen(fmt)+1)
     B.CreateMemCpy(
-        CI->getArgOperand(0), 1, CI->getArgOperand(2), 1,
+        CI->getArgOperand(0), Align::None(), CI->getArgOperand(2),
+        Align::None(),
         ConstantInt::get(DL.getIntPtrType(CI->getContext()),
                          FormatStr.size() + 1)); // Copy the null byte.
     return ConstantInt::get(CI->getType(), FormatStr.size());
@@ -2615,7 +2624,8 @@
   else if (N < Str.size() + 1)
     return nullptr;
 
-  B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(3), 1,
+  B.CreateMemCpy(CI->getArgOperand(0), Align::None(), CI->getArgOperand(3),
+                 Align::None(),
                  ConstantInt::get(CI->getType(), Str.size() + 1));
 
   // The snprintf result is the unincremented number of bytes in the string.
@@ -2833,7 +2843,8 @@
 Value *LibCallSimplifier::optimizeBCopy(CallInst *CI, IRBuilder<> &B) {
   // bcopy(src, dst, n) -> llvm.memmove(dst, src, n)
-  return B.CreateMemMove(CI->getArgOperand(1), 1, CI->getArgOperand(0), 1,
+  return B.CreateMemMove(CI->getArgOperand(1), Align::None(),
+                         CI->getArgOperand(0), Align::None(),
                          CI->getArgOperand(2));
 }
 
@@ -3266,8 +3277,9 @@
 Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI,
                                                      IRBuilder<> &B) {
   if (isFortifiedCallFoldable(CI, 3, 2)) {
-    CallInst *NewCI = B.CreateMemCpy(
-        CI->getArgOperand(0), 1, CI->getArgOperand(1), 1, CI->getArgOperand(2));
+    CallInst *NewCI = B.CreateMemCpy(CI->getArgOperand(0), Align::None(),
+                                     CI->getArgOperand(1), Align::None(),
+                                     CI->getArgOperand(2));
     NewCI->setAttributes(CI->getAttributes());
     return CI->getArgOperand(0);
   }
@@ -3277,8 +3289,9 @@
 Value *FortifiedLibCallSimplifier::optimizeMemMoveChk(CallInst *CI,
                                                       IRBuilder<> &B) {
   if (isFortifiedCallFoldable(CI, 3, 2)) {
-    CallInst *NewCI = B.CreateMemMove(
-        CI->getArgOperand(0), 1, CI->getArgOperand(1), 1, CI->getArgOperand(2));
+    CallInst *NewCI = B.CreateMemMove(CI->getArgOperand(0), Align::None(),
+                                      CI->getArgOperand(1), Align::None(),
+                                      CI->getArgOperand(2));
     NewCI->setAttributes(CI->getAttributes());
     return CI->getArgOperand(0);
   }
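
Reviewer note, not part of the patch: two short sketches of how call sites migrate off the now-deprecated unsigned overloads, assuming the MaybeAlign-based builder methods behave as shown in the hunks above. reemitMemCpy and emitUnalignedCopy are hypothetical helpers, named only for illustration.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Rebuild an existing memcpy through the MaybeAlign API; getDestAlign() and
// getSourceAlign() forward the parameter attributes without flattening an
// unknown alignment to 0.
static CallInst *reemitMemCpy(IRBuilder<> &B, MemCpyInst *MC) {
  return B.CreateMemCpy(MC->getRawDest(), MC->getDestAlign(),
                        MC->getRawSource(), MC->getSourceAlign(),
                        MC->getLength(), MC->isVolatile());
}

// Where the old code spelled a literal alignment of 1, Align::None() is the
// explicit equivalent under the new API (it still means "align 1").
static CallInst *emitUnalignedCopy(IRBuilder<> &B, Value *Dst, Value *Src,
                                   Value *Size) {
  return B.CreateMemCpy(Dst, Align::None(), Src, Align::None(), Size);
}
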