diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -9714,9 +9714,8 @@
   return MaskVec;
 }

-static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
-                                 ArrayRef<Value *> Ops,
-                                 unsigned Align) {
+static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+                                 Align Alignment) {
   // Cast the pointer to right type.
   Value *Ptr = CGF.Builder.CreateBitCast(
       Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType()));
@@ -9724,7 +9723,7 @@
   Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                    Ops[1]->getType()->getVectorNumElements());

-  return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
+  return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
 }

 static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
@@ -10592,12 +10591,12 @@
   case X86::BI__builtin_ia32_storedquqi512_mask:
   case X86::BI__builtin_ia32_storeupd512_mask:
   case X86::BI__builtin_ia32_storeups512_mask:
-    return EmitX86MaskedStore(*this, Ops, 1);
+    return EmitX86MaskedStore(*this, Ops, Align::None());

   case X86::BI__builtin_ia32_storess128_mask:
-  case X86::BI__builtin_ia32_storesd128_mask: {
-    return EmitX86MaskedStore(*this, Ops, 1);
-  }
+  case X86::BI__builtin_ia32_storesd128_mask:
+    return EmitX86MaskedStore(*this, Ops, Align::None());
+
   case X86::BI__builtin_ia32_vpopcntb_128:
   case X86::BI__builtin_ia32_vpopcntd_128:
   case X86::BI__builtin_ia32_vpopcntq_128:
@@ -10708,11 +10707,11 @@
   case X86::BI__builtin_ia32_movdqa32store512_mask:
   case X86::BI__builtin_ia32_movdqa64store512_mask:
   case X86::BI__builtin_ia32_storeaps512_mask:
-  case X86::BI__builtin_ia32_storeapd512_mask: {
-    unsigned Align =
-      getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
-    return EmitX86MaskedStore(*this, Ops, Align);
-  }
+  case X86::BI__builtin_ia32_storeapd512_mask:
+    return EmitX86MaskedStore(
+        *this, Ops,
+        getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
+
   case X86::BI__builtin_ia32_loadups128_mask:
   case X86::BI__builtin_ia32_loadups256_mask:
   case X86::BI__builtin_ia32_loadups512_mask:
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -157,6 +157,10 @@
     return Val.getSExtValue();
   }

+  /// Return the constant as an llvm::Align. Note that this method can assert
+  /// if the value does not fit in 64 bits or is not a power of two.
+  inline Align getAlignValue() const { return Align(getZExtValue()); }
+
   /// A helper method that can be used to determine if the constant contained
   /// within is equal to a constant. This only works for very small values,
   /// because this is all that can be represented with all types.
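Note: a quick usage sketch of the new ConstantInt::getAlignValue() accessor
(illustrative only, not part of the patch; AlignArg is a hypothetical Value*
holding an intrinsic's alignment operand):

    // Before: unpack the raw integer and wrap it at every call site.
    unsigned Raw = cast<ConstantInt>(AlignArg)->getZExtValue();
    MaybeAlign MA(Raw);

    // After: obtain the Align directly. The Align constructor asserts that
    // the value is a non-zero power of two, so a malformed alignment operand
    // is caught at the point of extraction.
    Align A = cast<ConstantInt>(AlignArg)->getAlignValue();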
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -752,13 +752,21 @@
                                                Value *PassThru = nullptr,
                                                const Twine &Name = ""),
                             "Use the version that takes Align instead") {
-    return CreateMaskedLoad(Ptr, Align(Alignment), Mask, PassThru, Name);
+    return CreateMaskedLoad(Ptr, assumeAligned(Alignment), Mask, PassThru,
+                            Name);
   }
   CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
                              Value *PassThru = nullptr,
                              const Twine &Name = "");

   /// Create a call to Masked Store intrinsic
-  CallInst *CreateMaskedStore(Value *Val, Value *Ptr, unsigned Align,
+  LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateMaskedStore(Value *Val, Value *Ptr,
+                                                        unsigned Alignment,
+                                                        Value *Mask),
+                            "Use the version that takes Align instead") {
+    return CreateMaskedStore(Val, Ptr, assumeAligned(Alignment), Mask);
+  }
+
+  CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
                               Value *Mask);

   /// Create a call to Masked Gather intrinsic
diff --git a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
--- a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
@@ -849,39 +849,41 @@
                                                 bool &ModifiedDT) {
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
   if (II) {
-    unsigned Alignment;
     switch (II->getIntrinsicID()) {
     default:
       break;
-    case Intrinsic::masked_load: {
+    case Intrinsic::masked_load:
       // Scalarize unsupported vector masked load
-      Alignment = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
-      if (TTI->isLegalMaskedLoad(CI->getType(), MaybeAlign(Alignment)))
+      if (TTI->isLegalMaskedLoad(
+              CI->getType(),
+              cast<ConstantInt>(CI->getArgOperand(1))->getAlignValue()))
         return false;
       scalarizeMaskedLoad(CI, ModifiedDT);
       return true;
-    }
-    case Intrinsic::masked_store: {
-      Alignment = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
-      if (TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType(),
-                                  MaybeAlign(Alignment)))
+    case Intrinsic::masked_store:
+      if (TTI->isLegalMaskedStore(
+              CI->getArgOperand(0)->getType(),
+              cast<ConstantInt>(CI->getArgOperand(2))->getAlignValue()))
         return false;
       scalarizeMaskedStore(CI, ModifiedDT);
       return true;
-    }
-    case Intrinsic::masked_gather:
-      Alignment = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+    case Intrinsic::masked_gather: {
+      unsigned Alignment =
+          cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
       if (TTI->isLegalMaskedGather(CI->getType(), MaybeAlign(Alignment)))
         return false;
       scalarizeMaskedGather(CI, ModifiedDT);
       return true;
-    case Intrinsic::masked_scatter:
-      Alignment = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+    }
+    case Intrinsic::masked_scatter: {
+      unsigned Alignment =
+          cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
       if (TTI->isLegalMaskedScatter(CI->getArgOperand(0)->getType(),
                                     MaybeAlign(Alignment)))
         return false;
       scalarizeMaskedScatter(CI, ModifiedDT);
       return true;
+    }
     case Intrinsic::masked_expandload:
       if (TTI->isLegalMaskedExpandLoad(CI->getType()))
         return false;
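Note: for readers tracking the migration pattern, the deprecated unsigned
overloads forward through assumeAligned(), which maps the legacy "0 means
unknown alignment" convention onto Align(1). A hypothetical caller migrates
as follows (sketch only; Builder, Val, Ptr, and Mask are assumed to exist
with suitable types):

    // Deprecated: raw unsigned alignment; now emits a deprecation warning.
    Builder.CreateMaskedStore(Val, Ptr, 16u, Mask);

    // Preferred: an explicit llvm::Align carries the same information in the
    // type system and can never hold a non-power-of-two value.
    Builder.CreateMaskedStore(Val, Ptr, Align(16), Mask);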
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1237,18 +1237,19 @@
   // Cast the pointer to the right type.
   Ptr = Builder.CreateBitCast(Ptr,
                               llvm::PointerType::getUnqual(Data->getType()));
-  unsigned Align =
-    Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;
+  const Align Alignment =
+      Aligned ? Align(cast<VectorType>(Data->getType())->getBitWidth() / 8)
+              : Align::None();

   // If the mask is all ones just emit a regular store.
   if (const auto *C = dyn_cast<Constant>(Mask))
     if (C->isAllOnesValue())
-      return Builder.CreateAlignedStore(Data, Ptr, Align);
+      return Builder.CreateAlignedStore(Data, Ptr, Alignment);

   // Convert the mask from an integer type to a vector of i1.
   unsigned NumElts = Data->getType()->getVectorNumElements();
   Mask = getX86MaskVec(Builder, Mask, NumElts);

-  return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
+  return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
 }

 static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -487,19 +487,19 @@
 }

 /// Create a call to a Masked Store intrinsic.
-/// \p Val   - data to be stored,
-/// \p Ptr   - base pointer for the store
-/// \p Align - alignment of the destination location
-/// \p Mask  - vector of booleans which indicates what vector lanes should
-///            be accessed in memory
+/// \p Val       - data to be stored,
+/// \p Ptr       - base pointer for the store
+/// \p Alignment - alignment of the destination location
+/// \p Mask      - vector of booleans which indicates what vector lanes should
+///                be accessed in memory
 CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
-                                           unsigned Align, Value *Mask) {
+                                           Align Alignment, Value *Mask) {
   auto *PtrTy = cast<PointerType>(Ptr->getType());
   Type *DataTy = PtrTy->getElementType();
   assert(DataTy->isVectorTy() && "Ptr should point to a vector");
   assert(Mask && "Mask should not be all-ones (null)");
   Type *OverloadedTypes[] = { DataTy, PtrTy };
-  Value *Ops[] = { Val, Ptr, getInt32(Align), Mask };
+  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
   return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
 }

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1372,7 +1372,7 @@
   // on each element's most significant bit (the sign bit).
   Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);

-  IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);
+  IC.Builder.CreateMaskedStore(Vec, PtrCast, Align::None(), BoolMask);

   // 'Replace uses' doesn't work for stores. Erase the original masked store.
   IC.eraseInstFromFunction(II);
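Note: a minimal sketch of what the new overload emits (illustrative; assumes
an IRBuilder<> Builder plus Val, VecPtr, and Mask of matching vector and
pointer types):

    // The Align is lowered back to a raw i32 immediate inside the builder
    // (getInt32(Alignment.value())), so the textual IR is unchanged, e.g.:
    //   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %val,
    //        <4 x i32>* %ptr, i32 16, <4 x i1> %mask)
    CallInst *Store = Builder.CreateMaskedStore(Val, VecPtr, Align(16), Mask);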
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2904,7 +2904,7 @@
     IRBuilder<> IRB(&I);
     Value *V = I.getArgOperand(0);
     Value *Addr = I.getArgOperand(1);
-    const MaybeAlign Alignment(
+    const Align Alignment(
         cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
     Value *Mask = I.getArgOperand(3);
     Value *Shadow = getShadow(V);
@@ -2921,21 +2921,20 @@
       insertShadowCheck(Mask, &I);
     }

-    IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment ? Alignment->value() : 0,
-                          Mask);
+    IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);

     if (MS.TrackOrigins) {
       auto &DL = F.getParent()->getDataLayout();
       paintOrigin(IRB, getOrigin(V), OriginPtr,
                   DL.getTypeStoreSize(Shadow->getType()),
-                  llvm::max(Alignment, kMinOriginAlignment));
+                  std::max(Alignment, kMinOriginAlignment));
     }
   }

   bool handleMaskedLoad(IntrinsicInst &I) {
     IRBuilder<> IRB(&I);
     Value *Addr = I.getArgOperand(0);
-    const MaybeAlign Alignment(
+    const Align Alignment(
         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
     Value *Mask = I.getArgOperand(2);
     Value *PassThru = I.getArgOperand(3);
@@ -2945,7 +2944,7 @@
     if (PropagateShadow) {
       std::tie(ShadowPtr, OriginPtr) =
           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
-      setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, *Alignment, Mask,
+      setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, Alignment, Mask,
                                          getShadow(PassThru), "_msmaskedld"));
     } else {
       setShadow(&I, getCleanShadow(&I));
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2343,7 +2343,7 @@
         Value *ShuffledMask = Builder.CreateShuffleVector(
             BlockInMaskPart, Undefs, RepMask, "interleaved.mask");
         NewStoreInstr = Builder.CreateMaskedStore(
-            IVec, AddrParts[Part], Group->getAlignment(), ShuffledMask);
+            IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
       } else
         NewStoreInstr = Builder.CreateAlignedStore(IVec, AddrParts[Part],
@@ -2449,8 +2449,8 @@
       }
       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
       if (isMaskRequired)
-        NewSI = Builder.CreateMaskedStore(
-            StoredVal, VecPtr, Alignment.value(), BlockInMaskParts[Part]);
+        NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
+                                          BlockInMaskParts[Part]);
       else
         NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment.value());
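Note: the llvm::max to std::max change in MemorySanitizer works because Align
defines a total order; the llvm::max helper existed only to combine a
MaybeAlign with an Align. A minimal sketch (the values are illustrative, not
taken from MSan):

    const Align kMinOriginAlignment = Align(4); // assumed constant
    const Align Alignment(2);
    // Plain std::max now suffices once both operands are Align; the larger
    // alignment, Align(4), is selected here.
    const Align OriginAlignment = std::max(Alignment, kMinOriginAlignment);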