Index: include/llvm/Analysis/TargetTransformInfo.h
===================================================================
--- include/llvm/Analysis/TargetTransformInfo.h
+++ include/llvm/Analysis/TargetTransformInfo.h
@@ -705,6 +705,10 @@
   /// if false is returned.
   bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;

+  /// \returns The maximum element size, in bytes, for an element
+  /// unordered-atomic memory intrinsic.
+  unsigned getAtomicMemIntrinsicMaxElementSize() const;
+
   /// \returns A value which is the result of the given memory intrinsic. New
   /// instructions may be created to extract the result from the given intrinsic
   /// memory operation. Returns nullptr if the target cannot create a result
@@ -904,6 +908,7 @@
   virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
   virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                                   MemIntrinsicInfo &Info) = 0;
+  virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0;
   virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                    Type *ExpectedType) = 0;
   virtual bool areInlineCompatible(const Function *Caller,
@@ -1201,6 +1206,9 @@
                           MemIntrinsicInfo &Info) override {
     return Impl.getTgtMemIntrinsic(Inst, Info);
   }
+  unsigned getAtomicMemIntrinsicMaxElementSize() const override {
+    return Impl.getAtomicMemIntrinsicMaxElementSize();
+  }
   Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                            Type *ExpectedType) override {
     return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
Index: include/llvm/Analysis/TargetTransformInfoImpl.h
===================================================================
--- include/llvm/Analysis/TargetTransformInfoImpl.h
+++ include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -420,6 +420,15 @@
     return false;
   }

+  unsigned getAtomicMemIntrinsicMaxElementSize() const {
+    // Note for overrides: You must ensure for all element unordered-atomic
+    // memory intrinsics that all power-of-2 element sizes up to, and
+    // including, the return value of this method have a corresponding
+    // runtime lib call. These runtime lib call definitions can be found
+    // in RuntimeLibcalls.h.
+    return 4;
+  }
+
   Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                            Type *ExpectedType) {
     return nullptr;
Index: include/llvm/IR/IRBuilder.h
===================================================================
--- include/llvm/IR/IRBuilder.h
+++ include/llvm/IR/IRBuilder.h
@@ -435,6 +435,28 @@
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr);

+  /// \brief Create and insert an atomic memcpy between the specified
+  /// pointers.
+  ///
+  /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
+  /// specified, it will be added to the instruction. Likewise with alias.scope
+  /// and noalias tags.
+  CallInst *CreateElementAtomicMemCpy(
+      Value *Dst, Value *Src, uint64_t NumElements, uint32_t ElementSize,
+      MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
+      MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr) {
+    return CreateElementAtomicMemCpy(Dst, Src, getInt64(NumElements),
+                                     ElementSize, TBAATag, TBAAStructTag,
+                                     ScopeTag, NoAliasTag);
+  }
+
+  CallInst *CreateElementAtomicMemCpy(Value *Dst, Value *Src,
+                                      Value *NumElements, uint32_t ElementSize,
+                                      MDNode *TBAATag = nullptr,
+                                      MDNode *TBAAStructTag = nullptr,
+                                      MDNode *ScopeTag = nullptr,
+                                      MDNode *NoAliasTag = nullptr);
+
   /// \brief Create and insert a memmove between the specified
   /// pointers.
   ///
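For orientation, here is a minimal usage sketch of the two new overloads; the helper function and variable names are hypothetical, not part of this patch:

    // Sketch only: Dst/Src are pointer Values, Count is an i64 element count
    // already available in the function under construction.
    static void emitElementAtomicCopies(llvm::IRBuilderBase &B, llvm::Value *Dst,
                                        llvm::Value *Src, llvm::Value *Count) {
      // Compile-time element count: the uint64_t overload wraps the count in
      // getInt64() and forwards to the Value* overload.
      B.CreateElementAtomicMemCpy(Dst, Src, /*NumElements=*/128,
                                  /*ElementSize=*/4);
      // Runtime element count: the Value* overload emits the intrinsic call
      // directly (defined in lib/IR/IRBuilder.cpp below).
      B.CreateElementAtomicMemCpy(Dst, Src, Count, /*ElementSize=*/4);
    }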
Index: lib/Analysis/TargetTransformInfo.cpp
===================================================================
--- lib/Analysis/TargetTransformInfo.cpp
+++ lib/Analysis/TargetTransformInfo.cpp
@@ -464,6 +464,10 @@
   return TTIImpl->getTgtMemIntrinsic(Inst, Info);
 }

+unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
+  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
+}
+
 Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
     IntrinsicInst *Inst, Type *ExpectedType) const {
   return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
Index: lib/IR/IRBuilder.cpp
===================================================================
--- lib/IR/IRBuilder.cpp
+++ lib/IR/IRBuilder.cpp
@@ -134,6 +134,38 @@
   return CI;
 }

+CallInst *IRBuilderBase::CreateElementAtomicMemCpy(
+    Value *Dst, Value *Src, Value *NumElements, uint32_t ElementSize,
+    MDNode *TBAATag, MDNode *TBAAStructTag, MDNode *ScopeTag,
+    MDNode *NoAliasTag) {
+  Dst = getCastedInt8PtrValue(Dst);
+  Src = getCastedInt8PtrValue(Src);
+
+  Value *Ops[] = {Dst, Src, NumElements, getInt32(ElementSize)};
+  Type *Tys[] = {Dst->getType(), Src->getType()};
+  Module *M = BB->getParent()->getParent();
+  Value *TheFn =
+      Intrinsic::getDeclaration(M, Intrinsic::memcpy_element_atomic, Tys);
+
+  CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+  // Set the TBAA info if present.
+  if (TBAATag)
+    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+  // Set the TBAA Struct info if present.
+  if (TBAAStructTag)
+    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
+
+  if (ScopeTag)
+    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+  if (NoAliasTag)
+    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+  return CI;
+}
+
 CallInst *IRBuilderBase::
 CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
               bool isVolatile, MDNode *TBAATag, MDNode *ScopeTag,
Index: lib/Target/X86/X86TargetTransformInfo.h
===================================================================
--- lib/Target/X86/X86TargetTransformInfo.h
+++ lib/Target/X86/X86TargetTransformInfo.h
@@ -76,6 +76,8 @@
   int getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                 const SCEV *Ptr);

+  unsigned getAtomicMemIntrinsicMaxElementSize() const;
+
   int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                             ArrayRef<Type *> Tys, FastMathFlags FMF,
                             unsigned ScalarizationCostPassed = UINT_MAX);
Index: lib/Target/X86/X86TargetTransformInfo.cpp
===================================================================
--- lib/Target/X86/X86TargetTransformInfo.cpp
+++ lib/Target/X86/X86TargetTransformInfo.cpp
@@ -1383,6 +1383,8 @@
   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
 }

+unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
+
 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                       ArrayRef<Type *> Tys, FastMathFlags FMF,
                                       unsigned ScalarizationCostPassed) {
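At the IR level, the call these pieces produce has the following shape (this is the form the new tests check for; note that the align attributes on the two pointer arguments are attached by the LoopIdiomRecognize changes below via addParamAttr, not by the builder itself):

    call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 4 %Dest,
                                                    i8* align 4 %Base,
                                                    i64 %Size, i32 4)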
Index: lib/Transforms/Scalar/LoopIdiomRecognize.cpp
===================================================================
--- lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -116,6 +116,7 @@
     Memset,
     MemsetPattern,
     Memcpy,
+    UnorderedAtomicMemcpy,
     DontUse // Dummy retval never to be used. Allows catching errors in retval
             // handling.
   };
@@ -353,8 +354,12 @@

 LoopIdiomRecognize::LegalStoreKind
 LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
+  // Don't touch volatile stores.
-  if (!SI->isSimple())
+  if (SI->isVolatile())
+    return LegalStoreKind::None;
+  // We only want simple or unordered-atomic stores.
+  if (!SI->isUnordered())
     return LegalStoreKind::None;

   // Don't convert stores of non-integral pointer types to memsets (which stores
@@ -395,15 +400,18 @@
   Value *SplatValue = isBytewiseValue(StoredVal);
   Constant *PatternValue = nullptr;

+  // Note: memset and memset_pattern on unordered-atomic stores are not yet supported.
+  bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();
+
   // If we're allowed to form a memset, and the stored value would be
   // acceptable for memset, use it.
-  if (HasMemset && SplatValue &&
+  if (!UnorderedAtomic && HasMemset && SplatValue &&
       // Verify that the stored value is loop invariant. If not, we can't
       // promote the memset.
       CurLoop->isLoopInvariant(SplatValue)) {
     // It looks like we can use SplatValue.
     return LegalStoreKind::Memset;
-  } else if (HasMemsetPattern &&
+  } else if (!UnorderedAtomic && HasMemsetPattern &&
              // Don't create memset_pattern16s with address spaces.
              StorePtr->getType()->getPointerAddressSpace() == 0 &&
              (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
@@ -422,7 +430,12 @@
     // The store must be feeding a non-volatile load.
     LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
-    if (!LI || !LI->isSimple())
+
+    // Only allow non-volatile loads.
+    if (!LI || LI->isVolatile())
+      return LegalStoreKind::None;
+    // Only allow simple or unordered-atomic loads.
+    if (!LI->isUnordered())
       return LegalStoreKind::None;

     // See if the pointer expression is an AddRec like {base,+,1} on the current
@@ -438,7 +451,9 @@
       return LegalStoreKind::None;

     // Success. This store can be converted into a memcpy.
-    return LegalStoreKind::Memcpy;
+    UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
+    return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
+                           : LegalStoreKind::Memcpy;
   }
   // This store can't be transformed into a memset/memcpy.
   return LegalStoreKind::None;
@@ -469,6 +484,7 @@
       StoreRefsForMemsetPattern[Ptr].push_back(SI);
     } break;
   case LegalStoreKind::Memcpy:
+  case LegalStoreKind::UnorderedAtomicMemcpy:
     StoreRefsForMemcpy.push_back(SI);
     break;
   default:
@@ -882,7 +898,7 @@
 /// for (i) A[i] = B[i];
 bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                     const SCEV *BECount) {
-  assert(SI->isSimple() && "Expected only non-volatile stores.");
+  assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");

   Value *StorePtr = SI->getPointerOperand();
   const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
@@ -892,7 +908,7 @@

   // The store must be feeding a non-volatile load.
   LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
-  assert(LI->isSimple() && "Expected only non-volatile stores.");
+  assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");

   // See if the pointer expression is an AddRec like {base,+,1} on the current
   // loop, which indicates a strided load. If we have something else, it's a
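A note on the classification above: SI->isUnordered() && !SI->isSimple() holds exactly for a non-volatile atomic store whose ordering is unordered, so any store with a stronger ordering never reaches the memcpy path. In IR terms (mirroring test4/test5 in the new test file below; %Src and %Dst are placeholder names):

    ; accepted: unordered atomic, non-volatile
    %V = load atomic i8, i8* %Src unordered, align 1
    store atomic i8 %V, i8* %Dst unordered, align 1
    ; rejected by the isUnordered() checks: monotonic is stronger than unordered
    store atomic i8 %V, i8* %Dst monotonic, align 1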
@@ -966,16 +982,47 @@
   const SCEV *NumBytesS =
       SE->getAddExpr(BECount, SE->getOne(IntPtrTy), SCEV::FlagNUW);

-  if (StoreSize != 1)
-    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtrTy, StoreSize),
-                               SCEV::FlagNUW);
-  Value *NumBytes =
-      Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
+  unsigned Align = std::min(SI->getAlignment(), LI->getAlignment());
+  CallInst *NewCall = nullptr;
+  // Check whether to generate an unordered atomic memcpy:
+  //  If the load or store is atomic, then it must necessarily be unordered
+  //  by the previous checks.
+  if (!SI->isAtomic() && !LI->isAtomic()) {
+    if (StoreSize != 1)
+      NumBytesS = SE->getMulExpr(
+          NumBytesS, SE->getConstant(IntPtrTy, StoreSize), SCEV::FlagNUW);

-  CallInst *NewCall =
-      Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
-                           std::min(SI->getAlignment(), LI->getAlignment()));
+    Value *NumBytes =
+        Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
+
+    NewCall = Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes, Align);
+  } else {
+    // We cannot allow unaligned ops for unordered load/store, so reject
+    // anything where the alignment isn't at least the element size.
+    if (Align < StoreSize)
+      return false;
+
+    // If the element.atomic memcpy is not lowered into explicit
+    // loads/stores later, then it will be lowered into an element-size
+    // specific lib call. If the lib call doesn't exist for our store size, then
+    // we shouldn't generate the memcpy.
+    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
+      return false;
+
+    Value *NumElements =
+        Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
+
+    NewCall = Builder.CreateElementAtomicMemCpy(StoreBasePtr, LoadBasePtr,
+                                                NumElements, StoreSize);
+
+    // Propagate alignment info onto the pointer args. Note that unordered
+    // atomic loads/stores are *required* by the spec to have an alignment
+    // but non-atomic loads/stores may not.
+    NewCall->addParamAttr(0, Attribute::getWithAlignment(NewCall->getContext(),
+                                                         SI->getAlignment()));
+    NewCall->addParamAttr(1, Attribute::getWithAlignment(NewCall->getContext(),
+                                                         LI->getAlignment()));
+  }
   NewCall->setDebugLoc(SI->getDebugLoc());

   DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
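Note the different length operands in the hunk above: the plain memcpy path scales the trip count into a byte count, (BECount + 1) * StoreSize, while the element-atomic path passes the unscaled element count and carries the element size as a separate argument. For the i16 loop in test1 of the new noarch test below, the two forms would be roughly as follows (the %NumBytes name is hypothetical, and the memcpy signature shown is the pre-LLVM-7 form with an explicit alignment argument):

    ; plain memcpy: length is in bytes, i.e. 2 * %Size for StoreSize == 2
    call void @llvm.memcpy.p0i8.p0i8.i64(i8* %Dest, i8* %Base, i64 %NumBytes,
                                         i32 2, i1 false)
    ; element unordered-atomic memcpy: length is an element count; the element
    ; size (2 bytes) travels as the trailing i32
    call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 2 %Dest,
                                                    i8* align 2 %Base,
                                                    i64 %Size, i32 2)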
Index: test/Transforms/LoopIdiom/unordered-atomic-memcpy-noarch.ll
===================================================================
--- /dev/null
+++ test/Transforms/LoopIdiom/unordered-atomic-memcpy-noarch.ll
@@ -0,0 +1,77 @@
+; RUN: opt -basicaa -loop-idiom < %s -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+
+;; memcpy.atomic formation (atomic load & store) -- element size 2
+define void @test1(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test1(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 2 %Dest{{[0-9]*}}, i8* align 2 %Base{{[0-9]*}}, i64 %Size, i32 2)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i16, i32 10000
+  %Dest = alloca i16, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i16, i16* %Base, i64 %indvar
+  %DestI = getelementptr i16, i16* %Dest, i64 %indvar
+  %V = load atomic i16, i16* %I.0.014 unordered, align 2
+  store atomic i16 %V, i16* %DestI unordered, align 2
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation (atomic load & store) -- element size 4
+define void @test2(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test2(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 4 %Dest{{[0-9]*}}, i8* align 4 %Base{{[0-9]*}}, i64 %Size, i32 4)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i32, i32 10000
+  %Dest = alloca i32, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
+  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
+  %V = load atomic i32, i32* %I.0.014 unordered, align 4
+  store atomic i32 %V, i32* %DestI unordered, align 4
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (atomic load & store) -- element size 8
+define void @test3(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test3(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i64, i32 10000
+  %Dest = alloca i64, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i64, i64* %Base, i64 %indvar
+  %DestI = getelementptr i64, i64* %Dest, i64 %indvar
+  %V = load atomic i64, i64* %I.0.014 unordered, align 8
+  store atomic i64 %V, i64* %DestI unordered, align 8
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
Index: test/Transforms/LoopIdiom/unordered-atomic-memcpy.ll
===================================================================
--- /dev/null
+++ test/Transforms/LoopIdiom/unordered-atomic-memcpy.ll
@@ -0,0 +1,452 @@
+; RUN: opt -basicaa -loop-idiom < %s -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+;; memcpy.atomic formation (atomic load & store)
+define void @test1(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test1(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 1 %Dest, i8* align 1 %Base, i64 %Size, i32 1)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load atomic i8, i8* %I.0.014 unordered, align 1
+  store atomic i8 %V, i8* %DestI unordered, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation (atomic store, normal load)
+define void @test2(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test2(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 1 %Dest, i8* align 1 %Base, i64 %Size, i32 1)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load i8, i8* %I.0.014, align 1
+  store atomic i8 %V, i8* %DestI unordered, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (atomic store, normal load w/ no align)
+define void @test2b(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test2b(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load i8, i8* %I.0.014
+  store atomic i8 %V, i8* %DestI unordered, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (atomic store, normal load w/ bad align)
+define void @test2c(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test2c(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i32, i32 10000
+  %Dest = alloca i32, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
+  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
+  %V = load i32, i32* %I.0.014, align 2
+  store atomic i32 %V, i32* %DestI unordered, align 4
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (atomic store w/ bad align, normal load)
+define void @test2d(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test2d(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i32, i32 10000
+  %Dest = alloca i32, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
+  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
+  %V = load i32, i32* %I.0.014, align 4
+  store atomic i32 %V, i32* %DestI unordered, align 2
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+
+;; memcpy.atomic formation (normal store, atomic load)
+define void @test3(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test3(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 1 %Dest, i8* align 1 %Base, i64 %Size, i32 1)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load atomic i8, i8* %I.0.014 unordered, align 1
+  store i8 %V, i8* %DestI, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (normal store w/ no align, atomic load)
+define void @test3b(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test3b(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load atomic i8, i8* %I.0.014 unordered, align 1
+  store i8 %V, i8* %DestI
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (normal store, atomic load w/ bad align)
+define void @test3c(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test3c(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i32, i32 10000
+  %Dest = alloca i32, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
+  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
+  %V = load atomic i32, i32* %I.0.014 unordered, align 2
+  store i32 %V, i32* %DestI, align 4
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (normal store w/ bad align, atomic load)
+define void @test3d(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test3d(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i32, i32 10000
+  %Dest = alloca i32, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
+  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
+  %V = load atomic i32, i32* %I.0.014 unordered, align 4
+  store i32 %V, i32* %DestI, align 2
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+
+;; memcpy.atomic formation rejection (atomic load, ordered-atomic store)
+define void @test4(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test4(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load atomic i8, i8* %I.0.014 unordered, align 1
+  store atomic i8 %V, i8* %DestI monotonic, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (ordered-atomic load, unordered-atomic store)
+define void @test5(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test5(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load atomic i8, i8* %I.0.014 monotonic, align 1
+  store atomic i8 %V, i8* %DestI unordered, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation (atomic load & store) -- element size 2
+define void @test6(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test6(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 2 %Dest{{[0-9]*}}, i8* align 2 %Base{{[0-9]*}}, i64 %Size, i32 2)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i16, i32 10000
+  %Dest = alloca i16, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i16, i16* %Base, i64 %indvar
+  %DestI = getelementptr i16, i16* %Dest, i64 %indvar
+  %V = load atomic i16, i16* %I.0.014 unordered, align 2
+  store atomic i16 %V, i16* %DestI unordered, align 2
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation (atomic load & store) -- element size 4
+define void @test7(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test7(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 4 %Dest{{[0-9]*}}, i8* align 4 %Base{{[0-9]*}}, i64 %Size, i32 4)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i32, i32 10000
+  %Dest = alloca i32, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
+  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
+  %V = load atomic i32, i32* %I.0.014 unordered, align 4
+  store atomic i32 %V, i32* %DestI unordered, align 4
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation (atomic load & store) -- element size 8
+define void @test8(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test8(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 8 %Dest{{[0-9]*}}, i8* align 8 %Base{{[0-9]*}}, i64 %Size, i32 8)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i64, i32 10000
+  %Dest = alloca i64, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i64, i64* %Base, i64 %indvar
+  %DestI = getelementptr i64, i64* %Dest, i64 %indvar
+  %V = load atomic i64, i64* %I.0.014 unordered, align 8
+  store atomic i64 %V, i64* %DestI unordered, align 8
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation (atomic load & store) -- element size 16
+define void @test9(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test9(
+; CHECK: call void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* align 16 %Dest{{[0-9]*}}, i8* align 16 %Base{{[0-9]*}}, i64 %Size, i32 16)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i128, i32 10000
+  %Dest = alloca i128, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i128, i128* %Base, i64 %indvar
+  %DestI = getelementptr i128, i128* %Dest, i64 %indvar
+  %V = load atomic i128, i128* %I.0.014 unordered, align 16
+  store atomic i128 %V, i128* %DestI unordered, align 16
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+;; memcpy.atomic formation rejection (atomic load & store) -- element size 32
+define void @test10(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test10(
+; CHECK-NOT: call void @llvm.memcpy.element.atomic
+; CHECK: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i256, i32 10000
+  %Dest = alloca i256, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i256, i256* %Base, i64 %indvar
+  %DestI = getelementptr i256, i256* %Dest, i64 %indvar
+  %V = load atomic i256, i256* %I.0.014 unordered, align 32
+  store atomic i256 %V, i256* %DestI unordered, align 32
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+
+
+; Make sure that atomic memset doesn't get recognized by mistake
+define void @test_nomemset(i8* %Base, i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test_nomemset(
+; CHECK-NOT: call void @llvm.memset
+; CHECK: store
+; CHECK: ret void
+bb.nph:                                           ; preds = %entry
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  store atomic i8 0, i8* %I.0.014 unordered, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+; Verify that unordered memset_pattern isn't recognized.
+; This is a replica of test11_pattern from basic.ll
+define void @test_nomemset_pattern(i32* nocapture %P) nounwind ssp {
+; CHECK-LABEL: @test_nomemset_pattern(
+; CHECK-NEXT: entry:
+; CHECK-NOT: bitcast
+; CHECK-NOT: memset_pattern
+; CHECK: store atomic
+; CHECK: ret void
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
+  %arrayidx = getelementptr i32, i32* %P, i64 %indvar
+  store atomic i32 1, i32* %arrayidx unordered, align 4
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, 10000
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}