Index: include/llvm/IR/IRBuilder.h
===================================================================
--- include/llvm/IR/IRBuilder.h
+++ include/llvm/IR/IRBuilder.h
@@ -435,6 +435,28 @@
                          MDNode *ScopeTag = nullptr,
                          MDNode *NoAliasTag = nullptr);
 
+  /// \brief Create and insert an unordered-atomic memcpy between the specified
+  /// pointers.
+  ///
+  /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
+  /// specified, it will be added to the instruction. Likewise with alias.scope
+  /// and noalias tags.
+  CallInst *CreateElementUnorderedAtomicMemCpy(
+      Value *Dst, Value *Src, uint64_t Size, unsigned Align,
+      bool dst_unordered, bool src_unordered, uint8_t elementsize,
+      MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
+      MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr) {
+    return CreateElementUnorderedAtomicMemCpy(
+        Dst, Src, getInt64(Size), Align, dst_unordered, src_unordered,
+        elementsize, TBAATag, TBAAStructTag, ScopeTag, NoAliasTag);
+  }
+
+  CallInst *CreateElementUnorderedAtomicMemCpy(
+      Value *Dst, Value *Src, Value *Size, unsigned Align,
+      bool dst_unordered, bool src_unordered, uint8_t elementsize,
+      MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
+      MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
+
   /// \brief Create and insert a memmove between the specified
   /// pointers.
   ///
Index: lib/IR/IRBuilder.cpp
===================================================================
--- lib/IR/IRBuilder.cpp
+++ lib/IR/IRBuilder.cpp
@@ -134,6 +134,45 @@
   return CI;
 }
 
+CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
+    Value *Dst, Value *Src, Value *Size, unsigned Align, bool dst_unordered,
+    bool src_unordered, uint8_t elementsize, MDNode *TBAATag,
+    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
+  Dst = getCastedInt8PtrValue(Dst);
+  Src = getCastedInt8PtrValue(Src);
+
+  Value *Ops[] = {Dst,
+                  Src,
+                  Size,
+                  getInt32(Align),
+                  getInt1(false), // isVolatile
+                  getInt1(dst_unordered),
+                  getInt1(src_unordered),
+                  getInt8(elementsize)};
+  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
+  Module *M = BB->getParent()->getParent();
+  Value *TheFn = Intrinsic::getDeclaration(
+      M, Intrinsic::memcpy_element_unordered_atomic, Tys);
+
+  CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+  // Set the TBAA info if present.
+  if (TBAATag)
+    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+  // Set the TBAA struct info if present.
+  if (TBAAStructTag)
+    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
+
+  if (ScopeTag)
+    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+  if (NoAliasTag)
+    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+  return CI;
+}
+
 CallInst *IRBuilderBase::
 CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
               bool isVolatile, MDNode *TBAATag, MDNode *ScopeTag,
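For reference, here is a minimal sketch (not part of the patch) of how a client might call the new builder API. The helper name emitAtomicCopy is hypothetical, and the 4-byte alignment and element size are arbitrary assumptions for illustration:

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Hypothetical client of the API added above: emit an element-wise
// unordered-atomic copy of Len bytes from Src to Dst, where both the
// per-element loads and the per-element stores are unordered-atomic and
// each element is 4 bytes wide.
static CallInst *emitAtomicCopy(IRBuilder<> &B, Value *Dst, Value *Src,
                                Value *Len) {
  return B.CreateElementUnorderedAtomicMemCpy(
      Dst, Src, Len, /*Align=*/4, /*dst_unordered=*/true,
      /*src_unordered=*/true, /*elementsize=*/4);
}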
Index: lib/Transforms/Scalar/LoopIdiomRecognize.cpp
===================================================================
--- lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -77,6 +77,12 @@
                  "with -Os/-Oz"),
     cl::init(true), cl::Hidden);
 
+static cl::opt<bool> RecogUnorderedAtomicMemcpy(
+    "loop-idiom-unordered-atomic-memcpy",
+    cl::desc("Allow loop idiom recognition to find and insert unordered-atomic "
+             "memcpy intrinsics"),
+    cl::init(false), cl::Hidden);
+
 namespace {
 
 class LoopIdiomRecognize {
@@ -346,7 +352,8 @@ bool LoopIdiomRecognize::isLegalStore(StoreInst *SI, bool &ForMemset,
                                       bool &ForMemsetPattern, bool &ForMemcpy) {
   // Don't touch volatile stores.
-  if (!SI->isSimple())
+  if ((RecogUnorderedAtomicMemcpy && !SI->isUnordered()) ||
+      (!RecogUnorderedAtomicMemcpy && !SI->isSimple()))
     return false;
 
   // Don't convert stores of non-integral pointer types to memsets (which stores
@@ -416,7 +423,8 @@
 
   // The store must be feeding a non-volatile load.
   LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
-  if (!LI || !LI->isSimple())
+  if (!LI || (RecogUnorderedAtomicMemcpy && !LI->isUnordered()) ||
+      (!RecogUnorderedAtomicMemcpy && !LI->isSimple()))
     return false;
 
   // See if the pointer expression is an AddRec like {base,+,1} on the current
@@ -840,9 +848,8 @@
   Type *Int8PtrTy = DestInt8PtrTy;
 
   Module *M = TheStore->getModule();
-  Value *MSP =
-      M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
-                             Int8PtrTy, Int8PtrTy, IntPtr);
+  Value *MSP = M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
+                                      Int8PtrTy, Int8PtrTy, IntPtr);
   inferLibFuncAttributes(*M->getFunction("memset_pattern16"), *TLI);
 
   // Otherwise we should form a memset_pattern16. PatternValue is known to be
@@ -873,7 +880,11 @@
 /// for (i) A[i] = B[i];
 bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                     const SCEV *BECount) {
-  assert(SI->isSimple() && "Expected only non-volatile stores.");
+  if (!RecogUnorderedAtomicMemcpy)
+    assert(SI->isSimple() && "Expected only non-volatile stores.");
+  else
+    assert(SI->isUnordered() &&
+           "Expected only non-volatile, non-ordered stores.");
 
   Value *StorePtr = SI->getPointerOperand();
   const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
@@ -883,7 +894,11 @@
 
   // The store must be feeding a non-volatile load.
   LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
-  assert(LI->isSimple() && "Expected only non-volatile stores.");
+  if (!RecogUnorderedAtomicMemcpy)
+    assert(LI->isSimple() && "Expected only non-volatile loads.");
+  else
+    assert(LI->isUnordered() &&
+           "Expected only non-volatile, non-ordered loads.");
 
   // See if the pointer expression is an AddRec like {base,+,1} on the current
   // loop, which indicates a strided load. If we have something else, it's a
@@ -964,9 +979,19 @@
   Value *NumBytes =
       Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
 
-  CallInst *NewCall =
-      Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
-                           std::min(SI->getAlignment(), LI->getAlignment()));
+  unsigned Align = std::min(SI->getAlignment(), LI->getAlignment());
+  CallInst *NewCall = nullptr;
+  if (RecogUnorderedAtomicMemcpy && (SI->isAtomic() || LI->isAtomic())) {
+    // element.unordered.atomic is limited to an element size of 16 bytes,
+    // since library functions are only defined for element sizes of 1, 2, 4,
+    // 8, and 16. TODO: should the limit be min(platform register size, 16)?
+    if (StoreSize > 16)
+      return false;
+    NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
+        StoreBasePtr, LoadBasePtr, NumBytes, Align, SI->isAtomic(),
+        LI->isAtomic(), StoreSize);
+  } else {
+    NewCall = Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes, Align);
+  }
   NewCall->setDebugLoc(SI->getDebugLoc());
 
   DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
@@ -1092,10 +1117,9 @@
 
     Instruction *SubInst = cast<Instruction>(SubOneOp);
     ConstantInt *Dec = dyn_cast<ConstantInt>(SubInst->getOperand(1));
-    if (!Dec ||
-        !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
-          (SubInst->getOpcode() == Instruction::Add &&
-           Dec->isAllOnesValue()))) {
+    if (!Dec || !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
+                  (SubInst->getOpcode() == Instruction::Add &&
+                   Dec->isAllOnesValue()))) {
       return false;
     }
   }
@@ -1597,9 +1621,8 @@
   PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
 
   Builder.SetInsertPoint(LbCond);
-  Instruction *TcDec = cast<Instruction>(
-      Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
-                        "tcdec", false, true));
+  Instruction *TcDec = cast<Instruction>(Builder.CreateSub(
+      TcPhi, ConstantInt::get(Ty, 1), "tcdec", false, true));
 
   TcPhi->addIncoming(TripCnt, PreHead);
   TcPhi->addIncoming(TcDec, Body);
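As background for the widened legality checks: per the predicate semantics in llvm/IR/Instructions.h, isSimple() means neither volatile nor atomic, while isUnordered() additionally admits atomic accesses with unordered ordering, so isSimple() implies isUnordered(). A paraphrase of the gate this patch applies to each load and store (a sketch, not the actual pass code; the function name is made up):

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch of the legality gate: under the new flag, exactly one extra case
// is admitted -- atomic accesses with 'unordered' ordering. Volatile or
// more strongly ordered accesses are still rejected either way.
static bool loadIsLegalForIdiom(const LoadInst &LI, bool RecogUnordered) {
  return RecogUnordered ? LI.isUnordered() : LI.isSimple();
}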
Index: test/Transforms/LoopIdiom/unordered-atomic-memcpy.ll
===================================================================
--- /dev/null
+++ test/Transforms/LoopIdiom/unordered-atomic-memcpy.ll
@@ -0,0 +1,78 @@
+; RUN: opt -basicaa -loop-idiom -loop-idiom-unordered-atomic-memcpy < %s -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+
+;; memcpy.element.unordered.atomic formation (atomic load & store)
+define void @test1(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test1(
+; CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* %Dest, i8* %Base, i64 %Size, i32 1, i1 false, i1 true, i1 true, i8 1)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load atomic i8, i8* %I.0.014 unordered, align 1
+  store atomic i8 %V, i8* %DestI unordered, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
+
+;; memcpy.element.unordered.atomic formation (atomic store, normal load)
+define void @test2(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test2(
+; CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* %Dest, i8* %Base, i64 %Size, i32 1, i1 false, i1 true, i1 false, i8 1)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load i8, i8* %I.0.014, align 1
+  store atomic i8 %V, i8* %DestI unordered, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
+
+;; memcpy.element.unordered.atomic formation (normal store, atomic load)
+define void @test3(i64 %Size) nounwind ssp {
+; CHECK-LABEL: @test3(
+; CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* %Dest, i8* %Base, i64 %Size, i32 1, i1 false, i1 false, i1 true, i8 1)
+; CHECK-NOT: store
+; CHECK: ret void
+bb.nph:
+  %Base = alloca i8, i32 10000
+  %Dest = alloca i8, i32 10000
+  br label %for.body
+
+for.body:                                         ; preds = %bb.nph, %for.body
+  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
+  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
+  %V = load atomic i8, i8* %I.0.014 unordered, align 1
+  store i8 %V, i8* %DestI, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp eq i64 %indvar.next, %Size
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
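For intuition about where such loops come from: LLVM's unordered ordering targets Java-like memory models and is not expressible in C++ (memory_order_relaxed lowers to the strictly stronger monotonic ordering). With that caveat, a rough C++ analogue of the loop in @test1 is sketched below, purely to illustrate the loop shape being matched:

#include <atomic>
#include <cstddef>

// Rough source-level analogue of @test1: a unit-strided, element-wise copy
// whose loads and stores are atomic. NOTE: memory_order_relaxed is stronger
// than LLVM's 'unordered', so no C++ frontend would actually produce the
// exact IR in the test; this only illustrates the idiom's shape.
void copyBytes(std::atomic<unsigned char> *Dst,
               const std::atomic<unsigned char> *Src, std::size_t Size) {
  for (std::size_t I = 0; I != Size; ++I)
    Dst[I].store(Src[I].load(std::memory_order_relaxed),
                 std::memory_order_relaxed);
}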