diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1382,17 +1382,24 @@
   Value *NumBytes =
       Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());
 
+  AAMDNodes AATags;
+  TheLoad->getAAMetadata(AATags);
+  TheStore->getAAMetadata(AATags, /*Merge*/ true);
+
   CallInst *NewCall = nullptr;
   // Check whether to generate an unordered atomic memcpy:
   //  If the load or store are atomic, then they must necessarily be unordered
   //  by previous checks.
   if (!TheStore->isAtomic() && !TheLoad->isAtomic()) {
     if (UseMemMove)
-      NewCall = Builder.CreateMemMove(StoreBasePtr, StoreAlign, LoadBasePtr,
-                                      LoadAlign, NumBytes);
+      NewCall = Builder.CreateMemMove(
+          StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign, NumBytes,
+          /*isVolatile*/ false, AATags.TBAA, AATags.Scope, AATags.NoAlias);
     else
-      NewCall = Builder.CreateMemCpy(StoreBasePtr, StoreAlign, LoadBasePtr,
-                                     LoadAlign, NumBytes);
+      NewCall =
+          Builder.CreateMemCpy(StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign,
+                               NumBytes, /*isVolatile*/ false, AATags.TBAA,
+                               AATags.TBAAStruct, AATags.Scope, AATags.NoAlias);
   } else {
     // For now don't support unordered atomic memmove.
     if (UseMemMove)
@@ -1416,7 +1423,8 @@
     //  have an alignment but non-atomic loads/stores may not.
     NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
         StoreBasePtr, StoreAlign.getValue(), LoadBasePtr, LoadAlign.getValue(),
-        NumBytes, StoreSize);
+        NumBytes, StoreSize, AATags.TBAA, AATags.TBAAStruct, AATags.Scope,
+        AATags.NoAlias);
   }
   NewCall->setDebugLoc(TheStore->getDebugLoc());
 
diff --git a/llvm/test/Transforms/LoopIdiom/memcpy-tbaa.ll b/llvm/test/Transforms/LoopIdiom/memcpy-tbaa.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopIdiom/memcpy-tbaa.ll
@@ -0,0 +1,42 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -loop-idiom < %s -S | FileCheck %s
+
+define void @looper(double* noalias nocapture readonly %M, double* noalias nocapture %out) {
+; CHECK-LABEL: @looper(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
+; CHECK-NEXT:    [[M2:%.*]] = bitcast double* [[M:%.*]] to i8*
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false), !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT:    br label [[FOR_BODY4:%.*]]
+; CHECK:       for.body4:
+; CHECK-NEXT:    [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
+; CHECK-NEXT:    [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[J_020]], 1
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
+; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %for.body4
+
+for.body4:                                        ; preds = %entry, %for.body4
+  %j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
+  %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
+  %a0 = load double, double* %arrayidx, align 8, !tbaa !5
+  %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
+  store double %a0, double* %arrayidx8, align 8, !tbaa !5
+  %inc = add nuw nsw i64 %j.020, 1
+  %cmp2 = icmp ult i64 %j.020, 31
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.body4
+  ret void
+}
+
+!5 = !{!6, !6, i64 0}
+!6 = !{!"double", !7, i64 0}
+!7 = !{!"omnipotent char", !8, i64 0}
+!8 = !{!"Simple C++ TBAA"}
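
Note for reviewers: the safety of the first hunk rests on the merge semantics of getAAMetadata. Below is a minimal sketch, not part of the patch; the helper name is invented, and it assumes the pre-LLVM-14 overload Instruction::getAAMetadata(AAMDNodes &, bool Merge), which is the overload this patch calls (newer trees instead return AAMDNodes by value and combine tags with AAMDNodes::merge()):

  // Sketch only; mirrors the pattern in the hunk above. Helper name is
  // hypothetical. Assumes Instruction::getAAMetadata(AAMDNodes &, bool Merge)
  // as available in trees of this patch's vintage.
  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/Metadata.h"

  using namespace llvm;

  static AAMDNodes mergeIdiomAATags(LoadInst *TheLoad, StoreInst *TheStore) {
    AAMDNodes AATags;
    // Seed with the load's !tbaa/!tbaa.struct/!alias.scope/!noalias tags.
    TheLoad->getAAMetadata(AATags);
    // Merge=true intersects with the store's tags: only metadata valid for
    // *both* instructions survives, so attaching the result to the new
    // memcpy/memmove can never claim stronger aliasing guarantees than the
    // original loop body had.
    TheStore->getAAMetadata(AATags, /*Merge=*/true);
    return AATags; // .TBAA/.TBAAStruct/.Scope/.NoAlias feed CreateMemCpy etc.
  }

Because the merge intersects rather than unions, the memcpy in memcpy-tbaa.ll carries exactly the !tbaa tag the load and store share; had the two instructions disagreed, the conflicting tag would have been dropped rather than propagated to the intrinsic.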