diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1164,8 +1164,10 @@
 
   CallInst *NewCall;
   if (SplatValue) {
+    AAMDNodes AATags;
+    TheStore->getAAMetadata(AATags);
     NewCall = Builder.CreateMemSet(BasePtr, SplatValue, NumBytes,
-                                   MaybeAlign(StoreAlignment));
+                                   MaybeAlign(StoreAlignment), /*isVolatile=*/false, AATags.TBAA, AATags.Scope, AATags.NoAlias);
   } else {
     // Everything is emitted in default address space
     Type *Int8PtrTy = DestInt8PtrTy;
@@ -1382,17 +1384,24 @@
   Value *NumBytes =
       Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());
 
+  AAMDNodes AATags;
+  TheLoad->getAAMetadata(AATags);
+  TheStore->getAAMetadata(AATags, /*Merge=*/ true);
+
   CallInst *NewCall = nullptr;
   // Check whether to generate an unordered atomic memcpy:
   //  If the load or store are atomic, then they must necessarily be unordered
   //  by previous checks.
   if (!TheStore->isAtomic() && !TheLoad->isAtomic()) {
     if (UseMemMove)
-      NewCall = Builder.CreateMemMove(StoreBasePtr, StoreAlign, LoadBasePtr,
-                                      LoadAlign, NumBytes);
+      NewCall = Builder.CreateMemMove(
+          StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign, NumBytes,
+          /*isVolatile=*/ false, AATags.TBAA, AATags.Scope, AATags.NoAlias);
     else
-      NewCall = Builder.CreateMemCpy(StoreBasePtr, StoreAlign, LoadBasePtr,
-                                     LoadAlign, NumBytes);
+      NewCall =
+          Builder.CreateMemCpy(StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign,
+                               NumBytes, /*isVolatile=*/ false, AATags.TBAA,
+                               AATags.TBAAStruct, AATags.Scope, AATags.NoAlias);
   } else {
     // For now don't support unordered atomic memmove.
     if (UseMemMove)
@@ -1416,7 +1425,8 @@
     //  have an alignment but non-atomic loads/stores may not.
     NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
         StoreBasePtr, StoreAlign.getValue(), LoadBasePtr, LoadAlign.getValue(),
-        NumBytes, StoreSize);
+        NumBytes, StoreSize, AATags.TBAA, AATags.TBAAStruct, AATags.Scope,
+        AATags.NoAlias);
   }
 
   NewCall->setDebugLoc(TheStore->getDebugLoc());
diff --git a/llvm/test/Transforms/LoopIdiom/memcpy-tbaa.ll b/llvm/test/Transforms/LoopIdiom/memcpy-tbaa.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopIdiom/memcpy-tbaa.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes="loop-idiom" < %s -S | FileCheck %s
+
+define void @looper(double* noalias nocapture readonly %M, double* noalias nocapture %out) {
+; CHECK-LABEL: @looper(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
+; CHECK-NEXT:    [[M2:%.*]] = bitcast double* [[M:%.*]] to i8*
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false), !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT:    br label [[FOR_BODY4:%.*]]
+; CHECK:       for.body4:
+; CHECK-NEXT:    [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
+; CHECK-NEXT:    [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[J_020]], 1
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
+; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %for.body4
+
+for.body4:                                        ; preds = %for.cond1.preheader, %for.body4
+  %j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
+  %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
+  %a0 = load double, double* %arrayidx, align 8, !tbaa !5
+  %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
+  store double %a0, double* %arrayidx8, align 8, !tbaa !5
+  %inc = add nuw nsw i64 %j.020, 1
+  %cmp2 = icmp ult i64 %j.020, 31
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup3
+  ret void
+}
+
+
+define void @looperBadMerge(double* noalias nocapture readonly %M, double* noalias nocapture %out) {
+; CHECK-LABEL: @looperBadMerge(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
+; CHECK-NEXT:    [[M2:%.*]] = bitcast double* [[M:%.*]] to i8*
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false), !tbaa [[TBAAF:![0-9]+]]
+; CHECK-NEXT:    br label [[FOR_BODY4:%.*]]
+; CHECK:       for.body4:
+; CHECK-NEXT:    [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
+; CHECK-NEXT:    [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[J_020]], 1
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
+; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %for.body4
+
+for.body4:                                        ; preds = %for.cond1.preheader, %for.body4
+  %j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
+  %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
+  %a0 = load double, double* %arrayidx, align 8, !tbaa !5
+  %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
+  store double %a0, double* %arrayidx8, align 8, !tbaa !3
+  %inc = add nuw nsw i64 %j.020, 1
+  %cmp2 = icmp ult i64 %j.020, 31
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup3
+  ret void
+}
+
+define void @looperGoodMerge(double* noalias nocapture readonly %M, double* noalias nocapture %out) {
+; CHECK-LABEL: @looperGoodMerge(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
+; CHECK-NEXT:    [[M2:%.*]] = bitcast double* [[M:%.*]] to i8*
+; CHECK-NOT: !tbaa
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false)
+; CHECK-NEXT:    br label [[FOR_BODY4:%.*]]
+; CHECK:       for.body4:
+; CHECK-NEXT:    [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
+; CHECK-NEXT:    [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[J_020]], 1
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
+; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %for.body4
+
+for.body4:                                        ; preds = %for.cond1.preheader, %for.body4
+  %j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
+  %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
+  %a0 = load double, double* %arrayidx, align 8, !tbaa !5
+  %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
+  store double %a0, double* %arrayidx8, align 8
+  %inc = add nuw nsw i64 %j.020, 1
+  %cmp2 = icmp ult i64 %j.020, 31
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup3
+  ret void
+}
+
+; CHECK: [[TBAA0]] = !{[[TBAA1:.+]], [[TBAA1]], i64 0}
+; CHECK: [[TBAA1]] = !{!"double", [[TBAA2:.+]], i64 0}
+; CHECK: [[TBAA2]] = !{!"omnipotent char", [[TBAA3:.+]], i64 0}
+
+; CHECK: [[TBAAF]] = !{[[TBAA2:.+]], [[TBAA2]], i64 0}
+
+!3 = !{!4, !4, i64 0}
+!4 = !{!"float", !7, i64 0}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"double", !7, i64 0}
+!7 = !{!"omnipotent char", !8, i64 0}
+!8 = !{!"Simple C++ TBAA"}
diff --git a/llvm/test/Transforms/LoopIdiom/memset-tbaa.ll b/llvm/test/Transforms/LoopIdiom/memset-tbaa.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopIdiom/memset-tbaa.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes="loop-idiom" < %s -S | FileCheck %s
+
+
+define dso_local void @double_memset(i8* nocapture %p) {
+; CHECK-LABEL: @double_memset(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[P:%.*]], i8 0, i64 16, i1 false), !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret void
+; CHECK:       for.body:
+; CHECK-NEXT:    [[I_07:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, %entry ]
+; CHECK-NEXT:    [[INCDEC_PTR1:.+]] = getelementptr inbounds i8, i8* [[P]], i64 [[I_07]]
+; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_07]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 16
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]]
+;
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.07 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+  %ptr1 = getelementptr inbounds i8, i8* %p, i64 %i.07
+  store i8 0, i8* %ptr1, align 1, !tbaa !5
+  %inc = add nuw nsw i64 %i.07, 1
+  %exitcond.not = icmp eq i64 %inc, 16
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+; CHECK: [[TBAA0]] = !{[[TBAA1:.+]], [[TBAA1]], i64 0}
+; CHECK: [[TBAA1]] = !{!"double", [[TBAA2:.+]], i64 0}
+; CHECK: [[TBAA2]] = !{!"omnipotent char", [[TBAA3:.+]], i64 0}
+
+!5 = !{!6, !6, i64 0}
+!6 = !{!"double", !7, i64 0}
+!7 = !{!"omnipotent char", !8, i64 0}
+!8 = !{!"Simple C++ TBAA"}