diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h
--- a/llvm/include/llvm/IR/Metadata.h
+++ b/llvm/include/llvm/IR/Metadata.h
@@ -682,6 +682,10 @@
   // Shift tbaa.struct Metadata node to start off bytes later
   static MDNode *shiftTBAAStruct(MDNode *M, size_t off);
 
+  // Extend tbaa Metadata node to apply to a series of bytes of length len.
+  // A size of -1 denotes an unknown size.
+  static MDNode *extendToTBAA(MDNode *TBAA, ssize_t len);
+
   /// Given two sets of AAMDNodes that apply to the same pointer,
   /// give the best AAMDNodes that are compatible with both (i.e. a set of
   /// nodes whose allowable aliasing conclusions are a subset of those
@@ -708,6 +712,21 @@
     return Result;
   }
 
+  /// Create a new AAMDNode that describes this AAMDNode after extending it to
+  /// apply to a series of bytes of length Len. A size of -1 denotes an unknown
+  /// size.
+  AAMDNodes extendTo(ssize_t Len) const {
+    AAMDNodes Result;
+    Result.TBAA = TBAA ? extendToTBAA(TBAA, Len) : nullptr;
+    // tbaa.struct contains (offset, size, type) triples. Extending the length
+    // of the tbaa.struct doesn't require changing this (though more information
+    // could be provided by adding more triples at subsequent lengths).
+    Result.TBAAStruct = TBAAStruct;
+    Result.Scope = Scope;
+    Result.NoAlias = NoAlias;
+    return Result;
+  }
+
   /// Given two sets of AAMDNodes applying to potentially different locations,
   /// determine the best AAMDNodes that apply to both.
   AAMDNodes merge(const AAMDNodes &Other) const;
diff --git a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
--- a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -786,3 +786,36 @@
   }
   return MDNode::get(MD->getContext(), Sub);
 }
+
+MDNode *AAMDNodes::extendToTBAA(MDNode *MD, ssize_t Len) {
+  // Fast path if 0-length
+  if (Len == 0)
+    return nullptr;
+
+  // Regular TBAA is invariant of length, so we only need to consider
+  // struct-path TBAA.
+  if (!isStructPathTBAA(MD))
+    return MD;
+
+  TBAAStructTagNode Tag(MD);
+
+  // Only new format TBAA has a size
+  if (!Tag.isNewFormat())
+    return MD;
+
+  // If unknown size, drop the TBAA.
+  if (Len == -1)
+    return nullptr;
+
+  // Otherwise, create TBAA with the new Len
+  SmallVector<Metadata *, 4> NextNodes(MD->operands());
+  ConstantInt *PreviousSize = mdconst::extract<ConstantInt>(NextNodes[3]);
+
+  // Don't create a new MDNode if it is the same length.
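+  // (Editorial note: in the new format an access tag has the shape
+  //  !{BaseTy, AccessTy, i64 Offset, i64 Size}, so the size being rewritten
+  //  below is operand 3.)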
+  if (PreviousSize->equalsInt(Len))
+    return MD;
+
+  NextNodes[3] =
+      ConstantAsMetadata::get(ConstantInt::get(PreviousSize->getType(), Len));
+  return MDNode::get(MD->getContext(), NextNodes);
+}
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1172,8 +1172,15 @@
 
   CallInst *NewCall;
   if (SplatValue) {
-    NewCall = Builder.CreateMemSet(BasePtr, SplatValue, NumBytes,
-                                   MaybeAlign(StoreAlignment));
+    AAMDNodes AATags = TheStore->getAAMetadata();
+    if (auto *CI = dyn_cast<ConstantInt>(NumBytes))
+      AATags = AATags.extendTo(CI->getZExtValue());
+    else
+      AATags = AATags.extendTo(-1);
+
+    NewCall = Builder.CreateMemSet(
+        BasePtr, SplatValue, NumBytes, MaybeAlign(StoreAlignment),
+        /*isVolatile=*/false, AATags.TBAA, AATags.Scope, AATags.NoAlias);
   } else {
     // Everything is emitted in default address space
     Type *Int8PtrTy = DestInt8PtrTy;
@@ -1452,17 +1459,28 @@
   Value *NumBytes =
       Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());
 
+  AAMDNodes AATags = TheLoad->getAAMetadata();
+  AAMDNodes StoreAATags = TheStore->getAAMetadata();
+  AATags = AATags.merge(StoreAATags);
+  if (auto *CI = dyn_cast<ConstantInt>(NumBytes))
+    AATags = AATags.extendTo(CI->getZExtValue());
+  else
+    AATags = AATags.extendTo(-1);
+
   CallInst *NewCall = nullptr;
   // Check whether to generate an unordered atomic memcpy:
   // If the load or store are atomic, then they must necessarily be unordered
   // by previous checks.
   if (!TheStore->isAtomic() && !TheLoad->isAtomic()) {
     if (UseMemMove)
-      NewCall = Builder.CreateMemMove(StoreBasePtr, StoreAlign, LoadBasePtr,
-                                      LoadAlign, NumBytes);
+      NewCall = Builder.CreateMemMove(
+          StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign, NumBytes,
+          /*isVolatile=*/false, AATags.TBAA, AATags.Scope, AATags.NoAlias);
     else
-      NewCall = Builder.CreateMemCpy(StoreBasePtr, StoreAlign, LoadBasePtr,
-                                     LoadAlign, NumBytes);
+      NewCall =
+          Builder.CreateMemCpy(StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign,
+                               NumBytes, /*isVolatile=*/false, AATags.TBAA,
+                               AATags.TBAAStruct, AATags.Scope, AATags.NoAlias);
   } else {
     // For now don't support unordered atomic memmove.
     if (UseMemMove)
@@ -1486,7 +1504,8 @@
     // have an alignment but non-atomic loads/stores may not.
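+    // (Editorial note: the merged, length-extended AA metadata is attached to
+    //  the atomic memcpy as well; the tbaa.struct tag is forwarded unchanged.)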
     NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
         StoreBasePtr, StoreAlign.getValue(), LoadBasePtr, LoadAlign.getValue(),
-        NumBytes, StoreSize);
+        NumBytes, StoreSize, AATags.TBAA, AATags.TBAAStruct, AATags.Scope,
+        AATags.NoAlias);
   }
 
   NewCall->setDebugLoc(TheStore->getDebugLoc());
diff --git a/llvm/test/Transforms/LoopIdiom/memcpy-tbaa.ll b/llvm/test/Transforms/LoopIdiom/memcpy-tbaa.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopIdiom/memcpy-tbaa.ll
@@ -0,0 +1,185 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes="loop-idiom" < %s -S | FileCheck %s
+
+define void @looper(double* noalias nocapture readonly %M, double* noalias nocapture %out) {
+; CHECK-LABEL: @looper(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
+; CHECK-NEXT: [[M2:%.*]] = bitcast double* [[M:%.*]] to i8*
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false), !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
+; CHECK: for.body4:
+; CHECK-NEXT: [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
+; CHECK-NEXT: [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[J_020]], 1
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
+; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+;
+entry:
+  br label %for.body4
+
+for.body4: ; preds = %for.cond1.preheader, %for.body4
+  %j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
+  %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
+  %a0 = load double, double* %arrayidx, align 8, !tbaa !5
+  %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
+  store double %a0, double* %arrayidx8, align 8, !tbaa !5
+  %inc = add nuw nsw i64 %j.020, 1
+  %cmp2 = icmp ult i64 %j.020, 31
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup3
+  ret void
+}
+
+
+define void @looperBadMerge(double* noalias nocapture readonly %M, double* noalias nocapture %out) {
+; CHECK-LABEL: @looperBadMerge(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
+; CHECK-NEXT: [[M2:%.*]] = bitcast double* [[M:%.*]] to i8*
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false), !tbaa [[TBAAF:![0-9]+]]
+; CHECK-NOT: tbaa
+; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
+; CHECK: for.body4:
+; CHECK-NEXT: [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
+; CHECK-NEXT: [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[J_020]], 1
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
+; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+;
+entry:
+  br label %for.body4
+
+for.body4: ; preds = %for.cond1.preheader, %for.body4
+  %j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
+  %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
+  %a0 = load double, double* %arrayidx, align 8, !tbaa !5
+  %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
+  store double %a0, double* %arrayidx8, align 8, !tbaa !3
+  %inc = add nuw nsw i64 %j.020, 1
+  %cmp2 = icmp ult i64 %j.020, 31
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup3
+  ret void
+}
+
+define void @looperGoodMerge(double* noalias nocapture readonly %M, double* noalias nocapture %out) {
+; CHECK-LABEL: @looperGoodMerge(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
+; CHECK-NEXT: [[M2:%.*]] = bitcast double* [[M:%.*]] to i8*
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false)
+; CHECK-NOT: !tbaa
+; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
+; CHECK: for.body4:
+; CHECK-NEXT: [[J_020:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY4]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[M]], i64 [[J_020]]
+; CHECK-NEXT: [[A0:%.*]] = load double, double* [[ARRAYIDX]], align 8, !tbaa [[TBAA0]]
+; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[OUT]], i64 [[J_020]]
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[J_020]], 1
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[J_020]], 31
+; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+;
+entry:
+  br label %for.body4
+
+for.body4: ; preds = %for.cond1.preheader, %for.body4
+  %j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
+  %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
+  %a0 = load double, double* %arrayidx, align 8, !tbaa !5
+  %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
+  store double %a0, double* %arrayidx8, align 8
+  %inc = add nuw nsw i64 %j.020, 1
+  %cmp2 = icmp ult i64 %j.020, 31
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup3
+  ret void
+}
+
+define void @looperConstantTBAAStruct(double* nocapture noalias %out, double* nocapture noalias %in) {
+; CHECK-LABEL: @looperConstantTBAAStruct(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
+; CHECK-NEXT: [[IN1:%.*]] = bitcast double* [[IN:%.*]] to i8*
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[IN1]], i64 32, i1 false), !tbaa [[TBAA8:![0-9]+]]
+;
+entry:
+  br label %for.body4
+
+for.body4: ; preds = %for.cond1.preheader, %for.body4
+  %j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
+  %arrayidx = getelementptr inbounds double, double* %in, i64 %j.020
+  %a0 = load double, double* %arrayidx, align 8, !tbaa !10
+  %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
+  store double %a0, double* %arrayidx8, align 8, !tbaa !10
+  %inc = add nuw nsw i64 %j.020, 1
+  %cmp2 = icmp ult i64 %j.020, 3
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup3
+  ret void
+}
+
+define void @looperVarTBAAStruct(double* nocapture noalias %out, double* nocapture noalias %in, i64 %len) {
+; CHECK-LABEL: @looperVarTBAAStruct(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
+; CHECK-NEXT: [[IN1:%.*]] = bitcast double* [[IN:%.*]] to i8*
+; CHECK-NEXT: [[umax:%.*]] = call i64 @llvm.umax.i64(i64 %len, i64 1)
+; CHECK-NEXT: [[I0:%.*]] = shl nuw i64 [[umax]], 3
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[IN1]], i64 [[I0]], i1 false)
+; CHECK-NOT: !tbaa
+; CHECK-NEXT: br
+;
+entry:
+  br label %for.body4
+
+for.body4: ; preds = %for.cond1.preheader, %for.body4
+  %j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
+  %arrayidx = getelementptr inbounds double, double* %in, i64 %j.020
+  %a0 = load double, double* %arrayidx, align 8, !tbaa !10
+  %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
+  store double %a0, double* %arrayidx8, align 8, !tbaa !10
+  %inc = add nuw nsw i64 %j.020, 1
+  %cmp2 = icmp ult i64 %inc, %len
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup3
+  ret void
+}
+
+
+; CHECK: [[TBAA0]] = !{[[TBAA1:.+]], [[TBAA1]], i64 0}
+; CHECK: [[TBAA1]] = !{!"double", [[TBAA2:.+]], i64 0}
+; CHECK: [[TBAA2]] = !{!"omnipotent char", [[TBAA3:.+]], i64 0}
+; CHECK: [[TBAAF]] = !{[[TBAA2]], [[TBAA2]], i64 0}
+
+; CHECK: [[TBAA8]] = !{[[TBAA5:.+]], [[TBAA6:.+]], i64 0, i64 32}
+; CHECK: [[TBAA5]] = !{[[TBAA7:.+]], i64 32, !"_ZTS1A", [[TBAA6]], i64 0, i64 8, [[TBAA6]], i64 8, i64 8, [[TBAA6]], i64 16, i64 8, [[TBAA6]], i64 24, i64 8}
+; CHECK: [[TBAA7]] = !{[[TBAA3]], i64 0, !"omnipotent char"}
+; CHECK: [[TBAA6]] = !{[[TBAA7]], i64 8, !"double"}
+
+!3 = !{!4, !4, i64 0}
+!4 = !{!"float", !7, i64 0}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"double", !7, i64 0}
+!7 = !{!"omnipotent char", !8, i64 0}
+!8 = !{!"Simple C++ TBAA"}
+
+!15 = !{!8, i64 0, !"omnipotent char"}
+!17 = !{!15, i64 8, !"double"}
+!9 = !{!15, i64 32, !"_ZTS1A", !17, i64 0, i64 8, !17, i64 8, i64 8, !17, i64 16, i64 8, !17, i64 24, i64 8}
+!10 = !{!9, !17, i64 0, i64 8}
diff --git a/llvm/test/Transforms/LoopIdiom/memmove-tbaa.ll b/llvm/test/Transforms/LoopIdiom/memmove-tbaa.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopIdiom/memmove-tbaa.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes="loop-idiom" < %s -S | FileCheck %s
+
+define void @looper(double* nocapture %out) {
+; CHECK-LABEL: @looper(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
+; CHECK-NEXT: [[M:%.*]] = getelementptr double, double* %out, i32 16
+; CHECK-NEXT: [[M2:%.*]] = bitcast double* [[M]] to i8*
+; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false), !tbaa [[TBAA0:![0-9]+]]
+;
+entry:
+  %M = getelementptr double, double* %out, i32 16
+  br label %for.body4
+
+for.body4: ; preds = %for.cond1.preheader, %for.body4
+  %j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
+  %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
+  %a0 = load double, double* %arrayidx, align 8, !tbaa !5
+  %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
+  store double %a0, double* %arrayidx8, align 8, !tbaa !5
+  %inc = add nuw nsw i64 %j.020, 1
+  %cmp2 = icmp ult i64 %j.020, 31
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup3
+  ret void
+}
+
+
+define void @looperBadMerge(double* nocapture %out) {
+; CHECK-LABEL: @looperBadMerge(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
+; CHECK-NEXT: [[M:%.*]] = getelementptr double, double* %out, i32 16
+; CHECK-NEXT: [[M2:%.*]] = bitcast double* [[M]] to i8*
+; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false), !tbaa [[TBAAF:![0-9]+]]
+; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
+;
+entry:
+  %M = getelementptr double, double* %out, i32 16
+  br label %for.body4
+
+for.body4: ; preds = %for.cond1.preheader, %for.body4
+  %j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
+  %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
+  %a0 = load double, double* %arrayidx, align 8, !tbaa !5
+  %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
+  store double %a0, double* %arrayidx8, align 8, !tbaa !3
+  %inc = add nuw nsw i64 %j.020, 1
+  %cmp2 = icmp ult i64 %j.020, 31
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup3
+  ret void
+}
+
+define void @looperGoodMerge(double* nocapture %out) {
+; CHECK-LABEL: @looperGoodMerge(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[OUT1:%.*]] = bitcast double* [[OUT:%.*]] to i8*
+; CHECK-NEXT: [[M:%.*]] = getelementptr double, double* %out, i32 16
+; CHECK-NEXT: [[M2:%.*]] = bitcast double* [[M]] to i8*
+; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 8 [[OUT1]], i8* align 8 [[M2]], i64 256, i1 false)
+; CHECK-NOT: !tbaa
+; CHECK-NEXT: br label [[FOR_BODY4:%.*]]
+;
+entry:
+  %M = getelementptr double, double* %out, i32 16
+  br label %for.body4
+
+for.body4: ; preds = %for.cond1.preheader, %for.body4
+  %j.020 = phi i64 [ 0, %entry ], [ %inc, %for.body4 ]
+  %arrayidx = getelementptr inbounds double, double* %M, i64 %j.020
+  %a0 = load double, double* %arrayidx, align 8, !tbaa !5
+  %arrayidx8 = getelementptr inbounds double, double* %out, i64 %j.020
+  store double %a0, double* %arrayidx8, align 8
+  %inc = add nuw nsw i64 %j.020, 1
+  %cmp2 = icmp ult i64 %j.020, 31
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup3
+  ret void
+}
+
+; CHECK: [[TBAA0]] = !{[[TBAA1:.+]], [[TBAA1]], i64 0}
+; CHECK: [[TBAA1]] = !{!"double", [[TBAA2:.+]], i64 0}
+; CHECK: [[TBAA2]] = !{!"omnipotent char", [[TBAA3:.+]], i64 0}
+; CHECK: [[TBAAF]] = !{[[TBAA2]], [[TBAA2]], i64 0}
+
+!3 = !{!4, !4, i64 0}
+!4 = !{!"float", !7, i64 0}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"double", !7, i64 0}
+!7 = !{!"omnipotent char", !8, i64 0}
+!8 = !{!"Simple C++ TBAA"}
diff --git a/llvm/test/Transforms/LoopIdiom/memset-tbaa.ll b/llvm/test/Transforms/LoopIdiom/memset-tbaa.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopIdiom/memset-tbaa.ll
@@ -0,0 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes="loop-idiom" < %s -S | FileCheck %s
+
+
+define dso_local void @double_memset(i8* nocapture %p) {
+; CHECK-LABEL: @double_memset(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[P:%.*]], i8 0, i64 16, i1 false), !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+; CHECK: for.body:
+; CHECK-NEXT: [[I_07:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, %entry ]
+; CHECK-NEXT: [[INCDEC_PTR1:.+]] = getelementptr inbounds i8, i8* [[P]], i64 [[I_07]]
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_07]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 16
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]]
+;
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.07 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+  %ptr1 = getelementptr inbounds i8, i8* %p, i64 %i.07
+  store i8 0, i8* %ptr1, align 1, !tbaa !5
+  %inc = add nuw nsw i64 %i.07, 1
+  %exitcond.not = icmp eq i64 %inc, 16
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+
+define dso_local void @struct_memset(i8* nocapture %p) {
+; CHECK-LABEL: @struct_memset(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[P:%.*]], i8 0, i64 16, i1 false), !tbaa [[TBAA8:![0-9]+]]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+; CHECK: for.body:
+; CHECK-NEXT: [[I_07:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, %entry ]
+; CHECK-NEXT: [[INCDEC_PTR1:.+]] = getelementptr inbounds i8, i8* [[P]], i64 [[I_07]]
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_07]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 16
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]]
+;
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.07 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+  %ptr1 = getelementptr inbounds i8, i8* %p, i64 %i.07
+  store i8 0, i8* %ptr1, align 1, !tbaa !10
+  %inc = add nuw nsw i64 %i.07, 1
+  %exitcond.not = icmp eq i64 %inc, 16
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+define dso_local void @var_memset(i8* nocapture %p, i64 %len) {
+; CHECK-LABEL: @var_memset(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[P:%.*]], i8 0, i64 %len, i1 false)
+; CHECK-NOT: !tbaa
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+; CHECK: for.body:
+; CHECK-NEXT: [[I_07:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, %entry ]
+; CHECK-NEXT: [[INCDEC_PTR1:.+]] = getelementptr inbounds i8, i8* [[P]], i64 [[I_07]]
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_07]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], %len
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]]
+;
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.07 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+  %ptr1 = getelementptr inbounds i8, i8* %p, i64 %i.07
+  store i8 0, i8* %ptr1, align 1, !tbaa !10
+  %inc = add nuw nsw i64 %i.07, 1
+  %exitcond.not = icmp eq i64 %inc, %len
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+; CHECK: [[TBAA0]] = !{[[TBAA1:.+]], [[TBAA1]], i64 0}
+; CHECK: [[TBAA1]] = !{!"double", [[TBAA2:.+]], i64 0}
+; CHECK: [[TBAA2]] = !{!"omnipotent char", [[TBAA3:.+]], i64 0}
+
+; CHECK: [[TBAA8]] = !{[[TBAA5:.+]], [[TBAA6:.+]], i64 0, i64 16}
+; CHECK: [[TBAA5]] = !{[[TBAA7:.+]], i64 32, !"_ZTS1A", [[TBAA6]], i64 0, i64 8, [[TBAA6]], i64 8, i64 8, [[TBAA6]], i64 16, i64 8, [[TBAA6]], i64 24, i64 8}
+; CHECK: [[TBAA7]] = !{[[TBAA3]], i64 0, !"omnipotent char"}
+; CHECK: [[TBAA6]] = !{[[TBAA7]], i64 8, !"double"}
+
+!5 = !{!6, !6, i64 0}
+!6 = !{!"double", !7, i64 0}
+!7 = !{!"omnipotent char", !8, i64 0}
+!8 = !{!"Simple C++ TBAA"}
+
+!15 = !{!8, i64 0, !"omnipotent char"}
+!17 = !{!15, i64 8, !"double"}
+!9 = !{!15, i64 32, !"_ZTS1A", !17, i64 0, i64 8, !17, i64 8, i64 8, !17, i64 16, i64 8, !17, i64 24, i64 8}
+!10 = !{!9, !17, i64 0, i64 1}
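+
+; Editorial note (not checked by FileCheck): the i8 store's access tag !10
+; carries size 1. AAMDNodes::extendToTBAA rewrites only that trailing size
+; operand, so extending the tag to the 16 bytes written by the memset yields
+; !{!9, !17, i64 0, i64 16}, which is what [[TBAA8]] matches above.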