diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1056,7 +1056,7 @@
   ValueMap<Value *, Value *> ShadowMap, OriginMap;
   std::unique_ptr<VarArgHelper> VAHelper;
   const TargetLibraryInfo *TLI;
-  Instruction *ActualFnStart;
+  Instruction *FnPrologueEnd;
 
   // The following flags disable parts of MSan instrumentation based on
   // exclusion list contents and command-line options.
@@ -1095,11 +1095,12 @@
     removeUnreachableBlocks(F);
 
     MS.initializeCallbacks(*F.getParent());
-    ActualFnStart = F.getEntryBlock().getFirstNonPHI();
+    FnPrologueEnd = IRBuilder<>(F.getEntryBlock().getFirstNonPHI())
+                        .CreateIntrinsic(Intrinsic::donothing, {}, {});
 
     if (MS.CompileKernel) {
-      IRBuilder<> IRB(ActualFnStart);
-      insertKmsanPrologue(IRB);
+      IRBuilder<> IRB(FnPrologueEnd);
+      FnPrologueEnd = insertKmsanPrologue(IRB);
     }
 
     LLVM_DEBUG(if (!InsertChecks) dbgs()
@@ -1107,6 +1108,11 @@
                << F.getName() << "'\n");
   }
 
+  bool isInPrologue(Instruction &I) {
+    return I.getParent() == FnPrologueEnd->getParent() &&
+           (&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));
+  }
+
   Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
     if (MS.TrackOrigins <= 1) return V;
     return IRB.CreateCall(MS.MsanChainOriginFn, V);
@@ -1269,7 +1275,8 @@
     LLVM_DEBUG(dbgs() << "DONE:\n" << F);
   }
 
-  void insertKmsanPrologue(IRBuilder<> &IRB) {
+  // Returns the last instruction in the new prologue
+  Instruction *insertKmsanPrologue(IRBuilder<> &IRB) {
     Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
     Constant *Zero = IRB.getInt32(0);
     MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
@@ -1288,6 +1295,8 @@
     MS.RetvalOriginTLS =
         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                       {Zero, IRB.getInt32(6)}, "retval_origin");
+
+    return cast<Instruction>(MS.RetvalOriginTLS);
   }
 
   /// Add MemorySanitizer instrumentation to a function.
@@ -1295,7 +1304,7 @@
     // Iterate all BBs in depth-first order and create shadow instructions
     // for all instructions (where applicable).
     // For PHI nodes we create dummy shadow PHIs which will be finalized later.
-    for (BasicBlock *BB : depth_first(ActualFnStart->getParent()))
+    for (BasicBlock *BB : depth_first(FnPrologueEnd->getParent()))
       visit(*BB);
 
     // Finalize PHI nodes.
@@ -1662,7 +1671,7 @@
       if (*ShadowPtr)
         return *ShadowPtr;
       Function *F = A->getParent();
-      IRBuilder<> EntryIRB(ActualFnStart);
+      IRBuilder<> EntryIRB(FnPrologueEnd);
       unsigned ArgOffset = 0;
       const DataLayout &DL = F->getParent()->getDataLayout();
       for (auto &FArg : F->args()) {
@@ -1880,9 +1889,8 @@
   void visit(Instruction &I) {
     if (I.getMetadata("nosanitize"))
       return;
-    // Don't want to visit if we're in the zone before ActualFnStart
-    if (I.getParent() == ActualFnStart->getParent() &&
-        I.comesBefore(ActualFnStart))
+    // Don't want to visit if we're in the prologue
+    if (isInPrologue(I))
       return;
     InstVisitor<MemorySanitizerVisitor>::visit(I);
   }
@@ -4309,7 +4317,7 @@
     if (!VAStartInstrumentationList.empty()) {
       // If there is a va_start in this function, make a backup copy of
       // va_arg_tls somewhere in the function entry block.
-      IRBuilder<> IRB(MSV.ActualFnStart);
+      IRBuilder<> IRB(MSV.FnPrologueEnd);
       VAArgOverflowSize =
           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
       Value *CopySize =
@@ -4455,7 +4463,7 @@
   void finalizeInstrumentation() override {
     assert(!VAArgSize && !VAArgTLSCopy &&
            "finalizeInstrumentation called twice");
-    IRBuilder<> IRB(MSV.ActualFnStart);
+    IRBuilder<> IRB(MSV.FnPrologueEnd);
     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
     Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
                                     VAArgSize);
@@ -4648,7 +4656,7 @@
     if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
-      IRBuilder<> IRB(MSV.ActualFnStart);
+      IRBuilder<> IRB(MSV.FnPrologueEnd);
       VAArgOverflowSize =
           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
       Value *CopySize =
@@ -4893,7 +4901,7 @@
   void finalizeInstrumentation() override {
     assert(!VAArgSize && !VAArgTLSCopy &&
            "finalizeInstrumentation called twice");
-    IRBuilder<> IRB(MSV.ActualFnStart);
+    IRBuilder<> IRB(MSV.FnPrologueEnd);
     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
     Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
                                     VAArgSize);
@@ -5212,7 +5220,7 @@
     if (!VAStartInstrumentationList.empty()) {
       // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
-      IRBuilder<> IRB(MSV.ActualFnStart);
+      IRBuilder<> IRB(MSV.FnPrologueEnd);
       VAArgOverflowSize =
           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
       Value *CopySize =
diff --git a/llvm/test/Instrumentation/MemorySanitizer/array_types.ll b/llvm/test/Instrumentation/MemorySanitizer/array_types.ll
--- a/llvm/test/Instrumentation/MemorySanitizer/array_types.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/array_types.ll
@@ -18,8 +18,8 @@
 ; CHECK-LABEL: @InsertValue(
 ; CHECK-DAG: [[Sx:%.*]] = load i32, i32* {{.*}}@__msan_param_tls to i32*)
-; CHECK: [[A:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[Sx]], 0
 ; CHECK-DAG: [[Sy:%.*]] = load i32, i32* {{.*}}@__msan_param_tls to i64), i64 8) to i32*)
+; CHECK: [[A:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[Sx]], 0
 ; CHECK: [[B:%.*]] = insertvalue [2 x i32] [[A]], i32 [[Sy]], 1
 ; CHECK: store [2 x i32] [[B]], [2 x i32]* {{.*}}@__msan_retval_tls
 ; CHECK: ret [2 x i32]
@@ -34,8 +34,8 @@
 ; CHECK-LABEL: @InsertValueDouble(
 ; CHECK-DAG: [[Sx:%.*]] = load i64, i64* getelementptr {{.*}}@__msan_param_tls, i32 0, i32 0
-; CHECK: [[A:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[Sx]], 0
 ; CHECK-DAG: [[Sy:%.*]] = load i64, i64* {{.*}}@__msan_param_tls to i64), i64 8) to i64*)
+; CHECK: [[A:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[Sx]], 0
 ; CHECK: [[B:%.*]] = insertvalue [2 x i64] [[A]], i64 [[Sy]], 1
 ; CHECK: store [2 x i64] [[B]], [2 x i64]* {{.*}}@__msan_retval_tls
 ; CHECK: ret [2 x double]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/attributes.ll b/llvm/test/Instrumentation/MemorySanitizer/attributes.ll
--- a/llvm/test/Instrumentation/MemorySanitizer/attributes.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/attributes.ll
@@ -49,3 +49,6 @@
 ; CHECK-NOT: writeonly
 ; CHECK-NOT: argmemonly
 ; CHECK-NOT: speculatable
+
+; CHECK: Function Attrs: nounwind readnone willreturn
+; CHECK-NEXT: declare void @llvm.donothing
diff --git a/llvm/test/Instrumentation/MemorySanitizer/clmul.ll b/llvm/test/Instrumentation/MemorySanitizer/clmul.ll
--- a/llvm/test/Instrumentation/MemorySanitizer/clmul.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/clmul.ll
@@ -19,8 +19,8 @@
 ; CHECK-LABEL: @clmul00
 ; CHECK: %[[S0:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> undef, <2 x i32> zeroinitializer
 ; CHECK: %[[S1:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> undef, <2 x i32> zeroinitializer
 ; CHECK: %[[SHUF1:.*]] = shufflevector <2 x i64> %[[S1]], <2 x i64> undef, <2 x i32> zeroinitializer
 ; CHECK: %[[SRET:.*]] = or <2 x i64> %[[SHUF0]], %[[SHUF1]]
 ; CHECK: store <2 x i64> %[[SRET]], <2 x i64>* {{.*}}@__msan_retval_tls
@@ -33,8 +33,8 @@
 ; CHECK-LABEL: @clmul10
 ; CHECK: %[[S0:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> undef, <2 x i32> zeroinitializer
 ; CHECK: %[[S1:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> undef, <2 x i32> zeroinitializer
 ; CHECK: %[[SHUF1:.*]] = shufflevector <2 x i64> %[[S1]], <2 x i64> undef, <2 x i32>
 ; CHECK: %[[SRET:.*]] = or <2 x i64> %[[SHUF0]], %[[SHUF1]]
 ; CHECK: store <2 x i64> %[[SRET]], <2 x i64>* {{.*}}@__msan_retval_tls
@@ -47,8 +47,8 @@
 ; CHECK-LABEL: @clmul11_256
 ; CHECK: %[[S0:.*]] = load <4 x i64>, <4 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[SHUF0:.*]] = shufflevector <4 x i64> %[[S0]], <4 x i64> undef, <4 x i32>
 ; CHECK: %[[S1:.*]] = load <4 x i64>, <4 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[SHUF0:.*]] = shufflevector <4 x i64> %[[S0]], <4 x i64> undef, <4 x i32>
 ; CHECK: %[[SHUF1:.*]] = shufflevector <4 x i64> %[[S1]], <4 x i64> undef, <4 x i32>
 ; CHECK: %[[SRET:.*]] = or <4 x i64> %[[SHUF0]], %[[SHUF1]]
 ; CHECK: store <4 x i64> %[[SRET]], <4 x i64>* {{.*}}@__msan_retval_tls
@@ -61,8 +61,8 @@
 ; CHECK-LABEL: @clmul01_512
 ; CHECK: %[[S0:.*]] = load <8 x i64>, <8 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[SHUF0:.*]] = shufflevector <8 x i64> %[[S0]], <8 x i64> undef, <8 x i32>
 ; CHECK: %[[S1:.*]] = load <8 x i64>, <8 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[SHUF0:.*]] = shufflevector <8 x i64> %[[S0]], <8 x i64> undef, <8 x i32>
 ; CHECK: %[[SHUF1:.*]] = shufflevector <8 x i64> %[[S1]], <8 x i64> undef, <8 x i32>
 ; CHECK: %[[SRET:.*]] = or <8 x i64> %[[SHUF0]], %[[SHUF1]]
 ; ORIGIN: %[[FLAT:.*]] = bitcast <8 x i64> %[[SHUF1]] to i512
diff --git a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
--- a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
@@ -72,14 +72,14 @@
 }
 
 ; CHECK-LABEL: @Load(
+; CHECK: %[[A:.*]] = load <4 x i64>, {{.*}}@__msan_param_tls to i64), i64 8)
+; CHECK-ORIGIN: %[[O:.*]] = load i32, {{.*}}@__msan_param_origin_tls to i64), i64 8)
 ; CHECK: %[[B:.*]] = ptrtoint <4 x double>* %p to i64
 ; CHECK: %[[C:.*]] = xor i64 %[[B]], 87960930222080
 ; CHECK: %[[D:.*]] = inttoptr i64 %[[C]] to <4 x i64>*
 ; CHECK-ORIGIN: %[[E:.*]] = add i64 %[[C]], 17592186044416
 ; CHECK-ORIGIN: %[[F:.*]] = and i64 %[[E]], -4
 ; CHECK-ORIGIN: %[[G:.*]] = inttoptr i64 %[[F]] to i32*
-; CHECK: %[[A:.*]] = load <4 x i64>, {{.*}}@__msan_param_tls to i64), i64 8)
-; CHECK-ORIGIN: %[[O:.*]] = load i32, {{.*}}@__msan_param_origin_tls to i64), i64 8)
 ; CHECK: %[[E:.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* %[[D]], i32 1, <4 x i1> %mask, <4 x i64> %[[A]])
 ; CHECK-ORIGIN: %[[H:.*]] = load i32, i32* %[[G]]
 ; CHECK-ORIGIN: %[[O2:.*]] = select i1 %{{.*}}, i32 %[[O]], i32 %[[H]]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
@@ -257,6 +257,7 @@
 define void @atomic_memcpy(i8* nocapture %x, i8* nocapture %y) nounwind {
   ; CHECK-LABEL: atomic_memcpy
+  ; CHECK-NEXT: call void @llvm.donothing
   ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
   ; CHECK-NEXT: ret void
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
@@ -265,6 +266,7 @@
 define void @atomic_memmove(i8* nocapture %x, i8* nocapture %y) nounwind {
   ; CHECK-LABEL: atomic_memmove
+  ; CHECK-NEXT: call void @llvm.donothing
   ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
   ; CHECK-NEXT: ret void
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
@@ -273,6 +275,7 @@
 define void @atomic_memset(i8* nocapture %x) nounwind {
   ; CHECK-LABEL: atomic_memset
+  ; CHECK-NEXT: call void @llvm.donothing
   ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
   ; CHECK-NEXT: ret void
   call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
@@ -389,6 +392,7 @@
 ; CHECK-LABEL: @IntToPtr
 ; CHECK: load i64, i64*{{.*}}__msan_param_tls
 ; CHECK-ORIGINS-NEXT: load i32, i32*{{.*}}__msan_param_origin_tls
+; CHECK-NEXT: call void @llvm.donothing
 ; CHECK-NEXT: inttoptr
 ; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
 ; CHECK: ret i8*
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
@@ -7,6 +7,7 @@
 define noundef i32 @NormalRet() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @NormalRet(
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: ret i32 123
 ;
   ret i32 123
@@ -14,6 +15,7 @@
 define i32 @PartialRet() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @PartialRet(
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
 ; CHECK-NEXT: store i32 0, i32* @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT: ret i32 123
@@ -23,6 +25,7 @@
 define noundef i32 @LoadedRet() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @LoadedRet(
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: [[P:%.*]] = inttoptr i64 0 to i32*
 ; CHECK-NEXT: [[O:%.*]] = load i32, i32* [[P]], align 4
 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i32* [[P]] to i64
@@ -35,7 +38,7 @@
 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0
 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof !0
 ; CHECK: 7:
-; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #1
+; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) [[ATTR2:#.*]]
 ; CHECK-NEXT: unreachable
 ; CHECK: 8:
 ; CHECK-NEXT: ret i32 [[O]]
@@ -48,6 +51,7 @@
 define void @NormalArg(i32 noundef %a) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @NormalArg(
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: [[P:%.*]] = inttoptr i64 0 to i32*
 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i32* [[P]] to i64
 ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
@@ -67,6 +71,7 @@
 ; CHECK-LABEL: @PartialArg(
 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: [[P:%.*]] = inttoptr i64 0 to i32*
 ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[P]] to i64
 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
@@ -90,8 +95,9 @@
 define void @CallNormal() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @CallNormal(
-; CHECK-NEXT: [[R:%.*]] = call i32 @NormalRet() #0
-; CHECK-NEXT: call void @NormalArg(i32 [[R]]) #0
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[R:%.*]] = call i32 @NormalRet() [[ATTR0:#.*]]
+; CHECK-NEXT: call void @NormalArg(i32 [[R]]) [[ATTR0]]
 ; CHECK-NEXT: ret void
 ;
   %r = call i32 @NormalRet() nounwind uwtable sanitize_memory
@@ -101,6 +107,7 @@
 define void @CallWithLoaded() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @CallWithLoaded(
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: [[P:%.*]] = inttoptr i64 0 to i32*
 ; CHECK-NEXT: [[O:%.*]] = load i32, i32* [[P]], align 4
 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i32* [[P]] to i64
@@ -113,10 +120,10 @@
 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0
 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof !0
 ; CHECK: 7:
-; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #1
+; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) [[ATTR2]]
 ; CHECK-NEXT: unreachable
 ; CHECK: 8:
-; CHECK-NEXT: call void @NormalArg(i32 [[O]]) #0
+; CHECK-NEXT: call void @NormalArg(i32 [[O]]) [[ATTR0]]
 ; CHECK-NEXT: ret void
 ;
   %p = inttoptr i64 0 to i32 *
@@ -127,13 +134,14 @@
 define void @CallPartial() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @CallPartial(
+; CHECK-NEXT: call void @llvm.donothing()
 ; CHECK-NEXT: store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT: [[R:%.*]] = call i32 @PartialRet() #0
+; CHECK-NEXT: [[R:%.*]] = call i32 @PartialRet() [[ATTR0]]
 ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT: store i32 [[_MSRET]], i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
 ; CHECK-NEXT: store i32 [[TMP1]], i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
-; CHECK-NEXT: call void @PartialArg(i32 [[R]]) #0
+; CHECK-NEXT: call void @PartialArg(i32 [[R]]) [[ATTR0]]
 ; CHECK-NEXT: ret void
 ;
   %r = call i32 @PartialRet() nounwind uwtable sanitize_memory
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
@@ -45,11 +45,11 @@
 ; CHECK: [[BASE:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
 ; CHECK: [[SHADOW_PTR:%[a-z0-9_]+]] = inttoptr {{.*}} [[BASE]]
 ; CHECK: [[SHADOW:%[a-z0-9]+]] = load i64, i64* [[SHADOW_PTR]]
+; CHECK: [[BASE2:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
 ; Load the shadow of %p and check it
 ; CHECK: icmp ne i64 [[SHADOW]]
 ; CHECK: br i1
 ; CHECK: {{^[0-9]+}}:
-; CHECK: [[BASE2:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
 ; CHECK: @__msan_metadata_ptr_for_store_1(i8* %p)
 ; CHECK: store i8
 ; If the new shadow is non-zero, jump to __msan_chain_origin()
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll
@@ -63,6 +63,7 @@
 ; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
 ; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
 ; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
+; CHECK-NEXT: call void @llvm.donothing
 ; CHECK-NEXT: = or <8 x i16>
 ; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
 ; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
diff --git a/llvm/test/Instrumentation/MemorySanitizer/unsized_type.ll b/llvm/test/Instrumentation/MemorySanitizer/unsized_type.ll
--- a/llvm/test/Instrumentation/MemorySanitizer/unsized_type.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/unsized_type.ll
@@ -18,6 +18,7 @@
 ; CHECK: define void @foo
 ; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @llvm.donothing
 ; CHECK-NEXT: %id = call token @llvm.coro.id
 ; CHECK-NEXT: call i1 @llvm.coro.alloc(token %id)
 ; CHECK-NEXT: ret void
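---

Note, not part of the patch: a minimal sketch of the IR shape this change aims for in a plain userspace function, assuming illustrative names and the usual parameter-TLS layout; nothing below is copied from the tests above. Parameter shadow loads emitted via getShadow() land ahead of the marker, the llvm.donothing call ends the prologue, and isInPrologue() keeps visit() from instrumenting anything at or before it.

    @__msan_param_tls = external thread_local(initialexec) global [100 x i64]
    declare void @llvm.donothing()

    define i32 @use(i32 %x) sanitize_memory {
    entry:
      ; prologue: parameter shadow is read from MSan TLS (hypothetical name %x_shadow)
      %x_shadow = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
      ; end-of-prologue marker inserted by the pass; the visitor skips this
      ; call and everything above it
      call void @llvm.donothing()
      ; ...instrumentation of the function body continues from here...
      ret i32 %x
    }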