diff --git a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
--- a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
+++ b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
@@ -12,9 +12,11 @@
 #include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
 
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/Analysis/CFG.h"
 #include "llvm/Analysis/PostDominators.h"
 #include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/IntrinsicInst.h"
 
 namespace llvm {
 
@@ -46,17 +48,24 @@
     Callback(Ends[0]);
     return true;
   }
+  SmallPtrSet<BasicBlock *, 2> EndBlocks;
+  for (auto *End : Ends) {
+    EndBlocks.insert(End->getParent());
+  }
   SmallVector<Instruction *, 8> ReachableRetVec;
   unsigned NumCoveredExits = 0;
   for (auto *RI : RetVec) {
     if (!isPotentiallyReachable(Start, RI, nullptr, &DT))
       continue;
     ReachableRetVec.push_back(RI);
-    // TODO(fmayer): We don't support diamond shapes, where multiple lifetime
-    // ends together dominate the RI, but none of them does by itself.
-    // Check how often this happens and decide whether to support this here.
-    if (llvm::any_of(Ends, [&](auto *End) { return DT.dominates(End, RI); }))
+    // If there is an end in the same basic block as the return, we know for
+    // sure that the return is covered. Otherwise, we can check whether there
+    // is a way to reach the RI from the start of the lifetime without passing
+    // through an end.
+    if (EndBlocks.count(RI->getParent()) > 0 ||
+        !isPotentiallyReachable(Start, RI, &EndBlocks, &DT)) {
       ++NumCoveredExits;
+    }
   }
   // If there's a mix of covered and non-covered exits, just put the untag
   // on exits, so we avoid the redundancy of untagging twice.
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll b/llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll
--- a/llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll
@@ -34,5 +34,125 @@
   ret void
 }
 
+define void @diamond(i1 %cond) local_unnamed_addr sanitize_memtag {
+start:
+; CHECK-LABEL: start:
+  %a = alloca i8, i32 48, align 8
+  call void @llvm.lifetime.start.p0i8(i64 48, i8* nonnull %a)
+; CHECK: call void @llvm.aarch64.settag(i8* %a.tag, i64 48)
+  br i1 %cond, label %next0, label %next1
+
+next0:
+; CHECK-LABEL: next0:
+; CHECK: call void @llvm.aarch64.settag
+  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  br label %exit1
+
+next1:
+; CHECK-LABEL: next1:
+; CHECK: call void @llvm.aarch64.settag
+  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  br label %exit1
+
+exit1:
+; CHECK-LABEL: exit1:
+; CHECK-NOT: call void @llvm.aarch64.settag
+  ret void
+}
+
+define void @diamond_nocover(i1 %cond) local_unnamed_addr sanitize_memtag {
+start:
+; CHECK-LABEL: start:
+  %a = alloca i8, i32 48, align 8
+  call void @llvm.lifetime.start.p0i8(i64 48, i8* nonnull %a)
+; CHECK: call void @llvm.aarch64.settag(i8* %a.tag, i64 48)
+  br i1 %cond, label %next0, label %next1
+
+next0:
+; CHECK-LABEL: next0:
+; CHECK-NOT: llvm.lifetime.end
+  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  br label %exit1
+
+next1:
+; CHECK-LABEL: next1:
+; CHECK-NOT: llvm.lifetime.end
+  br label %exit1
+
+exit1:
+; CHECK-LABEL: exit1:
+; CHECK: call void @llvm.aarch64.settag
+  ret void
+}
+
+define void @diamond3(i1 %cond, i1 %cond1) local_unnamed_addr sanitize_memtag {
+start:
+; CHECK-LABEL: start:
+  %a = alloca i8, i32 48, align 8
+  call void @llvm.lifetime.start.p0i8(i64 48, i8* nonnull %a)
+; CHECK: call void @llvm.aarch64.settag(i8* %a.tag, i64 48)
+  br i1 %cond, label %next0, label %start1
+
+start1:
+  br i1 %cond1, label %next1, label %next2
+
+next0:
+; CHECK-LABEL: next0:
+; CHECK: call void @llvm.aarch64.settag
+  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  br label %exit1
+
+next1:
+; CHECK-LABEL: next1:
+; CHECK: call void @llvm.aarch64.settag
+  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  br label %exit1
+
+next2:
+; CHECK-LABEL: next2:
+; CHECK: call void @llvm.aarch64.settag
+  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  br label %exit1
+
+exit1:
+; CHECK-LABEL: exit1:
+; CHECK-NOT: call void @llvm.aarch64.settag
+  ret void
+}
+
+define void @diamond3_nocover(i1 %cond, i1 %cond1) local_unnamed_addr sanitize_memtag {
+start:
+; CHECK-LABEL: start:
+  %a = alloca i8, i32 48, align 8
+  call void @llvm.lifetime.start.p0i8(i64 48, i8* nonnull %a)
+; CHECK: call void @llvm.aarch64.settag(i8* %a.tag, i64 48)
+  br i1 %cond, label %next0, label %start1
+
+start1:
+  br i1 %cond1, label %next1, label %next2
+
+next0:
+; CHECK-LABEL: next0:
+; CHECK-NOT: call void @llvm.aarch64.settag
+  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  br label %exit1
+
+next1:
+; CHECK-LABEL: next1:
+; CHECK-NOT: call void @llvm.aarch64.settag
+  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  br label %exit1
+
+next2:
+; CHECK-LABEL: next2:
+; CHECK-NOT: call void @llvm.aarch64.settag
+  br label %exit1
+
+exit1:
+; CHECK-LABEL: exit1:
+; CHECK: call void @llvm.aarch64.settag
+  ret void
+}
+
 declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
 declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll
--- a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll
@@ -1037,6 +1037,291 @@
   ret i32 0
 }
 
+define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
+; X86-SCOPE-LABEL: @diamond_lifetime(
+; X86-SCOPE-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call i8* asm "", "=r,0"(i8* null)
+; X86-SCOPE-NEXT:    [[TMP1:%.*]] = alloca { i8, [15 x i8] }, align 16
+; X86-SCOPE-NEXT:    [[TMP2:%.*]] = bitcast { i8, [15 x i8] }* [[TMP1]] to i8*
+; X86-SCOPE-NEXT:    [[TMP3:%.*]] = call i8 @__hwasan_generate_tag()
+; X86-SCOPE-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i64
+; X86-SCOPE-NEXT:    [[TMP5:%.*]] = ptrtoint i8* [[TMP2]] to i64
+; X86-SCOPE-NEXT:    [[TMP6:%.*]] = shl i64 [[TMP4]], 57
+; X86-SCOPE-NEXT:    [[TMP7:%.*]] = or i64 [[TMP5]], [[TMP6]]
+; X86-SCOPE-NEXT:    [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP7]] to i8*
+; X86-SCOPE-NEXT:    call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull [[TMP2]])
+; X86-SCOPE-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP4]] to i8
+; X86-SCOPE-NEXT:    call void @__hwasan_tag_memory(i8* [[TMP2]], i8 [[TMP8]], i64 16)
+; X86-SCOPE-NEXT:    [[TMP9:%.*]] = tail call i1 (...) @cond()
+; X86-SCOPE-NEXT:    br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP11:%.*]]
+; X86-SCOPE:       10:
+; X86-SCOPE-NEXT:    call void @use(i8* nonnull [[ALLOCA_0_HWASAN]])
+; X86-SCOPE-NEXT:    call void @__hwasan_tag_memory(i8* [[TMP2]], i8 0, i64 16)
+; X86-SCOPE-NEXT:    call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull [[TMP2]])
+; X86-SCOPE-NEXT:    br label [[TMP12:%.*]]
+; X86-SCOPE:       11:
+; X86-SCOPE-NEXT:    call void @__hwasan_tag_memory(i8* [[TMP2]], i8 0, i64 16)
+; X86-SCOPE-NEXT:    call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull [[TMP2]])
+; X86-SCOPE-NEXT:    br label [[TMP12]]
+; X86-SCOPE:       12:
+; X86-SCOPE-NEXT:    ret i32 0
+;
+; X86-NOSCOPE-LABEL: @diamond_lifetime(
+; X86-NOSCOPE-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call i8* asm "", "=r,0"(i8* null)
+; X86-NOSCOPE-NEXT:    [[TMP1:%.*]] = alloca { i8, [15 x i8] }, align 16
+; X86-NOSCOPE-NEXT:    [[TMP2:%.*]] = bitcast { i8, [15 x i8] }* [[TMP1]] to i8*
+; X86-NOSCOPE-NEXT:    [[TMP3:%.*]] = call i8 @__hwasan_generate_tag()
+; X86-NOSCOPE-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i64
+; X86-NOSCOPE-NEXT:    [[TMP5:%.*]] = ptrtoint i8* [[TMP2]] to i64
+; X86-NOSCOPE-NEXT:    [[TMP6:%.*]] = shl i64 [[TMP4]], 57
+; X86-NOSCOPE-NEXT:    [[TMP7:%.*]] = or i64 [[TMP5]], [[TMP6]]
+; X86-NOSCOPE-NEXT:    [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP7]] to i8*
+; X86-NOSCOPE-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP4]] to i8
+; X86-NOSCOPE-NEXT:    call void @__hwasan_tag_memory(i8* [[TMP2]], i8 [[TMP8]], i64 16)
+; X86-NOSCOPE-NEXT:    [[TMP9:%.*]] = tail call i1 (...) @cond()
+; X86-NOSCOPE-NEXT:    br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP11:%.*]]
+; X86-NOSCOPE:       10:
+; X86-NOSCOPE-NEXT:    call void @use(i8* nonnull [[ALLOCA_0_HWASAN]])
+; X86-NOSCOPE-NEXT:    br label [[TMP12:%.*]]
+; X86-NOSCOPE:       11:
+; X86-NOSCOPE-NEXT:    br label [[TMP12]]
+; X86-NOSCOPE:       12:
+; X86-NOSCOPE-NEXT:    call void @__hwasan_tag_memory(i8* [[TMP2]], i8 0, i64 16)
+; X86-NOSCOPE-NEXT:    ret i32 0
+;
+; AARCH64-SCOPE-LABEL: @diamond_lifetime(
+; AARCH64-SCOPE-NEXT:    [[TMP1:%.*]] = call i8* @llvm.thread.pointer()
+; AARCH64-SCOPE-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* [[TMP1]], i32 48
+; AARCH64-SCOPE-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i64*
+; AARCH64-SCOPE-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 4
+; AARCH64-SCOPE-NEXT:    [[TMP5:%.*]] = ashr i64 [[TMP4]], 3
+; AARCH64-SCOPE-NEXT:    [[TMP6:%.*]] = call i64 @llvm.read_register.i64(metadata [[META1]])
+; AARCH64-SCOPE-NEXT:    [[TMP7:%.*]] = call i8* @llvm.frameaddress.p0i8(i32 0)
+; AARCH64-SCOPE-NEXT:    [[TMP8:%.*]] = ptrtoint i8* [[TMP7]] to i64
+; AARCH64-SCOPE-NEXT:    [[TMP9:%.*]] = shl i64 [[TMP8]], 44
+; AARCH64-SCOPE-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP4]] to i64*
+; AARCH64-SCOPE-NEXT:    [[TMP11:%.*]] = or i64 [[TMP6]], [[TMP9]]
+; AARCH64-SCOPE-NEXT:    store i64 [[TMP11]], i64* [[TMP10]], align 4
+; AARCH64-SCOPE-NEXT:    [[TMP12:%.*]] = ashr i64 [[TMP4]], 56
+; AARCH64-SCOPE-NEXT:    [[TMP13:%.*]] = shl nuw nsw i64 [[TMP12]], 12
+; AARCH64-SCOPE-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], -1
+; AARCH64-SCOPE-NEXT:    [[TMP15:%.*]] = add i64 [[TMP4]], 8
+; AARCH64-SCOPE-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], [[TMP14]]
+; AARCH64-SCOPE-NEXT:    store i64 [[TMP16]], i64* [[TMP3]], align 4
+; AARCH64-SCOPE-NEXT:    [[TMP17:%.*]] = or i64 [[TMP4]], 4294967295
+; AARCH64-SCOPE-NEXT:    [[HWASAN_SHADOW:%.*]] = add i64 [[TMP17]], 1
+; AARCH64-SCOPE-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[HWASAN_SHADOW]] to i8*
+; AARCH64-SCOPE-NEXT:    [[TMP19:%.*]] = alloca { i8, [15 x i8] }, align 16
+; AARCH64-SCOPE-NEXT:    [[TMP20:%.*]] = bitcast { i8, [15 x i8] }* [[TMP19]] to i8*
+; AARCH64-SCOPE-NEXT:    [[TMP21:%.*]] = call i8 @__hwasan_generate_tag()
+; AARCH64-SCOPE-NEXT:    [[TMP22:%.*]] = zext i8 [[TMP21]] to i64
+; AARCH64-SCOPE-NEXT:    [[TMP23:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-SCOPE-NEXT:    [[TMP24:%.*]] = shl i64 [[TMP22]], 56
+; AARCH64-SCOPE-NEXT:    [[TMP25:%.*]] = or i64 [[TMP23]], [[TMP24]]
+; AARCH64-SCOPE-NEXT:    [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP25]] to i8*
+; AARCH64-SCOPE-NEXT:    call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull [[TMP20]])
+; AARCH64-SCOPE-NEXT:    [[TMP26:%.*]] = trunc i64 [[TMP22]] to i8
+; AARCH64-SCOPE-NEXT:    [[TMP27:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-SCOPE-NEXT:    [[TMP28:%.*]] = lshr i64 [[TMP27]], 4
+; AARCH64-SCOPE-NEXT:    [[TMP29:%.*]] = getelementptr i8, i8* [[TMP18]], i64 [[TMP28]]
+; AARCH64-SCOPE-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP29]], i8 [[TMP26]], i64 1, i1 false)
+; AARCH64-SCOPE-NEXT:    [[TMP30:%.*]] = tail call i1 (...) @cond()
+; AARCH64-SCOPE-NEXT:    br i1 [[TMP30]], label [[TMP31:%.*]], label [[TMP35:%.*]]
+; AARCH64-SCOPE:       31:
+; AARCH64-SCOPE-NEXT:    call void @use(i8* nonnull [[ALLOCA_0_HWASAN]])
+; AARCH64-SCOPE-NEXT:    [[TMP32:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-SCOPE-NEXT:    [[TMP33:%.*]] = lshr i64 [[TMP32]], 4
+; AARCH64-SCOPE-NEXT:    [[TMP34:%.*]] = getelementptr i8, i8* [[TMP18]], i64 [[TMP33]]
+; AARCH64-SCOPE-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP34]], i8 0, i64 1, i1 false)
+; AARCH64-SCOPE-NEXT:    call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull [[TMP20]])
+; AARCH64-SCOPE-NEXT:    br label [[TMP39:%.*]]
+; AARCH64-SCOPE:       35:
+; AARCH64-SCOPE-NEXT:    [[TMP36:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-SCOPE-NEXT:    [[TMP37:%.*]] = lshr i64 [[TMP36]], 4
+; AARCH64-SCOPE-NEXT:    [[TMP38:%.*]] = getelementptr i8, i8* [[TMP18]], i64 [[TMP37]]
+; AARCH64-SCOPE-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP38]], i8 0, i64 1, i1 false)
+; AARCH64-SCOPE-NEXT:    call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull [[TMP20]])
+; AARCH64-SCOPE-NEXT:    br label [[TMP39]]
+; AARCH64-SCOPE:       39:
+; AARCH64-SCOPE-NEXT:    ret i32 0
+;
+; AARCH64-NOSCOPE-LABEL: @diamond_lifetime(
+; AARCH64-NOSCOPE-NEXT:    [[TMP1:%.*]] = call i8* @llvm.thread.pointer()
+; AARCH64-NOSCOPE-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* [[TMP1]], i32 48
+; AARCH64-NOSCOPE-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i64*
+; AARCH64-NOSCOPE-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 4
+; AARCH64-NOSCOPE-NEXT:    [[TMP5:%.*]] = ashr i64 [[TMP4]], 3
+; AARCH64-NOSCOPE-NEXT:    [[TMP6:%.*]] = call i64 @llvm.read_register.i64(metadata [[META1]])
+; AARCH64-NOSCOPE-NEXT:    [[TMP7:%.*]] = call i8* @llvm.frameaddress.p0i8(i32 0)
+; AARCH64-NOSCOPE-NEXT:    [[TMP8:%.*]] = ptrtoint i8* [[TMP7]] to i64
+; AARCH64-NOSCOPE-NEXT:    [[TMP9:%.*]] = shl i64 [[TMP8]], 44
+; AARCH64-NOSCOPE-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP4]] to i64*
+; AARCH64-NOSCOPE-NEXT:    [[TMP11:%.*]] = or i64 [[TMP6]], [[TMP9]]
+; AARCH64-NOSCOPE-NEXT:    store i64 [[TMP11]], i64* [[TMP10]], align 4
+; AARCH64-NOSCOPE-NEXT:    [[TMP12:%.*]] = ashr i64 [[TMP4]], 56
+; AARCH64-NOSCOPE-NEXT:    [[TMP13:%.*]] = shl nuw nsw i64 [[TMP12]], 12
+; AARCH64-NOSCOPE-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], -1
+; AARCH64-NOSCOPE-NEXT:    [[TMP15:%.*]] = add i64 [[TMP4]], 8
+; AARCH64-NOSCOPE-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], [[TMP14]]
+; AARCH64-NOSCOPE-NEXT:    store i64 [[TMP16]], i64* [[TMP3]], align 4
+; AARCH64-NOSCOPE-NEXT:    [[TMP17:%.*]] = or i64 [[TMP4]], 4294967295
+; AARCH64-NOSCOPE-NEXT:    [[HWASAN_SHADOW:%.*]] = add i64 [[TMP17]], 1
+; AARCH64-NOSCOPE-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[HWASAN_SHADOW]] to i8*
+; AARCH64-NOSCOPE-NEXT:    [[TMP19:%.*]] = alloca { i8, [15 x i8] }, align 16
+; AARCH64-NOSCOPE-NEXT:    [[TMP20:%.*]] = bitcast { i8, [15 x i8] }* [[TMP19]] to i8*
+; AARCH64-NOSCOPE-NEXT:    [[TMP21:%.*]] = call i8 @__hwasan_generate_tag()
+; AARCH64-NOSCOPE-NEXT:    [[TMP22:%.*]] = zext i8 [[TMP21]] to i64
+; AARCH64-NOSCOPE-NEXT:    [[TMP23:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-NOSCOPE-NEXT:    [[TMP24:%.*]] = shl i64 [[TMP22]], 56
+; AARCH64-NOSCOPE-NEXT:    [[TMP25:%.*]] = or i64 [[TMP23]], [[TMP24]]
+; AARCH64-NOSCOPE-NEXT:    [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP25]] to i8*
+; AARCH64-NOSCOPE-NEXT:    [[TMP26:%.*]] = trunc i64 [[TMP22]] to i8
+; AARCH64-NOSCOPE-NEXT:    [[TMP27:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-NOSCOPE-NEXT:    [[TMP28:%.*]] = lshr i64 [[TMP27]], 4
+; AARCH64-NOSCOPE-NEXT:    [[TMP29:%.*]] = getelementptr i8, i8* [[TMP18]], i64 [[TMP28]]
+; AARCH64-NOSCOPE-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP29]], i8 [[TMP26]], i64 1, i1 false)
+; AARCH64-NOSCOPE-NEXT:    [[TMP30:%.*]] = tail call i1 (...) @cond()
+; AARCH64-NOSCOPE-NEXT:    br i1 [[TMP30]], label [[TMP31:%.*]], label [[TMP32:%.*]]
+; AARCH64-NOSCOPE:       31:
+; AARCH64-NOSCOPE-NEXT:    call void @use(i8* nonnull [[ALLOCA_0_HWASAN]])
+; AARCH64-NOSCOPE-NEXT:    br label [[TMP33:%.*]]
+; AARCH64-NOSCOPE:       32:
+; AARCH64-NOSCOPE-NEXT:    br label [[TMP33]]
+; AARCH64-NOSCOPE:       33:
+; AARCH64-NOSCOPE-NEXT:    [[TMP34:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-NOSCOPE-NEXT:    [[TMP35:%.*]] = lshr i64 [[TMP34]], 4
+; AARCH64-NOSCOPE-NEXT:    [[TMP36:%.*]] = getelementptr i8, i8* [[TMP18]], i64 [[TMP35]]
+; AARCH64-NOSCOPE-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP36]], i8 0, i64 1, i1 false)
+; AARCH64-NOSCOPE-NEXT:    ret i32 0
+;
+; AARCH64-SHORT-SCOPE-LABEL: @diamond_lifetime(
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP1:%.*]] = call i8* @llvm.thread.pointer()
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* [[TMP1]], i32 48
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i64*
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 4
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP5:%.*]] = ashr i64 [[TMP4]], 3
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP6:%.*]] = call i64 @llvm.read_register.i64(metadata [[META1]])
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP7:%.*]] = call i8* @llvm.frameaddress.p0i8(i32 0)
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP8:%.*]] = ptrtoint i8* [[TMP7]] to i64
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP9:%.*]] = shl i64 [[TMP8]], 44
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP4]] to i64*
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP11:%.*]] = or i64 [[TMP6]], [[TMP9]]
+; AARCH64-SHORT-SCOPE-NEXT:    store i64 [[TMP11]], i64* [[TMP10]], align 4
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP12:%.*]] = ashr i64 [[TMP4]], 56
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP13:%.*]] = shl nuw nsw i64 [[TMP12]], 12
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], -1
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP15:%.*]] = add i64 [[TMP4]], 8
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], [[TMP14]]
+; AARCH64-SHORT-SCOPE-NEXT:    store i64 [[TMP16]], i64* [[TMP3]], align 4
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP17:%.*]] = or i64 [[TMP4]], 4294967295
+; AARCH64-SHORT-SCOPE-NEXT:    [[HWASAN_SHADOW:%.*]] = add i64 [[TMP17]], 1
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[HWASAN_SHADOW]] to i8*
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP19:%.*]] = alloca { i8, [15 x i8] }, align 16
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP20:%.*]] = bitcast { i8, [15 x i8] }* [[TMP19]] to i8*
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP21:%.*]] = call i8 @__hwasan_generate_tag()
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP22:%.*]] = zext i8 [[TMP21]] to i64
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP23:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP24:%.*]] = shl i64 [[TMP22]], 56
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP25:%.*]] = or i64 [[TMP23]], [[TMP24]]
+; AARCH64-SHORT-SCOPE-NEXT:    [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP25]] to i8*
+; AARCH64-SHORT-SCOPE-NEXT:    call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull [[TMP20]])
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP26:%.*]] = trunc i64 [[TMP22]] to i8
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP27:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP28:%.*]] = lshr i64 [[TMP27]], 4
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP29:%.*]] = getelementptr i8, i8* [[TMP18]], i64 [[TMP28]]
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP30:%.*]] = getelementptr i8, i8* [[TMP29]], i32 0
+; AARCH64-SHORT-SCOPE-NEXT:    store i8 1, i8* [[TMP30]], align 1
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP31:%.*]] = getelementptr i8, i8* [[TMP20]], i32 15
+; AARCH64-SHORT-SCOPE-NEXT:    store i8 [[TMP26]], i8* [[TMP31]], align 1
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP32:%.*]] = tail call i1 (...) @cond()
+; AARCH64-SHORT-SCOPE-NEXT:    br i1 [[TMP32]], label [[TMP33:%.*]], label [[TMP37:%.*]]
+; AARCH64-SHORT-SCOPE:       33:
+; AARCH64-SHORT-SCOPE-NEXT:    call void @use(i8* nonnull [[ALLOCA_0_HWASAN]])
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP34:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP35:%.*]] = lshr i64 [[TMP34]], 4
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP36:%.*]] = getelementptr i8, i8* [[TMP18]], i64 [[TMP35]]
+; AARCH64-SHORT-SCOPE-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP36]], i8 0, i64 1, i1 false)
+; AARCH64-SHORT-SCOPE-NEXT:    call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull [[TMP20]])
+; AARCH64-SHORT-SCOPE-NEXT:    br label [[TMP41:%.*]]
+; AARCH64-SHORT-SCOPE:       37:
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP38:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP39:%.*]] = lshr i64 [[TMP38]], 4
+; AARCH64-SHORT-SCOPE-NEXT:    [[TMP40:%.*]] = getelementptr i8, i8* [[TMP18]], i64 [[TMP39]]
+; AARCH64-SHORT-SCOPE-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP40]], i8 0, i64 1, i1 false)
+; AARCH64-SHORT-SCOPE-NEXT:    call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull [[TMP20]])
+; AARCH64-SHORT-SCOPE-NEXT:    br label [[TMP41]]
+; AARCH64-SHORT-SCOPE:       41:
+; AARCH64-SHORT-SCOPE-NEXT:    ret i32 0
+;
+; AARCH64-SHORT-NOSCOPE-LABEL: @diamond_lifetime(
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP1:%.*]] = call i8* @llvm.thread.pointer()
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP2:%.*]] = getelementptr i8, i8* [[TMP1]], i32 48
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i64*
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 4
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP5:%.*]] = ashr i64 [[TMP4]], 3
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP6:%.*]] = call i64 @llvm.read_register.i64(metadata [[META1]])
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP7:%.*]] = call i8* @llvm.frameaddress.p0i8(i32 0)
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP8:%.*]] = ptrtoint i8* [[TMP7]] to i64
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP9:%.*]] = shl i64 [[TMP8]], 44
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP4]] to i64*
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP11:%.*]] = or i64 [[TMP6]], [[TMP9]]
+; AARCH64-SHORT-NOSCOPE-NEXT:    store i64 [[TMP11]], i64* [[TMP10]], align 4
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP12:%.*]] = ashr i64 [[TMP4]], 56
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP13:%.*]] = shl nuw nsw i64 [[TMP12]], 12
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], -1
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP15:%.*]] = add i64 [[TMP4]], 8
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], [[TMP14]]
+; AARCH64-SHORT-NOSCOPE-NEXT:    store i64 [[TMP16]], i64* [[TMP3]], align 4
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP17:%.*]] = or i64 [[TMP4]], 4294967295
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[HWASAN_SHADOW:%.*]] = add i64 [[TMP17]], 1
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[HWASAN_SHADOW]] to i8*
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP19:%.*]] = alloca { i8, [15 x i8] }, align 16
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP20:%.*]] = bitcast { i8, [15 x i8] }* [[TMP19]] to i8*
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP21:%.*]] = call i8 @__hwasan_generate_tag()
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP22:%.*]] = zext i8 [[TMP21]] to i64
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP23:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP24:%.*]] = shl i64 [[TMP22]], 56
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP25:%.*]] = or i64 [[TMP23]], [[TMP24]]
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP25]] to i8*
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP26:%.*]] = trunc i64 [[TMP22]] to i8
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP27:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP28:%.*]] = lshr i64 [[TMP27]], 4
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP29:%.*]] = getelementptr i8, i8* [[TMP18]], i64 [[TMP28]]
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP30:%.*]] = getelementptr i8, i8* [[TMP29]], i32 0
+; AARCH64-SHORT-NOSCOPE-NEXT:    store i8 1, i8* [[TMP30]], align 1
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP31:%.*]] = getelementptr i8, i8* [[TMP20]], i32 15
+; AARCH64-SHORT-NOSCOPE-NEXT:    store i8 [[TMP26]], i8* [[TMP31]], align 1
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP32:%.*]] = tail call i1 (...) @cond()
+; AARCH64-SHORT-NOSCOPE-NEXT:    br i1 [[TMP32]], label [[TMP33:%.*]], label [[TMP34:%.*]]
+; AARCH64-SHORT-NOSCOPE:       33:
+; AARCH64-SHORT-NOSCOPE-NEXT:    call void @use(i8* nonnull [[ALLOCA_0_HWASAN]])
+; AARCH64-SHORT-NOSCOPE-NEXT:    br label [[TMP35:%.*]]
+; AARCH64-SHORT-NOSCOPE:       34:
+; AARCH64-SHORT-NOSCOPE-NEXT:    br label [[TMP35]]
+; AARCH64-SHORT-NOSCOPE:       35:
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP36:%.*]] = ptrtoint i8* [[TMP20]] to i64
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP37:%.*]] = lshr i64 [[TMP36]], 4
+; AARCH64-SHORT-NOSCOPE-NEXT:    [[TMP38:%.*]] = getelementptr i8, i8* [[TMP18]], i64 [[TMP37]]
+; AARCH64-SHORT-NOSCOPE-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP38]], i8 0, i64 1, i1 false)
+; AARCH64-SHORT-NOSCOPE-NEXT:    ret i32 0
+;
+  %1 = alloca i8, align 1
+  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %1)
+  %2 = tail call i1 (...) @cond() #2
+  br i1 %2, label %3, label %4
+
+3:
+  call void @use(i8* nonnull %1) #2
+  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %1)
+  br label %5
+
+4:
+  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %1)
+  br label %5
+
+5:
+  ret i32 0
+}
+
 declare dso_local i1 @cond(...) local_unnamed_addr
 declare dso_local void @use(i8*) local_unnamed_addr
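Note (reviewer sketch, not part of the patch): the coverage test that replaces the old dominance check in forAllReachableExits can be read as the standalone C++ helper below. The name isExitCovered and its signature are illustrative only; the patch performs this logic inline in the loop over RetVec, using the real isPotentiallyReachable overload from llvm/Analysis/CFG.h that takes a block exclusion set.

// Minimal sketch, assuming the caller has already collected EndBlocks as the
// set of basic blocks containing the lifetime.end calls for this alloca.
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// An exit RI counts as covered if a lifetime.end sits in RI's own block, or if
// every CFG path from the lifetime.start to RI passes through a block that
// contains a lifetime.end, i.e. RI is no longer reachable once those blocks
// are excluded from the reachability search. (isExitCovered is hypothetical.)
static bool isExitCovered(const Instruction *Start, const Instruction *RI,
                          const SmallPtrSetImpl<BasicBlock *> &EndBlocks,
                          const DominatorTree &DT) {
  if (EndBlocks.count(RI->getParent()) > 0)
    return true;
  return !isPotentiallyReachable(Start, RI, &EndBlocks, &DT);
}

This is what lets @diamond and @diamond3 keep the per-branch settag/untag on each lifetime.end, while the *_nocover variants, where one path reaches the return without passing any lifetime.end, fall back to a single untag at the exit.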