Index: llvm/include/llvm/CodeGen/StackProtector.h
===================================================================
--- llvm/include/llvm/CodeGen/StackProtector.h
+++ llvm/include/llvm/CodeGen/StackProtector.h
@@ -61,6 +61,12 @@
   /// protection when -fstack-protection is used.
   unsigned SSPBufferSize = 0;
 
+  /// VisitedPHIs - The set of PHI nodes visited when determining
+  /// if a variable's reference has been taken. This set
+  /// is maintained to ensure we don't visit the same PHI node multiple
+  /// times.
+  SmallPtrSet<const PHINode *, 16> VisitedPHIs;
+
   // A prologue is generated.
   bool HasPrologue = false;
Index: llvm/lib/CodeGen/StackProtector.cpp
===================================================================
--- llvm/lib/CodeGen/StackProtector.cpp
+++ llvm/lib/CodeGen/StackProtector.cpp
@@ -17,7 +17,6 @@
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/BranchProbabilityInfo.h"
-#include "llvm/Analysis/CaptureTracking.h"
 #include "llvm/Analysis/EHPersonalities.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/CodeGen/Passes.h"
@@ -157,6 +156,40 @@
   return NeedsProtector;
 }
 
+bool StackProtector::HasAddressTaken(const Instruction *AI) {
+  for (const User *U : AI->users()) {
+    if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
+      if (AI == SI->getValueOperand())
+        return true;
+    } else if (const PtrToIntInst *SI = dyn_cast<PtrToIntInst>(U)) {
+      if (AI == SI->getOperand(0))
+        return true;
+    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
+      // Ignore intrinsics that are not calls. TODO: Use isLoweredToCall().
+      if (!isa<DbgInfoIntrinsic>(CI) && !CI->isLifetimeStartOrEnd())
+        return true;
+    } else if (isa<InvokeInst>(U)) {
+      return true;
+    } else if (const SelectInst *SI = dyn_cast<SelectInst>(U)) {
+      if (HasAddressTaken(SI))
+        return true;
+    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
+      // Keep track of what PHI nodes we have already visited to ensure
+      // they are only visited once.
+      if (VisitedPHIs.insert(PN).second)
+        if (HasAddressTaken(PN))
+          return true;
+    } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
+      if (HasAddressTaken(GEP))
+        return true;
+    } else if (const BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
+      if (HasAddressTaken(BI))
+        return true;
+    }
+  }
+  return false;
+}
+
 /// Search for the first call to the llvm.stackprotector intrinsic and return it
 /// if present.
 static const CallInst *findStackProtectorIntrinsic(Function &F) {
@@ -264,9 +297,7 @@
       continue;
     }
 
-    if (Strong && PointerMayBeCaptured(AI,
-                                       /* ReturnCaptures */ false,
-                                       /* StoreCaptures */ true)) {
+    if (Strong && HasAddressTaken(AI)) {
       ++NumAddrTaken;
       Layout.insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
      ORE.emit([&]() {
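
For illustration only (a hedged sketch, not part of the patch): the restored heuristic treats an alloca as address-taken when it is the value operand of a store, the operand of a ptrtoint, an argument of a genuine call or invoke, or reaches one of those through a select, PHI, GEP, or bitcast. Minimal IR showing the store case, with hypothetical names:

define void @store_escape(i32** %j) sspstrong {
entry:
  %a = alloca i32, align 4
  ; %a is the *value* operand of the store, so HasAddressTaken returns
  ; true and the slot is classified as MachineFrameInfo::SSPLK_AddrOf.
  store i32* %a, i32** %j, align 8
  ret void
}
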
Index: llvm/test/CodeGen/X86/stack-protector.ll
===================================================================
--- llvm/test/CodeGen/X86/stack-protector.ll
+++ llvm/test/CodeGen/X86/stack-protector.ll
@@ -4087,8 +4087,8 @@
   %1 = alloca i32, align 4
   %2 = bitcast i32* %1 to i8*
   call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2)
-  store i32 1, i32* %1, align 4
-  %3 = load i32, i32* %1, align 4
+  store volatile i32 1, i32* %1, align 4
+  %3 = load volatile i32, i32* %1, align 4
   %4 = mul nsw i32 %3, 42
   call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %2)
   ret i32 %4
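
The volatile qualifiers are presumably there so the store/load pair survives and the function keeps exercising the lifetime-marker special case. A sketch (assumed, with hypothetical names) of the path the heuristic deliberately ignores:

define void @lifetime_only() sspstrong {
entry:
  %slot = alloca i32, align 4
  %raw = bitcast i32* %slot to i8*
  ; HasAddressTaken recurses through the bitcast, but its CallInst case
  ; skips users for which CI->isLifetimeStartOrEnd() is true, so %slot
  ; is not considered address-taken by these markers alone.
  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %raw)
  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %raw)
  ret void
}

declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
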
Index: llvm/test/Transforms/StackProtector/X86/captures.ll
===================================================================
--- llvm/test/Transforms/StackProtector/X86/captures.ll
+++ /dev/null
@@ -1,139 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=x86_64-pc-linux-gnu -stack-protector < %s | FileCheck %s
-; Bug 42238: Test some situations missed by old, custom capture tracking.
-
-define void @store_captures() #0 {
-; CHECK-LABEL: @store_captures(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[STACKGUARDSLOT:%.*]] = alloca i8*
-; CHECK-NEXT:    [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT:    call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
-; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[J:%.*]] = alloca i32*, align 8
-; CHECK-NEXT:    store i32 0, i32* [[RETVAL]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
-; CHECK-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
-; CHECK-NEXT:    store i32* [[A]], i32** [[J]], align 8
-; CHECK-NEXT:    [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT:    [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
-; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
-; CHECK-NEXT:    br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
-; CHECK:       SP_return:
-; CHECK-NEXT:    ret void
-; CHECK:       CallStackCheckFailBlk:
-; CHECK-NEXT:    call void @__stack_chk_fail()
-; CHECK-NEXT:    unreachable
-;
-entry:
-  %retval = alloca i32, align 4
-  %a = alloca i32, align 4
-  %j = alloca i32*, align 8
-  store i32 0, i32* %retval
-  %load = load i32, i32* %a, align 4
-  %add = add nsw i32 %load, 1
-  store i32 %add, i32* %a, align 4
-  store i32* %a, i32** %j, align 8
-  ret void
-}
-
-define i32* @return_captures() #0 {
-; CHECK-LABEL: @return_captures(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[J:%.*]] = alloca i32*, align 8
-; CHECK-NEXT:    store i32 0, i32* [[RETVAL]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
-; CHECK-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
-; CHECK-NEXT:    ret i32* [[A]]
-;
-entry:
-  %retval = alloca i32, align 4
-  %a = alloca i32, align 4
-  %j = alloca i32*, align 8
-  store i32 0, i32* %retval
-  %load = load i32, i32* %a, align 4
-  %add = add nsw i32 %load, 1
-  store i32 %add, i32* %a, align 4
-  ret i32* %a
-}
-
-define void @store_addrspacecast_captures() #0 {
-; CHECK-LABEL: @store_addrspacecast_captures(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[STACKGUARDSLOT:%.*]] = alloca i8*
-; CHECK-NEXT:    [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT:    call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
-; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[J:%.*]] = alloca i32 addrspace(1)*, align 8
-; CHECK-NEXT:    store i32 0, i32* [[RETVAL]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
-; CHECK-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
-; CHECK-NEXT:    [[A_ADDRSPACECAST:%.*]] = addrspacecast i32* [[A]] to i32 addrspace(1)*
-; CHECK-NEXT:    store i32 addrspace(1)* [[A_ADDRSPACECAST]], i32 addrspace(1)** [[J]], align 8
-; CHECK-NEXT:    [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT:    [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
-; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
-; CHECK-NEXT:    br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
-; CHECK:       SP_return:
-; CHECK-NEXT:    ret void
-; CHECK:       CallStackCheckFailBlk:
-; CHECK-NEXT:    call void @__stack_chk_fail()
-; CHECK-NEXT:    unreachable
-;
-entry:
-  %retval = alloca i32, align 4
-  %a = alloca i32, align 4
-  %j = alloca i32 addrspace(1)*, align 8
-  store i32 0, i32* %retval
-  %load = load i32, i32* %a, align 4
-  %add = add nsw i32 %load, 1
-  store i32 %add, i32* %a, align 4
-  %a.addrspacecast = addrspacecast i32* %a to i32 addrspace(1)*
-  store i32 addrspace(1)* %a.addrspacecast, i32 addrspace(1)** %j, align 8
-  ret void
-}
-
-define void @cmpxchg_captures() #0 {
-; CHECK-LABEL: @cmpxchg_captures(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[STACKGUARDSLOT:%.*]] = alloca i8*
-; CHECK-NEXT:    [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT:    call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
-; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[J:%.*]] = alloca i32*, align 8
-; CHECK-NEXT:    store i32 0, i32* [[RETVAL]]
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
-; CHECK-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
-; CHECK-NEXT:    [[TMP0:%.*]] = cmpxchg i32** [[J]], i32* [[A]], i32* null seq_cst monotonic
-; CHECK-NEXT:    [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT:    [[TMP1:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP1]]
-; CHECK-NEXT:    br i1 [[TMP2]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
-; CHECK:       SP_return:
-; CHECK-NEXT:    ret void
-; CHECK:       CallStackCheckFailBlk:
-; CHECK-NEXT:    call void @__stack_chk_fail()
-; CHECK-NEXT:    unreachable
-;
-entry:
-  %retval = alloca i32, align 4
-  %a = alloca i32, align 4
-  %j = alloca i32*, align 8
-  store i32 0, i32* %retval
-  %load = load i32, i32* %a, align 4
-  %add = add nsw i32 %load, 1
-  store i32 %add, i32* %a, align 4
-
-  cmpxchg i32** %j, i32* %a, i32* null seq_cst monotonic
-  ret void
-}
-
-attributes #0 = { sspstrong }
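
As the deleted test's header notes, Bug 42238 tracked situations the custom tracking misses: store_captures is still caught by the store value-operand check, but addrspacecast and cmpxchg users have no corresponding case in HasAddressTaken, so those allocas are no longer flagged. A minimal sketch (hypothetical name) of one such now-unflagged case:

define void @cmpxchg_escape(i32** %j) sspstrong {
entry:
  %a = alloca i32, align 4
  ; AtomicCmpXchgInst is not matched by any branch of HasAddressTaken,
  ; so %a escaping as a cmpxchg operand no longer forces SSPLK_AddrOf.
  %pair = cmpxchg i32** %j, i32* %a, i32* null seq_cst monotonic
  ret void
}
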
Index: llvm/test/Transforms/StackProtector/X86/lit.local.cfg
===================================================================
--- llvm/test/Transforms/StackProtector/X86/lit.local.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-if not 'X86' in config.root.targets:
-    config.unsupported = True
Index: llvm/tools/opt/opt.cpp
===================================================================
--- llvm/tools/opt/opt.cpp
+++ llvm/tools/opt/opt.cpp
@@ -523,7 +523,6 @@
   initializeDwarfEHPreparePass(Registry);
   initializeSafeStackLegacyPassPass(Registry);
   initializeSjLjEHPreparePass(Registry);
-  initializeStackProtectorPass(Registry);
   initializePreISelIntrinsicLoweringLegacyPassPass(Registry);
   initializeGlobalMergePass(Registry);
   initializeIndirectBrExpandPassPass(Registry);
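
With initializeStackProtectorPass(Registry) removed, opt no longer registers the pass, which is presumably why the opt-driven tests above are deleted rather than updated: their RUN line, opt -S -mtriple=x86_64-pc-linux-gnu -stack-protector < %s | FileCheck %s, would no longer recognize -stack-protector. Remaining coverage lives in the llc-driven llvm/test/CodeGen/X86/stack-protector.ll.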