Index: lib/Transforms/Instrumentation/ThreadSanitizer.cpp =================================================================== --- lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -19,6 +19,8 @@ // The rest is handled by the run-time library. //===----------------------------------------------------------------------===// +#include "llvm/Analysis/CaptureTracking.h" +#include "llvm/Analysis/ValueTracking.h" #include "llvm/Transforms/Instrumentation.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallString.h" @@ -26,6 +28,7 @@ #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" #include "llvm/IR/DataLayout.h" +#include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/IntrinsicInst.h" @@ -68,6 +71,7 @@ STATISTIC(NumOmittedReadsFromConstantGlobals, "Number of reads from constant globals"); STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads"); +STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing"); namespace { @@ -84,7 +88,8 @@ bool instrumentLoadOrStore(Instruction *I); bool instrumentAtomic(Instruction *I); bool instrumentMemIntrinsic(Instruction *I); - void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local, + void chooseInstructionsToInstrument(DominatorTree &DT, + SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All); bool addrPointsToConstantData(Value *Addr); int getMemoryAccessFuncIndex(Value *Addr); @@ -260,6 +265,7 @@ // Instrumenting some of the accesses may be proven redundant. // Currently handled: // - read-before-write (within same BB, no calls between) +// - not yet captured variables // // We do not handle some of the patterns that should not survive // after the classic compiler optimizations. @@ -269,6 +275,7 @@ // 'Local' is a vector of insns within the same BB (no calls between). // 'All' is a vector of insns that will be instrumented. 
void ThreadSanitizer::chooseInstructionsToInstrument( + DominatorTree &DT, SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All) { SmallSet<Value*, 8> WriteTargets; @@ -291,6 +298,22 @@ continue; } } + Value *Addr = isa<StoreInst>(*I) + ? cast<StoreInst>(I)->getPointerOperand() + : cast<LoadInst>(I)->getPointerOperand(); + if (isa<AllocaInst>(GetUnderlyingObject(Addr, nullptr)) && + !PointerMayBeCapturedBefore(Addr, true, true, I, &DT)) { + // The variable is addressable but its address is not yet leaked, ignore. + // For example, consider: + // + // int x = 0; + // foo(&x); + // + // In this code we are ignoring the first write to x, since it cannot + // participate in races yet. + NumOmittedNonCaptured++; + continue; + } All.push_back(I); } Local.clear(); @@ -322,6 +345,9 @@ bool HasCalls = false; bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread); + DominatorTree DT; + DT.recalculate(F); + // Traverse all instructions, collect loads/stores/returns, check for calls. for (auto &BB : F) { for (auto &Inst : BB) { @@ -335,10 +361,11 @@ if (isa<MemIntrinsic>(Inst)) MemIntrinCalls.push_back(&Inst); HasCalls = true; - chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores); + chooseInstructionsToInstrument(DT, LocalLoadsAndStores, + AllLoadsAndStores); } } - chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores); + chooseInstructionsToInstrument(DT, LocalLoadsAndStores, AllLoadsAndStores); } // We have collected all loads and stores. 
Index: test/Instrumentation/ThreadSanitizer/capture.ll =================================================================== --- test/Instrumentation/ThreadSanitizer/capture.ll +++ test/Instrumentation/ThreadSanitizer/capture.ll @@ -0,0 +1,91 @@ +; RUN: opt < %s -tsan -S | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" + +declare void @escape(i32*) + +@sink = global i32* null, align 4 + +define void @captured0() nounwind uwtable sanitize_thread { +entry: + %ptr = alloca i32, align 4 + ; escapes due to call + call void @escape(i32* %ptr) + store i32 42, i32* %ptr, align 4 + ret void +} +; CHECK: define void @captured0 +; CHECK: __tsan_write +; CHECK: ret void + +define void @captured1() nounwind uwtable sanitize_thread { +entry: + %ptr = alloca i32, align 4 + ; escapes due to store into global + store i32* %ptr, i32** @sink, align 4 + store i32 42, i32* %ptr, align 4 + ret void +} +; CHECK: define void @captured1 +; CHECK: __tsan_write +; CHECK: __tsan_write +; CHECK: ret void + +define void @captured2() nounwind uwtable sanitize_thread { +entry: + %ptr = alloca i32, align 4 + %tmp = alloca i32*, align 8 + ; transitive escape + store i32* %ptr, i32** %tmp, align 8 + %0 = load i32** %tmp, align 8 + store i32* %0, i32** @sink, align 8 + store i32 42, i32* %ptr, align 4 + ret void +} +; CHECK: define void @captured2 +; CHECK: __tsan_write +; CHECK: __tsan_write +; CHECK: ret void + +define void @notcaptured0() nounwind uwtable sanitize_thread { +entry: + %ptr = alloca i32, align 4 + store i32 42, i32* %ptr, align 4 + ; escapes due to call + call void @escape(i32* %ptr) + ret void +} +; CHECK: define void @notcaptured0 +; CHECK-NOT: __tsan_write +; CHECK: ret void + +define void @notcaptured1() nounwind uwtable sanitize_thread { +entry: + %ptr = alloca i32, align 4 + store i32 42, i32* %ptr, align 4 + ; escapes due to store into global + 
store i32* %ptr, i32** @sink, align 4 + ret void +} +; CHECK: define void @notcaptured1 +; CHECK: __tsan_write +; CHECK-NOT: __tsan_write +; CHECK: ret void + +define void @notcaptured2() nounwind uwtable sanitize_thread { +entry: + %ptr = alloca i32, align 4 + %tmp = alloca i32*, align 8 + store i32 42, i32* %ptr, align 4 + ; transitive escape + store i32* %ptr, i32** %tmp, align 8 + %0 = load i32** %tmp, align 8 + store i32* %0, i32** @sink, align 8 + ret void +} +; CHECK: define void @notcaptured2 +; CHECK: __tsan_write +; CHECK-NOT: __tsan_write +; CHECK: ret void + +