Index: lib/Transforms/Instrumentation/AddressSanitizer.cpp
===================================================================
--- lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -437,6 +437,7 @@
   Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
   bool runOnFunction(Function &F) override;
   bool maybeInsertAsanInitAtFunctionEntry(Function &F);
+  IntrinsicInst *markEscapedLocalAllocas(Function &F);
   bool doInitialization(Module &M) override;
   static char ID;  // Pass identification, replacement for typeid
 
@@ -546,6 +547,7 @@
   SmallVector<AllocaInst *, 1> DynamicAllocaVec;
   SmallVector<IntrinsicInst *, 1> StackRestoreVec;
   AllocaInst *DynamicAllocaLayout = nullptr;
+  IntrinsicInst *LocalEscapeCall;
 
   // Maps Value to an AllocaInst from which the Value is originated.
   typedef DenseMap<Value *, AllocaInst *> AllocaForValueMapTy;
@@ -554,7 +556,8 @@
 
   bool HasNonEmptyInlineAsm;
   std::unique_ptr<CallInst> EmptyInlineAsm;
 
-  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
+  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
+                        IntrinsicInst *LocalEscapeCall)
       : F(F), ASan(ASan),
         DIB(*F.getParent(), /*AllowUnresolved*/ false),
@@ -563,6 +566,7 @@
         IntptrPtrTy(PointerType::get(IntptrTy, 0)),
         Mapping(ASan.Mapping),
         StackAlignment(1 << Mapping.Scale),
+        LocalEscapeCall(LocalEscapeCall),
         HasNonEmptyInlineAsm(false),
         EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {}
 
@@ -1477,6 +1481,35 @@
   return false;
 }
 
+IntrinsicInst *AddressSanitizer::markEscapedLocalAllocas(Function &F) {
+  // Find the one possible call to llvm.localescape and pre-mark allocas passed
+  // to it as uninteresting. This assumes we haven't started processing allocas
+  // yet. This check is done up front because iterating the use list in
+  // isInterestingAlloca would be algorithmically slower.
+  assert(ProcessedAllocas.empty() && "must process localescape before allocas");
+
+  // Try to get the declaration of llvm.localescape. If it's not in the module,
+  // we can exit early.
+  if (!F.getParent()->getFunction("llvm.localescape")) return nullptr;
+
+  // Look for a call to llvm.localescape call in the entry block. It can't be in
+  // any other block.
+  for (Instruction &I : F.getEntryBlock()) {
+    IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
+    if (II && II->getIntrinsicID() == Intrinsic::localescape) {
+      // We found a call. Mark all the allocas passed in as uninteresting.
+      for (Value *Arg : II->arg_operands()) {
+        AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
+        assert(AI && AI->isStaticAlloca() &&
+               "non-static alloca arg to localescape");
+        ProcessedAllocas[AI] = false;
+      }
+      return II;
+    }
+  }
+  return nullptr;
+}
+
 bool AddressSanitizer::runOnFunction(Function &F) {
   if (&F == AsanCtorFunction) return false;
   if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
@@ -1492,6 +1525,10 @@
 
   if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) return false;
 
+  // We can't instrument allocas used with llvm.localescape. Only static allocas
+  // can be passed to that intrinsic.
+  IntrinsicInst *LocalEscapeCall = markEscapedLocalAllocas(F);
+
   // We want to instrument every address only once per basic block (unless there
   // are calls between uses).
   SmallSet<Value *, 16> TempsToInstrument;
@@ -1563,7 +1600,7 @@
     NumInstrumented++;
   }
 
-  FunctionStackPoisoner FSP(F, *this);
+  FunctionStackPoisoner FSP(F, *this, LocalEscapeCall);
   bool ChangedStack = FSP.runOnFunction();
 
   // We must unpoison the stack before every NoReturn call (throw, _exit, etc).
@@ -1582,6 +1619,8 @@
 
   DEBUG(dbgs() << "ASAN done instrumenting: " << res << " " << F << "\n");
 
+  ProcessedAllocas.clear();
+
   return res;
 }
 
@@ -1740,6 +1779,9 @@
   for (auto *AI : NonInstrumentedStaticAllocaVec)
     AI->moveBefore(InsBefore);
 
+  // If we have a call to llvm.localescape, keep it in the entry block.
+  if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);
+
   SmallVector<ASanStackVariableDescription, 16> SVD;
   SVD.reserve(AllocaVec.size());
   for (AllocaInst *AI : AllocaVec) {
Index: test/Instrumentation/AddressSanitizer/localescape.ll
===================================================================
--- /dev/null
+++ test/Instrumentation/AddressSanitizer/localescape.ll
@@ -0,0 +1,77 @@
+; RUN: opt < %s -asan -asan-module -asan-use-after-return -asan-stack-dynamic-alloca -S | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+declare i32 @llvm.eh.typeid.for(i8*) #2
+declare i8* @llvm.frameaddress(i32)
+declare i8* @llvm.x86.seh.recoverfp(i8*, i8*)
+declare i8* @llvm.localrecover(i8*, i8*, i32)
+declare void @llvm.localescape(...) #1
+
+declare i32 @_except_handler3(...)
+declare void @may_throw(i32* %r)
+
+define i32 @main() sanitize_address personality i8* bitcast (i32 (...)* @_except_handler3 to i8*) {
+entry:
+  %r = alloca i32, align 4
+  %__exception_code = alloca i32, align 4
+  call void (...) @llvm.localescape(i32* nonnull %__exception_code)
+  %0 = bitcast i32* %r to i8*
+  store i32 0, i32* %r, align 4
+  invoke void @may_throw(i32* nonnull %r) #4
+          to label %__try.cont unwind label %lpad
+
+lpad:                                             ; preds = %entry
+  %1 = landingpad { i8*, i32 }
+          catch i8* bitcast (i32 ()* @"\01?filt$0@0@main@@" to i8*)
+  %2 = extractvalue { i8*, i32 } %1, 1
+  %3 = call i32 @llvm.eh.typeid.for(i8* bitcast (i32 ()* @"\01?filt$0@0@main@@" to i8*)) #1
+  %matches = icmp eq i32 %2, %3
+  br i1 %matches, label %__except, label %eh.resume
+
+__except:                                         ; preds = %lpad
+  store i32 1, i32* %r, align 4
+  br label %__try.cont
+
+__try.cont:                                       ; preds = %entry, %__except
+  %4 = load i32, i32* %r, align 4
+  ret i32 %4
+
+eh.resume:                                        ; preds = %lpad
+  resume { i8*, i32 } %1
+}
+
+; Check that the alloca remains static and the localescape call remains in the
+; entry block.
+
+; CHECK-LABEL: define i32 @main()
+; CHECK-NOT: br {{.*}}label
+; CHECK: %__exception_code = alloca i32, align 4
+; CHECK-NOT: br {{.*}}label
+; CHECK: call void (...) @llvm.localescape(i32* nonnull %__exception_code)
+
+; The UAR branches happen after localescape.
+; CHECK: load i32, i32* @__asan_option_detect_stack_use_after_return
+; CHECK: br i1
+; CHECK: @__asan_stack_malloc_0(i64 64)
+
+; Function Attrs: nounwind
+define internal i32 @"\01?filt$0@0@main@@"() #1 {
+entry:
+  %0 = tail call i8* @llvm.frameaddress(i32 1)
+  %1 = tail call i8* @llvm.x86.seh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %0)
+  %2 = tail call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* %1, i32 0)
+  %__exception_code = bitcast i8* %2 to i32*
+  %3 = getelementptr inbounds i8, i8* %0, i32 -20
+  %4 = bitcast i8* %3 to { i32*, i8* }**
+  %5 = load { i32*, i8* }*, { i32*, i8* }** %4, align 4
+  %6 = getelementptr inbounds { i32*, i8* }, { i32*, i8* }* %5, i32 0, i32 0
+  %7 = load i32*, i32** %6, align 4
+  %8 = load i32, i32* %7, align 4
+  store i32 %8, i32* %__exception_code, align 4
+  ret i32 1
+}
+
+; CHECK-LABEL: define internal i32 @"\01?filt$0@0@main@@"()
+; CHECK: tail call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* {{.*}}, i32 0)