Index: llvm/trunk/include/llvm/Transforms/Instrumentation.h
===================================================================
--- llvm/trunk/include/llvm/Transforms/Instrumentation.h
+++ llvm/trunk/include/llvm/Transforms/Instrumentation.h
@@ -15,6 +15,7 @@
 #define LLVM_TRANSFORMS_INSTRUMENTATION_H
 
 #include "llvm/ADT/StringRef.h"
+#include "llvm/IR/BasicBlock.h"
 #include <vector>
 
 #if defined(__GNUC__) && defined(__linux__) && !defined(ANDROID)
@@ -33,6 +34,14 @@
 
 namespace llvm {
 
+/// Instrumentation passes often insert conditional checks into entry blocks.
+/// Call this function before splitting the entry block to move instructions
+/// that must remain in the entry block up before the split point. Static
+/// allocas and llvm.localescape calls, for example, must remain in the entry
+/// block.
+BasicBlock::iterator PrepareToSplitEntryBlock(BasicBlock &BB,
+                                              BasicBlock::iterator IP);
+
 class ModulePass;
 class FunctionPass;
 
Index: llvm/trunk/lib/Transforms/Instrumentation/Instrumentation.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/Instrumentation.cpp
+++ llvm/trunk/lib/Transforms/Instrumentation/Instrumentation.cpp
@@ -12,12 +12,47 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "llvm/InitializePasses.h"
+#include "llvm/Transforms/Instrumentation.h"
 #include "llvm-c/Initialization.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/InitializePasses.h"
 #include "llvm/PassRegistry.h"
 
 using namespace llvm;
 
+/// Moves I before IP. Returns new insert point.
+static BasicBlock::iterator moveBeforeInsertPoint(BasicBlock::iterator I,
+                                                  BasicBlock::iterator IP) {
+  // If I is IP, move the insert point down.
+  if (I == IP)
+    return ++IP;
+  // Otherwise, move I before IP and return IP.
+  I->moveBefore(IP);
+  return IP;
+}
+
+/// Instrumentation passes often insert conditional checks into entry blocks.
+/// Call this function before splitting the entry block to move instructions
+/// that must remain in the entry block up before the split point. Static
+/// allocas and llvm.localescape calls, for example, must remain in the entry
+/// block.
+BasicBlock::iterator llvm::PrepareToSplitEntryBlock(BasicBlock &BB,
+                                                    BasicBlock::iterator IP) {
+  assert(&BB.getParent()->getEntryBlock() == &BB);
+  for (auto I = IP, E = BB.end(); I != E; ++I) {
+    bool KeepInEntry = false;
+    if (auto *AI = dyn_cast<AllocaInst>(I)) {
+      if (AI->isStaticAlloca())
+        KeepInEntry = true;
+    } else if (auto *II = dyn_cast<IntrinsicInst>(I)) {
+      if (II->getIntrinsicID() == llvm::Intrinsic::localescape)
+        KeepInEntry = true;
+    }
+    if (KeepInEntry)
+      IP = moveBeforeInsertPoint(I, IP);
+  }
+  return IP;
+}
+
 /// initializeInstrumentation - Initialize all passes in the TransformUtils
 /// library.
 void llvm::initializeInstrumentation(PassRegistry &Registry) {
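Usage sketch (commentary, not part of the patch): PrepareToSplitEntryBlock is
meant to be called on the entry block's first insertion point immediately
before a pass splits the block to insert a conditional check, as
SanitizerCoverage does in the next hunk. The minimal C++ sketch below shows
that call order; instrumentEntryCheck and Cond are hypothetical names, and it
assumes SplitBlockAndInsertIfThen from llvm/Transforms/Utils/BasicBlockUtils.h.

    // Hypothetical caller, for illustration only; not part of this patch.
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/Transforms/Instrumentation.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"
    using namespace llvm;

    static void instrumentEntryCheck(Function &F, Value *Cond) {
      BasicBlock &Entry = F.getEntryBlock();
      BasicBlock::iterator IP = Entry.getFirstInsertionPt();
      // Hoist static allocas and llvm.localescape above the split point so
      // they stay in the entry block; everything after IP moves to the new
      // block created by the split.
      IP = PrepareToSplitEntryBlock(Entry, IP);
      // Branch on Cond at IP; the guarded code goes in the new "then" block.
      auto *CheckTerm = SplitBlockAndInsertIfThen(Cond, &*IP, false);
      IRBuilder<> IRB(CheckTerm);
      // ... emit the instrumentation (e.g. a runtime call) with IRB here ...
    }
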
Index: llvm/trunk/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ llvm/trunk/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -438,20 +438,16 @@
   if (isa<UnreachableInst>(BB.getTerminator()))
     return;
   BasicBlock::iterator IP = BB.getFirstInsertionPt(), BE = BB.end();
-  // Skip static allocas at the top of the entry block so they don't become
-  // dynamic when we split the block. If we used our optimized stack layout,
-  // then there will only be one alloca and it will come first.
-  for (; IP != BE; ++IP) {
-    AllocaInst *AI = dyn_cast<AllocaInst>(IP);
-    if (!AI || !AI->isStaticAlloca())
-      break;
-  }
 
   bool IsEntryBB = &BB == &F.getEntryBlock();
   DebugLoc EntryLoc;
   if (IsEntryBB) {
     if (auto SP = getDISubprogram(&F))
       EntryLoc = DebugLoc::get(SP->getScopeLine(), 0, SP);
+    // Keep static allocas and llvm.localescape calls in the entry block. Even
+    // if we aren't splitting the block, it's nice for allocas to be before
+    // calls.
+    IP = PrepareToSplitEntryBlock(BB, IP);
   } else {
     EntryLoc = IP->getDebugLoc();
   }
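Why both kinds of instructions must stay put (commentary, not part of the
patch): AllocaInst::isStaticAlloca is true only for constant-sized allocas in
the entry block, so an alloca carried into the split-off block silently
becomes dynamic; and the LangRef requires llvm.localescape to be called at
most once, from the function's entry block, so moving it produces invalid IR
for functions using SEH. The loop removed above skipped only leading static
allocas and would have moved a localescape call that followed them, which is
exactly what the new test below guards against.
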
Index: llvm/trunk/test/Instrumentation/SanitizerCoverage/localescape.ll
===================================================================
--- llvm/trunk/test/Instrumentation/SanitizerCoverage/localescape.ll
+++ llvm/trunk/test/Instrumentation/SanitizerCoverage/localescape.ll
@@ -0,0 +1,88 @@
+; RUN: opt < %s -sancov -sanitizer-coverage-level=0 -S | FileCheck %s
+; RUN: opt < %s -sancov -sanitizer-coverage-level=1 -S | FileCheck %s
+; RUN: opt < %s -sancov -sanitizer-coverage-level=2 -S | FileCheck %s
+; RUN: opt < %s -sancov -sanitizer-coverage-level=2 -sanitizer-coverage-block-threshold=0 -S | FileCheck %s
+
+target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
+target triple = "i686-pc-windows-msvc18.0.0"
+
+declare i32 @llvm.eh.typeid.for(i8*) #2
+declare i8* @llvm.frameaddress(i32)
+declare i8* @llvm.x86.seh.recoverfp(i8*, i8*)
+declare i8* @llvm.localrecover(i8*, i8*, i32)
+declare void @llvm.localescape(...) #1
+
+declare i32 @_except_handler3(...)
+declare void @may_throw(i32* %r)
+
+define i32 @main() sanitize_address personality i8* bitcast (i32 (...)* @_except_handler3 to i8*) {
+entry:
+  %r = alloca i32, align 4
+  %__exception_code = alloca i32, align 4
+  call void (...) @llvm.localescape(i32* nonnull %__exception_code)
+  %0 = bitcast i32* %r to i8*
+  store i32 0, i32* %r, align 4
+  invoke void @may_throw(i32* nonnull %r) #4
+          to label %__try.cont unwind label %lpad
+
+lpad:                                             ; preds = %entry
+  %1 = landingpad { i8*, i32 }
+          catch i8* bitcast (i32 ()* @"\01?filt$0@0@main@@" to i8*)
+  %2 = extractvalue { i8*, i32 } %1, 1
+  %3 = call i32 @llvm.eh.typeid.for(i8* bitcast (i32 ()* @"\01?filt$0@0@main@@" to i8*)) #1
+  %matches = icmp eq i32 %2, %3
+  br i1 %matches, label %__except, label %eh.resume
+
+__except:                                         ; preds = %lpad
+  store i32 1, i32* %r, align 4
+  br label %__try.cont
+
+__try.cont:                                       ; preds = %entry, %__except
+  %4 = load i32, i32* %r, align 4
+  ret i32 %4
+
+eh.resume:                                        ; preds = %lpad
+  resume { i8*, i32 } %1
+}
+
+; Check that the alloca remains static and the localescape call remains in the
+; entry block.
+
+; CHECK-LABEL: define i32 @main()
+; CHECK-NOT: br {{.*}}label
+; CHECK: %__exception_code = alloca i32, align 4
+; CHECK-NOT: br {{.*}}label
+; CHECK: call void (...) @llvm.localescape(i32* nonnull %__exception_code)
+
+; Function Attrs: nounwind
+define internal i32 @"\01?filt$0@0@main@@"() #1 {
+entry:
+  %0 = tail call i8* @llvm.frameaddress(i32 1)
+  %1 = tail call i8* @llvm.x86.seh.recoverfp(i8* bitcast (i32 ()* @main to i8*), i8* %0)
+  %2 = tail call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* %1, i32 0)
+  %__exception_code = bitcast i8* %2 to i32*
+  %3 = getelementptr inbounds i8, i8* %0, i32 -20
+  %4 = bitcast i8* %3 to { i32*, i8* }**
+  %5 = load { i32*, i8* }*, { i32*, i8* }** %4, align 4
+  %6 = getelementptr inbounds { i32*, i8* }, { i32*, i8* }* %5, i32 0, i32 0
+  %7 = load i32*, i32** %6, align 4
+  %8 = load i32, i32* %7, align 4
+  store i32 %8, i32* %__exception_code, align 4
+  ret i32 1
+}
+
+; CHECK-LABEL: define internal i32 @"\01?filt$0@0@main@@"()
+; CHECK: tail call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* {{.*}}, i32 0)
+
+define void @ScaleFilterCols_SSSE3(i8* %dst_ptr, i8* %src_ptr, i32 %dst_width, i32 %x, i32 %dx) sanitize_address {
+entry:
+  %dst_width.addr = alloca i32, align 4
+  store i32 %dst_width, i32* %dst_width.addr, align 4
+  %0 = call { i8*, i8*, i32, i32, i32 } asm sideeffect "", "=r,=r,={ax},=r,=r,=*rm,rm,rm,0,1,2,3,4,5,~{memory},~{cc},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{dirflag},~{fpsr},~{flags}"(i32* nonnull %dst_width.addr, i32 %x, i32 %dx, i8* %dst_ptr, i8* %src_ptr, i32 0, i32 0, i32 0, i32 %dst_width)
  ret void
+}
+
+define void @ScaleColsUp2_SSE2() sanitize_address {
+entry:
+  ret void
+}
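
Running the new test by hand (commentary, not part of the patch): with a built
opt and FileCheck on PATH, any of the RUN lines can be expanded from the test's
directory, for example:

    opt < localescape.ll -sancov -sanitizer-coverage-level=2 -S | FileCheck localescape.ll

Alternatively, the file can be driven through lit using the build tree's
bin/llvm-lit on the test path.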