Index: llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -568,7 +568,8 @@
   typedef DenseMap<Value *, AllocaInst *> AllocaForValueMapTy;
   AllocaForValueMapTy AllocaForValue;

-  bool HasNonEmptyInlineAsm;
+  bool HasNonEmptyInlineAsm = false;
+  bool HasReturnsTwiceCall = false;
   std::unique_ptr<CallInst> EmptyInlineAsm;

   FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
@@ -580,7 +581,6 @@
         IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
         StackAlignment(1 << Mapping.Scale),
-        HasNonEmptyInlineAsm(false),
         EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {}

   bool runOnFunction() {
@@ -682,9 +682,13 @@
     AllocaPoisonCallVec.push_back(APC);
   }

-  void visitCallInst(CallInst &CI) {
-    HasNonEmptyInlineAsm |=
-        CI.isInlineAsm() && !CI.isIdenticalTo(EmptyInlineAsm.get());
+  void visitCallSite(CallSite CS) {
+    Instruction *I = CS.getInstruction();
+    if (CallInst *CI = dyn_cast<CallInst>(I)) {
+      HasNonEmptyInlineAsm |=
+          CI->isInlineAsm() && !CI->isIdenticalTo(EmptyInlineAsm.get());
+      HasReturnsTwiceCall |= CI->canReturnTwice();
+    }
   }

   // ---------------------- Helpers.
@@ -1820,10 +1824,15 @@
   uint64_t LocalStackSize = L.FrameSize;
   bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
                        LocalStackSize <= kMaxStackMallocSize;
-  // Don't do dynamic alloca or stack malloc in presence of inline asm:
-  // too often it makes assumptions on which registers are available.
-  bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;
-  DoStackMalloc &= !HasNonEmptyInlineAsm;
+  bool DoDynamicAlloca = ClDynamicAllocaStack;
+  // Don't do dynamic alloca or stack malloc if:
+  // 1) There is inline asm: too often it makes assumptions on which registers
+  //    are available.
+  // 2) There is a returns_twice call (typically setjmp), which is
+  //    optimization-hostile, and doesn't play well with introduced indirect
+  //    register-relative calculation of local variable addresses.
+  DoDynamicAlloca &= !HasNonEmptyInlineAsm && !HasReturnsTwiceCall;
+  DoStackMalloc &= !HasNonEmptyInlineAsm && !HasReturnsTwiceCall;

   Value *StaticAlloca =
       DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
Index: llvm/trunk/test/Instrumentation/AddressSanitizer/stack_dynamic_alloca.ll
===================================================================
--- llvm/trunk/test/Instrumentation/AddressSanitizer/stack_dynamic_alloca.ll
+++ llvm/trunk/test/Instrumentation/AddressSanitizer/stack_dynamic_alloca.ll
@@ -44,3 +44,34 @@
   call void asm sideeffect "mov %%rbx, %%rcx", "~{dirflag},~{fpsr},~{flags}"() nounwind
   ret void
 }
+
+; Test that dynamic alloca is not used when setjmp is present.
+%struct.__jmp_buf_tag = type { [8 x i64], i32, %struct.__sigset_t } +%struct.__sigset_t = type { [16 x i64] } +@_ZL3buf = internal global [1 x %struct.__jmp_buf_tag] zeroinitializer, align 16 + +define void @Func3() uwtable sanitize_address { +; CHECK-LABEL: define void @Func3 +; CHECK-NOT: __asan_option_detect_stack_use_after_return +; CHECK-NOT: __asan_stack_malloc +; CHECK: call void @__asan_handle_no_return +; CHECK: call void @longjmp +; CHECK: ret void +entry: + %a = alloca i32, align 4 + %call = call i32 @_setjmp(%struct.__jmp_buf_tag* getelementptr inbounds ([1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* @_ZL3buf, i32 0, i32 0)) nounwind returns_twice + %cmp = icmp eq i32 0, %call + br i1 %cmp, label %if.then, label %if.end + +if.then: ; preds = %entry + call void @longjmp(%struct.__jmp_buf_tag* getelementptr inbounds ([1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* @_ZL3buf, i32 0, i32 0), i32 1) noreturn nounwind + unreachable + +if.end: ; preds = %entry + call void @_Z10escape_ptrPi(i32* %a) + ret void +} + +declare i32 @_setjmp(%struct.__jmp_buf_tag*) nounwind returns_twice +declare void @longjmp(%struct.__jmp_buf_tag*, i32) noreturn nounwind +declare void @_Z10escape_ptrPi(i32*)