Index: compiler-rt/test/asan/TestCases/Windows/issue64990.cpp
===================================================================
--- /dev/null
+++ compiler-rt/test/asan/TestCases/Windows/issue64990.cpp
@@ -0,0 +1,18 @@
+// Repro for issue #64990: ASan with Windows EH generates __asan_* runtime calls without the required funclet tokens.
+// RUN: %clang_cl_asan %Od %s -EHsc %Fe%t
+// RUN: not %run %t 2>&1 | FileCheck %s
+
+char buff1[6] = "hello";
+char buff2[6] = "hello";
+
+int main(int argc, char **argv) {
+  try {
+    throw 1;
+  } catch (...) {
+    // Make ASan generate a call to __asan_memcpy inside the EH pad.
+    __builtin_memcpy(buff1, buff2 + 3, 6);
+  }
+  return 0;
+}
+
+// CHECK: SUMMARY: AddressSanitizer: global-buffer-overflow {{.*}} in __asan_memcpy
Index: llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
===================================================================
--- llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -43,6 +43,7 @@
 #include "llvm/IR/DebugInfoMetadata.h"
 #include "llvm/IR/DebugLoc.h"
 #include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/EHPersonalities.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/GlobalAlias.h"
 #include "llvm/IR/GlobalValue.h"
@@ -640,6 +641,67 @@
 }
 
 namespace {
+/// Helper RAII class to keep track of the ASan runtime calls inserted during
+/// a pass over a single Function. At end of scope, it detects which calls
+/// landed inside EH funclets and attaches the required "funclet" operand
+/// bundles.
+class RuntimeCallInserter {
+  Function *OwnerFn = nullptr;
+  bool TrackInsertedCalls = false;
+  std::vector<CallInst *> InsertedCalls;
+
+public:
+  RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
+    if (Fn.hasPersonalityFn()) {
+      auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
+      if (isScopedEHPersonality(Personality))
+        TrackInsertedCalls = true;
+    }
+  }
+
+  ~RuntimeCallInserter() {
+    if (!TrackInsertedCalls || InsertedCalls.empty())
+      return;
+
+    DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*OwnerFn);
+    for (CallInst *CI : InsertedCalls) {
+      BasicBlock *BB = CI->getParent();
+      assert(BB && "Instruction doesn't belong to a BasicBlock");
+      assert(BB->getParent() == OwnerFn &&
+             "Instruction doesn't belong to the expected Function!");
+
+      ColorVector &Colors = BlockColors[BB];
+      // A BB could be colorless when it is unreachable, in which case it will
+      // be removed by a later pass anyway.
+      if (Colors.empty())
+        continue;
+      assert(Colors.size() == 1 && "Expected monochromatic BB!");
+
+      BasicBlock *Color = Colors.front();
+      Instruction *EHPad = Color->getFirstNonPHI();
+
+      if (EHPad && EHPad->isEHPad()) {
+        // Replace CI with a clone that carries an added "funclet" operand
+        // bundle pointing at the enclosing EH pad.
+        OperandBundleDef OB("funclet", EHPad);
+        auto *NewCall =
+            CallBase::addOperandBundle(CI, LLVMContext::OB_funclet, OB, CI);
+        NewCall->copyMetadata(*CI);
+        CI->replaceAllUsesWith(NewCall);
+        CI->eraseFromParent();
+      }
+    }
+  }
+
+  CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
+                              ArrayRef<Value *> Args = {},
+                              const Twine &Name = "") {
+    assert(IRB.GetInsertBlock()->getParent() == OwnerFn);
+
+    CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
+    if (TrackInsertedCalls)
+      InsertedCalls.push_back(Inst);
+    return Inst;
+  }
+};
+
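A minimal sketch of how RuntimeCallInserter above is meant to be used at an
instrumentation site (the wrapper function and AsanSomeCallee below are
hypothetical placeholders, not code from this patch): one inserter is created
per instrumented function, every runtime-call emission goes through
createRuntimeCall() instead of IRBuilder::CreateCall(), and the destructor
attaches the "funclet" bundles once all calls are known.

    // Hypothetical instrumentation site; AsanSomeCallee is a placeholder.
    static void instrumentOneSite(Function &F, FunctionCallee AsanSomeCallee,
                                  Instruction *InsertBefore, Value *Addr) {
      RuntimeCallInserter RTCI(F); // tracks calls only for scoped EH personalities
      IRBuilder<> IRB(InsertBefore);
      // Recorded now; if the call ends up inside a funclet, the "funclet"
      // bundle is attached in ~RuntimeCallInserter via colorEHFunclets().
      RTCI.createRuntimeCall(IRB, AsanSomeCallee, {Addr});
    } // ~RuntimeCallInserter runs here and rewrites the tracked calls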
 /// AddressSanitizer: instrument the code in module to find memory bugs.
 struct AddressSanitizer {
@@ -681,12 +743,14 @@
   void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                      InterestingMemoryOperand &O, bool UseCalls,
-                     const DataLayout &DL);
-  void instrumentPointerComparisonOrSubtraction(Instruction *I);
+                     const DataLayout &DL, RuntimeCallInserter &RTCI);
+  void instrumentPointerComparisonOrSubtraction(Instruction *I,
+                                                RuntimeCallInserter &RTCI);
   void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                          Value *Addr, MaybeAlign Alignment,
                          uint32_t TypeStoreSize, bool IsWrite,
-                         Value *SizeArgument, bool UseCalls, uint32_t Exp);
+                         Value *SizeArgument, bool UseCalls, uint32_t Exp,
+                         RuntimeCallInserter &RTCI);
   Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                        Instruction *InsertBefore, Value *Addr,
                                        uint32_t TypeStoreSize, bool IsWrite,
@@ -695,20 +759,22 @@
                                         Instruction *InsertBefore, Value *Addr,
                                         TypeSize TypeStoreSize, bool IsWrite,
                                         Value *SizeArgument, bool UseCalls,
-                                        uint32_t Exp);
+                                        uint32_t Exp,
+                                        RuntimeCallInserter &RTCI);
   void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
                                    Type *IntptrTy, Value *Mask, Value *EVL,
                                    Value *Stride, Instruction *I, Value *Addr,
                                    MaybeAlign Alignment, unsigned Granularity,
                                    Type *OpType, bool IsWrite,
                                    Value *SizeArgument, bool UseCalls,
-                                   uint32_t Exp);
+                                   uint32_t Exp, RuntimeCallInserter &RTCI);
   Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                            Value *ShadowValue, uint32_t TypeStoreSize);
   Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                  bool IsWrite, size_t AccessSizeIndex,
-                                 Value *SizeArgument, uint32_t Exp);
-  void instrumentMemIntrinsic(MemIntrinsic *MI);
+                                 Value *SizeArgument, uint32_t Exp,
+                                 RuntimeCallInserter &RTCI);
+  void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
   Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
   bool suppressInstrumentationSiteForDebug(int &Instrumented);
   bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
@@ -890,6 +956,7 @@
 struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
   Function &F;
   AddressSanitizer &ASan;
+  RuntimeCallInserter &RTCI;
   DIBuilder DIB;
   LLVMContext *C;
   Type *IntptrTy;
@@ -926,10 +993,12 @@
   bool HasReturnsTwiceCall = false;
   bool PoisonStack;
 
-  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
-      : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
-        C(ASan.C), IntptrTy(ASan.IntptrTy),
-        IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
+  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
+                        RuntimeCallInserter &RTCI)
+      : F(F), ASan(ASan), RTCI(RTCI),
+        DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
+        IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)),
+        Mapping(ASan.Mapping),
         PoisonStack(ClStack &&
                     !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {}
@@ -1012,8 +1081,8 @@
                               DynamicAreaOffset);
     }
 
-    IRB.CreateCall(
-        AsanAllocasUnpoisonFunc,
+    RTCI.createRuntimeCall(
+        IRB, AsanAllocasUnpoisonFunc,
         {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
   }
@@ -1229,17 +1298,18 @@
 }
 
 // Instrument memset/memmove/memcpy
-void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
+void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
+                                              RuntimeCallInserter &RTCI) {
   InstrumentationIRBuilder IRB(MI);
   if (isa<MemTransferInst>(MI)) {
-    IRB.CreateCall(
-        isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
+    RTCI.createRuntimeCall(
+        IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
         {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
          IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
   } else if (isa<MemSetInst>(MI)) {
-    IRB.CreateCall(
-        AsanMemset,
+    RTCI.createRuntimeCall(
+        IRB, AsanMemset,
         {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
          IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
@@ -1477,7 +1547,7 @@
 }
 
 void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
-    Instruction *I) {
+    Instruction *I, RuntimeCallInserter &RTCI) {
   IRBuilder<> IRB(I);
   FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
   Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
   for (Value *&i : Param) {
     if (i->getType()->isPointerTy())
       i = IRB.CreatePointerCast(i, IntptrTy);
   }
-  IRB.CreateCall(F, Param);
+  RTCI.createRuntimeCall(IRB, F, Param);
 }
 
 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
                                 Instruction *InsertBefore, Value *Addr,
                                 MaybeAlign Alignment, unsigned Granularity,
                                 TypeSize TypeStoreSize, bool IsWrite,
                                 Value *SizeArgument, bool UseCalls,
-                                uint32_t Exp) {
+                                uint32_t Exp, RuntimeCallInserter &RTCI) {
   // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
   // if the data is properly aligned.
   if (!TypeStoreSize.isScalable()) {
@@ -1508,18 +1578,19 @@
         *Alignment >= FixedSize / 8)
       return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
                                      FixedSize, IsWrite, nullptr, UseCalls,
-                                     Exp);
+                                     Exp, RTCI);
     }
   }
   Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
-                                         IsWrite, nullptr, UseCalls, Exp);
+                                         IsWrite, nullptr, UseCalls, Exp,
+                                         RTCI);
 }
 
 void AddressSanitizer::instrumentMaskedLoadOrStore(
     AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
     Value *EVL, Value *Stride, Instruction *I, Value *Addr,
     MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
-    Value *SizeArgument, bool UseCalls, uint32_t Exp) {
+    Value *SizeArgument, bool UseCalls, uint32_t Exp,
+    RuntimeCallInserter &RTCI) {
   auto *VTy = cast<VectorType>(OpType);
   TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
   auto Zero = ConstantInt::get(IntptrTy, 0);
@@ -1575,15 +1646,16 @@
     } else {
       InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
     }
-    doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(),
-                        InstrumentedAddress, Alignment, Granularity,
-                        ElemTypeSize, IsWrite, SizeArgument, UseCalls, Exp);
+    doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
+                        Alignment, Granularity, ElemTypeSize, IsWrite,
+                        SizeArgument, UseCalls, Exp, RTCI);
   });
 }
 
 void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                                      InterestingMemoryOperand &O, bool UseCalls,
-                                     const DataLayout &DL) {
+                                     const DataLayout &DL,
+                                     RuntimeCallInserter &RTCI) {
   Value *Addr = O.getPtr();
 
   // Optimization experiments.
@@ -1629,11 +1701,11 @@
     instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
                                 O.MaybeStride, O.getInsn(), Addr, O.Alignment,
                                 Granularity, O.OpType, O.IsWrite, nullptr,
-                                UseCalls, Exp);
+                                UseCalls, Exp, RTCI);
   } else {
     doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
-                        Granularity, O.TypeStoreSize, O.IsWrite, nullptr, UseCalls,
-                        Exp);
+                        Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
+                        UseCalls, Exp, RTCI);
   }
 }
@@ -1641,24 +1713,25 @@
                                                  Value *Addr, bool IsWrite,
                                                  size_t AccessSizeIndex,
                                                  Value *SizeArgument,
-                                                 uint32_t Exp) {
+                                                 uint32_t Exp,
+                                                 RuntimeCallInserter &RTCI) {
   InstrumentationIRBuilder IRB(InsertBefore);
   Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
   CallInst *Call = nullptr;
   if (SizeArgument) {
     if (Exp == 0)
-      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
-                            {Addr, SizeArgument});
+      Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
+                                    {Addr, SizeArgument});
     else
-      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
-                            {Addr, SizeArgument, ExpVal});
+      Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
+                                    {Addr, SizeArgument, ExpVal});
   } else {
     if (Exp == 0)
-      Call =
-          IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
+      Call = RTCI.createRuntimeCall(
+          IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
     else
-      Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
-                            {Addr, ExpVal});
+      Call = RTCI.createRuntimeCall(
+          IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
   }
 
   Call->setCannotMerge();
@@ -1711,7 +1784,8 @@
                                          MaybeAlign Alignment,
                                          uint32_t TypeStoreSize, bool IsWrite,
                                          Value *SizeArgument, bool UseCalls,
-                                         uint32_t Exp) {
+                                         uint32_t Exp,
+                                         RuntimeCallInserter &RTCI) {
   if (TargetTriple.isAMDGPU()) {
     InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
                                            TypeStoreSize, IsWrite, SizeArgument);
@@ -1736,11 +1810,12 @@
   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
   if (UseCalls) {
     if (Exp == 0)
-      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
-                     AddrLong);
+      RTCI.createRuntimeCall(
+          IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
     else
-      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
-                     {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
+      RTCI.createRuntimeCall(
+          IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
+          {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
     return;
   }
@@ -1779,8 +1854,8 @@
     CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
   }
 
-  Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
-                                         AccessSizeIndex, SizeArgument, Exp);
+  Instruction *Crash = generateCrashCode(
+      CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
   if (OrigIns->getDebugLoc())
     Crash->setDebugLoc(OrigIns->getDebugLoc());
 }
@@ -1790,8 +1865,9 @@
 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
 // to report the actual access size.
 void AddressSanitizer::instrumentUnusualSizeOrAlignment(
-    Instruction *I, Instruction *InsertBefore, Value *Addr, TypeSize TypeStoreSize,
-    bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) {
+    Instruction *I, Instruction *InsertBefore, Value *Addr,
+    TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
+    uint32_t Exp, RuntimeCallInserter &RTCI) {
   InstrumentationIRBuilder IRB(InsertBefore);
   Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
   Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
@@ -1799,19 +1875,21 @@
   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
   if (UseCalls) {
     if (Exp == 0)
-      IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
-                     {AddrLong, Size});
+      RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
+                             {AddrLong, Size});
     else
-      IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
-                     {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
+      RTCI.createRuntimeCall(
+          IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
+          {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
   } else {
     Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
     Value *LastByte = IRB.CreateIntToPtr(
        IRB.CreateAdd(AddrLong, SizeMinusOne), Addr->getType());
-    instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp);
+    instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
+                      RTCI);
     instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
-                      Exp);
+                      Exp, RTCI);
   }
 }
@@ -2821,6 +2899,8 @@
 
   FunctionStateRAII CleanupObj(this);
 
+  RuntimeCallInserter RTCI(F);
+
   FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
 
   // We can't instrument allocas used with llvm.localescape. Only static allocas
@@ -2903,27 +2983,27 @@
   for (auto &Operand : OperandsToInstrument) {
     if (!suppressInstrumentationSiteForDebug(NumInstrumented))
       instrumentMop(ObjSizeVis, Operand, UseCalls,
-                    F.getParent()->getDataLayout());
+                    F.getParent()->getDataLayout(), RTCI);
     FunctionModified = true;
   }
   for (auto *Inst : IntrinToInstrument) {
     if (!suppressInstrumentationSiteForDebug(NumInstrumented))
-      instrumentMemIntrinsic(Inst);
+      instrumentMemIntrinsic(Inst, RTCI);
     FunctionModified = true;
   }
 
-  FunctionStackPoisoner FSP(F, *this);
+  FunctionStackPoisoner FSP(F, *this, RTCI);
   bool ChangedStack = FSP.runOnFunction();
 
   // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
   // See e.g. https://github.com/google/sanitizers/issues/37
   for (auto *CI : NoReturnCalls) {
     IRBuilder<> IRB(CI);
-    IRB.CreateCall(AsanHandleNoReturnFunc, {});
+    RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
   }
 
   for (auto *Inst : PointerComparisonsOrSubtracts) {
-    instrumentPointerComparisonOrSubtraction(Inst);
+    instrumentPointerComparisonOrSubtraction(Inst, RTCI);
     FunctionModified = true;
   }
@@ -3068,9 +3148,10 @@
     if (j - i >= ClMaxInlinePoisoningSize) {
       copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
-      IRB.CreateCall(AsanSetShadowFunc[Val],
-                     {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
-                      ConstantInt::get(IntptrTy, j - i)});
+      RTCI.createRuntimeCall(
+          IRB, AsanSetShadowFunc[Val],
+          {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
+           ConstantInt::get(IntptrTy, j - i)});
       Done = j;
     }
   }
@@ -3357,8 +3438,8 @@
     StackMallocIdx = StackMallocSizeClass(LocalStackSize);
     assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
     Value *FakeStackValue =
-        IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
-                         ConstantInt::get(IntptrTy, LocalStackSize));
+        RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
+                               ConstantInt::get(IntptrTy, LocalStackSize));
     IRB.SetInsertPoint(InsBefore);
     FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
                           ConstantInt::get(IntptrTy, 0));
@@ -3368,7 +3449,8 @@
     // void *LocalStackBase = (FakeStack) ? FakeStack :
     //                        alloca(LocalStackSize);
     StackMallocIdx = StackMallocSizeClass(LocalStackSize);
-    FakeStack = IRB.CreateCall(AsanStackMallocFunc[StackMallocIdx],
+    FakeStack =
+        RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
                                ConstantInt::get(IntptrTy, LocalStackSize));
   }
   Value *NoFakeStack =
@@ -3503,8 +3585,8 @@
         IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
   } else {
     // For larger frames call __asan_stack_free_*.
-    IRBPoison.CreateCall(
-        AsanStackFreeFunc[StackMallocIdx],
+    RTCI.createRuntimeCall(
+        IRBPoison, AsanStackFreeFunc[StackMallocIdx],
         {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
   }
@@ -3525,8 +3607,8 @@
   // For now just insert the call to ASan runtime.
   Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
   Value *SizeArg = ConstantInt::get(IntptrTy, Size);
-  IRB.CreateCall(
-      DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
+  RTCI.createRuntimeCall(
+      IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
      {AddrArg, SizeArg});
 }
@@ -3587,7 +3669,7 @@
                      ConstantInt::get(IntptrTy, Alignment.value()));
 
   // Insert __asan_alloca_poison call for new created alloca.
-  IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});
+  RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
 
   // Store the last alloca's address to DynamicAllocaLayout. We'll need this
   // for unpoisoning stuff.
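To help map the IR test below back to source level, this is roughly the C++
shape it models (a hypothetical illustration, not part of the patch): an
object whose destructor runs during unwinding forces a cleanup funclet, and
the memory operations inside that cleanup are what ASan rewrites into
__asan_* runtime calls that must carry the funclet token.

    // Hypothetical C++ analogue of the cleanup-funclet scenario.
    struct Guard {
      char buf[16];
      ~Guard() {
        // On x86_64-pc-windows-msvc this destructor body is lowered into a
        // cleanup funclet during unwinding; ASan rewrites the memset
        // intrinsic into a __asan_memset call, which then needs a
        // [ "funclet"(token %pad) ] operand bundle.
        __builtin_memset(buf, 0, sizeof(buf));
      }
    };

    void mayThrow(); // assumed to potentially throw

    void funcletPersonality() {
      Guard g;
      mayThrow(); // if this throws, ~Guard() runs inside a cleanup funclet
    }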
Index: llvm/test/Instrumentation/AddressSanitizer/asan-funclet.ll
===================================================================
--- /dev/null
+++ llvm/test/Instrumentation/AddressSanitizer/asan-funclet.ll
@@ -0,0 +1,128 @@
+; Test that runtime calls generated by ASan get the appropriate funclet tags.
+
+; RUN: opt -S -passes=asan -asan-max-inline-poisoning-size=0 \
+; RUN:   -asan-detect-invalid-pointer-cmp -asan-detect-invalid-pointer-sub -asan-use-after-scope < %s | FileCheck %s --check-prefixes=CHECK,CHECK-INLINE
+; RUN: opt -S -passes=asan -asan-max-inline-poisoning-size=0 -asan-instrumentation-with-call-threshold=0 \
+; RUN:   -asan-detect-invalid-pointer-cmp -asan-detect-invalid-pointer-sub -asan-use-after-scope < %s | FileCheck %s --check-prefixes=CHECK,CHECK-OUTLINE
+
+; REQUIRES: x86-registered-target
+
+target triple = "x86_64-pc-windows-msvc"
+
+declare void @DeInit(ptr)
+declare void @MayThrowFunc()
+declare void @NoReturn(ptr, ptr) noreturn
+
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+
+declare i32 @__CxxFrameHandler3(...)
+declare i32 @dummyPersonality(...)
+
+define void @FuncletPersonality(ptr %ptrParam) sanitize_address personality ptr @__CxxFrameHandler3 {
+; CHECK-LABEL: @FuncletPersonality
+; CHECK: ehcleanup:
+; CHECK: [[CleanupPad1:%[^ ]+]] = cleanuppad within none []
+; CHECK-INLINE: call void @__asan_unpoison_stack_memory{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+; CHECK-INLINE: call void @__asan_report_store1{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+; CHECK-INLINE: call void @__asan_poison_stack_memory{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+; CHECK: call void @DeInit({{.*}}) [ "funclet"(token [[CleanupPad1]]) ]
+; CHECK: call ptr @__asan_memset{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+; CHECK: call ptr @__asan_memcpy{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+; CHECK: call ptr @__asan_memmove{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+; CHECK: call void @__sanitizer_ptr_cmp{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+; CHECK: call void @__sanitizer_ptr_sub{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+
+; CHECK-OUTLINE: call void @__asan_storeN{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+
+; CHECK: noreturncall:
+; CHECK: call void @__asan_handle_no_return{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+; CHECK: ehexit:
+; CHECK: call void @__asan_allocas_unpoison{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+; CHECK: call void @__asan_stack_free_{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+; CHECK: call void @__asan_set_shadow_{{.*}} [ "funclet"(token [[CleanupPad1]]) ]
+entry:
+  ; Local alloca large enough for ASan to generate a __asan_stack_free_#() call
+  %largeObj = alloca [2048 x i32], align 16
+  %tmpInt1 = alloca i32, align 4
+  %tmpInt2 = alloca i32, align 4
+  %tmpInt3 = alloca i32, align 4
+
+  ; Create %lifetimeInt and %lifetimeArr, and manage their lifetimes,
+  ; to make ASan generate stack poisoning calls
+  %lifetimeInt = alloca i32, align 4
+  call void @llvm.lifetime.start.p0(i64 4, ptr %lifetimeInt)
+  store volatile i8 0, ptr %lifetimeInt
+  call void @llvm.lifetime.end.p0(i64 4, ptr %lifetimeInt)
+  %lifetimeArr = alloca i32, align 4
+
+  ; Dynamic alloca to generate a @__asan_allocas_unpoison call in ehcleanup
+  %tmpVolatilei64 = alloca i64, align 8
+  store volatile i64 0, ptr %tmpVolatilei64, align 8
+  %tmpCopyi64 = load i64, ptr %tmpVolatilei64, align 8
+  %tmpVolatilei8 = alloca i8, i64 %tmpCopyi64, align 32
+  store volatile i8 0, ptr %tmpVolatilei8
+
+  invoke void @MayThrowFunc()
+          to label %invoke.cont unwind label %ehcleanup
+
+invoke.cont:                                      ; preds = %entry
+  call void @DeInit(ptr %largeObj)
+  ret void
+
+ehcleanup:                                        ; preds = %entry
+  %0 = cleanuppad within none []
+
+  ; Make ASan add a call to __asan_unpoison_stack_memory
+  call void @llvm.lifetime.start.p0(i64 4, ptr %lifetimeArr)
+  ; Make ASan add a call to __asan_report_store1
+  store volatile i8 0, ptr %lifetimeArr
+  ; Make ASan add a call to __asan_poison_stack_memory
+  call void @llvm.lifetime.end.p0(i64 4, ptr %lifetimeArr)
+
+  call void @DeInit(ptr %largeObj) [ "funclet"(token %0) ]
+  call void @llvm.memset.p0.i64(ptr align 4 %tmpInt1, i8 0, i64 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmpInt2, ptr align 4 %tmpInt1, i64 4, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr align 4 %tmpInt3, ptr align 4 %tmpInt1, i64 4, i1 false)
+  %cmpAddr = icmp ule ptr %tmpInt1, %tmpInt2
+  %addr1 = ptrtoint ptr %tmpInt1 to i64
+  %addr2 = ptrtoint ptr %tmpInt2 to i64
+  %subAddr = sub i64 %addr1, %addr2
+
+  store i64 0, ptr %ptrParam, align 1
+
+  %cmp = icmp ne i64 %subAddr, 0
+  br i1 %cmp, label %ehexit, label %noreturncall
+
+noreturncall:
+  call void @NoReturn(ptr null, ptr null) noreturn [ "funclet"(token %0) ]
+  unreachable
+
+ehexit:
+  cleanupret from %0 unwind to caller
+
+; Ensure an unreachable basic block doesn't make the compiler assert;
+; unreachable blocks are a special case in the funclet coloring computation.
+nopredecessor:
+  call void @llvm.memset.p0.i64(ptr align 4 %tmpInt1, i8 0, i64 4, i1 false)
+  unreachable
+}
+
+; Non-Windows personality: ensure no funclet bundle gets attached to the ASan runtime calls.
+define void @OtherPersonality(ptr %ptrParam) sanitize_address personality ptr @dummyPersonality {
+; CHECK-LABEL: @OtherPersonality
+; CHECK: ehcleanup:
+; CHECK: call ptr @__asan_memset
+; CHECK-NOT: funclet
+entry:
+  %tmpInt = alloca i32, align 4
+  invoke void @MayThrowFunc()
+          to label %invoke.cont unwind label %ehcleanup
+
+invoke.cont:                                      ; preds = %entry
+  ret void
+
+ehcleanup:                                        ; preds = %entry
+  %0 = cleanuppad within none []
+  call void @llvm.memset.p0.i64(ptr align 4 %tmpInt, i8 0, i64 4, i1 false)
+  cleanupret from %0 unwind to caller
+}
Index: llvm/test/Instrumentation/AddressSanitizer/localescape.ll
===================================================================
--- llvm/test/Instrumentation/AddressSanitizer/localescape.ll
+++ llvm/test/Instrumentation/AddressSanitizer/localescape.ll
@@ -14,10 +14,10 @@
 declare ptr @llvm.localrecover(ptr, ptr, i32)
 declare void @llvm.localescape(...) #1
 
-declare i32 @_except_handler3(...)
+declare i32 @__gcc_personality_v0(...)
 
 declare void @may_throw(ptr %r)
 
-define i32 @main() sanitize_address personality ptr @_except_handler3 {
+define i32 @main() sanitize_address personality ptr @__gcc_personality_v0 {
 entry:
   %r = alloca i32, align 4
   %__exception_code = alloca i32, align 4
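On the localescape.ll change above: _except_handler3 is classified as a scoped
EH personality, so with this patch ASan would also impose funclet operand
bundle requirements on that test; switching to the non-scoped
__gcc_personality_v0 presumably keeps the test focused on llvm.localescape.
The gating condition mirrors the RuntimeCallInserter constructor; a minimal
sketch, assuming the LLVM headers named in the patch (needsFuncletBundles is a
hypothetical name, not part of the patch):

    #include "llvm/IR/EHPersonalities.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    // True iff runtime calls inserted into F may need "funclet" operand
    // bundles, i.e. F uses a scoped (Windows funclet-based) EH personality.
    static bool needsFuncletBundles(const Function &F) {
      if (!F.hasPersonalityFn())
        return false;
      return isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn()));
    }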