diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -26,6 +26,7 @@
 #include "llvm/ADT/Triple.h"
 #include "llvm/ADT/Twine.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/StackSafetyAnalysis.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/BinaryFormat/MachO.h"
@@ -47,6 +48,7 @@
 #include "llvm/IR/GlobalVariable.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstIterator.h"
 #include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/Instruction.h"
@@ -211,6 +213,11 @@
     "asan-instrument-writes", cl::desc("instrument write instructions"),
     cl::Hidden, cl::init(true));
 
+static cl::opt<bool>
+    ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(false),
+                     cl::desc("Use Stack Safety analysis results"),
+                     cl::Optional);
+
 static cl::opt<bool> ClInstrumentAtomics(
     "asan-instrument-atomics",
     cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
@@ -647,6 +654,7 @@
 /// AddressSanitizer: instrument the code in module to find memory bugs.
 struct AddressSanitizer {
   AddressSanitizer(Module &M, const GlobalsMetadata *GlobalsMD,
+                   const StackSafetyGlobalInfo *SSGI,
                    bool CompileKernel = false, bool Recover = false,
                    bool UseAfterScope = false,
                    AsanDetectStackUseAfterReturnMode UseAfterReturn =
@@ -657,7 +665,7 @@
         UseAfterScope(UseAfterScope || ClUseAfterScope),
         UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                             : UseAfterReturn),
-        GlobalsMD(*GlobalsMD) {
+        GlobalsMD(*GlobalsMD), SSGI(SSGI) {
     C = &(M.getContext());
     LongSize = M.getDataLayout().getPointerSizeInBits();
     IntptrTy = Type::getIntNTy(*C, LongSize);
@@ -686,7 +694,7 @@
   /// Check if we want (and can) handle this alloca.
   bool isInterestingAlloca(const AllocaInst &AI);
 
-  bool ignoreAccess(Value *Ptr);
+  bool ignoreAccess(Instruction *Inst, Value *Ptr);
   void getInterestingMemoryOperands(
       Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
 
@@ -771,6 +779,7 @@
   FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
   Value *LocalDynamicShadow = nullptr;
   const GlobalsMetadata &GlobalsMD;
+  const StackSafetyGlobalInfo *SSGI;
   DenseMap<const AllocaInst *, bool> ProcessedAllocas;
 
   FunctionCallee AMDGPUAddressShared;
@@ -797,16 +806,22 @@
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.addRequired<ASanGlobalsMetadataWrapperPass>();
+    if (ClUseStackSafety)
+      AU.addRequired<StackSafetyGlobalInfoWrapperPass>();
     AU.addRequired<TargetLibraryInfoWrapperPass>();
   }
 
   bool runOnFunction(Function &F) override {
     GlobalsMetadata &GlobalsMD =
         getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
+    const StackSafetyGlobalInfo *const SSGI =
+        ClUseStackSafety
+            ? &getAnalysis<StackSafetyGlobalInfoWrapperPass>().getResult()
+            : nullptr;
     const TargetLibraryInfo *TLI =
         &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
-    AddressSanitizer ASan(*F.getParent(), &GlobalsMD, CompileKernel, Recover,
-                          UseAfterScope, UseAfterReturn);
+    AddressSanitizer ASan(*F.getParent(), &GlobalsMD, SSGI, CompileKernel,
+                          Recover, UseAfterScope, UseAfterReturn);
     return ASan.instrumentFunction(F, TLI);
   }
 
@@ -1260,8 +1275,9 @@
   Module &M = *F.getParent();
   if (auto *R = MAMProxy.getCachedResult<ASanGlobalsMetadataAnalysis>(M)) {
     const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
-    AddressSanitizer Sanitizer(M, R, Options.CompileKernel, Options.Recover,
-                               Options.UseAfterScope, Options.UseAfterReturn);
+    AddressSanitizer Sanitizer(M, R, nullptr, Options.CompileKernel,
+                               Options.Recover, Options.UseAfterScope,
+                               Options.UseAfterReturn);
     if (Sanitizer.instrumentFunction(F, TLI))
       return PreservedAnalyses::none();
     return PreservedAnalyses::all();
@@ -1307,10 +1323,12 @@
                             UseOdrIndicator, DestructorKind);
   bool Modified = false;
   auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+  const StackSafetyGlobalInfo *const SSGI =
+      ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
+  AddressSanitizer FunctionSanitizer(M, &GlobalsMD, SSGI, Options.CompileKernel,
+                                     Options.Recover, Options.UseAfterScope,
+                                     Options.UseAfterReturn);
   for (Function &F : M) {
-    AddressSanitizer FunctionSanitizer(M, &GlobalsMD, Options.CompileKernel,
-                                       Options.Recover, Options.UseAfterScope,
-                                       Options.UseAfterReturn);
     const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
     Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
   }
@@ -1330,6 +1348,7 @@
     "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
     false)
 INITIALIZE_PASS_DEPENDENCY(ASanGlobalsMetadataWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(StackSafetyGlobalInfoWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
 INITIALIZE_PASS_END(
     AddressSanitizerLegacyPass, "asan",
@@ -1468,7 +1487,7 @@
   return IsInteresting;
 }
 
-bool AddressSanitizer::ignoreAccess(Value *Ptr) {
+bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
   // Instrument accesses from different address spaces only for AMDGPU.
   Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
   if (PtrTy->getPointerAddressSpace() != 0 &&
@@ -1489,6 +1508,10 @@
     if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
       return true;
 
+  if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
+      findAllocaForValue(Ptr))
+    return true;
+
   return false;
 }
 
@@ -1503,22 +1526,22 @@
     return;
 
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
-    if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
+    if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
      return;
     Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                              LI->getType(), LI->getAlign());
   } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
-    if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
+    if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
      return;
     Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                              SI->getValueOperand()->getType(), SI->getAlign());
   } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
-    if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
+    if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
      return;
     Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                              RMW->getValOperand()->getType(), None);
   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
-    if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
+    if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
      return;
     Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                              XCHG->getCompareOperand()->getType(), None);
@@ -1533,7 +1556,7 @@
         return;
 
       auto BasePtr = CI->getOperand(OpOffset);
-      if (ignoreAccess(BasePtr))
+      if (ignoreAccess(I, BasePtr))
        return;
       auto Ty = cast<PointerType>(BasePtr->getType())->getElementType();
       MaybeAlign Alignment = Align(1);
@@ -1545,7 +1568,7 @@
     } else {
       for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
         if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
-            ignoreAccess(CI->getArgOperand(ArgNo)))
+            ignoreAccess(I, CI->getArgOperand(ArgNo)))
           continue;
         Type *Ty = CI->getParamByValType(ArgNo);
         Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
diff --git a/llvm/test/Instrumentation/AddressSanitizer/asan-stack-safety.ll b/llvm/test/Instrumentation/AddressSanitizer/asan-stack-safety.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/asan-stack-safety.ll
@@ -0,0 +1,19 @@
+; REQUIRES: x86-registered-target
+
+; RUN: opt < %s -S -enable-new-pm=0 -asan-instrumentation-with-call-threshold=0 -asan \
+; RUN:     -asan-use-stack-safety=0 -o - | FileCheck %s --check-prefixes=NOSAFETY
+; RUN: opt < %s -S -enable-new-pm=0 -asan-instrumentation-with-call-threshold=0 -asan \
+; RUN:     -asan-use-stack-safety=1 -o - | FileCheck %s --check-prefixes=SAFETY
+; RUN: opt < %s -S -enable-new-pm=1 -asan-instrumentation-with-call-threshold=0 \
+; RUN:     -passes='asan-pipeline' -asan-use-stack-safety=0 -o - | FileCheck %s --check-prefixes=NOSAFETY
+; RUN: opt < %s -S -enable-new-pm=1 -asan-instrumentation-with-call-threshold=0 \
+; RUN:     -passes='asan-pipeline' -asan-use-stack-safety=1 -o - | FileCheck %s --check-prefixes=SAFETY
+; NOSAFETY: call void @__asan_load1
+; SAFETY-NOT: call void @__asan_load1
+
+define i32 @stack-safety() sanitize_address {
+  %buf = alloca [10 x i8], align 1
+  %arrayidx = getelementptr inbounds [10 x i8], [10 x i8]* %buf, i64 0, i64 0
+  %1 = load i8, i8* %arrayidx, align 1
+  ret i32 0
+}
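Note (illustrative sketch, not part of the patch above): the new test only covers the case where StackSafety can prove the access in bounds and the __asan_load1 check is dropped. For the opposite direction, assuming StackSafetyGlobalInfo cannot bound an access whose index is a runtime value, a function like the one below (name and comments are hypothetical, not taken from the patch) is expected to keep its instrumentation, i.e. a call to @__asan_load1 should still appear with both -asan-use-stack-safety=0 and -asan-use-stack-safety=1:

define i8 @load_unknown_index(i64 %i) sanitize_address {
  %buf = alloca [10 x i8], align 1
  ; the index is a runtime value, so the access range is not statically provable
  %idx = getelementptr inbounds [10 x i8], [10 x i8]* %buf, i64 0, i64 %i
  %v = load i8, i8* %idx, align 1
  ret i8 %v
}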