Index: lib/Transforms/Instrumentation/AddressSanitizer.cpp
===================================================================
--- lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -24,6 +24,9 @@
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/CallSite.h"
 #include "llvm/IR/DIBuilder.h"
 #include "llvm/IR/DataLayout.h"
@@ -185,6 +188,9 @@
     cl::init(true));
 static cl::opt<bool> ClOptGlobals("asan-opt-globals",
     cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true));
+static cl::opt<bool> ClOptStack("asan-opt-stack",
+    cl::desc("Don't instrument scalar stack variables"), cl::Hidden,
+    cl::init(false));
 static cl::opt<bool> ClCheckLifetime("asan-check-lifetime",
     cl::desc("Use llvm.lifetime intrinsics to insert extra checks"),
@@ -211,10 +217,10 @@
 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
 STATISTIC(NumInstrumentedDynamicAllocas,
           "Number of instrumented dynamic allocas");
-STATISTIC(NumOptimizedAccessesToGlobalArray,
-          "Number of optimized accesses to global arrays");
 STATISTIC(NumOptimizedAccessesToGlobalVar,
           "Number of optimized accesses to global vars");
+STATISTIC(NumOptimizedAccessesToStackVar,
+          "Number of optimized accesses to stack vars");

 namespace {
 /// Frontend-provided metadata for source location.
@@ -375,6 +381,8 @@
   }
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.addRequired();
+    AU.addRequired<TargetLibraryInfoWrapperPass>();
+    AU.addRequired();
   }
   uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
     Type *Ty = AI->getAllocatedType();
@@ -386,8 +394,10 @@
   /// If it is an interesting memory access, return the PointerOperand
   /// and set IsWrite/Alignment. Otherwise return nullptr.
   Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
-                                   unsigned *Alignment) const;
-  void instrumentMop(Instruction *I, bool UseCalls);
+                                   uint64_t *TypeSize,
+                                   unsigned *Alignment) const;
+  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
+                     bool UseCalls);
   void instrumentPointerComparisonOrSubtraction(Instruction *I);
   void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                          Value *Addr, uint32_t TypeSize, bool IsWrite,
@@ -411,6 +421,8 @@
   bool LooksLikeCodeInBug11395(Instruction *I);
   bool GlobalIsLinkerInitialized(GlobalVariable *G);
+  bool isInboundsAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
+                        uint64_t TypeSize) const;

   LLVMContext *C;
   const DataLayout *DL;
@@ -791,6 +803,7 @@
 /// If it is an interesting memory access, return the PointerOperand
 /// and set IsWrite/Alignment. Otherwise return nullptr.
 Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
                                                    bool *IsWrite,
+                                                   uint64_t *TypeSize,
                                                    unsigned *Alignment) const {
   // Skip memory accesses inserted by another instrumentation.
   if (I->getMetadata("nosanitize")) return nullptr;
@@ -800,21 +813,26 @@
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
     if (!ClInstrumentReads) return nullptr;
     *IsWrite = false;
+    *TypeSize = DL->getTypeStoreSizeInBits(LI->getType());
     *Alignment = LI->getAlignment();
     PtrOperand = LI->getPointerOperand();
   } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
     if (!ClInstrumentWrites) return nullptr;
     *IsWrite = true;
+    *TypeSize = DL->getTypeStoreSizeInBits(SI->getValueOperand()->getType());
     *Alignment = SI->getAlignment();
     PtrOperand = SI->getPointerOperand();
   } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
     if (!ClInstrumentAtomics) return nullptr;
     *IsWrite = true;
+    *TypeSize = DL->getTypeStoreSizeInBits(RMW->getValOperand()->getType());
     *Alignment = 0;
     PtrOperand = RMW->getPointerOperand();
   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
     if (!ClInstrumentAtomics) return nullptr;
     *IsWrite = true;
+    *TypeSize = DL->getTypeStoreSizeInBits(
+        XCHG->getCompareOperand()->getType());
     *Alignment = 0;
     PtrOperand = XCHG->getPointerOperand();
   }
@@ -871,38 +889,34 @@
   IRB.CreateCall2(F, Param[0], Param[1]);
 }

-void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
+void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
+                                     Instruction *I, bool UseCalls) {
   bool IsWrite = false;
   unsigned Alignment = 0;
-  Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment);
+  uint64_t TypeSize = 0;
+  Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment);
   assert(Addr);
+
   if (ClOpt && ClOptGlobals) {
-    if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
-      // If initialization order checking is disabled, a simple access to a
-      // dynamically initialized global is always valid.
-      if (!ClInitializers || GlobalIsLinkerInitialized(G)) {
-        NumOptimizedAccessesToGlobalVar++;
-        return;
-      }
-    }
-    ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr);
-    if (CE && CE->isGEPWithNoNotionalOverIndexing()) {
-      if (GlobalVariable *G = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
-        if (CE->getOperand(1)->isNullValue() && GlobalIsLinkerInitialized(G)) {
-          NumOptimizedAccessesToGlobalArray++;
-          return;
-        }
-      }
+    // If initialization order checking is disabled, a simple access to a
+    // dynamically initialized global is always valid.
+    GlobalVariable *G =
+        dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, nullptr));
+    if (G != nullptr && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
+        isInboundsAccess(ObjSizeVis, Addr, TypeSize)) {
+      NumOptimizedAccessesToGlobalVar++;
+      return;
     }
   }

-  Type *OrigPtrTy = Addr->getType();
-  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
-
-  assert(OrigTy->isSized());
-  uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
-
-  assert((TypeSize % 8) == 0);
+  if (ClOpt && ClOptStack) {
+    // A direct inbounds access to a stack variable is always valid.
+    if (isa<AllocaInst>(GetUnderlyingObject(Addr, nullptr)) &&
+        isInboundsAccess(ObjSizeVis, Addr, TypeSize)) {
+      NumOptimizedAccessesToStackVar++;
+      return;
+    }
+  }

   if (IsWrite) NumInstrumentedWrites++;
@@ -928,7 +942,7 @@
   } else {
     Value *LastByte = IRB.CreateIntToPtr(
         IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
-        OrigPtrTy);
+        Addr->getType());
     instrumentAddress(I, I, Addr, 8, IsWrite, Size, false);
     instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false);
   }
@@ -1460,6 +1474,7 @@
   int NumAllocas = 0;
   bool IsWrite;
   unsigned Alignment;
+  uint64_t TypeSize;

   // Fill the set of memory operations to instrument.
   for (auto &BB : F) {
@@ -1469,7 +1484,8 @@
     for (auto &Inst : BB) {
       if (LooksLikeCodeInBug11395(&Inst)) return false;
       if (Value *Addr =
-              isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
+              isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
+                                        &Alignment)) {
         if (ClOpt && ClOptSameTemp) {
           if (!TempsToInstrument.insert(Addr).second)
             continue; // We've seen this temp in the current BB.
@@ -1504,13 +1520,18 @@
       ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
     UseCalls = true;

+  const TargetLibraryInfo *TLI =
+      &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+  ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(),
+                                     /*RoundToAlign=*/true);
+
   // Instrument.
   int NumInstrumented = 0;
   for (auto Inst : ToInstrument) {
     if (ClDebugMin < 0 || ClDebugMax < 0 ||
         (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
-      if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
-        instrumentMop(Inst, UseCalls);
+      if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment))
+        instrumentMop(ObjSizeVis, Inst, UseCalls);
       else
         instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
     }
@@ -2041,3 +2062,21 @@
   AI->eraseFromParent();
   NumInstrumentedDynamicAllocas++;
 }
+
+// isInboundsAccess returns true if Addr is always inbounds with respect to its
+// base object. For example, it is a field access or an array access with a
+// constant inbounds index.
+bool AddressSanitizer::isInboundsAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
+                                        Value *Addr, uint64_t TypeSize) const {
+  SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
+  if (!ObjSizeVis.bothKnown(SizeOffset))
+    return false;
+  int64_t Size = SizeOffset.first.getSExtValue();
+  int64_t Offset = SizeOffset.second.getSExtValue();
+  // Three checks are required to ensure safety:
+  // . Offset >= 0  (since the offset is given from the base ptr)
+  // . Size >= Offset  (unsigned)
+  // . Size - Offset >= NeededSize  (unsigned)
+  return Offset >= 0 && Size >= Offset &&
+         uint64_t(Size - Offset) >= TypeSize / 8;
+}
Index: test/Instrumentation/AddressSanitizer/instrument-stack.ll
===================================================================
--- test/Instrumentation/AddressSanitizer/instrument-stack.ll
+++ test/Instrumentation/AddressSanitizer/instrument-stack.ll
@@ -0,0 +1,48 @@
+; This test checks that we do not instrument direct inbounds stack accesses.
+; RUN: opt < %s -asan -asan-module -asan-opt-stack -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+;@sink = global i32* null, align 4
+
+; Ignore direct inbounds stack access.
+define void @foo() uwtable sanitize_address {
+entry:
+  %a = alloca i32, align 4
+  store i32 42, i32* %a, align 4
+  ret void
+; CHECK-LABEL: define void @foo
+; CHECK-NOT: __asan_report
+; CHECK: ret void
+}
+
+; Don't ignore dynamic indexing.
+define void @baz(i64 %i) sanitize_address {
+entry:
+  %a = alloca [10 x i32], align 4
+  %e = getelementptr inbounds [10 x i32], [10 x i32]* %a, i32 0, i64 %i
+  store i32 42, i32* %e, align 4
+  ret void
+; CHECK-LABEL: define void @baz
+; CHECK: __asan_report
+; CHECK: ret void
+}
+
+; Don't ignore out-of-bounds constant indexing.
+define void @bar() sanitize_address {
+entry:
+  %a = alloca [10 x i32], align 4
+  %e = getelementptr inbounds [10 x i32], [10 x i32]* %a, i32 0, i64 12
+  store i32 42, i32* %e, align 4
+  ret void
+; CHECK-LABEL: define void @bar
+; CHECK: __asan_report
+; CHECK: ret void
+}
+
+define void @endoftests() sanitize_address {
+entry:
+  ret void
+; CHECK-LABEL: define void @endoftests
+}
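
A note on the reworked globals path (an editorial illustration, not part of the patch): instead of pattern-matching a bare GlobalVariable or a ConstantExpr GEP with no notional over-indexing, the new code strips casts and GEPs with GetUnderlyingObject and then asks isInboundsAccess whether the (Size, Offset) pair computed by ObjectSizeOffsetVisitor proves the access in bounds. The hypothetical test below, written in the style of instrument-stack.ll above, sketches the kind of access this now covers: for @g, Size is 40 bytes, Offset is 20, and the store needs 4, so all three checks in isInboundsAccess pass and no __asan_report call is emitted (assuming initialization-order checking is disabled or the global counts as linker-initialized).

; Hypothetical example only; this is not part of the patch's test suite.
@g = global [10 x i32] zeroinitializer, align 4

define void @global_const_index() sanitize_address {
entry:
  ; Constant inbounds GEP: GetUnderlyingObject resolves the address to @g,
  ; ObjectSizeOffsetVisitor computes (Size, Offset) = (40, 20), and the
  ; 4-byte store fits, so -asan-opt-globals leaves it uninstrumented.
  %e = getelementptr inbounds [10 x i32], [10 x i32]* @g, i32 0, i64 5
  store i32 42, i32* %e, align 4
  ret void
}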