diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -98,7 +98,7 @@ static const uint64_t kDefaultShadowOffset64 = 1ULL << 44; static const uint64_t kDynamicShadowSentinel = std::numeric_limits::max(); -static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G. +static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G. static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL; static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000; static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44; @@ -120,8 +120,8 @@ // The shadow memory space is dynamically allocated. static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel; -static const size_t kMinStackMallocSize = 1 << 6; // 64B -static const size_t kMaxStackMallocSize = 1 << 16; // 64K +static const size_t kMinStackMallocSize = 1 << 6; // 64B +static const size_t kMaxStackMallocSize = 1 << 16; // 64K static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3; static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E; @@ -147,8 +147,7 @@ const char kAsanHandleNoReturnName[] = "__asan_handle_no_return"; static const int kMaxAsanStackMallocSizeClass = 10; const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_"; -const char kAsanStackMallocAlwaysNameTemplate[] = - "__asan_stack_malloc_always_"; +const char kAsanStackMallocAlwaysNameTemplate[] = "__asan_stack_malloc_always_"; const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_"; const char kAsanGenPrefix[] = "___asan_gen_"; const char kODRGenPrefix[] = "__odr_asan_gen_"; @@ -188,28 +187,30 @@ // Command-line flags. 
-static cl::opt ClEnableKasan( - "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), - cl::Hidden, cl::init(false)); +static cl::opt + ClEnableKasan("asan-kernel", + cl::desc("Enable KernelAddressSanitizer instrumentation"), + cl::Hidden, cl::init(false)); -static cl::opt ClRecover( - "asan-recover", - cl::desc("Enable recovery mode (continue-after-error)."), - cl::Hidden, cl::init(false)); +static cl::opt + ClRecover("asan-recover", + cl::desc("Enable recovery mode (continue-after-error)."), + cl::Hidden, cl::init(false)); static cl::opt ClInsertVersionCheck( "asan-guard-against-version-mismatch", - cl::desc("Guard against compiler/runtime version mismatch."), - cl::Hidden, cl::init(true)); + cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, + cl::init(true)); // This flag may need to be replaced with -f[no-]asan-reads. static cl::opt ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true)); -static cl::opt ClInstrumentWrites( - "asan-instrument-writes", cl::desc("instrument write instructions"), - cl::Hidden, cl::init(true)); +static cl::opt + ClInstrumentWrites("asan-instrument-writes", + cl::desc("instrument write instructions"), cl::Hidden, + cl::init(true)); static cl::opt ClInstrumentAtomics( "asan-instrument-atomics", @@ -278,8 +279,8 @@ static cl::opt ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " - "required)"), cl::Hidden, - cl::init(true)); + "required)"), + cl::Hidden, cl::init(true)); static cl::opt ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), @@ -316,26 +317,25 @@ static cl::opt ClInstrumentationWithCallsThreshold( "asan-instrumentation-with-call-threshold", - cl::desc( - "If the function being instrumented contains more than " - "this number of memory accesses, use callbacks instead of " - "inline checks (-1 means never use callbacks)."), + cl::desc("If 
the function being instrumented contains more than " + "this number of memory accesses, use callbacks instead of " + "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000)); -static cl::opt ClMemoryAccessCallbackPrefix( - "asan-memory-access-callback-prefix", - cl::desc("Prefix for memory access callbacks"), cl::Hidden, - cl::init("__asan_")); +static cl::opt + ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", + cl::desc("Prefix for memory access callbacks"), + cl::Hidden, cl::init("__asan_")); static cl::opt ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true)); -static cl::opt ClSkipPromotableAllocas( - "asan-skip-promotable-allocas", - cl::desc("Do not instrument promotable allocas"), cl::Hidden, - cl::init(true)); +static cl::opt + ClSkipPromotableAllocas("asan-skip-promotable-allocas", + cl::desc("Do not instrument promotable allocas"), + cl::Hidden, cl::init(true)); // These flags allow to change the shadow mapping. 
// The shadow mapping looks like @@ -360,27 +360,29 @@ cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false)); -static cl::opt ClOptSameTemp( - "asan-opt-same-temp", cl::desc("Instrument the same temp just once"), - cl::Hidden, cl::init(true)); +static cl::opt + ClOptSameTemp("asan-opt-same-temp", + cl::desc("Instrument the same temp just once"), cl::Hidden, + cl::init(true)); static cl::opt ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true)); -static cl::opt ClOptStack( - "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), - cl::Hidden, cl::init(false)); +static cl::opt + ClOptStack("asan-opt-stack", + cl::desc("Don't instrument scalar stack variables"), cl::Hidden, + cl::init(false)); static cl::opt ClDynamicAllocaStack( "asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true)); -static cl::opt ClForceExperiment( - "asan-force-experiment", - cl::desc("Force optimization experiment (for testing)"), cl::Hidden, - cl::init(0)); +static cl::opt + ClForceExperiment("asan-force-experiment", + cl::desc("Force optimization experiment (for testing)"), + cl::Hidden, cl::init(0)); static cl::opt ClUsePrivateAlias("asan-use-private-alias", @@ -501,7 +503,7 @@ Mapping.Offset = kEmscriptenShadowOffset; else Mapping.Offset = kDefaultShadowOffset32; - } else { // LongSize == 64 + } else { // LongSize == 64 // Fuchsia is always PIE, which means that the beginning of the address // space is always available. if (IsFuchsia) @@ -678,8 +680,7 @@ ArraySize = CI->getZExtValue(); } Type *Ty = AI.getAllocatedType(); - uint64_t SizeInBytes = - AI.getModule()->getDataLayout().getTypeAllocSize(Ty); + uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty); return SizeInBytes * ArraySize; } @@ -1016,9 +1017,11 @@ copyArgsPassedByValToAllocas(); // Collect alloca, ret, lifetime instructions etc. 
- for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB); + for (BasicBlock *BB : depth_first(&F.getEntryBlock())) + visit(*BB); - if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false; + if (AllocaVec.empty() && DynamicAllocaVec.empty()) + return false; initializeCallbacks(*F.getParent()); @@ -1066,7 +1069,9 @@ void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); } /// Collect all CatchReturnInst instructions. - void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); } + void visitCleanupReturnInst(CleanupReturnInst &CRI) { + RetVec.push_back(&CRI); + } void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore, Value *SavedStack) { @@ -1138,8 +1143,10 @@ /// errors. void visitIntrinsicInst(IntrinsicInst &II) { Intrinsic::ID ID = II.getIntrinsicID(); - if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II); - if (ID == Intrinsic::localescape) LocalEscapeCall = &II; + if (ID == Intrinsic::stackrestore) + StackRestoreVec.push_back(&II); + if (ID == Intrinsic::localescape) + LocalEscapeCall = &II; if (!ASan.UseAfterScope) return; if (!II.isLifetimeStartOrEnd()) @@ -1147,7 +1154,8 @@ // Found lifetime intrinsic, add ASan instrumentation if necessary. auto *Size = cast(II.getArgOperand(0)); // If size argument is undefined, don't do anything. - if (Size->isMinusOne()) return; + if (Size->isMinusOne()) + return; // Check that size doesn't saturate uint64_t and can // be stored in IntptrTy. 
const uint64_t SizeValue = Size->getValue().getLimitedValue(); @@ -1241,8 +1249,7 @@ E.Name = Name->getString(); ConstantInt *IsDynInit = mdconst::extract(MDN->getOperand(3)); E.IsDynInit |= IsDynInit->isOne(); - ConstantInt *IsExcluded = - mdconst::extract(MDN->getOperand(4)); + ConstantInt *IsExcluded = mdconst::extract(MDN->getOperand(4)); E.IsExcluded |= IsExcluded->isOne(); } } @@ -1404,7 +1411,8 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { // Shadow >> scale Shadow = IRB.CreateLShr(Shadow, Mapping.Scale); - if (Mapping.Offset == 0) return Shadow; + if (Mapping.Offset == 0) + return Shadow; // (Shadow >> scale) | offset Value *ShadowBase; if (LocalDynamicShadow) @@ -1843,7 +1851,7 @@ CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false); } else { BasicBlock *CrashBlock = - BasicBlock::Create(*C, "", NextBB->getParent(), NextBB); + BasicBlock::Create(*C, "", NextBB->getParent(), NextBB); CrashTerm = new UnreachableInst(*C, CrashBlock); BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2); ReplaceInstWithInst(CheckTerm, NewTerm); @@ -1910,12 +1918,14 @@ return; for (Use &OP : CA->operands()) { - if (isa(OP)) continue; + if (isa(OP)) + continue; ConstantStruct *CS = cast(OP); // Must have a function or null ptr. if (Function *F = dyn_cast(CS->getOperand(1))) { - if (F->getName() == kAsanModuleCtorName) continue; + if (F->getName() == kAsanModuleCtorName) + continue; auto *Priority = cast(CS->getOperand(0)); // Don't instrument CTORs that will run before asan.module_ctor. if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple)) @@ -1949,20 +1959,26 @@ // FIXME: Metadata should be attched directly to the global directly instead // of being added to llvm.asan.globals. 
- if (GlobalsMD.get(G).IsExcluded) return false; - if (!Ty->isSized()) return false; - if (!G->hasInitializer()) return false; + if (GlobalsMD.get(G).IsExcluded) + return false; + if (!Ty->isSized()) + return false; + if (!G->hasInitializer()) + return false; // Globals in address space 1 and 4 are supported for AMDGPU. if (G->getAddressSpace() && !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G))) return false; - if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals. + if (GlobalWasGeneratedByCompiler(G)) + return false; // Our own globals. // Two problems with thread-locals: // - The address of the main thread's copy can't be computed at link-time. // - Need to poison all copies, not just the main thread's one. - if (G->isThreadLocal()) return false; + if (G->isThreadLocal()) + return false; // For now, just ignore this Global if the alignment is large. - if (G->getAlignment() > getMinRedzoneSizeForGlobal()) return false; + if (G->getAlignment() > getMinRedzoneSizeForGlobal()) + return false; // For non-COFF targets, only instrument globals known to be defined by this // TU. @@ -2001,9 +2017,12 @@ StringRef Section = G->getSection(); // Globals from llvm.metadata aren't emitted, do not instrument them. - if (Section == "llvm.metadata") return false; + if (Section == "llvm.metadata") + return false; // Do not instrument globals from special LLVM sections. - if (Section.find("__llvm") != StringRef::npos || Section.find("__LLVM") != StringRef::npos) return false; + if (Section.find("__llvm") != StringRef::npos || + Section.find("__LLVM") != StringRef::npos) + return false; // Do not instrument function pointers to initialization and termination // routines: dynamic linker will not properly handle redzones. 
@@ -2100,9 +2119,12 @@
 
 StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
   switch (TargetTriple.getObjectFormat()) {
-  case Triple::COFF: return ".ASAN$GL";
-  case Triple::ELF: return "asan_globals";
-  case Triple::MachO: return "__DATA,__asan_globals,regular";
+  case Triple::COFF:
+    return ".ASAN$GL";
+  case Triple::ELF:
+    return "asan_globals";
+  case Triple::MachO:
+    return "__DATA,__asan_globals,regular";
   case Triple::Wasm:
   case Triple::GOFF:
   case Triple::XCOFF:
@@ -2189,9 +2211,10 @@
   auto Linkage = TargetTriple.isOSBinFormatMachO()
                      ? GlobalVariable::InternalLinkage
                      : GlobalVariable::PrivateLinkage;
-  GlobalVariable *Metadata = new GlobalVariable(
-      M, Initializer->getType(), false, Linkage, Initializer,
-      Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
+  GlobalVariable *Metadata =
+      new GlobalVariable(M, Initializer->getType(), false, Linkage, Initializer,
+                         Twine("__asan_global_") +
+                             GlobalValue::dropLLVMManglingEscape(OriginalName));
   Metadata->setSection(getGlobalMetadataSection());
   return Metadata;
 }
@@ -2422,6 +2445,31 @@
   for (auto &G : M.globals()) {
     if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
       GlobalsToChange.push_back(&G);
+    // Offload entry descriptors in the "omp_offloading_entries" section
+    // record the byte size of the entry in struct element 2; grow it by the
+    // right redzone so the runtime sees the instrumented global's full size.
+    // NOTE(review): assumes the 5-element __tgt_offload_entry layout with
+    // the size in element 2 -- confirm against the offload entry ABI.
+    if (G.hasInitializer() && G.getSection() == "omp_offloading_entries" &&
+        isa<ConstantStruct>(G.getInitializer())) {
+      Constant *Init = G.getInitializer();
+      auto *EntryTy = cast<StructType>(Init->getType());
+      // Be defensive: only rewrite entries whose size field is integral.
+      auto *SizeC = EntryTy->getNumElements() == 5
+                        ? dyn_cast_or_null<ConstantInt>(
+                              Init->getAggregateElement(2u))
+                        : nullptr;
+      if (SizeC && !SizeC->isZero()) {
+        const uint64_t SizeInBytes = SizeC->getZExtValue();
+        Constant *NewSize = ConstantInt::get(
+            IRB.getInt64Ty(),
+            SizeInBytes + getRedzoneSizeForGlobal(SizeInBytes), true);
+        SmallVector<Constant *, 5> Elts;
+        for (unsigned I = 0; I != 5; ++I)
+          Elts.push_back(I == 2 ? NewSize : Init->getAggregateElement(I));
+        G.setInitializer(ConstantStruct::get(EntryTy, Elts));
+      }
+    }
   }
 
   size_t n = GlobalsToChange.size();
@@ -2525,9 +2573,9 @@
   Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy());
   GlobalValue *InstrumentedGlobal = NewGlobal;
 
-  bool CanUsePrivateAliases =
-      TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
-      TargetTriple.isOSBinFormatWasm();
+  bool CanUsePrivateAliases = TargetTriple.isOSBinFormatELF() ||
+                              TargetTriple.isOSBinFormatMachO() ||
+                              TargetTriple.isOSBinFormatWasm();
   if (CanUsePrivateAliases && UsePrivateAlias) {
     // Create local alias for NewGlobal to avoid crash on ODR between
     // instrumented and non-instrumented libraries.
@@ -2565,7 +2613,8 @@
       ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc,
       ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));
 
-  if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true;
+  if (ClInitializers && MD.IsDynInit)
+    HasDynamicallyInitializedGlobals = true;
 
   LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
 
@@ -2577,7 +2626,8 @@
   SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
   for (size_t i = 0; i < n; i++) {
     GlobalVariable *G = NewGlobals[i];
-    if (G->getName().empty()) continue;
+    if (G->getName().empty())
+      continue;
     GlobalsToAddToUsedList.push_back(G);
   }
   appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
@@ -2811,7 +2861,8 @@
 
   // Try to get the declaration of llvm.localescape. If it's not in the module,
   // we can exit early.
-  if (!F.getParent()->getFunction("llvm.localescape")) return;
+  if (!F.getParent()->getFunction("llvm.localescape"))
+    return;
 
   // Look for a call to llvm.localescape call in the entry block. It can't be in
   // any other block.
@@ -2840,9 +2891,12 @@ bool AddressSanitizer::instrumentFunction(Function &F, const TargetLibraryInfo *TLI) { - if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false; - if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false; - if (F.getName().startswith("__asan_")) return false; + if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) + return false; + if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) + return false; + if (F.getName().startswith("__asan_")) + return false; bool FunctionModified = false; @@ -2853,7 +2907,8 @@ FunctionModified = true; // Leave if the function doesn't need instrumentation. - if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified; + if (!F.hasFnAttribute(Attribute::SanitizeAddress)) + return FunctionModified; LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); @@ -2883,7 +2938,8 @@ TempsToInstrument.clear(); int NumInsnsPerBB = 0; for (auto &Inst : BB) { - if (LooksLikeCodeInBug11395(&Inst)) return false; + if (LooksLikeCodeInBug11395(&Inst)) + return false; SmallVector InterestingOperands; getInterestingMemoryOperands(&Inst, InterestingOperands); @@ -2915,7 +2971,8 @@ IntrinToInstrument.push_back(MI); NumInsnsPerBB++; } else { - if (isa(Inst)) NumAllocas++; + if (isa(Inst)) + NumAllocas++; if (auto *CB = dyn_cast(&Inst)) { // A call inside BB. TempsToInstrument.clear(); @@ -2925,7 +2982,8 @@ if (CallInst *CI = dyn_cast(&Inst)) maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI); } - if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break; + if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) + break; } } @@ -2979,9 +3037,11 @@ // with large assembly blobs (32-bit only), otherwise reg alloc may crash. // FIXME: remove once the bug 11395 is fixed. 
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) { - if (LongSize != 32) return false; + if (LongSize != 32) + return false; CallInst *CI = dyn_cast(I); - if (!CI || !CI->isInlineAsm()) return false; + if (!CI || !CI->isInlineAsm()) + return false; if (CI->arg_size() <= 5) return false; // We have inline assembly with quite a few arguments. @@ -3122,7 +3182,8 @@ assert(LocalStackSize <= kMaxStackMallocSize); uint64_t MaxSize = kMinStackMallocSize; for (int i = 0;; i++, MaxSize *= 2) - if (LocalStackSize <= MaxSize) return i; + if (LocalStackSize <= MaxSize) + return i; llvm_unreachable("impossible LocalStackSize"); } @@ -3179,7 +3240,8 @@ assert(Alloca->isStaticAlloca()); } assert((ClRealignStack & (ClRealignStack - 1)) == 0); - uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack)); + uint64_t FrameAlignment = + std::max(L.FrameAlignment, uint64_t(ClRealignStack)); Alloca->setAlignment(Align(FrameAlignment)); return IRB.CreatePointerCast(Alloca, IntptrTy); } @@ -3306,7 +3368,8 @@ ArgInitInst->moveBefore(InsBefore); // If we have a call to llvm.localescape, keep it in the entry block. - if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore); + if (LocalEscapeCall) + LocalEscapeCall->moveBefore(InsBefore); SmallVector SVD; SVD.reserve(AllocaVec.size()); @@ -3554,7 +3617,8 @@ } // We are done. Remove the old unused alloca instructions. - for (auto AI : AllocaVec) AI->eraseFromParent(); + for (auto AI : AllocaVec) + AI->eraseFromParent(); } void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size, @@ -3562,9 +3626,9 @@ // For now just insert the call to ASan runtime. Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy); Value *SizeArg = ConstantInt::get(IntptrTy, Size); - IRB.CreateCall( - DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc, - {AddrArg, SizeArg}); + IRB.CreateCall(DoPoison ? 
AsanPoisonStackMemoryFunc + : AsanUnpoisonStackMemoryFunc, + {AddrArg, SizeArg}); } // Handling llvm.lifetime intrinsics for a given %alloca: @@ -3643,7 +3707,8 @@ bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr, uint64_t TypeSize) const { SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr); - if (!ObjSizeVis.bothKnown(SizeOffset)) return false; + if (!ObjSizeVis.bothKnown(SizeOffset)) + return false; uint64_t Size = SizeOffset.first.getZExtValue(); int64_t Offset = SizeOffset.second.getSExtValue(); // Three checks are required to ensure safety: