diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h --- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h +++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h @@ -685,6 +685,7 @@ ATTR_KIND_ALLOC_ALIGN = 80, ATTR_KIND_ALLOCATED_POINTER = 81, ATTR_KIND_ALLOC_KIND = 82, + ATTR_KIND_SANITIZE_TYPE = 83, }; enum ComdatSelectionKindCodes { diff --git a/llvm/include/llvm/IR/Attributes.td b/llvm/include/llvm/IR/Attributes.td --- a/llvm/include/llvm/IR/Attributes.td +++ b/llvm/include/llvm/IR/Attributes.td @@ -258,6 +258,9 @@ /// ThreadSanitizer is on. def SanitizeThread : EnumAttr<"sanitize_thread", [FnAttr]>; +/// TypeSanitizer is on. +def SanitizeType : EnumAttr<"sanitize_type", [FnAttr]>; + /// MemorySanitizer is on. def SanitizeMemory : EnumAttr<"sanitize_memory", [FnAttr]>; @@ -330,6 +333,7 @@ def : CompatRule<"isEqual">; def : CompatRule<"isEqual">; def : CompatRule<"isEqual">; +def : CompatRule<"isEqual">; def : CompatRule<"isEqual">; def : CompatRule<"isEqual">; def : CompatRule<"isEqual">; diff --git a/llvm/include/llvm/Transforms/Instrumentation/TypeSanitizer.h b/llvm/include/llvm/Transforms/Instrumentation/TypeSanitizer.h new file mode 100644 --- /dev/null +++ b/llvm/include/llvm/Transforms/Instrumentation/TypeSanitizer.h @@ -0,0 +1,41 @@ +//===- Transforms/Instrumentation/TypeSanitizer.h - TySan Pass -----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the type sanitizer pass. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_TYPESANITIZER_H +#define LLVM_TRANSFORMS_INSTRUMENTATION_TYPESANITIZER_H + +#include "llvm/IR/PassManager.h" + +namespace llvm { +class Function; +class FunctionPass; +class Module; +// Insert ThreadSanitizer (race detection) instrumentation +// FunctionPass *createThreadSanitizerLegacyPassPass(); + +/// A function pass for tysan instrumentation. +/// +struct TypeSanitizerPass : public PassInfoMixin { + PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM); + static bool isRequired() { return true; } +}; + +/// A module pass for tysan instrumentation. +/// +/// Create ctor and init functions. +struct ModuleTypeSanitizerPass : public PassInfoMixin { + PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM); + static bool isRequired() { return true; } +}; + +} // namespace llvm +#endif /* LLVM_TRANSFORMS_INSTRUMENTATION_TYPEDSANITIZER_H */ diff --git a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp --- a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp +++ b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp @@ -367,10 +367,27 @@ return isa(MD->getOperand(0)) && MD->getNumOperands() >= 3; } +// When using the TypeSanitizer, don't use TBAA information for alias analysis. +// This might cause us to remove memory accesses that we need to verify at +// runtime. +static bool usingSanitizeType(const Value *V) { + const Function *F; + + if (auto *I = dyn_cast(V)) + F = I->getParent()->getParent(); + else if (auto *A = dyn_cast(V)) + F = A->getParent(); + else + return false; + + return F->hasFnAttribute(Attribute::SanitizeType); +} + AliasResult TypeBasedAAResult::alias(const MemoryLocation &LocA, const MemoryLocation &LocB, AAQueryInfo &AAQI) { - if (!EnableTBAA) + if (!EnableTBAA || usingSanitizeType(LocA.Ptr) || usingSanitizeType(LocB.Ptr)) + return AAResultBase::alias(LocA, LocB, AAQI); // If accesses may alias, chain to the next AliasAnalysis. @@ -425,7 +442,7 @@ ModRefInfo TypeBasedAAResult::getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, AAQueryInfo &AAQI) { - if (!EnableTBAA) + if (!EnableTBAA || usingSanitizeType(Call)) return AAResultBase::getModRefInfo(Call, Loc, AAQI); if (const MDNode *L = Loc.AATags.TBAA) @@ -439,7 +456,7 @@ ModRefInfo TypeBasedAAResult::getModRefInfo(const CallBase *Call1, const CallBase *Call2, AAQueryInfo &AAQI) { - if (!EnableTBAA) + if (!EnableTBAA || usingSanitizeType(Call1)) return AAResultBase::getModRefInfo(Call1, Call2, AAQI); if (const MDNode *M1 = Call1->getMetadata(LLVMContext::MD_tbaa)) diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -1600,6 +1600,8 @@ return Attribute::SanitizeHWAddress; case bitc::ATTR_KIND_SANITIZE_THREAD: return Attribute::SanitizeThread; + case bitc::ATTR_KIND_SANITIZE_TYPE: + return Attribute::SanitizeType; case bitc::ATTR_KIND_SANITIZE_MEMORY: return Attribute::SanitizeMemory; case bitc::ATTR_KIND_SPECULATIVE_LOAD_HARDENING: diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -742,6 +742,8 @@ return bitc::ATTR_KIND_SANITIZE_HWADDRESS; case Attribute::SanitizeThread: return bitc::ATTR_KIND_SANITIZE_THREAD; + case Attribute::SanitizeType: + return bitc::ATTR_KIND_SANITIZE_TYPE; case Attribute::SanitizeMemory: return bitc::ATTR_KIND_SANITIZE_MEMORY; case Attribute::SpeculativeLoadHardening: diff --git a/llvm/lib/CodeGen/ShrinkWrap.cpp b/llvm/lib/CodeGen/ShrinkWrap.cpp --- a/llvm/lib/CodeGen/ShrinkWrap.cpp +++ b/llvm/lib/CodeGen/ShrinkWrap.cpp @@ -606,6 +606,7 @@ !(MF.getFunction().hasFnAttribute(Attribute::SanitizeAddress) || MF.getFunction().hasFnAttribute(Attribute::SanitizeThread) || MF.getFunction().hasFnAttribute(Attribute::SanitizeMemory) || + MF.getFunction().hasFnAttribute(Attribute::SanitizeType) || MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress)); // If EnableShrinkWrap is set, it takes precedence on whatever the // target sets. The rational is that we assume we want to test diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -140,6 +140,7 @@ #include "llvm/Transforms/Instrumentation/PoisonChecking.h" #include "llvm/Transforms/Instrumentation/SanitizerCoverage.h" #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h" +#include "llvm/Transforms/Instrumentation/TypeSanitizer.h" #include "llvm/Transforms/ObjCARC.h" #include "llvm/Transforms/Scalar/ADCE.h" #include "llvm/Transforms/Scalar/AlignmentFromAssumptions.h" diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def --- a/llvm/lib/Passes/PassRegistry.def +++ b/llvm/lib/Passes/PassRegistry.def @@ -122,6 +122,7 @@ MODULE_PASS("msan-module", ModuleMemorySanitizerPass({})) MODULE_PASS("module-inline", ModuleInlinerPass()) MODULE_PASS("tsan-module", ModuleThreadSanitizerPass()) +MODULE_PASS("tysan-module", ModuleTypeSanitizerPass()) MODULE_PASS("sancov-module", ModuleSanitizerCoveragePass()) MODULE_PASS("memprof-module", ModuleMemProfilerPass()) MODULE_PASS("poison-checking", PoisonCheckingPass()) @@ -381,6 +382,7 @@ FUNCTION_PASS("tlshoist", TLSVariableHoistPass()) FUNCTION_PASS("transform-warning", WarnMissedTransformationsPass()) FUNCTION_PASS("tsan", ThreadSanitizerPass()) +FUNCTION_PASS("tysan", TypeSanitizerPass()) FUNCTION_PASS("memprof", MemProfilerPass()) #undef FUNCTION_PASS diff --git a/llvm/lib/Transforms/Instrumentation/CMakeLists.txt b/llvm/lib/Transforms/Instrumentation/CMakeLists.txt --- a/llvm/lib/Transforms/Instrumentation/CMakeLists.txt +++ b/llvm/lib/Transforms/Instrumentation/CMakeLists.txt @@ -17,6 +17,7 @@ SanitizerCoverage.cpp ValueProfileCollector.cpp ThreadSanitizer.cpp + TypeSanitizer.cpp HWAddressSanitizer.cpp ADDITIONAL_HEADER_DIRS diff --git a/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp @@ -0,0 +1,847 @@ +//===----- TypeSanitizer.cpp - type-based-aliasing-violation detector -----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of TypeSanitizer, a type-based-aliasing-violation +// detector. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Transforms/Instrumentation/TypeSanitizer.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/Analysis/MemoryLocation.h" +#include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/MDBuilder.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Type.h" +#include "llvm/ProfileData/InstrProf.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MD5.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/Regex.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Instrumentation.h" +#include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Local.h" +#include "llvm/Transforms/Utils/ModuleUtils.h" + +#include + +using namespace llvm; + +#define DEBUG_TYPE "tysan" + +static const char *const kTysanModuleCtorName = "tysan.module_ctor"; +static const char *const kTysanInitName = "__tysan_init"; +static const char *const kTysanCheckName = "__tysan_check"; +static const char *const kTysanGVNamePrefix = "__tysan_v1_"; + +static const char *const kTysanShadowMemoryAddress = + "__tysan_shadow_memory_address"; +static const char *const kTysanAppMemMask = "__tysan_app_memory_mask"; + +static cl::opt + ClWritesAlwaysSetType("tysan-writes-always-set-type", + cl::desc("Writes always set the type"), cl::Hidden, + cl::init(false)); + +STATISTIC(NumInstrumentedAccesses, "Number of instrumented accesses"); + +static Regex AnonNameRegex("^_ZTS.*N[1-9][0-9]*_GLOBAL__N"); + +namespace { + +/// TypeSanitizer: instrument the code in module to find races. +struct TypeSanitizer { + TypeSanitizer(Module &M); + bool run(Function &F, const TargetLibraryInfo &TLI); + void instrumentGlobals(); + +private: + typedef SmallDenseMap + TypeDescriptorsMapTy; + typedef SmallDenseMap TypeNameMapTy; + + void initializeCallbacks(Module &M); + + Value *getShadowBase(Function &F); + Value *getAppMemMask(Function &F); + + bool instrumentWithShadowUpdate(IRBuilder<> &IRB, const MDNode *TBAAMD, + Value *Ptr, uint64_t AccessSize, bool IsRead, + bool IsWrite, Value *&ShadowBase, + Value *&AppMemMask, bool ForceSetType, + bool SanitizeFunction, + TypeDescriptorsMapTy &TypeDescriptors, + const DataLayout &DL); + bool instrumentMemoryAccess(Instruction *I, MemoryLocation &MLoc, + Value *&ShadowBase, Value *&AppMemMask, + bool SanitizeFunction, + TypeDescriptorsMapTy &TypeDescriptors, + const DataLayout &DL); + bool instrumentMemInst(Value *I, Value *&ShadowBase, Value *&AppMemMask, + const DataLayout &DL); + + std::string getAnonymousStructIdentifier(const MDNode *MD, + TypeNameMapTy &TypeNames); + bool generateTypeDescriptor(const MDNode *MD, + TypeDescriptorsMapTy &TypeDescriptors, + TypeNameMapTy &TypeNames, Module &M); + bool generateBaseTypeDescriptor(const MDNode *MD, + TypeDescriptorsMapTy &TypeDescriptors, + TypeNameMapTy &TypeNames, Module &M); + + const Triple TargetTriple; + Type *IntptrTy; + uint64_t PtrShift; + IntegerType *OrdTy; + + // Callbacks to run-time library are computed in doInitialization. + Function *TysanCheck; + Function *TysanCtorFunction; + Function *TysanGlobalsSetTypeFunction; +}; +} // namespace + +TypeSanitizer::TypeSanitizer(Module &M) + : TargetTriple(Triple(M.getTargetTriple())) { + const DataLayout &DL = M.getDataLayout(); + IntptrTy = DL.getIntPtrType(M.getContext()); + PtrShift = countTrailingZeros(IntptrTy->getPrimitiveSizeInBits() / 8); + + initializeCallbacks(M); +} + +void TypeSanitizer::initializeCallbacks(Module &M) { + IRBuilder<> IRB(M.getContext()); + OrdTy = IRB.getInt32Ty(); + + AttributeList Attr; + Attr = Attr.addFnAttribute(M.getContext(), Attribute::NoUnwind); + // Initialize the callbacks. + TysanCheck = cast( + M.getOrInsertFunction(kTysanCheckName, Attr, IRB.getVoidTy(), + IRB.getInt8PtrTy(), // Pointer to data to be read. + OrdTy, // Size of the data in bytes. + IRB.getInt8PtrTy(), // Pointer to type descriptor. + OrdTy // Flags. + ) + .getCallee()); + + TysanCtorFunction = cast( + M.getOrInsertFunction(kTysanModuleCtorName, Attr, IRB.getVoidTy()) + .getCallee()); +} + +void TypeSanitizer::instrumentGlobals() { + Module &M = *TysanCtorFunction->getParent(); + initializeCallbacks(M); + TysanGlobalsSetTypeFunction = nullptr; + + NamedMDNode *Globals = M.getNamedMetadata("llvm.tysan.globals"); + if (!Globals) + return; + + const DataLayout &DL = M.getDataLayout(); + Value *ShadowBase = nullptr, *AppMemMask = nullptr; + TypeDescriptorsMapTy TypeDescriptors; + TypeNameMapTy TypeNames; + + for (const auto &GMD : Globals->operands()) { + auto *GV = mdconst::extract_or_null(GMD->getOperand(0)); + if (!GV) + continue; + const MDNode *TBAAMD = cast(GMD->getOperand(1)); + if (!generateBaseTypeDescriptor(TBAAMD, TypeDescriptors, TypeNames, M)) + continue; + + if (!TysanGlobalsSetTypeFunction) { + TysanGlobalsSetTypeFunction = Function::Create( + FunctionType::get(Type::getVoidTy(M.getContext()), false), + GlobalValue::InternalLinkage, "__tysan_set_globals_types", &M); + BasicBlock *BB = + BasicBlock::Create(M.getContext(), "", TysanGlobalsSetTypeFunction); + ReturnInst::Create(M.getContext(), BB); + } + + IRBuilder<> IRB( + TysanGlobalsSetTypeFunction->getEntryBlock().getTerminator()); + Type *AccessTy = GV->getValueType(); + assert(AccessTy->isSized()); + uint64_t AccessSize = DL.getTypeStoreSize(AccessTy); + instrumentWithShadowUpdate(IRB, TBAAMD, GV, AccessSize, false, false, + ShadowBase, AppMemMask, true, false, + TypeDescriptors, DL); + } + + if (TysanGlobalsSetTypeFunction) { + IRBuilder<> IRB(TysanCtorFunction->getEntryBlock().getTerminator()); + IRB.CreateCall(TysanGlobalsSetTypeFunction, {}); + } +} + +static void insertModuleCtor(Module &M) { + Function *TysanCtorFunction; + std::tie(TysanCtorFunction, std::ignore) = + createSanitizerCtorAndInitFunctions(M, kTysanModuleCtorName, + kTysanInitName, /*InitArgTypes=*/{}, + /*InitArgs=*/{}); + + TypeSanitizer TySan(M); + TySan.instrumentGlobals(); + appendToGlobalCtors(M, TysanCtorFunction, 0); +} + +static std::string encodeName(StringRef Name) { + static const char LUT[] = "0123456789abcdef"; + size_t Length = Name.size(); + + std::string Output = kTysanGVNamePrefix; + Output.reserve(Output.size() + 3 * Length); + for (size_t i = 0; i < Length; ++i) { + const unsigned char c = Name[i]; + if (isalnum((int)c)) { + Output.push_back(c); + continue; + } + + if (c == '_') { + Output.append("__"); + continue; + } + + Output.push_back('_'); + Output.push_back(LUT[c >> 4]); + Output.push_back(LUT[c & 15]); + } + + return Output; +} + +static bool isAnonymousNamespaceName(StringRef Name) { + // Types that are in an anonymous namespace are local to this module. + // FIXME: This should really be marked by the frontend in the metadata + // instead of having us guess this from the mangled name. Moreover, the regex + // here can pick up (unlikely) names in the non-reserved namespace (because + // it needs to search into the type to pick up cases where the type in the + // anonymous namespace is a template parameter, etc.). + return AnonNameRegex.match(Name); +} + +std::string +TypeSanitizer::getAnonymousStructIdentifier(const MDNode *MD, + TypeNameMapTy &TypeNames) { + MD5 Hash; + + for (int i = 1, e = MD->getNumOperands(); i < e; i += 2) { + const MDNode *MemberNode = dyn_cast(MD->getOperand(i)); + if (!MemberNode) + return ""; + + auto TNI = TypeNames.find(MemberNode); + std::string MemberName; + if (TNI != TypeNames.end()) { + MemberName = TNI->second; + } else { + if (MemberNode->getNumOperands() < 1) + return ""; + MDString *MemberNameNode = dyn_cast(MemberNode->getOperand(0)); + if (!MemberNameNode) + return ""; + MemberName = MemberNameNode->getString().str(); + if (MemberName.empty()) + MemberName = getAnonymousStructIdentifier(MemberNode, TypeNames); + if (MemberName.empty()) + return ""; + TypeNames[MemberNode] = MemberName; + } + + Hash.update(MemberName); + Hash.update("\0"); + + uint64_t Offset = + mdconst::extract(MD->getOperand(i + 1))->getZExtValue(); + Hash.update(utostr(Offset)); + Hash.update("\0"); + } + + MD5::MD5Result HashResult; + Hash.final(HashResult); + return "__anonymous_" + std::string(HashResult.digest().str()); +} + +bool TypeSanitizer::generateBaseTypeDescriptor( + const MDNode *MD, TypeDescriptorsMapTy &TypeDescriptors, + TypeNameMapTy &TypeNames, Module &M) { + if (MD->getNumOperands() < 1) + return false; + + MDString *NameNode = dyn_cast(MD->getOperand(0)); + if (!NameNode) + return false; + + std::string Name = NameNode->getString().str(); + if (Name.empty()) + Name = getAnonymousStructIdentifier(MD, TypeNames); + if (Name.empty()) + return false; + TypeNames[MD] = Name; + std::string EncodedName = encodeName(Name); + + GlobalVariable *GV = + dyn_cast_or_null(M.getNamedValue(EncodedName)); + if (GV) { + TypeDescriptors[MD] = GV; + return true; + } + + SmallVector> Members; + for (int i = 1, e = MD->getNumOperands(); i < e; i += 2) { + const MDNode *MemberNode = dyn_cast(MD->getOperand(i)); + if (!MemberNode) + return false; + + Constant *Member; + auto TDI = TypeDescriptors.find(MemberNode); + if (TDI != TypeDescriptors.end()) { + Member = TDI->second; + } else { + if (!generateBaseTypeDescriptor(MemberNode, TypeDescriptors, TypeNames, + M)) + return false; + + Member = TypeDescriptors[MemberNode]; + } + + uint64_t Offset = + mdconst::extract(MD->getOperand(i + 1))->getZExtValue(); + + Members.push_back(std::make_pair(Member, Offset)); + } + + // The descriptor for a scalar is: + // [2, member count, [type pointer, offset]..., name] + + LLVMContext &C = MD->getContext(); + Constant *NameData = ConstantDataArray::getString(C, NameNode->getString()); + SmallVector TDSubTys; + SmallVector TDSubData; + + TDSubTys.push_back(IntptrTy); + TDSubData.push_back(ConstantInt::get(IntptrTy, 2)); + + TDSubTys.push_back(IntptrTy); + TDSubData.push_back(ConstantInt::get(IntptrTy, Members.size())); + + bool ShouldBeComdat = !isAnonymousNamespaceName(NameNode->getString()); + for (auto &Member : Members) { + TDSubTys.push_back(Member.first->getType()); + TDSubData.push_back(Member.first); + + if (!cast(Member.first)->hasComdat()) + ShouldBeComdat = false; + + TDSubTys.push_back(IntptrTy); + TDSubData.push_back(ConstantInt::get(IntptrTy, Member.second)); + } + + TDSubTys.push_back(NameData->getType()); + TDSubData.push_back(NameData); + + StructType *TDTy = StructType::get(C, TDSubTys); + Constant *TD = ConstantStruct::get(TDTy, TDSubData); + + GlobalVariable *TDGV = + new GlobalVariable(TDTy, true, + !ShouldBeComdat ? GlobalValue::InternalLinkage + : GlobalValue::LinkOnceODRLinkage, + TD, EncodedName); + M.getGlobalList().push_back(TDGV); + + if (ShouldBeComdat && TargetTriple.isOSBinFormatELF()) { + Comdat *TDComdat = M.getOrInsertComdat(EncodedName); + TDGV->setComdat(TDComdat); + } + appendToUsed(M, TDGV); + + TypeDescriptors[MD] = TDGV; + return true; +} + +bool TypeSanitizer::generateTypeDescriptor( + const MDNode *MD, TypeDescriptorsMapTy &TypeDescriptors, + TypeNameMapTy &TypeNames, Module &M) { + // Here we need to generate a type descriptor corresponding to this TBAA + // metadata node. Under the current scheme there are three kinds of TBAA + // metadata nodes: scalar nodes, struct nodes, and struct tag nodes. + + if (MD->getNumOperands() < 3) + return false; + + const MDNode *BaseNode = dyn_cast(MD->getOperand(0)); + if (!BaseNode) + return false; + + // This is a struct tag (element-access) node. + + const MDNode *AccessNode = dyn_cast(MD->getOperand(1)); + if (!AccessNode) + return false; + + Constant *Base; + auto TDI = TypeDescriptors.find(BaseNode); + if (TDI != TypeDescriptors.end()) { + Base = TDI->second; + } else { + if (!generateBaseTypeDescriptor(BaseNode, TypeDescriptors, TypeNames, M)) + return false; + + Base = TypeDescriptors[BaseNode]; + } + + Constant *Access; + TDI = TypeDescriptors.find(AccessNode); + if (TDI != TypeDescriptors.end()) { + Access = TDI->second; + } else { + if (!generateBaseTypeDescriptor(AccessNode, TypeDescriptors, TypeNames, M)) + return false; + + Access = TypeDescriptors[AccessNode]; + } + + uint64_t Offset = + mdconst::extract(MD->getOperand(2))->getZExtValue(); + std::string EncodedName = + std::string(Base->getName()) + "_o_" + utostr(Offset); + + GlobalVariable *GV = + dyn_cast_or_null(M.getNamedValue(EncodedName)); + if (GV) { + TypeDescriptors[MD] = GV; + return true; + } + + // The descriptor for a scalar is: + // [1, base-type pointer, access-type pointer, offset] + + StructType *TDTy = + StructType::get(IntptrTy, Base->getType(), Access->getType(), IntptrTy); + Constant *TD = + ConstantStruct::get(TDTy, ConstantInt::get(IntptrTy, 1), Base, Access, + ConstantInt::get(IntptrTy, Offset)); + + bool ShouldBeComdat = cast(Base)->hasComdat(); + + GlobalVariable *TDGV = + new GlobalVariable(TDTy, true, + !ShouldBeComdat ? GlobalValue::InternalLinkage + : GlobalValue::LinkOnceODRLinkage, + TD, EncodedName); + M.getGlobalList().push_back(TDGV); + + if (ShouldBeComdat) { + Comdat *TDComdat = M.getOrInsertComdat(EncodedName); + TDGV->setComdat(TDComdat); + } + + TypeDescriptors[MD] = TDGV; + return true; +} + +Value *TypeSanitizer::getShadowBase(Function &F) { + IRBuilder<> IRB(&F.front().front()); + Constant *GlobalShadowAddress = + F.getParent()->getOrInsertGlobal(kTysanShadowMemoryAddress, IntptrTy); + return IRB.CreateLoad(IntptrTy, GlobalShadowAddress); +} + +Value *TypeSanitizer::getAppMemMask(Function &F) { + IRBuilder<> IRB(&F.front().front()); + Value *GlobalAppMemMask = + F.getParent()->getOrInsertGlobal(kTysanAppMemMask, IntptrTy); + return IRB.CreateLoad(IntptrTy, GlobalAppMemMask); +} + +bool TypeSanitizer::run(Function &F, const TargetLibraryInfo &TLI) { + // This is required to prevent instrumenting call to __tysan_init from within + // the module constructor. + if (&F == TysanCtorFunction || &F == TysanGlobalsSetTypeFunction) + return false; + initializeCallbacks(*F.getParent()); + + SmallVector> MemoryAccesses; + SmallSetVector TBAAMetadata; + SmallVector MemTypeResetInsts; + + bool Res = false; + bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeType); + const DataLayout &DL = F.getParent()->getDataLayout(); + // Traverse all instructions, collect loads/stores/returns, check for calls. + for (auto &BB : F) { + for (auto &Inst : BB) { + // Skip memory accesses inserted by another instrumentation. + if (Inst.getMetadata("nosanitize")) + continue; + + if (isa(Inst) || isa(Inst) || + isa(Inst) || isa(Inst)) { + MemoryLocation MLoc = MemoryLocation::get(&Inst); + + // Swift errors are special (we can't introduce extra uses on them). + if (MLoc.Ptr->isSwiftError()) + continue; + + // Skip non-address-space-0 pointers; we don't know how to handle them. + Type *PtrTy = cast(MLoc.Ptr->getType()); + if (PtrTy->getPointerAddressSpace() != 0) + continue; + + if (MLoc.AATags.TBAA) + TBAAMetadata.insert(MLoc.AATags.TBAA); + MemoryAccesses.push_back(std::make_pair(&Inst, MLoc)); + } else if (isa(Inst) || isa(Inst)) { + if (CallInst *CI = dyn_cast(&Inst)) + maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI); + + if (isa(Inst)) { + MemTypeResetInsts.push_back(&Inst); + } else if (auto *II = dyn_cast(&Inst)) { + if (II->getIntrinsicID() == Intrinsic::lifetime_start || + II->getIntrinsicID() == Intrinsic::lifetime_end) + MemTypeResetInsts.push_back(&Inst); + } + } else if (isa(Inst)) { + MemTypeResetInsts.push_back(&Inst); + } + } + } + + // byval arguments also need their types reset (they're new stack memory, + // just like allocas). + for (auto &A : F.args()) + if (A.hasByValAttr()) + MemTypeResetInsts.push_back(&A); + + // We have collected all loads and stores, and know for what TBAA nodes we + // need to generate type descriptors. + + Module &M = *F.getParent(); + TypeDescriptorsMapTy TypeDescriptors; + TypeNameMapTy TypeNames; + for (const MDNode *MD : TBAAMetadata) { + if (TypeDescriptors.count(MD)) + continue; + + if (!generateTypeDescriptor(MD, TypeDescriptors, TypeNames, M)) + return Res; // Giving up. + + Res = true; + } + + Value *ShadowBase = nullptr, *AppMemMask = nullptr; + for (auto &MA : MemoryAccesses) + Res |= instrumentMemoryAccess(MA.first, MA.second, ShadowBase, AppMemMask, + SanitizeFunction, TypeDescriptors, DL); + + for (auto Inst : MemTypeResetInsts) + Res |= instrumentMemInst(Inst, ShadowBase, AppMemMask, DL); + + return Res; +} + +bool TypeSanitizer::instrumentWithShadowUpdate( + IRBuilder<> &IRB, const MDNode *TBAAMD, Value *Ptr, uint64_t AccessSize, + bool IsRead, bool IsWrite, Value *&ShadowBase, Value *&AppMemMask, + bool ForceSetType, bool SanitizeFunction, + TypeDescriptorsMapTy &TypeDescriptors, const DataLayout &DL) { + if (!ShadowBase) + ShadowBase = getShadowBase(*IRB.GetInsertBlock()->getParent()); + if (!AppMemMask) + AppMemMask = getAppMemMask(*IRB.GetInsertBlock()->getParent()); + + Constant *TDGV; + if (TBAAMD) + TDGV = TypeDescriptors[TBAAMD]; + else + TDGV = Constant::getNullValue(IRB.getInt8PtrTy()); + + Value *TD = IRB.CreateBitCast(TDGV, IRB.getInt8PtrTy()); + + Value *ShadowDataInt = IRB.CreateAdd( + IRB.CreateShl( + IRB.CreateAnd(IRB.CreatePtrToInt(Ptr, IntptrTy), AppMemMask), + PtrShift), + ShadowBase); + + Type *Int8PtrPtrTy = IRB.getInt8PtrTy()->getPointerTo(); + Value *ShadowData = IRB.CreateIntToPtr(ShadowDataInt, Int8PtrPtrTy); + + auto SetType = [&]() { + IRB.CreateStore(TD, ShadowData); + + // Now fill the remainder of the shadow memory corresponding to the + // remainder of the the bytes of the type with a bad type descriptor. + for (uint64_t i = 1; i < AccessSize; ++i) { + Value *BadShadowData = IRB.CreateIntToPtr( + IRB.CreateAdd(ShadowDataInt, + ConstantInt::get(IntptrTy, i << PtrShift)), + Int8PtrPtrTy); + + // This is the TD value, -i, which is used to indicate that the byte is + // i bytes after the first byte of the type. + Value *BadTD = IRB.CreateIntToPtr(ConstantInt::getSigned(IntptrTy, -i), + IRB.getInt8PtrTy()); + IRB.CreateStore(BadTD, BadShadowData); + } + }; + + if (!ForceSetType && (!ClWritesAlwaysSetType || IsRead)) { + // We need to check the type here. If the type is unknown, then the read + // sets the type. If the type is known, then it is checked. If the type + // doesn't match, then we call the runtime (which may yet determine that + // the mismatch is okay). + LLVMContext &C = IRB.getContext(); + MDNode *UnlikelyBW = MDBuilder(C).createBranchWeights(1, 100000); + + Constant *Flags = + ConstantInt::get(OrdTy, (int)IsRead | (((int)IsWrite) << 1)); + + Value *LoadedTD = IRB.CreateLoad(IRB.getInt8PtrTy(), ShadowData); + if (SanitizeFunction) { + Value *BadTDCmp = IRB.CreateICmpNE(LoadedTD, TD); + Instruction *BadTDTerm, *GoodTDTerm; + SplitBlockAndInsertIfThenElse(BadTDCmp, &*IRB.GetInsertPoint(), + &BadTDTerm, &GoodTDTerm, UnlikelyBW); + IRB.SetInsertPoint(BadTDTerm); + + // We now know that the types did not match (we're on the slow path). If + // the type is unknown, then set it. + Value *NullTDCmp = IRB.CreateIsNull(LoadedTD); + Instruction *NullTDTerm, *MismatchTerm; + SplitBlockAndInsertIfThenElse(NullTDCmp, &*IRB.GetInsertPoint(), + &NullTDTerm, &MismatchTerm); + + // If the type is unknown, then set the type. + IRB.SetInsertPoint(NullTDTerm); + + // We're about to set the type. Make sure that all bytes in the value are + // also of unknown type. + Value *Size = ConstantInt::get(OrdTy, AccessSize); + Value *NotAllUnkTD = IRB.getFalse(); + for (uint64_t i = 1; i < AccessSize; ++i) { + Value *UnkShadowData = IRB.CreateIntToPtr( + IRB.CreateAdd(ShadowDataInt, + ConstantInt::get(IntptrTy, i << PtrShift)), + Int8PtrPtrTy); + Value *ILdTD = IRB.CreateLoad(IRB.getInt8PtrTy(), UnkShadowData); + NotAllUnkTD = IRB.CreateOr(NotAllUnkTD, IRB.CreateIsNotNull(ILdTD)); + } + + Instruction *BeforeSetType = &*IRB.GetInsertPoint(); + Instruction *BadUTDTerm = SplitBlockAndInsertIfThen( + NotAllUnkTD, BeforeSetType, false, UnlikelyBW); + IRB.SetInsertPoint(BadUTDTerm); + IRB.CreateCall(TysanCheck, {IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy()), + Size, (Value *)TD, (Value *)Flags}); + + IRB.SetInsertPoint(BeforeSetType); + SetType(); + + // We have a non-trivial mismatch. Call the runtime. + IRB.SetInsertPoint(MismatchTerm); + IRB.CreateCall(TysanCheck, {IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy()), + Size, (Value *)TD, (Value *)Flags}); + + // We appear to have the right type. Make sure that all other bytes in + // the type are still marked as interior bytes. If not, call the runtime. + IRB.SetInsertPoint(GoodTDTerm); + Value *NotAllBadTD = IRB.getFalse(); + for (uint64_t i = 1; i < AccessSize; ++i) { + Value *BadShadowData = IRB.CreateIntToPtr( + IRB.CreateAdd(ShadowDataInt, + ConstantInt::get(IntptrTy, i << PtrShift)), + Int8PtrPtrTy); + Value *ILdTD = IRB.CreatePtrToInt( + IRB.CreateLoad(IRB.getInt8PtrTy(), BadShadowData), IntptrTy); + NotAllBadTD = IRB.CreateOr( + NotAllBadTD, + IRB.CreateICmpSGE(ILdTD, ConstantInt::get(IntptrTy, 0))); + } + + Instruction *BadITDTerm = SplitBlockAndInsertIfThen( + NotAllBadTD, &*IRB.GetInsertPoint(), false, UnlikelyBW); + IRB.SetInsertPoint(BadITDTerm); + IRB.CreateCall(TysanCheck, {IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy()), + Size, (Value *)TD, (Value *)Flags}); + } else { + // If we're not sanitizing this function, then we only care whether we + // need to *set* the type. + Value *NullTDCmp = IRB.CreateIsNull(LoadedTD); + Instruction *NullTDTerm = SplitBlockAndInsertIfThen( + NullTDCmp, &*IRB.GetInsertPoint(), false, UnlikelyBW); + IRB.SetInsertPoint(NullTDTerm); + SetType(); + } + } else if (ForceSetType || IsWrite) { + // In the mode where writes always set the type, for a write (which does + // not also read), we just set the type. + SetType(); + } + + return true; +} + +bool TypeSanitizer::instrumentMemoryAccess( + Instruction *I, MemoryLocation &MLoc, Value *&ShadowBase, + Value *&AppMemMask, bool SanitizeFunction, + TypeDescriptorsMapTy &TypeDescriptors, const DataLayout &DL) { + IRBuilder<> IRB(I); + assert(MLoc.Size.isPrecise()); + if (instrumentWithShadowUpdate( + IRB, MLoc.AATags.TBAA, const_cast(MLoc.Ptr), + MLoc.Size.getValue(), I->mayReadFromMemory(), I->mayWriteToMemory(), + ShadowBase, AppMemMask, false, SanitizeFunction, TypeDescriptors, + DL)) { + ++NumInstrumentedAccesses; + return true; + } + + return false; +} + +// Memory-related intrinsics/instructions reset the type of the destination +// memory (including allocas and byval arguments). +bool TypeSanitizer::instrumentMemInst(Value *V, Value *&ShadowBase, + Value *&AppMemMask, + const DataLayout &DL) { + BasicBlock::iterator IP; + BasicBlock *BB; + Function *F; + + if (auto *I = dyn_cast(V)) { + IP = BasicBlock::iterator(I); + BB = I->getParent(); + F = BB->getParent(); + } else { + auto *A = cast(V); + F = A->getParent(); + BB = &F->getEntryBlock(); + IP = BB->getFirstInsertionPt(); + } + + Value *Dest, *Size, *Src = nullptr; + bool NeedsMemMove = false; + IRBuilder<> IRB(BB, IP); + + if (auto *A = dyn_cast(V)) { + assert(A->hasByValAttr() && "Type reset for non-byval argument?"); + + Dest = A; + Size = + ConstantInt::get(IntptrTy, DL.getTypeAllocSize(A->getParamByValType())); + } else { + auto *I = cast(V); + if (auto *MI = dyn_cast(I)) { + if (MI->getDestAddressSpace() != 0) + return false; + + Dest = MI->getDest(); + Size = MI->getLength(); + + if (auto *MTI = dyn_cast(MI)) { + if (MTI->getSourceAddressSpace() == 0) { + Src = MTI->getSource(); + NeedsMemMove = isa(MTI); + } + } + } else if (auto *II = dyn_cast(I)) { + if (II->getIntrinsicID() != Intrinsic::lifetime_start && + II->getIntrinsicID() != Intrinsic::lifetime_end) + return false; + + Size = II->getArgOperand(0); + Dest = II->getArgOperand(1); + } else if (auto *AI = dyn_cast(I)) { + // We need to clear the types for new stack allocations (or else we might + // read stale type information from a previous function execution). + + IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(I))); + IRB.SetInstDebugLocation(I); + + Size = IRB.CreateMul( + IRB.CreateZExtOrTrunc(AI->getArraySize(), IntptrTy), + ConstantInt::get(IntptrTy, + DL.getTypeAllocSize(AI->getAllocatedType()))); + Dest = I; + } else { + return false; + } + } + + if (!ShadowBase) + ShadowBase = getShadowBase(*F); + if (!AppMemMask) + AppMemMask = getAppMemMask(*F); + + Value *ShadowDataInt = IRB.CreateAdd( + IRB.CreateShl( + IRB.CreateAnd(IRB.CreatePtrToInt(Dest, IntptrTy), AppMemMask), + PtrShift), + ShadowBase); + Value *ShadowData = IRB.CreateIntToPtr(ShadowDataInt, IRB.getInt8PtrTy()); + + if (!Src) { + IRB.CreateMemSet(ShadowData, IRB.getInt8(0), IRB.CreateShl(Size, PtrShift), + Align(1u << PtrShift)); + return true; + } + + Value *SrcShadowDataInt = IRB.CreateAdd( + IRB.CreateShl( + IRB.CreateAnd(IRB.CreatePtrToInt(Src, IntptrTy), AppMemMask), + PtrShift), + ShadowBase); + Value *SrcShadowData = + IRB.CreateIntToPtr(SrcShadowDataInt, IRB.getInt8PtrTy()); + + if (NeedsMemMove) { + IRB.CreateMemMove(ShadowData, Align(1u << PtrShift), SrcShadowData, + Align(1u << PtrShift), IRB.CreateShl(Size, PtrShift)); + } else { + IRB.CreateMemCpy(ShadowData, Align(1u << PtrShift), SrcShadowData, + Align(1u << PtrShift), IRB.CreateShl(Size, PtrShift)); + } + + return true; +} + +PreservedAnalyses TypeSanitizerPass::run(Function &F, + FunctionAnalysisManager &FAM) { + TypeSanitizer TySan(*F.getParent()); + TySan.run(F, FAM.getResult(F)); + return PreservedAnalyses::none(); +} + +PreservedAnalyses ModuleTypeSanitizerPass::run(Module &M, + ModuleAnalysisManager &AM) { + insertModuleCtor(M); + return PreservedAnalyses::none(); +} diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp --- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp +++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp @@ -949,6 +949,7 @@ case Attribute::SanitizeAddress: case Attribute::SanitizeMemory: case Attribute::SanitizeThread: + case Attribute::SanitizeType: case Attribute::SanitizeHWAddress: case Attribute::SanitizeMemTag: case Attribute::SpeculativeLoadHardening: diff --git a/llvm/test/Instrumentation/TypeSanitizer/basic.ll b/llvm/test/Instrumentation/TypeSanitizer/basic.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Instrumentation/TypeSanitizer/basic.ll @@ -0,0 +1,482 @@ +; Test basic type sanitizer instrumentation. +; +; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" + +; CHECK-DAG: $__tysan_v1_Simple_20C_2b_2b_20TBAA = comdat any +; CHECK-DAG: $__tysan_v1_omnipotent_20char = comdat any +; CHECK-DAG: $__tysan_v1_int = comdat any +; CHECK-DAG: $__tysan_v1_int_o_0 = comdat any +; CHECK-DAG: $__tysan_v1___ZTS1x = comdat any +; CHECK-DAG: $__tysan_v1___ZTS1v = comdat any +; CHECK-DAG: $__tysan_v1___ZTS1v_o_12 = comdat any +; CHECK-DAG: $__tysan_v1_____anonymous__027d9e575c5d34cb5d60d6a1d6276f95 = comdat any +; CHECK-DAG: $__tysan_v1_____anonymous__027d9e575c5d34cb5d60d6a1d6276f95_o_24 = comdat any + +; CHECK-DAG: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 0, ptr @tysan.module_ctor, ptr null }] + +; CHECK-DAG: @__tysan_shadow_memory_address = external global i64 +; CHECK-DAG: @__tysan_app_memory_mask = external global i64 + +; CHECK-DAG: @__tysan_v1_Simple_20C_2b_2b_20TBAA = linkonce_odr constant { i64, i64, [16 x i8] } { i64 2, i64 0, [16 x i8] c"Simple C++ TBAA\00" }, comdat +; CHECK-DAG: @__tysan_v1_omnipotent_20char = linkonce_odr constant { i64, i64, ptr, i64, [16 x i8] } { i64 2, i64 1, ptr @__tysan_v1_Simple_20C_2b_2b_20TBAA, i64 0, [16 x i8] c"omnipotent char\00" }, comdat +; CHECK-DAG: @__tysan_v1_int = linkonce_odr constant { i64, i64, ptr, i64, [4 x i8] } { i64 2, i64 1, ptr @__tysan_v1_omnipotent_20char, i64 0, [4 x i8] c"int\00" }, comdat +; CHECK-DAG: @__tysan_v1_int_o_0 = linkonce_odr constant { i64, ptr, ptr, i64 } { i64 1, ptr @__tysan_v1_int, ptr @__tysan_v1_int, i64 0 }, comdat +; CHECK-DAG: @__tysan_v1___ZTS1x = linkonce_odr constant { i64, i64, ptr, i64, ptr, i64, [7 x i8] } { i64 2, i64 2, ptr @__tysan_v1_int, i64 0, ptr @__tysan_v1_int, i64 4, [7 x i8] c"_ZTS1x\00" }, comdat +; CHECK-DAG: @__tysan_v1___ZTS1v = linkonce_odr constant { i64, i64, ptr, i64, ptr, i64, ptr, i64, [7 x i8] } { i64 2, i64 3, ptr @__tysan_v1_int, i64 8, ptr @__tysan_v1_int, i64 12, ptr @__tysan_v1___ZTS1x, i64 16, [7 x i8] c"_ZTS1v\00" }, comdat +; CHECK-DAG: @__tysan_v1___ZTS1v_o_12 = linkonce_odr constant { i64, ptr, ptr, i64 } { i64 1, ptr @__tysan_v1___ZTS1v, ptr @__tysan_v1_int, i64 12 }, comdat +; CHECK-DAG: @__tysan_v1___ZTSN12__GLOBAL____N__11zE = internal constant { i64, i64, ptr, i64, [23 x i8] } { i64 2, i64 1, ptr @__tysan_v1_int, i64 24, [23 x i8] c"_ZTSN12_GLOBAL__N_11zE\00" } +; CHECK-DAG: @__tysan_v1___ZTSN12__GLOBAL____N__11zE_o_24 = internal constant { i64, ptr, ptr, i64 } { i64 1, ptr @__tysan_v1___ZTSN12__GLOBAL____N__11zE, ptr @__tysan_v1_int, i64 24 } +; CHECK-DAG: @__tysan_v1___ZTS1yIN12__GLOBAL____N__11zEE = internal constant { i64, i64, ptr, i64, [27 x i8] } { i64 2, i64 1, ptr @__tysan_v1_int, i64 24, [27 x i8] c"_ZTS1yIN12_GLOBAL__N_11zEE\00" } +; CHECK-DAG: @__tysan_v1___ZTS1yIN12__GLOBAL____N__11zEE_o_24 = internal constant { i64, ptr, ptr, i64 } { i64 1, ptr @__tysan_v1___ZTS1yIN12__GLOBAL____N__11zEE, ptr @__tysan_v1_int, i64 24 } +; CHECK-DAG: @__tysan_v1_____anonymous__027d9e575c5d34cb5d60d6a1d6276f95 = linkonce_odr constant { i64, i64, ptr, i64, [1 x i8] } { i64 2, i64 1, ptr @__tysan_v1_int, i64 24, [1 x i8] zeroinitializer }, comdat +; CHECK-DAG: @__tysan_v1_____anonymous__027d9e575c5d34cb5d60d6a1d6276f95_o_24 = linkonce_odr constant { i64, ptr, ptr, i64 } { i64 1, ptr @__tysan_v1_____anonymous__027d9e575c5d34cb5d60d6a1d6276f95, ptr @__tysan_v1_int, i64 24 }, comdat + +@global1 = global i32 0, align 4 +@global2 = global i32 0, align 4 + +define i32 @test_load(ptr %a) sanitize_type { +entry: + %tmp1 = load i32, ptr %a, align 4, !tbaa !3 + ret i32 %tmp1 + +; CHECK-LABEL: @test_load( +; CHECK: [[V0:%[0-9]+]] = load i64, ptr @__tysan_app_memory_mask +; CHECK: [[V1:%[0-9]+]] = load i64, ptr @__tysan_shadow_memory_address +; CHECK: [[V2:%[0-9]+]] = ptrtoint ptr %a to i64 +; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]] +; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3 +; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]] +; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to ptr +; CHECK: [[V7:%[0-9]+]] = load ptr, ptr [[V6]] +; CHECK: [[V8:%[0-9]+]] = icmp ne ptr [[V7]], @__tysan_v1_int_o_0 +; CHECK: br i1 [[V8]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD:[0-9]+]] + +; CHECK: [[V10:%[0-9]+]] = icmp eq ptr [[V7]], null +; CHECK: br i1 [[V10]], label %{{[0-9]+}}, label %{{[0-9]+}} + +; CHECK: [[V12:%[0-9]+]] = add i64 [[V5]], 8 +; CHECK: [[V13:%[0-9]+]] = inttoptr i64 [[V12]] to ptr +; CHECK: [[V14:%[0-9]+]] = load ptr, ptr [[V13]] +; CHECK: [[V15:%[0-9]+]] = icmp ne ptr [[V14]], null +; CHECK: [[V16:%[0-9]+]] = or i1 false, [[V15]] +; CHECK: [[V17:%[0-9]+]] = add i64 [[V5]], 16 +; CHECK: [[V18:%[0-9]+]] = inttoptr i64 [[V17]] to ptr +; CHECK: [[V19:%[0-9]+]] = load ptr, ptr [[V18]] +; CHECK: [[V20:%[0-9]+]] = icmp ne ptr [[V19]], null +; CHECK: [[V21:%[0-9]+]] = or i1 [[V16]], [[V20]] +; CHECK: [[V22:%[0-9]+]] = add i64 [[V5]], 24 +; CHECK: [[V23:%[0-9]+]] = inttoptr i64 [[V22]] to ptr +; CHECK: [[V24:%[0-9]+]] = load ptr, ptr [[V23]] +; CHECK: [[V25:%[0-9]+]] = icmp ne ptr [[V24]], null +; CHECK: [[V26:%[0-9]+]] = or i1 [[V21]], [[V25]] +; CHECK: br i1 [[V26]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]] + +; CHECK: call void @__tysan_check(ptr %a, i32 4, ptr @__tysan_v1_int_o_0, i32 1) +; CHECK: br label %{{[0-9]+}} + +; CHECK: store ptr @__tysan_v1_int_o_0, ptr [[V6]] +; CHECK: [[V30:%[0-9]+]] = add i64 [[V5]], 8 +; CHECK: [[V31:%[0-9]+]] = inttoptr i64 [[V30]] to ptr +; CHECK: store ptr inttoptr (i64 -1 to ptr), ptr [[V31]] +; CHECK: [[V32:%[0-9]+]] = add i64 [[V5]], 16 +; CHECK: [[V33:%[0-9]+]] = inttoptr i64 [[V32]] to ptr +; CHECK: store ptr inttoptr (i64 -2 to ptr), ptr [[V33]] +; CHECK: [[V34:%[0-9]+]] = add i64 [[V5]], 24 +; CHECK: [[V35:%[0-9]+]] = inttoptr i64 [[V34]] to ptr +; CHECK: store ptr inttoptr (i64 -3 to ptr), ptr [[V35]] +; CHECK: br label %{{[0-9]+}} + +; CHECK: call void @__tysan_check(ptr %a, i32 4, ptr @__tysan_v1_int_o_0, i32 1) +; CHECK: br label %{{[0-9]+}} + +; CHECK: br label %{{[0-9]+}} + +; CHECK: [[V40:%[0-9]+]] = add i64 [[V5]], 8 +; CHECK: [[V41:%[0-9]+]] = inttoptr i64 [[V40]] to ptr +; CHECK: [[V42:%[0-9]+]] = load ptr, ptr [[V41]] +; CHECK: [[V42a:%[0-9]+]] = ptrtoint ptr [[V42]] to i64 +; CHECK: [[V43:%[0-9]+]] = icmp sge i64 [[V42a]], 0 +; CHECK: [[V44:%[0-9]+]] = or i1 false, [[V43]] +; CHECK: [[V45:%[0-9]+]] = add i64 [[V5]], 16 +; CHECK: [[V46:%[0-9]+]] = inttoptr i64 [[V45]] to ptr +; CHECK: [[V47:%[0-9]+]] = load ptr, ptr [[V46]] +; CHECK: [[V47a:%[0-9]+]] = ptrtoint ptr [[V47]] to i64 +; CHECK: [[V48:%[0-9]+]] = icmp sge i64 [[V47a]], 0 +; CHECK: [[V49:%[0-9]+]] = or i1 [[V44]], [[V48]] +; CHECK: [[V50:%[0-9]+]] = add i64 [[V5]], 24 +; CHECK: [[V51:%[0-9]+]] = inttoptr i64 [[V50]] to ptr +; CHECK: [[V52:%[0-9]+]] = load ptr, ptr [[V51]] +; CHECK: [[V52a:%[0-9]+]] = ptrtoint ptr [[V52]] to i64 +; CHECK: [[V53:%[0-9]+]] = icmp sge i64 [[V52a]], 0 +; CHECK: [[V54:%[0-9]+]] = or i1 [[V49]], [[V53]] +; CHECK: br i1 [[V54]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]] + +; CHECK: call void @__tysan_check(ptr %a, i32 4, ptr @__tysan_v1_int_o_0, i32 1) +; CHECK: br label %{{[0-9]+}} + +; CHECK: br label %{{[0-9]+}} + +; CHECK: %tmp1 = load i32, ptr %a, align 4, !tbaa !{{[0-9]+}} +; CHECK: ret i32 %tmp1 +} + +define void @test_store(ptr %a) sanitize_type { +entry: + store i32 42, ptr %a, align 4, !tbaa !6 + ret void + +; CHECK-LABEL: @test_store( +; CHECK: [[V0:%[0-9]+]] = load i64, ptr @__tysan_app_memory_mask +; CHECK: [[V1:%[0-9]+]] = load i64, ptr @__tysan_shadow_memory_address +; CHECK: [[V2:%[0-9]+]] = ptrtoint ptr %a to i64 +; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]] +; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3 +; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]] +; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to ptr +; CHECK: [[V7:%[0-9]+]] = load ptr, ptr [[V6]] +; CHECK: [[V8:%[0-9]+]] = icmp ne ptr [[V7]], @__tysan_v1___ZTS1v_o_12 +; CHECK: br i1 [[V8]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]] + +; CHECK: [[V10:%[0-9]+]] = icmp eq ptr [[V7]], null +; CHECK: br i1 [[V10]], label %{{[0-9]+}}, label %{{[0-9]+}} + +; CHECK: [[V12:%[0-9]+]] = add i64 [[V5]], 8 +; CHECK: [[V13:%[0-9]+]] = inttoptr i64 [[V12]] to ptr +; CHECK: [[V14:%[0-9]+]] = load ptr, ptr [[V13]] +; CHECK: [[V15:%[0-9]+]] = icmp ne ptr [[V14]], null +; CHECK: [[V16:%[0-9]+]] = or i1 false, [[V15]] +; CHECK: [[V17:%[0-9]+]] = add i64 [[V5]], 16 +; CHECK: [[V18:%[0-9]+]] = inttoptr i64 [[V17]] to ptr +; CHECK: [[V19:%[0-9]+]] = load ptr, ptr [[V18]] +; CHECK: [[V20:%[0-9]+]] = icmp ne ptr [[V19]], null +; CHECK: [[V21:%[0-9]+]] = or i1 [[V16]], [[V20]] +; CHECK: [[V22:%[0-9]+]] = add i64 [[V5]], 24 +; CHECK: [[V23:%[0-9]+]] = inttoptr i64 [[V22]] to ptr +; CHECK: [[V24:%[0-9]+]] = load ptr, ptr [[V23]] +; CHECK: [[V25:%[0-9]+]] = icmp ne ptr [[V24]], null +; CHECK: [[V26:%[0-9]+]] = or i1 [[V21]], [[V25]] +; CHECK: br i1 [[V26]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]] + +; CHECK: call void @__tysan_check(ptr %a, i32 4, ptr @__tysan_v1___ZTS1v_o_12, i32 2) +; CHECK: br label %{{[0-9]+}} + +; CHECK: store ptr @__tysan_v1___ZTS1v_o_12, ptr [[V6]] +; CHECK: [[V30:%[0-9]+]] = add i64 [[V5]], 8 +; CHECK: [[V31:%[0-9]+]] = inttoptr i64 [[V30]] to ptr +; CHECK: store ptr inttoptr (i64 -1 to ptr), ptr [[V31]] +; CHECK: [[V32:%[0-9]+]] = add i64 [[V5]], 16 +; CHECK: [[V33:%[0-9]+]] = inttoptr i64 [[V32]] to ptr +; CHECK: store ptr inttoptr (i64 -2 to ptr), ptr [[V33]] +; CHECK: [[V34:%[0-9]+]] = add i64 [[V5]], 24 +; CHECK: [[V35:%[0-9]+]] = inttoptr i64 [[V34]] to ptr +; CHECK: store ptr inttoptr (i64 -3 to ptr), ptr [[V35]] +; CHECK: br label %{{[0-9]+}} + +; CHECK: call void @__tysan_check(ptr %a, i32 4, ptr @__tysan_v1___ZTS1v_o_12, i32 2) +; CHECK: br label %{{[0-9]+}} + +; CHECK: br label %{{[0-9]+}} + +; CHECK: [[V40:%[0-9]+]] = add i64 [[V5]], 8 +; CHECK: [[V41:%[0-9]+]] = inttoptr i64 [[V40]] to ptr +; CHECK: [[V42:%[0-9]+]] = load ptr, ptr [[V41]] +; CHECK: [[V42a:%[0-9]+]] = ptrtoint ptr [[V42]] to i64 +; CHECK: [[V43:%[0-9]+]] = icmp sge i64 [[V42a]], 0 +; CHECK: [[V44:%[0-9]+]] = or i1 false, [[V43]] +; CHECK: [[V45:%[0-9]+]] = add i64 [[V5]], 16 +; CHECK: [[V46:%[0-9]+]] = inttoptr i64 [[V45]] to ptr +; CHECK: [[V47:%[0-9]+]] = load ptr, ptr [[V46]] +; CHECK: [[V47a:%[0-9]+]] = ptrtoint ptr [[V47]] to i64 +; CHECK: [[V48:%[0-9]+]] = icmp sge i64 [[V47a]], 0 +; CHECK: [[V49:%[0-9]+]] = or i1 [[V44]], [[V48]] +; CHECK: [[V50:%[0-9]+]] = add i64 [[V5]], 24 +; CHECK: [[V51:%[0-9]+]] = inttoptr i64 [[V50]] to ptr +; CHECK: [[V52:%[0-9]+]] = load ptr, ptr [[V51]] +; CHECK: [[V52a:%[0-9]+]] = ptrtoint ptr [[V52]] to i64 +; CHECK: [[V53:%[0-9]+]] = icmp sge i64 [[V52a]], 0 +; CHECK: [[V54:%[0-9]+]] = or i1 [[V49]], [[V53]] +; CHECK: br i1 [[V54]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]] + +; CHECK: call void @__tysan_check(ptr %a, i32 4, ptr @__tysan_v1___ZTS1v_o_12, i32 2) +; CHECK: br label %{{[0-9]+}} + +; CHECK: br label %{{[0-9]+}} + +; CHECK: store i32 42, ptr %a, align 4, !tbaa !{{[0-9]+}} +; CHECK: ret void +} + +define i32 @test_load_unk(ptr %a) sanitize_type { +entry: + %tmp1 = load i32, ptr %a, align 4 + ret i32 %tmp1 + +; CHECK-LABEL: @test_load_unk( +; CHECK: call void @__tysan_check(ptr %a, i32 4, ptr null, i32 1) +; CHECK: ret i32 +} + +define void @test_store_unk(ptr %a) sanitize_type { +entry: + store i32 42, ptr %a, align 4 + ret void + +; CHECK-LABEL: @test_store_unk( +; CHECK: call void @__tysan_check(ptr %a, i32 4, ptr null, i32 2) +; CHECK: ret void +} + +define i32 @test_load_nsan(ptr %a) { +entry: + %tmp1 = load i32, ptr %a, align 4, !tbaa !3 + ret i32 %tmp1 + +; CHECK-LABEL: @test_load_nsan( +; CHECK: [[V0:%[0-9]+]] = load i64, ptr @__tysan_app_memory_mask +; CHECK: [[V1:%[0-9]+]] = load i64, ptr @__tysan_shadow_memory_address +; CHECK: [[V2:%[0-9]+]] = ptrtoint ptr %a to i64 +; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]] +; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3 +; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]] +; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to ptr +; CHECK: [[V7:%[0-9]+]] = load ptr, ptr [[V6]] +; CHECK: [[V8:%[0-9]+]] = icmp eq ptr [[V7]], null +; CHECK: br i1 [[V8]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]] + +; CHECK: store ptr @__tysan_v1_int_o_0, ptr [[V6]] +; CHECK: [[V10:%[0-9]+]] = add i64 [[V5]], 8 +; CHECK: [[V11:%[0-9]+]] = inttoptr i64 [[V10]] to ptr +; CHECK: store ptr inttoptr (i64 -1 to ptr), ptr [[V11]] +; CHECK: [[V12:%[0-9]+]] = add i64 [[V5]], 16 +; CHECK: [[V13:%[0-9]+]] = inttoptr i64 [[V12]] to ptr +; CHECK: store ptr inttoptr (i64 -2 to ptr), ptr [[V13]] +; CHECK: [[V14:%[0-9]+]] = add i64 [[V5]], 24 +; CHECK: [[V15:%[0-9]+]] = inttoptr i64 [[V14]] to ptr +; CHECK: store ptr inttoptr (i64 -3 to ptr), ptr [[V15]] +; CHECK: br label %{{[0-9]+}} + +; CHECK: %tmp1 = load i32, ptr %a, align 4, !tbaa !{{[0-9]+}} +; CHECK: ret i32 %tmp1 +} + +define void @test_store_nsan(ptr %a) { +entry: + store i32 42, ptr %a, align 4, !tbaa !3 + ret void + +; CHECK-LABEL: @test_store_nsan( +; CHECK: [[V0:%[0-9]+]] = load i64, ptr @__tysan_app_memory_mask +; CHECK: [[V1:%[0-9]+]] = load i64, ptr @__tysan_shadow_memory_address +; CHECK: [[V2:%[0-9]+]] = ptrtoint ptr %a to i64 +; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]] +; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3 +; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]] +; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to ptr +; CHECK: [[V7:%[0-9]+]] = load ptr, ptr [[V6]] +; CHECK: [[V8:%[0-9]+]] = icmp eq ptr [[V7]], null +; CHECK: br i1 [[V8]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]] + +; CHECK: store ptr @__tysan_v1_int_o_0, ptr [[V6]] +; CHECK: [[V10:%[0-9]+]] = add i64 [[V5]], 8 +; CHECK: [[V11:%[0-9]+]] = inttoptr i64 [[V10]] to ptr +; CHECK: store ptr inttoptr (i64 -1 to ptr), ptr [[V11]] +; CHECK: [[V12:%[0-9]+]] = add i64 [[V5]], 16 +; CHECK: [[V13:%[0-9]+]] = inttoptr i64 [[V12]] to ptr +; CHECK: store ptr inttoptr (i64 -2 to ptr), ptr [[V13]] +; CHECK: [[V14:%[0-9]+]] = add i64 [[V5]], 24 +; CHECK: [[V15:%[0-9]+]] = inttoptr i64 [[V14]] to ptr +; CHECK: store ptr inttoptr (i64 -3 to ptr), ptr [[V15]] +; CHECK: br label %{{[0-9]+}} + +; CHECK: store i32 42, ptr %a, align 4, !tbaa !{{[0-9]+}} +; CHECK: ret void +} + +define void @test_anon_ns(ptr %a, ptr %b) sanitize_type { +entry: + store i32 42, ptr %a, align 4, !tbaa !8 + store i32 43, ptr %b, align 4, !tbaa !10 + ret void + +; CHECK-LABEL: @test_anon_ns( +; CHECK: store ptr @__tysan_v1___ZTSN12__GLOBAL____N__11zE_o_24, ptr +; CHECK: ret void +} + +define void @test_anon_type(ptr %a) sanitize_type { +entry: + store i32 42, ptr %a, align 4, !tbaa !12 + ret void + +; CHECK-LABEL: @test_anon_type( +; CHECK: store ptr @__tysan_v1_____anonymous__027d9e575c5d34cb5d60d6a1d6276f95_o_24, ptr +; CHECK: ret void +} + +declare void @alloca_test_use(ptr) +define void @alloca_test() sanitize_type { +entry: + %x = alloca [10 x i8], align 1 + call void @alloca_test_use([10 x i8]* %x) + ret void + +; CHECK-LABEL: @alloca_test( +; CHECK: [[V0:%[0-9]+]] = load i64, ptr @__tysan_app_memory_mask +; CHECK: [[V1:%[0-9]+]] = load i64, ptr @__tysan_shadow_memory_address +; CHECK: %x = alloca [10 x i8], align 1 +; CHECK: [[V2:%[0-9]+]] = ptrtoint ptr %x to i64 +; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]] +; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3 +; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]] +; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to ptr +; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[V6]], i8 0, i64 80, i1 false) +; CHECK: call void @alloca_test_use(ptr %x) +; CHECK: ret void +} + +%struct.s20 = type { i32, i32, [24 x i8] } +define void @byval_test(%struct.s20* byval(%struct.s20) align 32 %x) sanitize_type { +entry: + ret void + +; CHECK-LABEL: @byval_test( +; CHECK: [[V0:%[0-9]+]] = load i64, ptr @__tysan_app_memory_mask +; CHECK: [[V1:%[0-9]+]] = load i64, ptr @__tysan_shadow_memory_address +; CHECK: [[V2:%[0-9]+]] = ptrtoint ptr %x to i64 +; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]] +; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3 +; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]] +; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to ptr +; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[V6]], i8 0, i64 256, i1 false) +; CHECK: ret void +; NOTE: Ideally, we'd get the type from the caller's copy of the data (instead +; of setting it all to unknown). +} + +declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i32, i1) nounwind + +define void @memintr_test(ptr %a, ptr %b) nounwind uwtable sanitize_type { + entry: + tail call void @llvm.memset.p0.i64(ptr %a, i8 0, i64 100, i32 1, i1 false) + tail call void @llvm.memmove.p0.p0.i64(ptr %a, ptr %b, i64 100, i32 1, i1 false) + tail call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr %b, i64 100, i32 1, i1 false) + ret void + +; CHECK-LABEL: @memintr_test( +; CHECK: [[V0:%[0-9]+]] = load i64, ptr @__tysan_app_memory_mask +; CHECK: [[V1:%[0-9]+]] = load i64, ptr @__tysan_shadow_memory_address +; CHECK: [[V2:%[0-9]+]] = ptrtoint ptr %a to i64 +; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]] +; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3 +; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]] +; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to ptr +; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[V6]], i8 0, i64 800, i1 false) +; CHECK: call void @llvm.memset.p0.i64(ptr align 1 %a, i8 0, i64 100, i1 false) +; CHECK: [[V7:%[0-9]+]] = ptrtoint ptr %a to i64 +; CHECK: [[V8:%[0-9]+]] = and i64 [[V7]], [[V0]] +; CHECK: [[V9:%[0-9]+]] = shl i64 [[V8]], 3 +; CHECK: [[V10:%[0-9]+]] = add i64 [[V9]], [[V1]] +; CHECK: [[V11:%[0-9]+]] = inttoptr i64 [[V10]] to ptr +; CHECK: [[V12:%[0-9]+]] = ptrtoint ptr %b to i64 +; CHECK: [[V13:%[0-9]+]] = and i64 [[V12]], [[V0]] +; CHECK: [[V14:%[0-9]+]] = shl i64 [[V13]], 3 +; CHECK: [[V15:%[0-9]+]] = add i64 [[V14]], [[V1]] +; CHECK: [[V16:%[0-9]+]] = inttoptr i64 [[V15]] to ptr +; CHECK: call void @llvm.memmove.p0.p0.i64(ptr align 8 [[V11]], ptr align 8 [[V16]], i64 800, i1 false) +; CHECK: call void @llvm.memmove.p0.p0.i64(ptr align 1 %a, ptr align 1 %b, i64 100, i1 false) +; CHECK: [[V17:%[0-9]+]] = ptrtoint ptr %a to i64 +; CHECK: [[V18:%[0-9]+]] = and i64 [[V17]], [[V0]] +; CHECK: [[V19:%[0-9]+]] = shl i64 [[V18]], 3 +; CHECK: [[V20:%[0-9]+]] = add i64 [[V19]], [[V1]] +; CHECK: [[V21:%[0-9]+]] = inttoptr i64 [[V20]] to ptr +; CHECK: [[V22:%[0-9]+]] = ptrtoint ptr %b to i64 +; CHECK: [[V23:%[0-9]+]] = and i64 [[V22]], [[V0]] +; CHECK: [[V24:%[0-9]+]] = shl i64 [[V23]], 3 +; CHECK: [[V25:%[0-9]+]] = add i64 [[V24]], [[V1]] +; CHECK: [[V26:%[0-9]+]] = inttoptr i64 [[V25]] to ptr +; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[V21]], ptr align 8 [[V26]], i64 800, i1 false) +; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 1 %a, ptr align 1 %b, i64 100, i1 false) +; CHECK: ret void +} + +define void @test_swifterror(ptr swifterror) sanitize_type { + %swifterror_ptr_value = load ptr, ptr %0 + ret void + +; CHECK-LABEL: @test_swifterror( +; CHECK-NOT: __tysan_check +; CHECK: ret void +} + +define void @test_swifterror_2(ptr swifterror) sanitize_type { + store ptr null, ptr %0 + ret void + +; CHECK-LABEL: @test_swifterror_2( +; CHECK-NOT: __tysan_check +; CHECK: ret void +} + +; CHECK-LABEL: define internal void @tysan.module_ctor() {{.+}}{ +; CHECK: call void @__tysan_init() +; CHECK: call void @__tysan_set_globals_types() +; CHECK: ret void +; CHECK: } + +; CHECK-LABEL: define internal void @__tysan_set_globals_types() { +; CHECK: [[V1:%[0-9]+]] = load i64, ptr @__tysan_app_memory_mask +; CHECK: [[V2:%[0-9]+]] = load i64, ptr @__tysan_shadow_memory_address +; CHECK: [[V3:%[0-9]+]] = and i64 ptrtoint (ptr @global1 to i64), [[V1]] +; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3 +; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V2]] +; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to ptr +; CHECK: store ptr @__tysan_v1_int, ptr [[V6]] +; CHECK: [[V7:%[0-9]+]] = add i64 [[V5]], 8 +; CHECK: [[V8:%[0-9]+]] = inttoptr i64 [[V7]] to ptr +; CHECK: store ptr inttoptr (i64 -1 to ptr), ptr [[V8]] +; CHECK: [[V9:%[0-9]+]] = add i64 [[V5]], 16 +; CHECK: [[V10:%[0-9]+]] = inttoptr i64 [[V9]] to ptr +; CHECK: store ptr inttoptr (i64 -2 to ptr), ptr [[V10]] +; CHECK: [[V11:%[0-9]+]] = add i64 [[V5]], 24 +; CHECK: [[V12:%[0-9]+]] = inttoptr i64 [[V11]] to ptr +; CHECK: store ptr inttoptr (i64 -3 to ptr), ptr [[V12]] +; CHECK: [[V13:%[0-9]+]] = and i64 ptrtoint (ptr @global1 to i64), [[V1]] +; CHECK: [[V14:%[0-9]+]] = shl i64 [[V13]], 3 +; CHECK: [[V15:%[0-9]+]] = add i64 [[V14]], [[V2]] +; CHECK: [[V16:%[0-9]+]] = inttoptr i64 [[V15]] to ptr +; CHECK: store ptr @__tysan_v1_int, ptr [[V16]] +; CHECK: [[V17:%[0-9]+]] = add i64 [[V15]], 8 +; CHECK: [[V18:%[0-9]+]] = inttoptr i64 [[V17]] to ptr +; CHECK: store ptr inttoptr (i64 -1 to ptr), ptr [[V18]] +; CHECK: [[V19:%[0-9]+]] = add i64 [[V15]], 16 +; CHECK: [[V20:%[0-9]+]] = inttoptr i64 [[V19]] to ptr +; CHECK: store ptr inttoptr (i64 -2 to ptr), ptr [[V20]] +; CHECK: [[V21:%[0-9]+]] = add i64 [[V15]], 24 +; CHECK: [[V22:%[0-9]+]] = inttoptr i64 [[V21]] to ptr +; CHECK: store ptr inttoptr (i64 -3 to ptr), ptr [[V22]] +; CHECK: ret void +; CHECK: } + +; CHECK: ![[PROFMD]] = !{!"branch_weights", i32 1, i32 100000} + +!llvm.tysan.globals = !{!13, !14} + +!0 = !{!"Simple C++ TBAA"} +!1 = !{!"omnipotent char", !0, i64 0} +!2 = !{!"int", !1, i64 0} +!3 = !{!2, !2, i64 0} +!4 = !{!"_ZTS1x", !2, i64 0, !2, i64 4} +!5 = !{!"_ZTS1v", !2, i64 8, !2, i64 12, !4, i64 16} +!6 = !{!5, !2, i64 12} +!7 = !{!"_ZTSN12_GLOBAL__N_11zE", !2, i64 24} +!8 = !{!7, !2, i64 24} +!9 = !{!"_ZTS1yIN12_GLOBAL__N_11zEE", !2, i64 24} +!10 = !{!9, !2, i64 24} +!11 = !{!"", !2, i64 24} +!12 = !{!11, !2, i64 24} +!13 = !{ptr @global1, !2} +!14 = !{ptr @global1, !2}