Index: include/llvm/Bitcode/LLVMBitCodes.h
===================================================================
--- include/llvm/Bitcode/LLVMBitCodes.h
+++ include/llvm/Bitcode/LLVMBitCodes.h
@@ -539,7 +539,8 @@
   ATTR_KIND_INACCESSIBLEMEM_ONLY = 49,
   ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY = 50,
   ATTR_KIND_ALLOC_SIZE = 51,
-  ATTR_KIND_WRITEONLY = 52
+  ATTR_KIND_WRITEONLY = 52,
+  ATTR_KIND_SANITIZE_TYPE = 53
 };
 
 enum ComdatSelectionKindCodes {
Index: include/llvm/IR/Attributes.td
===================================================================
--- include/llvm/IR/Attributes.td
+++ include/llvm/IR/Attributes.td
@@ -158,6 +158,9 @@
 /// MemorySanitizer is on.
 def SanitizeMemory : EnumAttr<"sanitize_memory">;
 
+/// TypeSanitizer is on.
+def SanitizeType : EnumAttr<"sanitize_type">;
+
 /// Argument is swift error.
 def SwiftError : EnumAttr<"swifterror">;
 
@@ -193,6 +196,7 @@
 def : CompatRule<"isEqual<SanitizeAddressAttr>">;
 def : CompatRule<"isEqual<SanitizeThreadAttr>">;
 def : CompatRule<"isEqual<SanitizeMemoryAttr>">;
+def : CompatRule<"isEqual<SanitizeTypeAttr>">;
 def : CompatRule<"isEqual<SoftFloatAttr>">;
 
 class MergeRule<string F> {
Index: include/llvm/InitializePasses.h
===================================================================
--- include/llvm/InitializePasses.h
+++ include/llvm/InitializePasses.h
@@ -355,6 +355,7 @@
 void initializeTargetPassConfigPass(PassRegistry&);
 void initializeTargetTransformInfoWrapperPassPass(PassRegistry&);
 void initializeThreadSanitizerPass(PassRegistry&);
+void initializeTypeSanitizerPass(PassRegistry&);
 void initializeTwoAddressInstructionPassPass(PassRegistry&);
 void initializeTypeBasedAAWrapperPassPass(PassRegistry&);
 void initializeUnifyFunctionExitNodesPass(PassRegistry&);
Index: include/llvm/Transforms/Instrumentation.h
===================================================================
--- include/llvm/Transforms/Instrumentation.h
+++ include/llvm/Transforms/Instrumentation.h
@@ -140,6 +140,10 @@
 // Insert ThreadSanitizer (race detection) instrumentation
 FunctionPass *createThreadSanitizerPass();
 
+// Insert TypeSanitizer (type-based-aliasing-violation detection)
+// instrumentation
+FunctionPass *createTypeSanitizerPass();
+
 // Insert DataFlowSanitizer (dynamic data flow analysis) instrumentation
 ModulePass *createDataFlowSanitizerPass(
     const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
Index: lib/Analysis/MemoryDependenceAnalysis.cpp
===================================================================
--- lib/Analysis/MemoryDependenceAnalysis.cpp
+++ lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -246,6 +246,12 @@
   if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
     return 0;
 
+  // Load widening is also hostile to the TypeSanitizer: it may cause false
+  // positives (i.e. accessing data that seems to be of the wrong type).
+  if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeType) &&
+      LI->getMetadata(LLVMContext::MD_tbaa) != nullptr)
+    return 0;
+
   const DataLayout &DL = LI->getModule()->getDataLayout();
 
   // Get the base of this load.
Index: lib/Analysis/TypeBasedAliasAnalysis.cpp
===================================================================
--- lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -294,9 +294,26 @@
   return isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
 }
 
+// When using the TypeSanitizer, don't use TBAA information for alias analysis.
+// This might cause us to remove memory accesses that we need to verify at
+// runtime.
+static bool usingSanitizeType(const Value *V) {
+  const Function *F;
+
+  if (auto *I = dyn_cast<Instruction>(V))
+    F = I->getParent()->getParent();
+  else if (auto *A = dyn_cast<Argument>(V))
+    F = A->getParent();
+  else
+    return false;
+
+  return F->hasFnAttribute(Attribute::SanitizeType);
+}
+
 AliasResult TypeBasedAAResult::alias(const MemoryLocation &LocA,
                                      const MemoryLocation &LocB) {
-  if (!EnableTBAA)
+  if (!EnableTBAA ||
+      usingSanitizeType(LocA.Ptr) || usingSanitizeType(LocB.Ptr))
     return AAResultBase::alias(LocA, LocB);
 
   // Get the attached MDNodes. If either value lacks a tbaa MDNode, we must
@@ -336,7 +353,7 @@
 
 FunctionModRefBehavior
 TypeBasedAAResult::getModRefBehavior(ImmutableCallSite CS) {
-  if (!EnableTBAA)
+  if (!EnableTBAA || usingSanitizeType(CS.getInstruction()))
     return AAResultBase::getModRefBehavior(CS);
 
   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
@@ -358,7 +375,7 @@
 
 ModRefInfo TypeBasedAAResult::getModRefInfo(ImmutableCallSite CS,
                                             const MemoryLocation &Loc) {
-  if (!EnableTBAA)
+  if (!EnableTBAA || usingSanitizeType(CS.getInstruction()))
     return AAResultBase::getModRefInfo(CS, Loc);
 
   if (const MDNode *L = Loc.AATags.TBAA)
@@ -372,7 +389,7 @@
 
 ModRefInfo TypeBasedAAResult::getModRefInfo(ImmutableCallSite CS1,
                                             ImmutableCallSite CS2) {
-  if (!EnableTBAA)
+  if (!EnableTBAA || usingSanitizeType(CS1.getInstruction()))
     return AAResultBase::getModRefInfo(CS1, CS2);
 
   if (const MDNode *M1 =
Index: lib/Analysis/ValueTracking.cpp
===================================================================
--- lib/Analysis/ValueTracking.cpp
+++ lib/Analysis/ValueTracking.cpp
@@ -3347,7 +3347,10 @@
         // Speculative load may create a race that did not exist in the source.
         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
         // Speculative load may load data from dirty regions.
-        LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress))
+        LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
+        // Speculative load may load data of the wrong type.
+        (LI->getFunction()->hasFnAttribute(Attribute::SanitizeType) &&
+         LI->getMetadata(LLVMContext::MD_tbaa) != nullptr))
       return false;
     const DataLayout &DL = LI->getModule()->getDataLayout();
     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
Index: lib/AsmParser/LLLexer.cpp
===================================================================
--- lib/AsmParser/LLLexer.cpp
+++ lib/AsmParser/LLLexer.cpp
@@ -656,6 +656,7 @@
   KEYWORD(sanitize_address);
   KEYWORD(sanitize_thread);
   KEYWORD(sanitize_memory);
+  KEYWORD(sanitize_type);
   KEYWORD(swifterror);
   KEYWORD(swiftself);
   KEYWORD(uwtable);
Index: lib/AsmParser/LLParser.cpp
===================================================================
--- lib/AsmParser/LLParser.cpp
+++ lib/AsmParser/LLParser.cpp
@@ -1112,6 +1112,8 @@
       B.addAttribute(Attribute::SanitizeThread); break;
     case lltok::kw_sanitize_memory:
       B.addAttribute(Attribute::SanitizeMemory); break;
+    case lltok::kw_sanitize_type:
+      B.addAttribute(Attribute::SanitizeType); break;
     case lltok::kw_uwtable: B.addAttribute(Attribute::UWTable); break;
     case lltok::kw_writeonly: B.addAttribute(Attribute::WriteOnly); break;
 
@@ -1433,6 +1435,7 @@
   case lltok::kw_sanitize_address:
   case lltok::kw_sanitize_memory:
   case lltok::kw_sanitize_thread:
+  case lltok::kw_sanitize_type:
   case lltok::kw_ssp:
   case lltok::kw_sspreq:
   case lltok::kw_sspstrong:
@@ -1524,6 +1527,7 @@
   case lltok::kw_sanitize_address:
   case lltok::kw_sanitize_memory:
   case lltok::kw_sanitize_thread:
+  case lltok::kw_sanitize_type:
   case lltok::kw_ssp:
   case lltok::kw_sspreq:
   case lltok::kw_sspstrong:
Index: lib/AsmParser/LLToken.h
===================================================================
--- lib/AsmParser/LLToken.h
+++ lib/AsmParser/LLToken.h
@@ -205,6 +205,7 @@
   kw_sret,
   kw_sanitize_thread,
   kw_sanitize_memory,
+  kw_sanitize_type,
   kw_swifterror,
   kw_swiftself,
   kw_uwtable,
Index: lib/Bitcode/Reader/BitcodeReader.cpp
===================================================================
--- lib/Bitcode/Reader/BitcodeReader.cpp
+++ lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1068,6 +1068,7 @@
   case Attribute::SwiftSelf: return 1ULL << 51;
   case Attribute::SwiftError: return 1ULL << 52;
   case Attribute::WriteOnly: return 1ULL << 53;
+  case Attribute::SanitizeType: return 1ULL << 54;
   case Attribute::Dereferenceable:
     llvm_unreachable("dereferenceable attribute not supported in raw format");
     break;
@@ -1282,6 +1283,8 @@
     return Attribute::SanitizeThread;
   case bitc::ATTR_KIND_SANITIZE_MEMORY:
     return Attribute::SanitizeMemory;
+  case bitc::ATTR_KIND_SANITIZE_TYPE:
+    return Attribute::SanitizeType;
   case bitc::ATTR_KIND_SWIFT_ERROR:
     return Attribute::SwiftError;
   case bitc::ATTR_KIND_SWIFT_SELF:
Index: lib/Bitcode/Writer/BitcodeWriter.cpp
===================================================================
--- lib/Bitcode/Writer/BitcodeWriter.cpp
+++ lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -698,6 +698,8 @@
     return bitc::ATTR_KIND_SANITIZE_THREAD;
   case Attribute::SanitizeMemory:
     return bitc::ATTR_KIND_SANITIZE_MEMORY;
+  case Attribute::SanitizeType:
+    return bitc::ATTR_KIND_SANITIZE_TYPE;
   case Attribute::SwiftError:
     return bitc::ATTR_KIND_SWIFT_ERROR;
   case Attribute::SwiftSelf:
Index: lib/CodeGen/ShrinkWrap.cpp
===================================================================
--- lib/CodeGen/ShrinkWrap.cpp
+++ lib/CodeGen/ShrinkWrap.cpp
@@ -541,7 +541,8 @@
       // sanitizers to be able to get a correct stack frame.
       !(MF.getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
         MF.getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
-        MF.getFunction()->hasFnAttribute(Attribute::SanitizeMemory));
+        MF.getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
+        MF.getFunction()->hasFnAttribute(Attribute::SanitizeType));
   // If EnableShrinkWrap is set, it takes precedence on whatever the
   // target sets. The rational is that we assume we want to test
   // something related to shrink-wrapping.
Index: lib/IR/Attributes.cpp
===================================================================
--- lib/IR/Attributes.cpp
+++ lib/IR/Attributes.cpp
@@ -330,6 +330,8 @@
     return "sanitize_thread";
   if (hasAttribute(Attribute::SanitizeMemory))
     return "sanitize_memory";
+  if (hasAttribute(Attribute::SanitizeType))
+    return "sanitize_type";
   if (hasAttribute(Attribute::UWTable))
     return "uwtable";
   if (hasAttribute(Attribute::ZExt))
Index: lib/IR/Verifier.cpp
===================================================================
--- lib/IR/Verifier.cpp
+++ lib/IR/Verifier.cpp
@@ -1367,6 +1367,7 @@
       I->getKindAsEnum() == Attribute::SanitizeAddress ||
       I->getKindAsEnum() == Attribute::SanitizeThread ||
       I->getKindAsEnum() == Attribute::SanitizeMemory ||
+      I->getKindAsEnum() == Attribute::SanitizeType ||
       I->getKindAsEnum() == Attribute::MinSize ||
       I->getKindAsEnum() == Attribute::NoDuplicate ||
       I->getKindAsEnum() == Attribute::Builtin ||
Index: lib/Transforms/IPO/ForceFunctionAttrs.cpp
===================================================================
--- lib/Transforms/IPO/ForceFunctionAttrs.cpp
+++ lib/Transforms/IPO/ForceFunctionAttrs.cpp
@@ -54,6 +54,7 @@
       .Case("sanitize_address", Attribute::SanitizeAddress)
       .Case("sanitize_memory", Attribute::SanitizeMemory)
       .Case("sanitize_thread", Attribute::SanitizeThread)
+      .Case("sanitize_type", Attribute::SanitizeType)
      .Case("ssp", Attribute::StackProtect)
      .Case("sspreq", Attribute::StackProtectReq)
      .Case("sspstrong", Attribute::StackProtectStrong)
Index: lib/Transforms/Instrumentation/CMakeLists.txt
===================================================================
--- lib/Transforms/Instrumentation/CMakeLists.txt
+++ lib/Transforms/Instrumentation/CMakeLists.txt
@@ -11,6 +11,7 @@
   SanitizerCoverage.cpp
   ThreadSanitizer.cpp
   EfficiencySanitizer.cpp
+  TypeSanitizer.cpp
 
   ADDITIONAL_HEADER_DIRS
   ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms
Index: lib/Transforms/Instrumentation/Instrumentation.cpp
===================================================================
--- lib/Transforms/Instrumentation/Instrumentation.cpp
+++ lib/Transforms/Instrumentation/Instrumentation.cpp
@@ -66,6 +66,7 @@
   initializePGOMemOPSizeOptLegacyPassPass(Registry);
   initializeInstrProfilingLegacyPassPass(Registry);
   initializeMemorySanitizerPass(Registry);
+  initializeTypeSanitizerPass(Registry);
   initializeThreadSanitizerPass(Registry);
   initializeSanitizerCoverageModulePass(Registry);
   initializeDataFlowSanitizerPass(Registry);
Index: lib/Transforms/Instrumentation/TypeSanitizer.cpp
===================================================================
--- /dev/null
+++ lib/Transforms/Instrumentation/TypeSanitizer.cpp
@@ -0,0 +1,777 @@
+//===----- TypeSanitizer.cpp - type-based-aliasing-violation detector -----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of TypeSanitizer, a type-based-aliasing-violation
+// detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MD5.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include <cctype>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "tysan"
+
+static const char *const kTysanModuleCtorName = "tysan.module_ctor";
+static const char *const kTysanInitName = "__tysan_init";
+static const char *const kTysanCheckName = "__tysan_check";
+static const char *const kTysanGVNamePrefix = "__tysan_v1_";
+
+static const char *const kTysanShadowMemoryAddress =
+    "__tysan_shadow_memory_address";
+static const char *const kTysanAppMemMask =
+    "__tysan_app_memory_mask";
+
+static cl::opt<bool> ClWritesAlwaysSetType("tysan-writes-always-set-type",
+                                           cl::desc("Writes always set the type"),
+                                           cl::Hidden, cl::init(false));
+
+STATISTIC(NumInstrumentedAccesses, "Number of instrumented accesses");
+
+static Regex AnonNameRegex("^_ZTS.*N[1-9][0-9]*_GLOBAL__N");
+
+namespace {
+
+/// TypeSanitizer: instrument the code in the module to find type-based
+/// aliasing violations.
+struct TypeSanitizer : public FunctionPass {
+  TypeSanitizer() : FunctionPass(ID) {}
+  StringRef getPassName() const override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  bool runOnFunction(Function &F) override;
+  bool doInitialization(Module &M) override;
+  static char ID; // Pass identification, replacement for typeid.
+
+ private:
+  typedef SmallDenseMap<const MDNode *, GlobalVariable *, 8>
+      TypeDescriptorsMapTy;
+  typedef SmallDenseMap<const MDNode *, std::string, 8> TypeNameMapTy;
+
+  void initializeCallbacks(Module &M);
+
+  Value *getShadowBase(Function &F);
+  Value *getAppMemMask(Function &F);
+  bool instrumentMemoryAccess(Instruction *I, MemoryLocation &MLoc,
+                              Value *&ShadowBase, Value *&AppMemMask,
+                              bool SanitizeFunction,
+                              TypeDescriptorsMapTy &TypeDescriptors,
+                              const DataLayout &DL);
+  bool instrumentMemInst(Value *I, Value *&ShadowBase, Value *&AppMemMask,
+                         const DataLayout &DL);
+
+  std::string getAnonymousStructIdentifier(const MDNode *MD,
+                                           TypeNameMapTy &TypeNames);
+  bool generateTypeDescriptor(const MDNode *MD,
+                              TypeDescriptorsMapTy &TypeDescriptors,
+                              TypeNameMapTy &TypeNames,
+                              Module &M);
+  bool generateBaseTypeDescriptor(const MDNode *MD,
+                                  TypeDescriptorsMapTy &TypeDescriptors,
+                                  TypeNameMapTy &TypeNames,
+                                  Module &M);
+
+  Type *IntptrTy;
+  uint64_t PtrShift;
+  IntegerType *OrdTy;
+
+  // Callbacks to run-time library are computed in doInitialization.
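+  // (Illustrative note, not part of the runtime's documented interface: the
+  // check callback below corresponds to the C signature
+  //   void __tysan_check(void *ptr, int size, void *type_desc, int flags);
+  // where flags bit 0 indicates a read and bit 1 a write, matching the
+  // arguments built in initializeCallbacks and instrumentMemoryAccess.)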
+  Function *TysanCheck;
+  Function *TysanCtorFunction;
+  Function *MemmoveFn, *MemcpyFn, *MemsetFn;
+};
+} // namespace
+
+char TypeSanitizer::ID = 0;
+INITIALIZE_PASS_BEGIN(
+    TypeSanitizer, "tysan",
+    "TypeSanitizer: detects TBAA violations.",
+    false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_END(
+    TypeSanitizer, "tysan",
+    "TypeSanitizer: detects TBAA violations.",
+    false, false)
+
+StringRef TypeSanitizer::getPassName() const { return "TypeSanitizer"; }
+
+void TypeSanitizer::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.addRequired<TargetLibraryInfoWrapperPass>();
+}
+
+FunctionPass *llvm::createTypeSanitizerPass() {
+  return new TypeSanitizer();
+}
+
+void TypeSanitizer::initializeCallbacks(Module &M) {
+  IRBuilder<> IRB(M.getContext());
+  OrdTy = IRB.getInt32Ty();
+
+  AttributeList Attr;
+  Attr = Attr.addAttribute(M.getContext(), AttributeList::FunctionIndex,
+                           Attribute::NoUnwind);
+  // Initialize the callbacks.
+  TysanCheck = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+      kTysanCheckName, Attr, IRB.getVoidTy(),
+      IRB.getInt8PtrTy(), // Pointer to data to be read.
+      OrdTy,              // Size of the data in bytes.
+      IRB.getInt8PtrTy(), // Pointer to type descriptor.
+      OrdTy,              // Flags.
+      nullptr));
+
+  MemmoveFn = checkSanitizerInterfaceFunction(
+      M.getOrInsertFunction("memmove", Attr, IRB.getInt8PtrTy(),
+                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+                            IntptrTy, nullptr));
+  MemcpyFn = checkSanitizerInterfaceFunction(
+      M.getOrInsertFunction("memcpy", Attr, IRB.getInt8PtrTy(),
+                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+                            IntptrTy, nullptr));
+  MemsetFn = checkSanitizerInterfaceFunction(
+      M.getOrInsertFunction("memset", Attr, IRB.getInt8PtrTy(),
+                            IRB.getInt8PtrTy(), IRB.getInt32Ty(),
+                            IntptrTy, nullptr));
+}
+
+bool TypeSanitizer::doInitialization(Module &M) {
+  const DataLayout &DL = M.getDataLayout();
+  IntptrTy = DL.getIntPtrType(M.getContext());
+  PtrShift = countTrailingZeros(IntptrTy->getPrimitiveSizeInBits()/8);
+
+  std::tie(TysanCtorFunction, std::ignore) =
+      createSanitizerCtorAndInitFunctions(M, kTysanModuleCtorName,
+                                          kTysanInitName, /*InitArgTypes=*/{},
+                                          /*InitArgs=*/{});
+
+  appendToGlobalCtors(M, TysanCtorFunction, 0);
+
+  return true;
+}
+
+static std::string encodeName(StringRef Name) {
+  static const char *const LUT = "0123456789abcdef";
+  size_t Length = Name.size();
+
+  std::string Output = kTysanGVNamePrefix;
+  Output.reserve(Output.size() + 3 * Length);
+  for (size_t i = 0; i < Length; ++i) {
+    const unsigned char c = Name[i];
+
+    if (isalnum((int)c)) {
+      Output.push_back(c);
+      continue;
+    }
+
+    if (c == '_') {
+      Output.append("__");
+      continue;
+    }
+
+    Output.push_back('_');
+    Output.push_back(LUT[c >> 4]);
+    Output.push_back(LUT[c & 15]);
+  }
+
+  return Output;
+}
+
+static bool isAnonymousNamespaceName(StringRef Name) {
+  // Types that are in an anonymous namespace are local to this module.
+  // FIXME: This should really be marked by the frontend in the metadata
+  // instead of having us guess this from the mangled name. Moreover, the regex
+  // here can pick up (unlikely) names in the non-reserved namespace (because
+  // it needs to search into the type to pick up cases where the type in the
+  // anonymous namespace is a template parameter, etc.).
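+  // For example (illustrative; cf. the accompanying test): both
+  // "_ZTSN12_GLOBAL__N_11zE" (a type directly inside an anonymous namespace)
+  // and "_ZTS1yIN12_GLOBAL__N_11zEE" (a template instantiated with such a
+  // type) match and are therefore kept module-local.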
+  return AnonNameRegex.match(Name);
+}
+
+std::string
+TypeSanitizer::getAnonymousStructIdentifier(const MDNode *MD,
+                                            TypeNameMapTy &TypeNames) {
+  MD5 Hash;
+
+  for (int i = 1, e = MD->getNumOperands(); i < e; i += 2) {
+    const MDNode *MemberNode = dyn_cast<MDNode>(MD->getOperand(i));
+    if (!MemberNode)
+      return "";
+
+    auto TNI = TypeNames.find(MemberNode);
+    std::string MemberName;
+    if (TNI != TypeNames.end()) {
+      MemberName = TNI->second;
+    } else {
+      if (MemberNode->getNumOperands() < 1)
+        return "";
+      MDString *MemberNameNode = dyn_cast<MDString>(MemberNode->getOperand(0));
+      if (!MemberNameNode)
+        return "";
+      MemberName = MemberNameNode->getString();
+      if (MemberName.empty())
+        MemberName = getAnonymousStructIdentifier(MemberNode, TypeNames);
+      if (MemberName.empty())
+        return "";
+      TypeNames[MemberNode] = MemberName;
+    }
+
+    Hash.update(MemberName);
+    Hash.update("\0");
+
+    uint64_t Offset =
+      mdconst::extract<ConstantInt>(MD->getOperand(i + 1))->getZExtValue();
+    Hash.update(utostr(Offset));
+    Hash.update("\0");
+  }
+
+  MD5::MD5Result HashResult;
+  Hash.final(HashResult);
+  return "__anonymous_" + std::string(HashResult.digest().str());
+}
+
+bool TypeSanitizer::generateBaseTypeDescriptor(
+    const MDNode *MD, TypeDescriptorsMapTy &TypeDescriptors,
+    TypeNameMapTy &TypeNames, Module &M) {
+  if (MD->getNumOperands() < 1)
+    return false;
+
+  MDString *NameNode = dyn_cast<MDString>(MD->getOperand(0));
+  if (!NameNode)
+    return false;
+
+  std::string Name = NameNode->getString();
+  if (Name.empty())
+    Name = getAnonymousStructIdentifier(MD, TypeNames);
+  if (Name.empty())
+    return false;
+  TypeNames[MD] = Name;
+  std::string EncodedName = encodeName(Name);
+
+  GlobalVariable *GV =
+    dyn_cast_or_null<GlobalVariable>(M.getNamedValue(EncodedName));
+  if (GV) {
+    TypeDescriptors[MD] = GV;
+    return true;
+  }
+
+  SmallVector<std::pair<Constant *, uint64_t>, 8> Members;
+  for (int i = 1, e = MD->getNumOperands(); i < e; i += 2) {
+    const MDNode *MemberNode = dyn_cast<MDNode>(MD->getOperand(i));
+    if (!MemberNode)
+      return false;
+
+    Constant *Member;
+    auto TDI = TypeDescriptors.find(MemberNode);
+    if (TDI != TypeDescriptors.end()) {
+      Member = TDI->second;
+    } else {
+      if (!generateBaseTypeDescriptor(MemberNode, TypeDescriptors,
+                                      TypeNames, M))
+        return false;
+
+      Member = TypeDescriptors[MemberNode];
+    }
+
+    uint64_t Offset =
+      mdconst::extract<ConstantInt>(MD->getOperand(i + 1))->getZExtValue();
+
+    Members.push_back(std::make_pair(Member, Offset));
+  }
+
+  // The descriptor for a base type (scalar or struct) is:
+  //   [2, member count, [type pointer, offset]..., name]
+
+  LLVMContext &C = MD->getContext();
+  Constant *NameData = ConstantDataArray::getString(C, NameNode->getString());
+  SmallVector<Type *, 8> TDSubTys;
+  SmallVector<Constant *, 8> TDSubData;
+
+  TDSubTys.push_back(IntptrTy);
+  TDSubData.push_back(ConstantInt::get(IntptrTy, 2));
+
+  TDSubTys.push_back(IntptrTy);
+  TDSubData.push_back(ConstantInt::get(IntptrTy, Members.size()));
+
+  bool ShouldBeComdat = !isAnonymousNamespaceName(NameNode->getString());
+  for (auto &Member : Members) {
+    TDSubTys.push_back(Member.first->getType());
+    TDSubData.push_back(Member.first);
+
+    if (!cast<GlobalVariable>(Member.first)->hasComdat())
+      ShouldBeComdat = false;
+
+    TDSubTys.push_back(IntptrTy);
+    TDSubData.push_back(ConstantInt::get(IntptrTy, Member.second));
+  }
+
+  TDSubTys.push_back(NameData->getType());
+  TDSubData.push_back(NameData);
+
+  StructType *TDTy = StructType::get(C, TDSubTys);
+  Constant *TD = ConstantStruct::get(TDTy, TDSubData);
+
+  GlobalVariable *TDGV =
+    new GlobalVariable(TDTy, true,
+                       !ShouldBeComdat ? GlobalValue::InternalLinkage :
+                                         GlobalValue::LinkOnceODRLinkage,
+                       TD, EncodedName);
+  M.getGlobalList().push_back(TDGV);
+
+  if (ShouldBeComdat) {
+    Comdat *TDComdat = M.getOrInsertComdat(EncodedName);
+    TDGV->setComdat(TDComdat);
+  }
+
+  TypeDescriptors[MD] = TDGV;
+  return true;
+}
+
+bool TypeSanitizer::generateTypeDescriptor(
+    const MDNode *MD, TypeDescriptorsMapTy &TypeDescriptors,
+    TypeNameMapTy &TypeNames, Module &M) {
+  // Here we need to generate a type descriptor corresponding to this TBAA
+  // metadata node. Under the current scheme there are three kinds of TBAA
+  // metadata nodes: scalar nodes, struct nodes, and struct tag nodes.
+
+  if (MD->getNumOperands() < 3)
+    return false;
+
+  const MDNode *BaseNode = dyn_cast<MDNode>(MD->getOperand(0));
+  if (!BaseNode)
+    return false;
+
+  // This is a struct tag (element-access) node.
+
+  const MDNode *AccessNode = dyn_cast<MDNode>(MD->getOperand(1));
+  if (!AccessNode)
+    return false;
+
+  Constant *Base;
+  auto TDI = TypeDescriptors.find(BaseNode);
+  if (TDI != TypeDescriptors.end()) {
+    Base = TDI->second;
+  } else {
+    if (!generateBaseTypeDescriptor(BaseNode, TypeDescriptors, TypeNames, M))
+      return false;
+
+    Base = TypeDescriptors[BaseNode];
+  }
+
+  Constant *Access;
+  TDI = TypeDescriptors.find(AccessNode);
+  if (TDI != TypeDescriptors.end()) {
+    Access = TDI->second;
+  } else {
+    if (!generateBaseTypeDescriptor(AccessNode, TypeDescriptors, TypeNames,
+                                    M))
+      return false;
+
+    Access = TypeDescriptors[AccessNode];
+  }
+
+  uint64_t Offset =
+    mdconst::extract<ConstantInt>(MD->getOperand(2))->getZExtValue();
+  std::string EncodedName = std::string(Base->getName()) + "_o_" +
+                            utostr(Offset);
+
+  GlobalVariable *GV =
+    dyn_cast_or_null<GlobalVariable>(M.getNamedValue(EncodedName));
+  if (GV) {
+    TypeDescriptors[MD] = GV;
+    return true;
+  }
+
+  // The descriptor for an access (struct-tag) node is:
+  //   [1, base-type pointer, access-type pointer, offset]
+
+  StructType *TDTy = StructType::get(IntptrTy, Base->getType(),
+                                     Access->getType(), IntptrTy, nullptr);
+  Constant *TD = ConstantStruct::get(TDTy, ConstantInt::get(IntptrTy, 1),
+                                     Base, Access,
+                                     ConstantInt::get(IntptrTy, Offset),
+                                     nullptr);
+
+  bool ShouldBeComdat = cast<GlobalVariable>(Base)->hasComdat();
+
+  GlobalVariable *TDGV =
+    new GlobalVariable(TDTy, true,
+                       !ShouldBeComdat ? GlobalValue::InternalLinkage :
+                                         GlobalValue::LinkOnceODRLinkage,
+                       TD, EncodedName);
+  M.getGlobalList().push_back(TDGV);
+
+  if (ShouldBeComdat) {
+    Comdat *TDComdat = M.getOrInsertComdat(EncodedName);
+    TDGV->setComdat(TDComdat);
+  }
+
+  TypeDescriptors[MD] = TDGV;
+  return true;
+}
+
+Value *TypeSanitizer::getShadowBase(Function &F) {
+  IRBuilder<> IRB(&F.front().front());
+  Value *GlobalShadowAddress = F.getParent()->getOrInsertGlobal(
+    kTysanShadowMemoryAddress, IntptrTy);
+  return IRB.CreateLoad(GlobalShadowAddress);
+}
+
+Value *TypeSanitizer::getAppMemMask(Function &F) {
+  IRBuilder<> IRB(&F.front().front());
+  Value *GlobalAppMemMask = F.getParent()->getOrInsertGlobal(
+    kTysanAppMemMask, IntptrTy);
+  return IRB.CreateLoad(GlobalAppMemMask);
+}
+
+bool TypeSanitizer::runOnFunction(Function &F) {
+  // This is required to prevent instrumenting call to __tysan_init from within
+  // the module constructor.
+  if (&F == TysanCtorFunction)
+    return false;
+  initializeCallbacks(*F.getParent());
+
+  SmallVector<std::pair<Instruction *, MemoryLocation>, 8> MemoryAccesses;
+  SmallSetVector<const MDNode *, 8> TBAAMetadata;
+  SmallVector<Value *, 8> MemTypeResetInsts;
+
+  bool Res = false;
+  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeType);
+  const DataLayout &DL = F.getParent()->getDataLayout();
+  const TargetLibraryInfo *TLI =
+    &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+
+  // Traverse all instructions, collect memory accesses and type-resetting
+  // instructions, and check for calls.
+  for (auto &BB : F) {
+    for (auto &Inst : BB) {
+      if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst) ||
+          isa<AtomicCmpXchgInst>(Inst) || isa<AtomicRMWInst>(Inst)) {
+        MemoryLocation MLoc = MemoryLocation::get(&Inst);
+
+        // Swift errors are special (we can't introduce extra uses on them).
+        if (MLoc.Ptr->isSwiftError())
+          continue;
+
+        // Skip non-address-space-0 pointers; we don't know how to handle them.
+        Type *PtrTy = cast<PointerType>(MLoc.Ptr->getType());
+        if (PtrTy->getPointerAddressSpace() != 0)
+          continue;
+
+        if (MLoc.AATags.TBAA)
+          TBAAMetadata.insert(MLoc.AATags.TBAA);
+        MemoryAccesses.push_back(std::make_pair(&Inst, MLoc));
+      } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
+        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
+          maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
+
+        if (isa<MemIntrinsic>(Inst)) {
+          MemTypeResetInsts.push_back(&Inst);
+        } else if (auto *II = dyn_cast<IntrinsicInst>(&Inst)) {
+          if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
+              II->getIntrinsicID() == Intrinsic::lifetime_end)
+            MemTypeResetInsts.push_back(&Inst);
+        }
+      } else if (isa<AllocaInst>(Inst)) {
+        MemTypeResetInsts.push_back(&Inst);
+      }
+    }
+  }
+
+  // byval arguments also need their types reset (they're new stack memory,
+  // just like allocas).
+  for (auto &A : F.args())
+    if (A.hasByValAttr())
+      MemTypeResetInsts.push_back(&A);
+
+  // We have collected all loads and stores, and know for what TBAA nodes we
+  // need to generate type descriptors.
+
+  Module &M = *F.getParent();
+  TypeDescriptorsMapTy TypeDescriptors;
+  TypeNameMapTy TypeNames;
+  for (const MDNode *MD : TBAAMetadata) {
+    if (TypeDescriptors.count(MD))
+      continue;
+
+    if (!generateTypeDescriptor(MD, TypeDescriptors, TypeNames, M))
+      return Res; // Giving up.
+
+    Res = true;
+  }
+
+  Value *ShadowBase = nullptr, *AppMemMask = nullptr;
+  for (auto &MA : MemoryAccesses)
+    Res |= instrumentMemoryAccess(MA.first, MA.second, ShadowBase, AppMemMask,
+                                  SanitizeFunction, TypeDescriptors, DL);
+
+  for (auto Inst : MemTypeResetInsts)
+    Res |= instrumentMemInst(Inst, ShadowBase, AppMemMask, DL);
+
+  return Res;
+}
+
+bool TypeSanitizer::instrumentMemoryAccess(Instruction *I,
+                                           MemoryLocation &MLoc,
+                                           Value *&ShadowBase,
+                                           Value *&AppMemMask,
+                                           bool SanitizeFunction,
+                                           TypeDescriptorsMapTy &TypeDescriptors,
+                                           const DataLayout &DL) {
+  if (!ShadowBase)
+    ShadowBase = getShadowBase(*I->getParent()->getParent());
+  if (!AppMemMask)
+    AppMemMask = getAppMemMask(*I->getParent()->getParent());
+
+  IRBuilder<> IRB(I);
+
+  Constant *TDGV;
+  if (MLoc.AATags.TBAA)
+    TDGV = TypeDescriptors[MLoc.AATags.TBAA];
+  else
+    TDGV = Constant::getNullValue(IRB.getInt8PtrTy());
+
+  Value *TD = IRB.CreateBitCast(TDGV, IRB.getInt8PtrTy());
+
+  Value *ShadowDataInt =
+    IRB.CreateAdd(IRB.CreateShl(IRB.CreateAnd(IRB.CreatePtrToInt(
+                                                const_cast<Value *>(MLoc.Ptr),
+                                                IntptrTy),
+                                              AppMemMask),
+                                PtrShift),
+                  ShadowBase);
+
+  Type *Int8PtrPtrTy = IRB.getInt8PtrTy()->getPointerTo();
+  Value *ShadowData = IRB.CreateIntToPtr(ShadowDataInt, Int8PtrPtrTy);
+
+  Type *AccessTy = cast<PointerType>(MLoc.Ptr->getType())->getElementType();
+  assert(AccessTy->isSized());
+  uint64_t AccessSize = DL.getTypeStoreSize(AccessTy);
+
+  // This is the TD value, -1, which is used to indicate that the byte is not
+  // the first byte of the type.
+  Value *BadTD = IRB.CreateIntToPtr(ConstantInt::getSigned(IntptrTy, -1),
+                                    IRB.getInt8PtrTy());
+
+  auto SetType = [&]() {
+    IRB.CreateStore(TD, ShadowData);
+
+    // Now fill the remainder of the shadow memory corresponding to the
+    // remainder of the bytes of the type with a bad type descriptor.
+    for (uint64_t i = 1; i < AccessSize; ++i) {
+      Value *BadShadowData =
+        IRB.CreateIntToPtr(IRB.CreateAdd(ShadowDataInt,
                                         ConstantInt::get(IntptrTy,
+                                                          i << PtrShift)),
+                           Int8PtrPtrTy);
+      IRB.CreateStore(BadTD, BadShadowData);
+    }
+  };
+
+  Constant *Flags =
+    ConstantInt::get(OrdTy, (int) I->mayReadFromMemory() |
+                            (((int) I->mayWriteToMemory()) << 1));
+
+  if (!ClWritesAlwaysSetType || I->mayReadFromMemory()) {
+    // We need to check the type here. If the type is unknown, then the read
+    // sets the type. If the type is known, then it is checked. If the type
+    // doesn't match, then we call the runtime (which may yet determine that
+    // the mismatch is okay).
+    LLVMContext &C = I->getContext();
+    MDNode *UnlikelyBW = MDBuilder(C).createBranchWeights(1, 100000);
+
+    Value *LoadedTD = IRB.CreateLoad(ShadowData);
+    if (SanitizeFunction) {
+      Value *BadTDCmp = IRB.CreateICmpNE(LoadedTD, TD);
+      TerminatorInst *BadTDTerm, *GoodTDTerm;
+      SplitBlockAndInsertIfThenElse(BadTDCmp, &*IRB.GetInsertPoint(),
+                                    &BadTDTerm, &GoodTDTerm, UnlikelyBW);
+      IRB.SetInsertPoint(BadTDTerm);
+
+      // We now know that the types did not match (we're on the slow path). If
+      // the type is unknown, then set it.
+      Value *NullTDCmp = IRB.CreateIsNull(LoadedTD);
+      TerminatorInst *NullTDTerm, *MismatchTerm;
+      SplitBlockAndInsertIfThenElse(NullTDCmp, &*IRB.GetInsertPoint(),
+                                    &NullTDTerm, &MismatchTerm);
+
+      // If the type is unknown, then set the type.
+      IRB.SetInsertPoint(NullTDTerm);
+
+      // We're about to set the type. Make sure that all bytes in the value
+      // are also of unknown type.
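+      // (Illustrative: with 8-byte pointers, PtrShift == 3, so a 4-byte
+      // access checks the three shadow slots at offsets 8, 16, and 24 from
+      // ShadowDataInt; cf. the accompanying test's CHECK lines.)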
+      Value *Size = ConstantInt::get(OrdTy, AccessSize);
+      Value *NotAllUnkTD = IRB.getFalse();
+      for (uint64_t i = 1; i < AccessSize; ++i) {
+        Value *UnkShadowData =
+          IRB.CreateIntToPtr(IRB.CreateAdd(ShadowDataInt,
+                                           ConstantInt::get(IntptrTy,
+                                                            i << PtrShift)),
+                             Int8PtrPtrTy);
+        Value *ILdTD = IRB.CreateLoad(UnkShadowData);
+        NotAllUnkTD = IRB.CreateOr(NotAllUnkTD, IRB.CreateIsNotNull(ILdTD));
+      }
+
+      Instruction *BeforeSetType = &*IRB.GetInsertPoint();
+      TerminatorInst *BadUTDTerm =
+        SplitBlockAndInsertIfThen(NotAllUnkTD, BeforeSetType, false,
+                                  UnlikelyBW);
+      IRB.SetInsertPoint(BadUTDTerm);
+      IRB.CreateCall(TysanCheck,
+                     {IRB.CreateBitCast(const_cast<Value *>(MLoc.Ptr),
+                                        IRB.getInt8PtrTy()),
+                      Size, (Value *) TD, (Value *) Flags});
+
+      IRB.SetInsertPoint(BeforeSetType);
+      SetType();
+
+      // We have a non-trivial mismatch. Call the runtime.
+      IRB.SetInsertPoint(MismatchTerm);
+      IRB.CreateCall(TysanCheck,
+                     {IRB.CreateBitCast(const_cast<Value *>(MLoc.Ptr),
+                                        IRB.getInt8PtrTy()),
+                      Size, (Value *) TD, (Value *) Flags});
+
+      // We appear to have the right type. Make sure that all other bytes in
+      // the type are still marked as interior bytes. If not, call the
+      // runtime.
+      IRB.SetInsertPoint(GoodTDTerm);
+      Value *NotAllBadTD = IRB.getFalse();
+      for (uint64_t i = 1; i < AccessSize; ++i) {
+        Value *BadShadowData =
+          IRB.CreateIntToPtr(IRB.CreateAdd(ShadowDataInt,
+                                           ConstantInt::get(IntptrTy,
+                                                            i << PtrShift)),
+                             Int8PtrPtrTy);
+        Value *ILdTD = IRB.CreateLoad(BadShadowData);
+        NotAllBadTD = IRB.CreateOr(NotAllBadTD,
+                                   IRB.CreateICmpNE(ILdTD, BadTD));
+      }
+
+      TerminatorInst *BadITDTerm =
+        SplitBlockAndInsertIfThen(NotAllBadTD, &*IRB.GetInsertPoint(),
+                                  false, UnlikelyBW);
+      IRB.SetInsertPoint(BadITDTerm);
+      IRB.CreateCall(TysanCheck,
+                     {IRB.CreateBitCast(const_cast<Value *>(MLoc.Ptr),
+                                        IRB.getInt8PtrTy()),
+                      Size, (Value *) TD, (Value *) Flags});
+    } else {
+      // If we're not sanitizing this function, then we only care whether we
+      // need to *set* the type.
+      Value *NullTDCmp = IRB.CreateIsNull(LoadedTD);
+      TerminatorInst *NullTDTerm =
+        SplitBlockAndInsertIfThen(NullTDCmp, &*IRB.GetInsertPoint(), false,
+                                  UnlikelyBW);
+      IRB.SetInsertPoint(NullTDTerm);
+      SetType();
+    }
+  } else if (I->mayWriteToMemory()) {
+    // In the mode where writes always set the type, for a write (which does
+    // not also read), we just set the type.
+    SetType();
+  }
+
+  ++NumInstrumentedAccesses;
+  return true;
+}
+
+// Memory-related intrinsics/instructions reset the type of the destination
+// memory (including allocas and byval arguments).
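+// The shadow region is reset with a memset: a destination of Size bytes has
+// Size << PtrShift shadow bytes (one pointer-sized descriptor slot per
+// application byte), and all-zero shadow means "unknown type".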
+bool TypeSanitizer::instrumentMemInst(Value *V, Value *&ShadowBase,
+                                      Value *&AppMemMask,
+                                      const DataLayout &DL) {
+  BasicBlock::iterator IP;
+  BasicBlock *BB;
+  Function *F;
+
+  if (auto *I = dyn_cast<Instruction>(V)) {
+    IP = BasicBlock::iterator(I);
+    BB = I->getParent();
+    F = BB->getParent();
+  } else {
+    auto *A = cast<Argument>(V);
+    F = A->getParent();
+    BB = &F->getEntryBlock();
+    IP = BB->getFirstInsertionPt();
+  }
+
+  Value *Dest, *Size;
+  IRBuilder<> IRB(BB, IP);
+
+  if (auto *A = dyn_cast<Argument>(V)) {
+    assert(A->hasByValAttr() && "Type reset for non-byval argument?");
+
+    Dest = A;
+    Size =
+      ConstantInt::get(IntptrTy,
+                       DL.getTypeAllocSize(cast<PointerType>(A->getType())->
+                                             getElementType()));
+  } else {
+    auto *I = cast<Instruction>(V);
+    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
+      if (MI->getDestAddressSpace() != 0)
+        return false;
+
+      Dest = MI->getDest();
+      Size = MI->getLength();
+    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+      if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
+          II->getIntrinsicID() != Intrinsic::lifetime_end)
+        return false;
+
+      Size = II->getArgOperand(0);
+      Dest = II->getArgOperand(1);
+    } else if (auto *AI = dyn_cast<AllocaInst>(I)) {
+      // We need to clear the types for new stack allocations (or else we might
+      // read stale type information from a previous function execution).
+
+      IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(I)));
+      IRB.SetInstDebugLocation(I);
+
+      Size = IRB.CreateMul(IRB.CreateZExtOrTrunc(AI->getArraySize(), IntptrTy),
+                           ConstantInt::get(IntptrTy,
+                                            DL.getTypeAllocSize(
+                                              AI->getAllocatedType())));
+      Dest = I;
+    } else {
+      return false;
+    }
+  }
+
+  if (!ShadowBase)
+    ShadowBase = getShadowBase(*F);
+  if (!AppMemMask)
+    AppMemMask = getAppMemMask(*F);
+
+  Value *ShadowDataInt =
+    IRB.CreateAdd(IRB.CreateShl(IRB.CreateAnd(IRB.CreatePtrToInt(Dest,
+                                                                 IntptrTy),
+                                              AppMemMask),
+                                PtrShift),
+                  ShadowBase);
+  Value *ShadowData = IRB.CreateIntToPtr(ShadowDataInt, IRB.getInt8PtrTy());
+
+  IRB.CreateMemSet(ShadowData, IRB.getInt8(0),
+                   IRB.CreateShl(Size, PtrShift), 1u << PtrShift);
+
+  return true;
+}
Index: test/Instrumentation/TypeSanitizer/basic.ll
===================================================================
--- /dev/null
+++ test/Instrumentation/TypeSanitizer/basic.ll
@@ -0,0 +1,429 @@
+; Test basic type sanitizer instrumentation.
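+;
+; Note (illustrative): the checks below follow the shadow-address computation
+;   shadow(%p) = ((ptrtoint %p & @__tysan_app_memory_mask) << 3)
+;                + @__tysan_shadow_memory_address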
+;
+; RUN: opt < %s -tysan -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK-DAG: $__tysan_v1_Simple_20C_2b_2b_20TBAA = comdat any
+; CHECK-DAG: $__tysan_v1_omnipotent_20char = comdat any
+; CHECK-DAG: $__tysan_v1_int = comdat any
+; CHECK-DAG: $__tysan_v1_int_o_0 = comdat any
+; CHECK-DAG: $__tysan_v1___ZTS1x = comdat any
+; CHECK-DAG: $__tysan_v1___ZTS1v = comdat any
+; CHECK-DAG: $__tysan_v1___ZTS1v_o_12 = comdat any
+; CHECK-DAG: $__tysan_v1_____anonymous__027d9e575c5d34cb5d60d6a1d6276f95 = comdat any
+; CHECK-DAG: $__tysan_v1_____anonymous__027d9e575c5d34cb5d60d6a1d6276f95_o_24 = comdat any
+
+; CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 0, void ()* @tysan.module_ctor, i8* null }]
+
+; CHECK-DAG: @__tysan_shadow_memory_address = external global i64
+; CHECK-DAG: @__tysan_app_memory_mask = external global i64
+
+; CHECK-DAG: @__tysan_v1_Simple_20C_2b_2b_20TBAA = linkonce_odr constant { i64, i64, [16 x i8] } { i64 2, i64 0, [16 x i8] c"Simple C++ TBAA\00" }, comdat
+; CHECK-DAG: @__tysan_v1_omnipotent_20char = linkonce_odr constant { i64, i64, { {{.*}} }*, i64, [16 x i8] } { i64 2, i64 1, { {{.*}} }* @__tysan_v1_Simple_20C_2b_2b_20TBAA, i64 0, [16 x i8] c"omnipotent char\00" }, comdat
+; CHECK-DAG: @__tysan_v1_int = linkonce_odr constant { i64, i64, { {{.*}} }*, i64, [4 x i8] } { i64 2, i64 1, { {{.*}} }* @__tysan_v1_omnipotent_20char, i64 0, [4 x i8] c"int\00" }, comdat
+; CHECK-DAG: @__tysan_v1_int_o_0 = linkonce_odr constant { i64, { {{.*}} }*, { {{.*}} }*, i64 } { i64 1, { {{.*}} }* @__tysan_v1_int, { {{.*}} }* @__tysan_v1_int, i64 0 }, comdat
+; CHECK-DAG: @__tysan_v1___ZTS1x = linkonce_odr constant { i64, i64, { {{.*}} }*, i64, { {{.*}} }*, i64, [7 x i8] } { i64 2, i64 2, { {{.*}} }* @__tysan_v1_int, i64 0, { {{.*}} }* @__tysan_v1_int, i64 4, [7 x i8] c"_ZTS1x\00" }, comdat
+; CHECK-DAG: @__tysan_v1___ZTS1v = linkonce_odr constant { i64, i64, { {{.*}} }*, i64, { {{.*}} }*, i64, { {{.*}} }*, i64, [7 x i8] } { i64 2, i64 3, { {{.*}} }* @__tysan_v1_int, i64 8, { {{.*}} }* @__tysan_v1_int, i64 12, { {{.*}} }* @__tysan_v1___ZTS1x, i64 16, [7 x i8] c"_ZTS1v\00" }, comdat
+; CHECK-DAG: @__tysan_v1___ZTS1v_o_12 = linkonce_odr constant { i64, { {{.*}} }*, { {{.*}} }*, i64 } { i64 1, { {{.*}} }* @__tysan_v1___ZTS1v, { {{.*}} }* @__tysan_v1_int, i64 12 }, comdat
+; CHECK-DAG: @__tysan_v1___ZTSN12__GLOBAL____N__11zE = internal constant { i64, i64, { {{.*}} }*, i64, [23 x i8] } { i64 2, i64 1, { {{.*}} }* @__tysan_v1_int, i64 24, [23 x i8] c"_ZTSN12_GLOBAL__N_11zE\00" }
+; CHECK-DAG: @__tysan_v1___ZTSN12__GLOBAL____N__11zE_o_24 = internal constant { i64, { {{.*}} }*, { {{.*}} }*, i64 } { i64 1, { {{.*}} }* @__tysan_v1___ZTSN12__GLOBAL____N__11zE, { {{.*}} }* @__tysan_v1_int, i64 24 }
+; CHECK-DAG: @__tysan_v1___ZTS1yIN12__GLOBAL____N__11zEE = internal constant { i64, i64, { {{.*}} }*, i64, [27 x i8] } { i64 2, i64 1, { {{.*}} }* @__tysan_v1_int, i64 24, [27 x i8] c"_ZTS1yIN12_GLOBAL__N_11zEE\00" }
+; CHECK-DAG: @__tysan_v1___ZTS1yIN12__GLOBAL____N__11zEE_o_24 = internal constant { i64, { {{.*}} }*, { {{.*}} }*, i64 } { i64 1, { {{.*}} }* @__tysan_v1___ZTS1yIN12__GLOBAL____N__11zEE, { {{.*}} }* @__tysan_v1_int, i64 24 }
+; CHECK-DAG: @__tysan_v1_____anonymous__027d9e575c5d34cb5d60d6a1d6276f95 = linkonce_odr constant { i64, i64, { {{.*}} }*, i64, [1 x i8] } { i64 2, i64 1, { {{.*}} }* @__tysan_v1_int, i64 24, [1 x i8] zeroinitializer }, comdat
+; CHECK-DAG: @__tysan_v1_____anonymous__027d9e575c5d34cb5d60d6a1d6276f95_o_24 = linkonce_odr constant { i64, { {{.*}} }*, { {{.*}} }*, i64 } { i64 1, { {{.*}} }* @__tysan_v1_____anonymous__027d9e575c5d34cb5d60d6a1d6276f95, { {{.*}} }* @__tysan_v1_int, i64 24 }, comdat
+
+define i32 @test_load(i32* %a) sanitize_type {
+entry:
+  %tmp1 = load i32, i32* %a, align 4, !tbaa !3
+  ret i32 %tmp1
+
+; CHECK-LABEL: @test_load
+; CHECK: [[V0:%[0-9]+]] = load i64, i64* @__tysan_app_memory_mask
+; CHECK: [[V1:%[0-9]+]] = load i64, i64* @__tysan_shadow_memory_address
+; CHECK: [[V2:%[0-9]+]] = ptrtoint i32* %a to i64
+; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]]
+; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3
+; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]]
+; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to i8**
+; CHECK: [[V7:%[0-9]+]] = load i8*, i8** [[V6]]
+; CHECK: [[V8:%[0-9]+]] = icmp ne i8* [[V7]], bitcast ({ {{.*}} }* @__tysan_v1_int_o_0 to i8*)
+; CHECK: br i1 [[V8]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD:[0-9]+]]
+
+; CHECK: [[V10:%[0-9]+]] = icmp eq i8* [[V7]], null
+; CHECK: br i1 [[V10]], label %{{[0-9]+}}, label %{{[0-9]+}}
+
+; CHECK: [[V12:%[0-9]+]] = add i64 [[V5]], 8
+; CHECK: [[V13:%[0-9]+]] = inttoptr i64 [[V12]] to i8**
+; CHECK: [[V14:%[0-9]+]] = load i8*, i8** [[V13]]
+; CHECK: [[V15:%[0-9]+]] = icmp ne i8* [[V14]], null
+; CHECK: [[V16:%[0-9]+]] = or i1 false, [[V15]]
+; CHECK: [[V17:%[0-9]+]] = add i64 [[V5]], 16
+; CHECK: [[V18:%[0-9]+]] = inttoptr i64 [[V17]] to i8**
+; CHECK: [[V19:%[0-9]+]] = load i8*, i8** [[V18]]
+; CHECK: [[V20:%[0-9]+]] = icmp ne i8* [[V19]], null
+; CHECK: [[V21:%[0-9]+]] = or i1 [[V16]], [[V20]]
+; CHECK: [[V22:%[0-9]+]] = add i64 [[V5]], 24
+; CHECK: [[V23:%[0-9]+]] = inttoptr i64 [[V22]] to i8**
+; CHECK: [[V24:%[0-9]+]] = load i8*, i8** [[V23]]
+; CHECK: [[V25:%[0-9]+]] = icmp ne i8* [[V24]], null
+; CHECK: [[V26:%[0-9]+]] = or i1 [[V21]], [[V25]]
+; CHECK: br i1 [[V26]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]]
+
+; CHECK: [[V28:%[0-9]+]] = bitcast i32* %a to i8*
+; CHECK: call void @__tysan_check(i8* [[V28]], i32 4, i8* bitcast ({ {{.*}} }* @__tysan_v1_int_o_0 to i8*), i32 1)
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: store i8* bitcast ({ {{.*}} }* @__tysan_v1_int_o_0 to i8*), i8** [[V6]]
+; CHECK: [[V30:%[0-9]+]] = add i64 [[V5]], 8
+; CHECK: [[V31:%[0-9]+]] = inttoptr i64 [[V30]] to i8**
+; CHECK: store i8* inttoptr (i64 -1 to i8*), i8** [[V31]]
+; CHECK: [[V32:%[0-9]+]] = add i64 [[V5]], 16
+; CHECK: [[V33:%[0-9]+]] = inttoptr i64 [[V32]] to i8**
+; CHECK: store i8* inttoptr (i64 -1 to i8*), i8** [[V33]]
+; CHECK: [[V34:%[0-9]+]] = add i64 [[V5]], 24
+; CHECK: [[V35:%[0-9]+]] = inttoptr i64 [[V34]] to i8**
+; CHECK: store i8* inttoptr (i64 -1 to i8*), i8** [[V35]]
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: [[V37:%[0-9]+]] = bitcast i32* %a to i8*
+; CHECK: call void @__tysan_check(i8* [[V37]], i32 4, i8* bitcast ({ {{.*}} }* @__tysan_v1_int_o_0 to i8*), i32 1)
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: [[V40:%[0-9]+]] = add i64 [[V5]], 8
+; CHECK: [[V41:%[0-9]+]] = inttoptr i64 [[V40]] to i8**
+; CHECK: [[V42:%[0-9]+]] = load i8*, i8** [[V41]]
+; CHECK: [[V43:%[0-9]+]] = icmp ne i8* [[V42]], inttoptr (i64 -1 to i8*)
+; CHECK: [[V44:%[0-9]+]] = or i1 false, [[V43]]
+; CHECK: [[V45:%[0-9]+]] = add i64 [[V5]], 16
+; CHECK: [[V46:%[0-9]+]] = inttoptr i64 [[V45]] to i8**
+; CHECK: [[V47:%[0-9]+]] = load i8*, i8** [[V46]]
+; CHECK: [[V48:%[0-9]+]] = icmp ne i8* [[V47]], inttoptr (i64 -1 to i8*)
+; CHECK: [[V49:%[0-9]+]] = or i1 [[V44]], [[V48]]
+; CHECK: [[V50:%[0-9]+]] = add i64 [[V5]], 24
+; CHECK: [[V51:%[0-9]+]] = inttoptr i64 [[V50]] to i8**
+; CHECK: [[V52:%[0-9]+]] = load i8*, i8** [[V51]]
+; CHECK: [[V53:%[0-9]+]] = icmp ne i8* [[V52]], inttoptr (i64 -1 to i8*)
+; CHECK: [[V54:%[0-9]+]] = or i1 [[V49]], [[V53]]
+; CHECK: br i1 [[V54]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]]
+
+; CHECK: [[V56:%[0-9]+]] = bitcast i32* %a to i8*
+; CHECK: call void @__tysan_check(i8* [[V56]], i32 4, i8* bitcast ({ {{.*}} }* @__tysan_v1_int_o_0 to i8*), i32 1)
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: %tmp1 = load i32, i32* %a, align 4, !tbaa !{{[0-9]+}}
+; CHECK: ret i32 %tmp1
+}
+
+define void @test_store(i32* %a) sanitize_type {
+entry:
+  store i32 42, i32* %a, align 4, !tbaa !6
+  ret void
+
+; CHECK-LABEL: @test_store
+; CHECK: [[V0:%[0-9]+]] = load i64, i64* @__tysan_app_memory_mask
+; CHECK: [[V1:%[0-9]+]] = load i64, i64* @__tysan_shadow_memory_address
+; CHECK: [[V2:%[0-9]+]] = ptrtoint i32* %a to i64
+; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]]
+; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3
+; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]]
+; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to i8**
+; CHECK: [[V7:%[0-9]+]] = load i8*, i8** [[V6]]
+; CHECK: [[V8:%[0-9]+]] = icmp ne i8* [[V7]], bitcast ({ {{.*}} }* @__tysan_v1___ZTS1v_o_12 to i8*)
+; CHECK: br i1 [[V8]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]]
+
+; CHECK: [[V10:%[0-9]+]] = icmp eq i8* [[V7]], null
+; CHECK: br i1 [[V10]], label %{{[0-9]+}}, label %{{[0-9]+}}
+
+; CHECK: [[V12:%[0-9]+]] = add i64 [[V5]], 8
+; CHECK: [[V13:%[0-9]+]] = inttoptr i64 [[V12]] to i8**
+; CHECK: [[V14:%[0-9]+]] = load i8*, i8** [[V13]]
+; CHECK: [[V15:%[0-9]+]] = icmp ne i8* [[V14]], null
+; CHECK: [[V16:%[0-9]+]] = or i1 false, [[V15]]
+; CHECK: [[V17:%[0-9]+]] = add i64 [[V5]], 16
+; CHECK: [[V18:%[0-9]+]] = inttoptr i64 [[V17]] to i8**
+; CHECK: [[V19:%[0-9]+]] = load i8*, i8** [[V18]]
+; CHECK: [[V20:%[0-9]+]] = icmp ne i8* [[V19]], null
+; CHECK: [[V21:%[0-9]+]] = or i1 [[V16]], [[V20]]
+; CHECK: [[V22:%[0-9]+]] = add i64 [[V5]], 24
+; CHECK: [[V23:%[0-9]+]] = inttoptr i64 [[V22]] to i8**
+; CHECK: [[V24:%[0-9]+]] = load i8*, i8** [[V23]]
+; CHECK: [[V25:%[0-9]+]] = icmp ne i8* [[V24]], null
+; CHECK: [[V26:%[0-9]+]] = or i1 [[V21]], [[V25]]
+; CHECK: br i1 [[V26]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]]
+
+; CHECK: [[V28:%[0-9]+]] = bitcast i32* %a to i8*
+; CHECK: call void @__tysan_check(i8* [[V28]], i32 4, i8* bitcast ({ {{.*}} }* @__tysan_v1___ZTS1v_o_12 to i8*), i32 2)
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: store i8* bitcast ({ {{.*}} }* @__tysan_v1___ZTS1v_o_12 to i8*), i8** [[V6]]
+; CHECK: [[V30:%[0-9]+]] = add i64 [[V5]], 8
+; CHECK: [[V31:%[0-9]+]] = inttoptr i64 [[V30]] to i8**
+; CHECK: store i8* inttoptr (i64 -1 to i8*), i8** [[V31]]
+; CHECK: [[V32:%[0-9]+]] = add i64 [[V5]], 16
+; CHECK: [[V33:%[0-9]+]] = inttoptr i64 [[V32]] to i8**
+; CHECK: store i8* inttoptr (i64 -1 to i8*), i8** [[V33]]
+; CHECK: [[V34:%[0-9]+]] = add i64 [[V5]], 24
+; CHECK: [[V35:%[0-9]+]] = inttoptr i64 [[V34]] to i8**
+; CHECK: store i8* inttoptr (i64 -1 to i8*), i8** [[V35]]
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: [[V37:%[0-9]+]] = bitcast i32* %a to i8*
+; CHECK: call void @__tysan_check(i8* [[V37]], i32 4, i8* bitcast ({ {{.*}} }* @__tysan_v1___ZTS1v_o_12 to i8*), i32 2)
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: [[V40:%[0-9]+]] = add i64 [[V5]], 8
+; CHECK: [[V41:%[0-9]+]] = inttoptr i64 [[V40]] to i8**
+; CHECK: [[V42:%[0-9]+]] = load i8*, i8** [[V41]]
+; CHECK: [[V43:%[0-9]+]] = icmp ne i8* [[V42]], inttoptr (i64 -1 to i8*)
+; CHECK: [[V44:%[0-9]+]] = or i1 false, [[V43]]
+; CHECK: [[V45:%[0-9]+]] = add i64 [[V5]], 16
+; CHECK: [[V46:%[0-9]+]] = inttoptr i64 [[V45]] to i8**
+; CHECK: [[V47:%[0-9]+]] = load i8*, i8** [[V46]]
+; CHECK: [[V48:%[0-9]+]] = icmp ne i8* [[V47]], inttoptr (i64 -1 to i8*)
+; CHECK: [[V49:%[0-9]+]] = or i1 [[V44]], [[V48]]
+; CHECK: [[V50:%[0-9]+]] = add i64 [[V5]], 24
+; CHECK: [[V51:%[0-9]+]] = inttoptr i64 [[V50]] to i8**
+; CHECK: [[V52:%[0-9]+]] = load i8*, i8** [[V51]]
+; CHECK: [[V53:%[0-9]+]] = icmp ne i8* [[V52]], inttoptr (i64 -1 to i8*)
+; CHECK: [[V54:%[0-9]+]] = or i1 [[V49]], [[V53]]
+; CHECK: br i1 [[V54]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]]
+
+; CHECK: [[V56:%[0-9]+]] = bitcast i32* %a to i8*
+; CHECK: call void @__tysan_check(i8* [[V56]], i32 4, i8* bitcast ({ {{.*}} }* @__tysan_v1___ZTS1v_o_12 to i8*), i32 2)
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: store i32 42, i32* %a, align 4, !tbaa !{{[0-9]+}}
+; CHECK: ret void
+}
+
+define i32 @test_load_unk(i32* %a) sanitize_type {
+entry:
+  %tmp1 = load i32, i32* %a, align 4
+  ret i32 %tmp1
+
+; CHECK-LABEL: @test_load_unk
+; CHECK: [[PTR:%[0-9]+]] = bitcast i32* %a to i8*
+; CHECK: call void @__tysan_check(i8* [[PTR]], i32 4, i8* null, i32 1)
+; CHECK: ret i32
+}
+
+define void @test_store_unk(i32* %a) sanitize_type {
+entry:
+  store i32 42, i32* %a, align 4
+  ret void
+
+; CHECK-LABEL: @test_store_unk
+; CHECK: [[PTR:%[0-9]+]] = bitcast i32* %a to i8*
+; CHECK: call void @__tysan_check(i8* [[PTR]], i32 4, i8* null, i32 2)
+; CHECK: ret void
+}
+
+define i32 @test_load_nsan(i32* %a) {
+entry:
+  %tmp1 = load i32, i32* %a, align 4, !tbaa !3
+  ret i32 %tmp1
+
+; CHECK-LABEL: @test_load_nsan
+; CHECK: [[V0:%[0-9]+]] = load i64, i64* @__tysan_app_memory_mask
+; CHECK: [[V1:%[0-9]+]] = load i64, i64* @__tysan_shadow_memory_address
+; CHECK: [[V2:%[0-9]+]] = ptrtoint i32* %a to i64
+; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]]
+; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3
+; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]]
+; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to i8**
+; CHECK: [[V7:%[0-9]+]] = load i8*, i8** [[V6]]
+; CHECK: [[V8:%[0-9]+]] = icmp eq i8* [[V7]], null
+; CHECK: br i1 [[V8]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]]
+
+; CHECK: store i8* bitcast ({ {{.*}} }* @__tysan_v1_int_o_0 to i8*), i8** [[V6]]
+; CHECK: [[V10:%[0-9]+]] = add i64 [[V5]], 8
+; CHECK: [[V11:%[0-9]+]] = inttoptr i64 [[V10]] to i8**
+; CHECK: store i8* inttoptr (i64 -1 to i8*), i8** [[V11]]
+; CHECK: [[V12:%[0-9]+]] = add i64 [[V5]], 16
+; CHECK: [[V13:%[0-9]+]] = inttoptr i64 [[V12]] to i8**
+; CHECK: store i8* inttoptr (i64 -1 to i8*), i8** [[V13]]
+; CHECK: [[V14:%[0-9]+]] = add i64 [[V5]], 24
+; CHECK: [[V15:%[0-9]+]] = inttoptr i64 [[V14]] to i8**
+; CHECK: store i8* inttoptr (i64 -1 to i8*), i8** [[V15]]
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: %tmp1 = load i32, i32* %a, align 4, !tbaa !{{[0-9]+}}
+; CHECK: ret i32 %tmp1
+}
+
+define void @test_store_nsan(i32* %a) {
+entry:
+  store i32 42, i32* %a, align 4, !tbaa !3
+  ret void
+
+; CHECK-LABEL: @test_store_nsan
+; CHECK: [[V0:%[0-9]+]] = load i64, i64* @__tysan_app_memory_mask
+; CHECK: [[V1:%[0-9]+]] = load i64, i64* @__tysan_shadow_memory_address
+; CHECK: [[V2:%[0-9]+]] = ptrtoint i32* %a to i64
+; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]]
+; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3
+; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]]
+; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to i8**
+; CHECK: [[V7:%[0-9]+]] = load i8*, i8** [[V6]]
+; CHECK: [[V8:%[0-9]+]] = icmp eq i8* [[V7]], null
+; CHECK: br i1 [[V8]], label %{{[0-9]+}}, label %{{[0-9]+}}, !prof ![[PROFMD]]
+
+; CHECK: store i8* bitcast ({ {{.*}} }* @__tysan_v1_int_o_0 to i8*), i8** [[V6]]
+; CHECK: [[V10:%[0-9]+]] = add i64 [[V5]], 8
+; CHECK: [[V11:%[0-9]+]] = inttoptr i64 [[V10]] to i8**
+; CHECK: store i8* inttoptr (i64 -1 to i8*), i8** [[V11]]
+; CHECK: [[V12:%[0-9]+]] = add i64 [[V5]], 16
+; CHECK: [[V13:%[0-9]+]] = inttoptr i64 [[V12]] to i8**
+; CHECK: store i8* inttoptr (i64 -1 to i8*), i8** [[V13]]
+; CHECK: [[V14:%[0-9]+]] = add i64 [[V5]], 24
+; CHECK: [[V15:%[0-9]+]] = inttoptr i64 [[V14]] to i8**
+; CHECK: store i8* inttoptr (i64 -1 to i8*), i8** [[V15]]
+; CHECK: br label %{{[0-9]+}}
+
+; CHECK: store i32 42, i32* %a, align 4, !tbaa !{{[0-9]+}}
+; CHECK: ret void
+}
+
+define void @test_anon_ns(i32* %a, i32* %b) sanitize_type {
+entry:
+  store i32 42, i32* %a, align 4, !tbaa !8
+  store i32 43, i32* %b, align 4, !tbaa !10
+  ret void
+
+; CHECK-LABEL: @test_anon_ns
+; CHECK: store i8* bitcast ({ {{.*}} }* @__tysan_v1___ZTSN12__GLOBAL____N__11zE_o_24 to i8*), i8**
+; CHECK: ret void
+}
+
+define void @test_anon_type(i32* %a) sanitize_type {
+entry:
+  store i32 42, i32* %a, align 4, !tbaa !12
+  ret void
+
+; CHECK-LABEL: @test_anon_type
+; CHECK: store i8* bitcast ({ {{.*}} }* @__tysan_v1_____anonymous__027d9e575c5d34cb5d60d6a1d6276f95_o_24 to i8*), i8**
+; CHECK: ret void
+}
+
+declare void @alloca_test_use([10 x i8]*)
+define void @alloca_test() sanitize_type {
+entry:
+  %x = alloca [10 x i8], align 1
+  call void @alloca_test_use([10 x i8]* %x)
+  ret void
+
+; CHECK-LABEL: @alloca_test
+; CHECK: [[V0:%[0-9]+]] = load i64, i64* @__tysan_app_memory_mask
+; CHECK: [[V1:%[0-9]+]] = load i64, i64* @__tysan_shadow_memory_address
+; CHECK: %x = alloca [10 x i8], align 1
+; CHECK: [[V2:%[0-9]+]] = ptrtoint [10 x i8]* %x to i64
+; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]]
+; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3
+; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]]
+; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to i8*
+; CHECK: call void @llvm.memset.p0i8.i64(i8* [[V6]], i8 0, i64 80, i32 8, i1 false)
+; CHECK: call void @alloca_test_use([10 x i8]* %x)
+; CHECK: ret void
+}
+
+%struct.s20 = type { i32, i32, [24 x i8] }
+define void @byval_test(%struct.s20* byval align 32 %x) sanitize_type {
+entry:
+  ret void
+
+; CHECK-LABEL: @byval_test
+; CHECK: [[V0:%[0-9]+]] = load i64, i64* @__tysan_app_memory_mask
+; CHECK: [[V1:%[0-9]+]] = load i64, i64* @__tysan_shadow_memory_address
+; CHECK: [[V2:%[0-9]+]] = ptrtoint %struct.s20* %x to i64
+; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]]
+; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3
+; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]]
+; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to i8*
+; CHECK: call void @llvm.memset.p0i8.i64(i8* [[V6]], i8 0, i64 256, i32 8, i1 false)
+; CHECK: ret void
+; NOTE: Ideally, we'd get the type from the caller's copy of the data (instead
+; of setting it all to unknown).
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) nounwind
+
+define void @memintr_test(i8* %a, i8* %b) nounwind uwtable sanitize_type {
+  entry:
+  tail call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 100, i32 1, i1 false)
+  tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %a, i8* %b, i64 100, i32 1, i1 false)
+  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 100, i32 1, i1 false)
+  ret void
+
+; CHECK-LABEL: @memintr_test
+; CHECK: [[V0:%[0-9]+]] = load i64, i64* @__tysan_app_memory_mask
+; CHECK: [[V1:%[0-9]+]] = load i64, i64* @__tysan_shadow_memory_address
+; CHECK: [[V2:%[0-9]+]] = ptrtoint i8* %a to i64
+; CHECK: [[V3:%[0-9]+]] = and i64 [[V2]], [[V0]]
+; CHECK: [[V4:%[0-9]+]] = shl i64 [[V3]], 3
+; CHECK: [[V5:%[0-9]+]] = add i64 [[V4]], [[V1]]
+; CHECK: [[V6:%[0-9]+]] = inttoptr i64 [[V5]] to i8*
+; CHECK: call void @llvm.memset.p0i8.i64(i8* [[V6]], i8 0, i64 800, i32 8, i1 false)
+; CHECK: tail call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 100, i32 1, i1 false)
+; CHECK: [[V7:%[0-9]+]] = ptrtoint i8* %a to i64
+; CHECK: [[V8:%[0-9]+]] = and i64 [[V7]], [[V0]]
+; CHECK: [[V9:%[0-9]+]] = shl i64 [[V8]], 3
+; CHECK: [[V10:%[0-9]+]] = add i64 [[V9]], [[V1]]
+; CHECK: [[V11:%[0-9]+]] = inttoptr i64 [[V10]] to i8*
+; CHECK: call void @llvm.memset.p0i8.i64(i8* [[V11]], i8 0, i64 800, i32 8, i1 false)
+; CHECK: tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %a, i8* %b, i64 100, i32 1, i1 false)
+; CHECK: [[V12:%[0-9]+]] = ptrtoint i8* %a to i64
+; CHECK: [[V13:%[0-9]+]] = and i64 [[V12]], [[V0]]
+; CHECK: [[V14:%[0-9]+]] = shl i64 [[V13]], 3
+; CHECK: [[V15:%[0-9]+]] = add i64 [[V14]], [[V1]]
+; CHECK: [[V16:%[0-9]+]] = inttoptr i64 [[V15]] to i8*
+; CHECK: call void @llvm.memset.p0i8.i64(i8* [[V16]], i8 0, i64 800, i32 8, i1 false)
+; CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 100, i32 1, i1 false)
+; CHECK: ret void
+}
+
+define void @test_swifterror(i8** swifterror) sanitize_type {
+  %swifterror_ptr_value = load i8*, i8** %0
+  ret void
+
+; CHECK-LABEL: @test_swifterror
+; CHECK-NOT: __tysan_check
+; CHECK: ret void
+}
+
+define void @test_swifterror_2(i8** swifterror) sanitize_type {
+  store i8* null, i8** %0
+  ret void
+
+; CHECK-LABEL: @test_swifterror_2
+; CHECK-NOT: __tysan_check
+; CHECK: ret void
+}
+
+; CHECK: ![[PROFMD]] = !{!"branch_weights", i32 1, i32 100000}
+
+!0 = !{!"Simple C++ TBAA"}
+!1 = !{!"omnipotent char", !0, i64 0}
+!2 = !{!"int", !1, i64 0}
+!3 = !{!2, !2, i64 0}
+!4 = !{!"_ZTS1x", !2, i64 0, !2, i64 4}
+!5 = !{!"_ZTS1v", !2, i64 8, !2, i64 12, !4, i64 16}
+!6 = !{!5, !2, i64 12}
+!7 = !{!"_ZTSN12_GLOBAL__N_11zE", !2, i64 24}
+!8 = !{!7, !2, i64 24}
+!9 = !{!"_ZTS1yIN12_GLOBAL__N_11zEE", !2, i64 24}
+!10 = !{!9, !2, i64 24}
+!11 = !{!"", !2, i64 24}
+!12 = !{!11, !2, i64 24}