Index: include/llvm/Transforms/Instrumentation.h
===================================================================
--- include/llvm/Transforms/Instrumentation.h
+++ include/llvm/Transforms/Instrumentation.h
@@ -127,7 +127,8 @@
 // Insert MemorySanitizer instrumentation (detection of uninitialized reads)
 FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0,
-                                        bool Recover = false);
+                                        bool Recover = false,
+                                        bool EnableKmsan = false);
 
 // Insert ThreadSanitizer (race detection) instrumentation
 FunctionPass *createThreadSanitizerPass();
 
Index: lib/Transforms/Instrumentation/MemorySanitizer.cpp
===================================================================
--- lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -89,7 +89,29 @@
 /// implementation ignores the load aspect of CAS/RMW, always returning a clean
 /// value. It implements the store part as a simple atomic store by storing a
 /// clean shadow.
-//
+///
+/// KernelMemorySanitizer (KMSAN) implementation.
+///
+/// The major differences between KMSAN and MSan instrumentation are:
+/// - KMSAN implies msan-track-origins=2, msan-keep-going=true;
+/// - KMSAN allocates shadow and origin memory for each page separately, so
+///   there are no explicit accesses to shadow and origin memory.
+///   Shadow and origin values for a particular X-byte memory location
+///   (X=1,2,4,8) are accessed via pointers obtained via the
+///     __msan_metadata_ptr_for_load_X(ptr)
+///     __msan_metadata_ptr_for_store_X(ptr)
+///   functions. These functions check that the X-byte access is possible and
+///   return the pointers to the shadow and origin memory. Accesses of other
+///   sizes are handled with
+///     __msan_metadata_ptr_for_load_n(ptr, size)
+///     __msan_metadata_ptr_for_store_n(ptr, size)
+/// - TLS variables are stored in a single struct in per-task storage. A call
+///   to a function __msan_get_context_state() returning a pointer to that
+///   struct is inserted into every instrumented function before the entry block;
+/// - __msan_warning() now becomes __msan_warning_32(u32 origin);
+///
+/// KernelMemorySanitizer only supports X86_64 at the moment.
+///
 //===----------------------------------------------------------------------===//
 
 #include "llvm/ADT/APInt.h"
@@ -199,6 +220,11 @@
     cl::desc("exact handling of relational integer ICmp"),
     cl::Hidden, cl::init(false));
 
+static cl::opt<bool>
+    ClEnableKmsan("msan-kernel",
+                  cl::desc("Enable KernelMemorySanitizer instrumentation"),
+                  cl::Hidden, cl::init(false));
+
 // This flag controls whether we check the shadow of the address
 // operand of load or store. Such bugs are very rare, since load from
 // a garbage address typically results in SEGV, but still happen
@@ -357,11 +383,12 @@
   // Pass identification, replacement for typeid.
   static char ID;
 
-  MemorySanitizer(int TrackOrigins = 0, bool Recover = false)
-      : FunctionPass(ID),
-        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
-        Recover(Recover || ClKeepGoing) {}
-
+  MemorySanitizer(int TrackOrigins = 0, bool Recover = false,
+                  bool EnableKmsan = false)
+      : FunctionPass(ID), CompileKernel(EnableKmsan || ClEnableKmsan),
+        TrackOrigins(
+            CompileKernel ? 2 : std::max(TrackOrigins, (int)ClTrackOrigins)),
+        Recover(Recover || ClKeepGoing || CompileKernel), WarningFn(nullptr) {}
 
   StringRef getPassName() const override { return "MemorySanitizer"; }
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
@@ -379,6 +406,11 @@
   friend struct VarArgPowerPC64Helper;
 
   void initializeCallbacks(Module &M);
+  void createKernelApi(Module &M);
+  void createUserspaceApi(Module &M);
+
+  /// \brief True if we're compiling the Linux kernel.
+  bool CompileKernel;
 
   /// \brief Track origins (allocation points) of uninitialized values.
   int TrackOrigins;
@@ -389,32 +421,38 @@
   Type *OriginTy;
 
   /// \brief Thread-local shadow storage for function parameters.
-  GlobalVariable *ParamTLS;
+  Value *ParamTLS;
 
   /// \brief Thread-local origin storage for function parameters.
-  GlobalVariable *ParamOriginTLS;
+  Value *ParamOriginTLS;
 
   /// \brief Thread-local shadow storage for function return value.
-  GlobalVariable *RetvalTLS;
+  Value *RetvalTLS;
 
   /// \brief Thread-local origin storage for function return value.
-  GlobalVariable *RetvalOriginTLS;
+  Value *RetvalOriginTLS;
 
   /// \brief Thread-local shadow storage for in-register va_arg function
   /// parameters (x86_64-specific).
-  GlobalVariable *VAArgTLS;
+  Value *VAArgTLS;
+
+  /// \brief Thread-local origin storage for in-register va_arg function
+  /// parameters (x86_64-specific, KMSAN only).
+  Value *VAArgOriginTLS;
 
   /// \brief Thread-local shadow storage for va_arg overflow area
   /// (x86_64-specific).
-  GlobalVariable *VAArgOverflowSizeTLS;
+  Value *VAArgOverflowSizeTLS;
 
   /// \brief Thread-local space used to pass origin value to the UMR reporting
   /// function.
-  GlobalVariable *OriginTLS;
+  Value *OriginTLS;
 
   /// \brief The run-time callback to print a warning.
   Value *WarningFn = nullptr;
+  /// \brief KMSAN-specific error callback that takes the origin.
+  Value *MsanWarning32Fn;
 
   // These arrays are indexed by log2(AccessSize).
   Value *MaybeWarningFn[kNumberOfAccessSizes];
   Value *MaybeStoreOriginFn[kNumberOfAccessSizes];
@@ -423,6 +461,14 @@
   /// allocation.
   Value *MsanSetAllocaOrigin4Fn;
 
+  Value *getKmsanShadowOriginAccessFn(bool isStore, int size);
+
+  Value *MsanPoisonAllocaFn;
+  Value *MsanUnpoisonFn;
+  Value *MsanLoadArgShadowFn, *MsanLoadArgOriginFn;
+  Value *MsanStoreArgShadowFn;
+  Value *MsanStoreArgShadowOriginFn;
+
   /// \brief Run-time helper that poisons stack on function entry.
   Value *MsanPoisonStackFn;
 
@@ -433,6 +479,14 @@
   /// \brief MSan runtime replacements for memmove, memcpy and memset.
   Value *MemmoveFn, *MemcpyFn, *MemsetFn;
 
+  /// \brief KMSAN callback that returns a pointer to the per-task context state.
+  Value *GetContextStateFn;
+
+  /// \brief Functions returning a pair of shadow/origin pointers.
+  Value *MsanMetadataPtrForLoadN, *MsanMetadataPtrForStoreN;
+  Value *MsanMetadataPtrForLoad_1_8[4];
+  Value *MsanMetadataPtrForStore_1_8[4];
+
   /// \brief Memory map parameters used in application-to-shadow calculation.
   const MemoryMapParams *MapParams;
 
@@ -459,8 +513,9 @@
     MemorySanitizer, "msan",
     "MemorySanitizer: detects uninitialized reads.", false, false)
 
-FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins, bool Recover) {
-  return new MemorySanitizer(TrackOrigins, Recover);
+FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins, bool Recover,
+                                              bool CompileKernel) {
+  return new MemorySanitizer(TrackOrigins, Recover, CompileKernel);
 }
 
 /// \brief Create a non-const global initialized with the given string.
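
For reference, below is a rough sketch of the runtime-side declarations that the KMSAN instrumentation binds to. It is not part of this patch: the struct name and the exact kernel-side types are assumptions, chosen only to mirror the { i8*, i32* } return type and the i8*/i64 argument types built in createKernelApi() in the next hunk.

  // Hypothetical KMSAN runtime interface, shown to illustrate the assumed ABI.
  struct msan_metadata_ptrs {
    char *shadow;          // pointer to the shadow bytes of the accessed range
    unsigned int *origin;  // pointer to the 32-bit origin slot for that range
  };

  extern "C" msan_metadata_ptrs __msan_metadata_ptr_for_load_4(char *addr);
  extern "C" msan_metadata_ptrs __msan_metadata_ptr_for_store_4(char *addr);
  extern "C" msan_metadata_ptrs __msan_metadata_ptr_for_load_n(char *addr,
                                                               unsigned long size);
  extern "C" msan_metadata_ptrs __msan_metadata_ptr_for_store_n(char *addr,
                                                                unsigned long size);
  extern "C" void __msan_warning_32(unsigned int origin);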
@@ -475,13 +530,94 @@ GlobalValue::PrivateLinkage, StrConst, ""); } -/// \brief Insert extern declaration of runtime-provided functions and globals. -void MemorySanitizer::initializeCallbacks(Module &M) { - // Only do this once. - if (WarningFn) - return; +/// \brief Create KMSAN API callbacks. +void MemorySanitizer::createKernelApi(Module &M) { + IRBuilder<> IRB(*C); + + // These will be initialized in insertKmsanPrologue(). + RetvalTLS = nullptr; + RetvalOriginTLS = nullptr; + ParamTLS = nullptr; + ParamOriginTLS = nullptr; + VAArgTLS = nullptr; + VAArgOriginTLS = nullptr; + VAArgOverflowSizeTLS = nullptr; + // OriginTLS is unused in the kernel. + OriginTLS = nullptr; + + // Like __msan_warning(), but takes an origin. + MsanWarning32Fn = M.getOrInsertFunction("__msan_warning_32", IRB.getVoidTy(), + IRB.getInt32Ty()); + // Requests the per-task context state (kmsan_context_state*) from the + // runtime library. + GetContextStateFn = M.getOrInsertFunction( + "__msan_get_context_state", + PointerType::get( + StructType::get(ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), + ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), + ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), + ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), /* va_arg_origin */ + IRB.getInt64Ty(), + ArrayType::get(OriginTy, kParamTLSSize / 4), OriginTy, + OriginTy), + 0)); + + Type *RetTy = StructType::get( + PointerType::get(IRB.getInt8Ty(), 0), + PointerType::get(IRB.getInt32Ty(), 0)); + + for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) { + std::string name_load = + "__msan_metadata_ptr_for_load_" + std::to_string(size); + std::string name_store = + "__msan_metadata_ptr_for_store_" + std::to_string(size); + MsanMetadataPtrForLoad_1_8[ind] = M.getOrInsertFunction( + name_load, RetTy, + PointerType::get(IRB.getInt8Ty(), 0)); + MsanMetadataPtrForStore_1_8[ind] = M.getOrInsertFunction( + name_store, RetTy, + PointerType::get(IRB.getInt8Ty(), 0)); + } + + MsanMetadataPtrForLoadN = M.getOrInsertFunction( + "__msan_metadata_ptr_for_load_n", RetTy, + PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty()); + MsanMetadataPtrForStoreN = M.getOrInsertFunction( + "__msan_metadata_ptr_for_store_n", RetTy, + PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty()); + + // Functions for poisoning and unpoisoning memory. + MsanPoisonAllocaFn = M.getOrInsertFunction( + "__msan_poison_alloca", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, + IRB.getInt8PtrTy(), IntptrTy); + + MsanUnpoisonFn = M.getOrInsertFunction("__msan_unpoison", IRB.getVoidTy(), + IRB.getInt8PtrTy(), IntptrTy); + + // Functions for loading the shadow for a given memory range into an app + // memory buffer and storing it back to the shadow memory. + MsanLoadArgShadowFn = + M.getOrInsertFunction("__msan_load_arg_shadow", IRB.getVoidTy(), + IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy); + MsanLoadArgOriginFn = + M.getOrInsertFunction("__msan_load_arg_origin", IRB.getVoidTy(), + IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy); + + MsanStoreArgShadowFn = + M.getOrInsertFunction("__msan_store_arg_shadow", IRB.getVoidTy(), + IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy); + + // Functions for loading the origin for a given memory range into an app + // memory buffer and storing it back to the origin memory. 
+ MsanStoreArgShadowOriginFn = + M.getOrInsertFunction("__msan_store_arg_shadow_origin", IRB.getVoidTy(), + IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy); +} + +void MemorySanitizer::createUserspaceApi(Module &M) { IRBuilder<> IRB(*C); + // Create the callback. // FIXME: this function should have "Cold" calling conv, // which is not yet implemented. @@ -489,6 +625,39 @@ : "__msan_warning_noreturn"; WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy()); + // Create the global TLS variables. + RetvalTLS = new GlobalVariable( + M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false, + GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr, + GlobalVariable::InitialExecTLSModel); + + RetvalOriginTLS = new GlobalVariable( + M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr, + "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); + + ParamTLS = new GlobalVariable( + M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false, + GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr, + GlobalVariable::InitialExecTLSModel); + + ParamOriginTLS = new GlobalVariable( + M, ArrayType::get(OriginTy, kParamTLSSize / 4), false, + GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls", + nullptr, GlobalVariable::InitialExecTLSModel); + + VAArgTLS = new GlobalVariable( + M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false, + GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr, + GlobalVariable::InitialExecTLSModel); + VAArgOverflowSizeTLS = new GlobalVariable( + M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr, + "__msan_va_arg_overflow_size_tls", nullptr, + GlobalVariable::InitialExecTLSModel); + OriginTLS = new GlobalVariable( + M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr, + "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); + + // Create the _maybe_ functions. for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; AccessSizeIndex++) { unsigned AccessSize = 1 << AccessSizeIndex; @@ -503,12 +672,23 @@ IRB.getInt8PtrTy(), IRB.getInt32Ty()); } + // Functions for stack instrumentation. MsanSetAllocaOrigin4Fn = M.getOrInsertFunction( "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy(), IntptrTy); MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy); +} + +/// \brief Insert extern declaration of runtime-provided functions and globals. +void MemorySanitizer::initializeCallbacks(Module &M) { + // Only do this once. + static bool CallbacksInitialized = false; + if (CallbacksInitialized) + return; + + IRBuilder<> IRB(*C); MsanChainOriginFn = M.getOrInsertFunction( "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty()); MemmoveFn = M.getOrInsertFunction( @@ -521,40 +701,34 @@ "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy); - // Create globals. 
- RetvalTLS = new GlobalVariable( - M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false, - GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr, - GlobalVariable::InitialExecTLSModel); - RetvalOriginTLS = new GlobalVariable( - M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr, - "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); - - ParamTLS = new GlobalVariable( - M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false, - GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr, - GlobalVariable::InitialExecTLSModel); - ParamOriginTLS = new GlobalVariable( - M, ArrayType::get(OriginTy, kParamTLSSize / 4), false, - GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls", - nullptr, GlobalVariable::InitialExecTLSModel); - - VAArgTLS = new GlobalVariable( - M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false, - GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr, - GlobalVariable::InitialExecTLSModel); - VAArgOverflowSizeTLS = new GlobalVariable( - M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr, - "__msan_va_arg_overflow_size_tls", nullptr, - GlobalVariable::InitialExecTLSModel); - OriginTLS = new GlobalVariable( - M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr, - "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); + if (!CompileKernel) { + createUserspaceApi(M); + } else { + createKernelApi(M); + } // We insert an empty inline asm after __msan_report* to avoid callback merge. EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), StringRef(""), StringRef(""), /*hasSideEffects=*/true); + CallbacksInitialized = true; +} + +Value *MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore, int size) { + Value **Fns = + isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8; + switch (size) { + case 1: + return Fns[0]; + case 2: + return Fns[1]; + case 4: + return Fns[2]; + case 8: + return Fns[3]; + default: + return nullptr; + } } /// \brief Module-level initialization. @@ -693,6 +867,7 @@ ValueMap ShadowMap, OriginMap; std::unique_ptr VAHelper; const TargetLibraryInfo *TLI; + BasicBlock *ActualFnStart; // The following flags disable parts of MSan instrumentation based on // blacklist contents and command-line options. @@ -776,14 +951,14 @@ } } - void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin, + void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin, Value *MaybeOriginPtr, unsigned Alignment, bool AsCall) { const DataLayout &DL = F.getParent()->getDataLayout(); unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment); unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType()); if (Shadow->getType()->isAggregateType()) { paintOrigin(IRB, updateOrigin(Origin, IRB), - getOriginPtr(Addr, IRB, Alignment), StoreSize, + MaybeOriginPtr ? MaybeOriginPtr : getOriginPtr(Addr, IRB, Alignment), StoreSize, OriginAlignment); } else { Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB); @@ -791,7 +966,7 @@ if (ConstantShadow) { if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) paintOrigin(IRB, updateOrigin(Origin, IRB), - getOriginPtr(Addr, IRB, Alignment), StoreSize, + MaybeOriginPtr ? 
MaybeOriginPtr : getOriginPtr(Addr, IRB, Alignment), StoreSize,
                      OriginAlignment);
         return;
       }
@@ -813,34 +988,94 @@
             Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
         IRBuilder<> IRBNew(CheckTerm);
         paintOrigin(IRBNew, updateOrigin(Origin, IRBNew),
-                    getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
+                    MaybeOriginPtr ? MaybeOriginPtr : getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
                     OriginAlignment);
       }
     }
   }
 
+  void setShadowOriginForStoreKmsanPtrs(Instruction *I, Value *Addr,
+                                        Value *Shadow, Value *Origin, unsigned Alignment) {
+    IRBuilder<> IRB(I);
+    Type *ShadowTy = Shadow->getType();
+    int BitWidth = VectorOrPrimitiveTypeSizeInBits(ShadowTy);
+    int Size = BitWidth / 8;
+    unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
+    // Make sure Size is at least 1 if the operand is i1.
+    if (Size * 8 < BitWidth)
+      Size++;
+
+    Value *SizeVal = ConstantInt::get(IRB.getInt64Ty(), Size);
+    Value *AddrCast = IRB.CreatePointerCast(Addr, PointerType::get(IRB.getInt8Ty(), 0));
+    Instruction *ShadowOriginPtrs;
+    Value *Getter = MS.getKmsanShadowOriginAccessFn(/*isStore*/ true, Size);
+    if (Getter) {
+      ShadowOriginPtrs = IRB.CreateCall(Getter, AddrCast);
+    } else {
+      ShadowOriginPtrs = IRB.CreateCall(MS.MsanMetadataPtrForStoreN, {AddrCast, SizeVal});
+    }
+    Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
+    ShadowPtr = IRB.CreatePointerCast(ShadowPtr, PointerType::get(ShadowTy, 0));
+    IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);
+    Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);
+    storeOrigin(IRB, Addr, Shadow, Origin, OriginPtr, /*Alignment*/ OriginAlignment,
+                /*AsCall*/ false);
+    if (ClCheckAccessAddress)
+      insertShadowCheck(Addr, ShadowOriginPtrs);
+  }
+
+  void storeShadowOrigin(Instruction *I, Value *Addr,
+                         Value *Shadow, Value *Origin, bool InstrumentWithCalls, int StoreAlignment) {
+    if (MS.CompileKernel) {
+      setShadowOriginForStoreKmsanPtrs(I, Addr, Shadow, Origin, StoreAlignment);
+    } else {
+      IRBuilder<> IRB(I);
+      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
+
+      StoreInst *NewI =
+          IRB.CreateAlignedStore(Shadow, ShadowPtr, StoreAlignment);
+      DEBUG(dbgs() << "  STORE: " << *NewI << "\n");
+      if (ClCheckAccessAddress)
+        insertShadowCheck(Addr, NewI);
+
+      // TODO(glider): shouldn't the alignment be 4 here?
+      if (MS.TrackOrigins && !I->isAtomic())
+        storeOrigin(IRB, Addr, Shadow, Origin, /*MaybeOriginPtr*/ nullptr,
+                    StoreAlignment, InstrumentWithCalls);
+    }
+  }
+
   void materializeStores(bool InstrumentWithCalls) {
     for (StoreInst *SI : StoreList) {
       IRBuilder<> IRB(SI);
       Value *Val = SI->getValueOperand();
       Value *Addr = SI->getPointerOperand();
       Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
-      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
-
-      StoreInst *NewSI =
-          IRB.CreateAlignedStore(Shadow, ShadowPtr, SI->getAlignment());
-      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
-
-      if (ClCheckAccessAddress)
-        insertShadowCheck(Addr, NewSI);
+      Value *Origin = MS.TrackOrigins ?
getOrigin(Val) : nullptr; + storeShadowOrigin(SI, Addr, Shadow, Origin, InstrumentWithCalls, SI->getAlignment()); if (SI->isAtomic()) SI->setOrdering(addReleaseOrdering(SI->getOrdering())); + } + } - if (MS.TrackOrigins && !SI->isAtomic()) - storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI->getAlignment(), - InstrumentWithCalls); + /// \brief Helper function to insert a warning at IRB's current insert point. + void insertWarningFn(IRBuilder<> &IRB, Value *Origin) { + if (!Origin) Origin = (Value *)IRB.getInt32(0); + if (!MS.CompileKernel) { + if (MS.TrackOrigins) { + IRB.CreateStore(Origin, MS.OriginTLS); + } + IRB.CreateCall(MS.WarningFn, {}); + } else { + IRB.CreateCall(MS.MsanWarning32Fn, Origin); } + IRB.CreateCall(MS.EmptyAsm, {}); + // FIXME: Insert UnreachableInst if !MS.Recover? + // This may invalidate some of the following checks and needs to be done + // at the very end. } void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin, @@ -853,15 +1088,7 @@ Constant *ConstantShadow = dyn_cast_or_null(ConvertedShadow); if (ConstantShadow) { if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) { - if (MS.TrackOrigins) { - IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0), - MS.OriginTLS); - } - IRB.CreateCall(MS.WarningFn, {}); - IRB.CreateCall(MS.EmptyAsm, {}); - // FIXME: Insert UnreachableInst if !MS.Recover? - // This may invalidate some of the following checks and needs to be done - // at the very end. + insertWarningFn(IRB, Origin); } return; } @@ -885,12 +1112,7 @@ /* Unreachable */ !MS.Recover, MS.ColdCallWeights); IRB.SetInsertPoint(CheckTerm); - if (MS.TrackOrigins) { - IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0), - MS.OriginTLS); - } - IRB.CreateCall(MS.WarningFn, {}); - IRB.CreateCall(MS.EmptyAsm, {}); + insertWarningFn(IRB, Origin); DEBUG(dbgs() << " CHECK: " << *Cmp << "\n"); } } @@ -905,9 +1127,30 @@ DEBUG(dbgs() << "DONE:\n" << F); } + BasicBlock *insertKmsanPrologue(Function &F) { + BasicBlock *ret = + SplitBlock(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHI()); + IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); + Value *ContextState = IRB.CreateCall(MS.GetContextStateFn, {}); + Constant *Zero = IRB.getInt32(0); + MS.ParamTLS = IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(0)}, "param_shadow"); + MS.RetvalTLS = IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(1)}, "retval_shadow"); + MS.VAArgTLS = IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(2)}, "va_arg_shadow"); + MS.VAArgOriginTLS = IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(3)}, "va_arg_origin"); + MS.VAArgOverflowSizeTLS = + IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(4)}, "va_arg_overflow_size"); + MS.ParamOriginTLS = IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(5)}, "param_origin"); + MS.RetvalOriginTLS = IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(6)}, "retval_origin"); + return ret; + } + /// \brief Add MemorySanitizer instrumentation to a function. bool runOnFunction() { MS.initializeCallbacks(*F.getParent()); + if (MS.CompileKernel) + ActualFnStart = insertKmsanPrologue(F); + else + ActualFnStart = &F.getEntryBlock(); // In the presence of unreachable blocks, we may see Phi nodes with // incoming nodes from such blocks. Since InstVisitor skips unreachable @@ -918,7 +1161,7 @@ // Iterate all BBs in depth-first order and create shadow instructions // for all instructions (where applicable). // For PHI nodes we create dummy shadow PHIs which will be finalized later. 
-    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
+    for (BasicBlock *BB : depth_first(ActualFnStart))
       visit(*BB);
 
     // Finalize PHI nodes.
@@ -1059,9 +1302,10 @@
   Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                  int ArgOffset) {
     Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
-    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+    if (ArgOffset)
+      Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
     return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
-                              "_msarg");
+                              "_msarg_s_");
   }
 
   /// \brief Compute the origin address for a given function argument.
@@ -1069,16 +1313,17 @@
                                  int ArgOffset) {
     if (!MS.TrackOrigins) return nullptr;
     Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
-    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+    if (ArgOffset)
+      Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
     return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                               "_msarg_o");
   }
 
   /// \brief Compute the shadow address for a retval.
   Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
-    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
-    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
-                              "_msret");
+    return IRB.CreatePointerCast(MS.RetvalTLS,
+                                 PointerType::get(getShadowTy(A), 0),
+                                 "_msret");
   }
 
   /// \brief Compute the origin address for a retval.
@@ -1148,6 +1393,50 @@
     return Constant::getNullValue(MS.OriginTy);
   }
 
+  /// \brief Unpoison an aligned byte range.
+  void unpoisonRange(IRBuilder<> &IRB, Value *Addr, unsigned Size, unsigned ArgAlign) {
+    if (!MS.CompileKernel) {
+      IRB.CreateMemSet(
+          getShadowPtr(Addr, IRB.getInt8Ty(), IRB),
+          Constant::getNullValue(IRB.getInt8Ty()), Size, ArgAlign);
+    } else {
+      Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
+      IRB.CreateCall(MS.MsanUnpoisonFn, {Addr, SizeVal});
+    }
+  }
+
+  /// \brief Create a memcpy() call that copies from SrcPtr to Shadow(DstPtr).
+  ///
+  /// For KMSAN the copy is done by the __msan_load_arg_shadow runtime call;
+  /// origins are copied separately by createMemcpyToLoadArgOrigin().
+  Value *createMemcpyToLoadArgShadow(
+      IRBuilder<> &IRB, Value *DstPtr, Value *SrcPtr, Value *Size, int Align) {
+    if (!MS.CompileKernel) {
+      Value *DstShadowPtr =
+          getShadowPtr(DstPtr, IRB.getInt8Ty(), IRB);
+      return IRB.CreateMemCpy(DstShadowPtr, SrcPtr, Size, Align);
+    } else {
+      // The above alignment is unused, because KMSAN runtime doesn't
+      // make any assumptions about it.
+      DstPtr =
+          IRB.CreateIntToPtr(DstPtr, IRB.getInt8PtrTy());
+      return IRB.CreateCall(MS.MsanLoadArgShadowFn, {DstPtr, SrcPtr, Size});
+    }
+  }
+
+  Value *createMemcpyToLoadArgOrigin(
+      IRBuilder<> &IRB, Value *DstPtr, Value *SrcPtr, Value *Size, int Align) {
+    if (!MS.CompileKernel) {
+      // Do nothing, not implemented yet (breaks the userspace ABI).
+      return nullptr;
+    } else {
+      // The above alignment is unused, because KMSAN runtime doesn't
+      // make any assumptions about it.
+      DstPtr =
+          IRB.CreateIntToPtr(DstPtr, IRB.getInt8PtrTy());
+      return IRB.CreateCall(MS.MsanLoadArgOriginFn, {DstPtr, SrcPtr, Size});
+    }
+  }
+
   /// \brief Get the shadow value for a given Value.
/// /// This function either returns the value set earlier with setShadow, @@ -1178,7 +1467,7 @@ if (*ShadowPtr) return *ShadowPtr; Function *F = A->getParent(); - IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI()); + IRBuilder<> EntryIRB(ActualFnStart->getFirstNonPHI()); unsigned ArgOffset = 0; const DataLayout &DL = F->getParent()->getDataLayout(); for (auto &FArg : F->args()) { @@ -1204,14 +1493,13 @@ } if (Overflow) { // ParamTLS overflow. - EntryIRB.CreateMemSet( - getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), - Constant::getNullValue(EntryIRB.getInt8Ty()), Size, ArgAlign); + unpoisonRange(EntryIRB, V, Size, ArgAlign); } else { unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment); - Value *Cpy = EntryIRB.CreateMemCpy( - getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size, - CopyAlign); + Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size); + Base = + EntryIRB.CreatePointerCast(Base, EntryIRB.getInt8PtrTy()); + Value *Cpy = createMemcpyToLoadArgShadow(EntryIRB, V, Base, SizeVal, CopyAlign); DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n"); (void)Cpy; } @@ -1346,11 +1634,11 @@ InstVisitor::visit(I); } - /// \brief Instrument LoadInst + /// \brief Instrument LoadInst in the userspace. /// /// Loads the corresponding shadow and (optionally) origin. /// Optionally, checks that the load address is fully defined. - void visitLoadInst(LoadInst &I) { + void visitLoadInstUserspace(LoadInst &I) { assert(I.getType()->isSized() && "Load type must have size"); assert(!I.getMetadata("nosanitize")); IRBuilder<> IRB(I.getNextNode()); @@ -1364,12 +1652,6 @@ setShadow(&I, getCleanShadow(&I)); } - if (ClCheckAccessAddress) - insertShadowCheck(I.getPointerOperand(), &I); - - if (I.isAtomic()) - I.setOrdering(addAcquireOrdering(I.getOrdering())); - if (MS.TrackOrigins) { if (PropagateShadow) { unsigned Alignment = I.getAlignment(); @@ -1382,6 +1664,57 @@ } } + /// \brief Instrument LoadInst in the kernel. + void visitLoadInstKmsanPtrs(LoadInst &I) { + assert(I.getType()->isSized() && "Load type must have size"); + IRBuilder<> IRB(I.getNextNode()); + Type *ShadowTy = getShadowTy(&I); + Value *Addr = I.getPointerOperand(); + if (PropagateShadow && !I.getMetadata("nosanitize")) { + int BitWidth = VectorOrPrimitiveTypeSizeInBits(ShadowTy); + int Size = BitWidth / 8; + // Make sure Size is at least 1 if the operand is i1. 
+ if (Size * 8 < BitWidth) + Size++; + Value *SizeVal = ConstantInt::get(IRB.getInt64Ty(), Size); + + Addr = IRB.CreatePointerCast(Addr, PointerType::get(IRB.getInt8Ty(), 0)); + Value *ShadowOriginPtrs; + Value *Getter = MS.getKmsanShadowOriginAccessFn(/*isStore*/ false, Size); + if (Getter) { + ShadowOriginPtrs = IRB.CreateCall(Getter, Addr); + } else { + ShadowOriginPtrs = IRB.CreateCall(MS.MsanMetadataPtrForLoadN, {Addr, SizeVal}); + } + Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0); + ShadowPtr = IRB.CreatePointerCast(ShadowPtr, PointerType::get(ShadowTy, 0)); + Value *Shadow = IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"); + Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1); + Value *Origin = IRB.CreateAlignedLoad(OriginPtr, std::max(kMinOriginAlignment, I.getAlignment())); + + setShadow(&I, Shadow); + setOrigin(&I, Origin); + } else { + setShadow(&I, getCleanShadow(&I)); + setOrigin(&I, getCleanOrigin()); + } + } + + + void visitLoadInst(LoadInst &I) { + if (MS.CompileKernel) { + visitLoadInstKmsanPtrs(I); + } else { + visitLoadInstUserspace(I); + } + + if (ClCheckAccessAddress) + insertShadowCheck(I.getPointerOperand(), &I); + + if (I.isAtomic()) + I.setOrdering(addAcquireOrdering(I.getOrdering())); + } + /// \brief Instrument StoreInst /// /// Stores the corresponding shadow and (optionally) origin. @@ -1395,7 +1728,6 @@ IRBuilder<> IRB(&I); Value *Addr = I.getOperand(0); - Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB); if (ClCheckAccessAddress) insertShadowCheck(Addr, &I); @@ -1406,8 +1738,8 @@ if (isa(I)) insertShadowCheck(I.getOperand(1), &I); - IRB.CreateStore(getCleanShadow(&I), ShadowPtr); - + storeShadowOrigin(&I, Addr, getCleanShadow(&I), getCleanOrigin(), + /*InstrumentWithCalls*/false, /*StoreAlignment*/1); setShadow(&I, getCleanShadow(&I)); setOrigin(&I, getCleanOrigin()); } @@ -2016,18 +2348,15 @@ IRBuilder<> IRB(&I); Value* Addr = I.getArgOperand(0); Value *Shadow = getShadow(&I, 1); - Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB); + Value *Origin = MS.TrackOrigins ? getOrigin(&I, 1) : nullptr; // We don't know the pointer alignment (could be unaligned SSE store!). // Have to assume to worst case. - IRB.CreateAlignedStore(Shadow, ShadowPtr, 1); + storeShadowOrigin(&I, Addr, Shadow, Origin, /*InstrumentWithCalls*/false, /*StoreAlignment*/1); if (ClCheckAccessAddress) insertShadowCheck(Addr, &I); - // FIXME: factor out common code from materializeStores - if (MS.TrackOrigins) - IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB, 1)); return true; } @@ -2723,9 +3052,21 @@ if (ArgOffset + Size > kParamTLSSize) break; unsigned ParamAlignment = CS.getParamAlignment(i); unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment); - Store = IRB.CreateMemCpy(ArgShadowBase, - getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB), - Size, Alignment); + if (!MS.CompileKernel) { + Store = IRB.CreateMemCpy(ArgShadowBase, + getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB), + Size, Alignment); + } else { + // Not using the above alignment, as KMSAN runtime doesn't make any + // assumptions about it. 
+ Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size); + ArgShadowBase = + IRB.CreatePointerCast(ArgShadowBase, IRB.getInt8PtrTy()); + A = IRB.CreatePointerCast(A, IRB.getInt8PtrTy()); + Value *OriginPtr = getOriginPtrForArgument(A, IRB, ArgOffset); + OriginPtr = IRB.CreatePointerCast(OriginPtr, IRB.getInt8PtrTy()); + IRB.CreateCall(MS.MsanStoreArgShadowOriginFn, {ArgShadowBase, OriginPtr, A, SizeVal}); + } } else { Size = DL.getTypeAllocSize(A->getType()); if (ArgOffset + Size > kParamTLSSize) break; @@ -2734,11 +3075,13 @@ Constant *Cst = dyn_cast(ArgShadow); if (Cst && Cst->isNullValue()) ArgIsInitialized = true; } - if (MS.TrackOrigins && !ArgIsInitialized) - IRB.CreateStore(getOrigin(A), - getOriginPtrForArgument(A, IRB, ArgOffset)); - (void)Store; - assert(Size != 0 && Store != nullptr); + if (!MS.CompileKernel) { + if (MS.TrackOrigins && !ArgIsInitialized) + IRB.CreateStore(getOrigin(A), + getOriginPtrForArgument(A, IRB, ArgOffset)); + (void)Store; + assert(Size != 0 && Store != nullptr); + } DEBUG(dbgs() << " Param:" << *Store << "\n"); ArgOffset += alignTo(Size, 8); } @@ -2830,15 +3173,20 @@ "_msphi_o")); } - void visitAllocaInst(AllocaInst &I) { - setShadow(&I, getCleanShadow(&I)); - setOrigin(&I, getCleanOrigin()); - IRBuilder<> IRB(I.getNextNode()); - const DataLayout &DL = F.getParent()->getDataLayout(); - uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType()); - Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize); - if (I.isArrayAllocation()) - Len = IRB.CreateMul(Len, I.getArraySize()); + Value *getLocalVarDescription(AllocaInst &I) { + SmallString<2048> StackDescriptionStorage; + raw_svector_ostream StackDescription(StackDescriptionStorage); + // We create a string with a description of the stack allocation and + // pass it into __msan_set_alloca_origin. + // It will be printed by the run-time if stack-originated UMR is found. + // The first 4 bytes of the string are set to '----' and will be replaced + // by __msan_va_arg_overflow_size_tls at the first call. + StackDescription << "----" << I.getName() << "@" << F.getName(); + return createPrivateNonConstGlobalForString(*F.getParent(), + StackDescription.str()); + } + + void instrumentAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) { if (PoisonStack && ClPoisonStackWithCall) { IRB.CreateCall(MS.MsanPoisonStackFn, {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len}); @@ -2849,18 +3197,7 @@ } if (PoisonStack && MS.TrackOrigins) { - SmallString<2048> StackDescriptionStorage; - raw_svector_ostream StackDescription(StackDescriptionStorage); - // We create a string with a description of the stack allocation and - // pass it into __msan_set_alloca_origin. - // It will be printed by the run-time if stack-originated UMR is found. - // The first 4 bytes of the string are set to '----' and will be replaced - // by __msan_va_arg_overflow_size_tls at the first call. 
- StackDescription << "----" << I.getName() << "@" << F.getName(); - Value *Descr = - createPrivateNonConstGlobalForString(*F.getParent(), - StackDescription.str()); - + Value *Descr = getLocalVarDescription(I); IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn, {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len, IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()), @@ -2868,6 +3205,33 @@ } } + void instrumentAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) { + Value *Descr = getLocalVarDescription(I); + Value *Pc = IRB.CreateCall( + Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress), + IRB.getInt32(0)); + IRB.CreateCall(MS.MsanPoisonAllocaFn, + {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len, + IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()), + IRB.CreatePointerCast(Pc, MS.IntptrTy)}); + } + + void visitAllocaInst(AllocaInst &I) { + setShadow(&I, getCleanShadow(&I)); + setOrigin(&I, getCleanOrigin()); + IRBuilder<> IRB(I.getNextNode()); + const DataLayout &DL = F.getParent()->getDataLayout(); + uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType()); + Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize); + if (I.isArrayAllocation()) + Len = IRB.CreateMul(Len, I.getArraySize()); + + if (!MS.CompileKernel) + instrumentAllocaUserspace(I, IRB, Len); + else + instrumentAllocaKmsan(I, IRB, Len); + } + void visitSelectInst(SelectInst& I) { IRBuilder<> IRB(&I); // a = select b, c, d @@ -3016,6 +3380,7 @@ MemorySanitizer &MS; MemorySanitizerVisitor &MSV; Value *VAArgTLSCopy = nullptr; + Value *VAArgTLSOriginCopy = nullptr; Value *VAArgOverflowSize = nullptr; SmallVector VAStartInstrumentationList; @@ -3067,8 +3432,15 @@ uint64_t ArgSize = DL.getTypeAllocSize(RealTy); Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset); OverflowOffset += alignTo(ArgSize, 8); - IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB), - ArgSize, kShadowTLSAlignment); + if (!MS.CompileKernel) { + IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB), + ArgSize, kShadowTLSAlignment); + } else { + Value *ArgSizeV = ConstantInt::get(MS.IntptrTy, ArgSize); + Base = IRB.CreatePointerCast(Base, IRB.getInt8PtrTy()); + A = IRB.CreatePointerCast(A, IRB.getInt8PtrTy()); + IRB.CreateCall(MS.MsanStoreArgShadowFn, {Base, A, ArgSizeV}); + } } else { ArgKind AK = classifyArgument(A); if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset) @@ -3108,36 +3480,31 @@ Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) { Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy); - Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); + if (ArgOffset) + Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0), "_msarg"); } - void visitVAStartInst(VAStartInst &I) override { - if (F.getCallingConv() == CallingConv::Win64) - return; + void unpoisonVAListTagForInst(IntrinsicInst &I) { IRBuilder<> IRB(&I); - VAStartInstrumentationList.push_back(&I); Value *VAListTag = I.getArgOperand(0); - Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); - // Unpoison the whole __va_list_tag. // FIXME: magic ABI constants. 
- IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), - /* size */24, /* alignment */8, false); + MSV.unpoisonRange(IRB, VAListTag, 24, 8); } - void visitVACopyInst(VACopyInst &I) override { + void visitVAStartInst(VAStartInst &I) { if (F.getCallingConv() == CallingConv::Win64) return; - IRBuilder<> IRB(&I); - Value *VAListTag = I.getArgOperand(0); - Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); + VAStartInstrumentationList.push_back(&I); + unpoisonVAListTagForInst(I); + } - // Unpoison the whole __va_list_tag. - // FIXME: magic ABI constants. - IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), - /* size */24, /* alignment */8, false); + void visitVACopyInst(VACopyInst &I) { + if (F.getCallingConv() == CallingConv::Win64) + return; + unpoisonVAListTagForInst(I); } void finalizeInstrumentation() override { @@ -3146,13 +3513,17 @@ if (!VAStartInstrumentationList.empty()) { // If there is a va_start in this function, make a backup copy of // va_arg_tls somewhere in the function entry block. - IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); + IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI()); VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize); VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize); IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8); + if (MS.CompileKernel) { + VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize); + IRB.CreateMemCpy(VAArgTLSOriginCopy, MS.VAArgOriginTLS, CopySize, 8); + } } // Instrument va_start. @@ -3168,10 +3539,12 @@ ConstantInt::get(MS.IntptrTy, 16)), Type::getInt64PtrTy(*MS.C)); Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr); - Value *RegSaveAreaShadowPtr = - MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB); - IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, - AMD64FpEndOffset, 16); + MSV.createMemcpyToLoadArgShadow(IRB, RegSaveAreaPtr, VAArgTLSCopy, + ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), 16); + if (MS.CompileKernel) { + MSV.createMemcpyToLoadArgOrigin(IRB, RegSaveAreaPtr, VAArgTLSOriginCopy, + ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), 16); + } Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr( @@ -3179,11 +3552,9 @@ ConstantInt::get(MS.IntptrTy, 8)), Type::getInt64PtrTy(*MS.C)); Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr); - Value *OverflowArgAreaShadowPtr = - MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB); Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy, AMD64FpEndOffset); - IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16); + MSV.createMemcpyToLoadArgShadow(IRB, OverflowArgAreaPtr, SrcPtr, VAArgOverflowSize, 16); } } }; @@ -3260,7 +3631,7 @@ void finalizeInstrumentation() override { assert(!VAArgSize && !VAArgTLSCopy && "finalizeInstrumentation called twice"); - IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); + IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI()); VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize); @@ -3438,7 +3809,7 @@ if (!VAStartInstrumentationList.empty()) { // If there is a va_start in this function, make a backup copy of // va_arg_tls somewhere in the function entry block. 
- IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); + IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI()); VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), @@ -3658,7 +4029,7 @@ void finalizeInstrumentation() override { assert(!VAArgSize && !VAArgTLSCopy && "finalizeInstrumentation called twice"); - IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); + IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI()); VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize); Index: test/Instrumentation/MemorySanitizer/msan_basic.ll =================================================================== --- test/Instrumentation/MemorySanitizer/msan_basic.ll +++ test/Instrumentation/MemorySanitizer/msan_basic.ll @@ -1,955 +0,0 @@ -; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s -; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s - -target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" -target triple = "x86_64-unknown-linux-gnu" - -; CHECK: @llvm.global_ctors {{.*}} { i32 0, void ()* @msan.module_ctor, i8* null } - -; Check the presence and the linkage type of __msan_track_origins and -; other interface symbols. -; CHECK-NOT: @__msan_track_origins -; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1 -; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0 -; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}] -; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32 -; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}] -; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}] -; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}] -; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64 -; CHECK: @__msan_origin_tls = external thread_local(initialexec) global i32 - - -; Check instrumentation of stores - -define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory { -entry: - store i32 %x, i32* %p, align 4 - ret void -} - -; CHECK-LABEL: @Store -; CHECK: load {{.*}} @__msan_param_tls -; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls -; CHECK: store -; CHECK-ORIGINS: icmp -; CHECK-ORIGINS: br i1 -; CHECK-ORIGINS:
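
A minimal usage sketch, assuming only the interface change in Instrumentation.h above: clients select kernel instrumentation through the new EnableKmsan parameter (or the -msan-kernel flag when running opt), and the pass constructor then forces TrackOrigins=2 and Recover=true, as described in the file header. The helper function name below is hypothetical.

  #include "llvm/Transforms/Instrumentation.h"

  // Builds a MemorySanitizer pass configured for the Linux kernel (KMSAN).
  llvm::FunctionPass *buildKernelMsanPass() {
    // With EnableKmsan = true, the TrackOrigins and Recover arguments are
    // overridden inside the pass constructor, so their values here are moot.
    return llvm::createMemorySanitizerPass(/*TrackOrigins=*/0, /*Recover=*/false,
                                           /*EnableKmsan=*/true);
  }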