diff --git a/compiler-rt/cmake/config-ix.cmake b/compiler-rt/cmake/config-ix.cmake
--- a/compiler-rt/cmake/config-ix.cmake
+++ b/compiler-rt/cmake/config-ix.cmake
@@ -305,7 +305,7 @@
 else()
   set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64} ${ARM32} ${PPC64})
 endif()
-set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64})
+set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64} ${S390X})
 set(ALL_HWASAN_SUPPORTED_ARCH ${X86_64} ${ARM64})
 set(ALL_PROFILE_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${PPC64} ${MIPS32} ${MIPS64} ${S390X} ${SPARC} ${SPARCV9})
diff --git a/compiler-rt/lib/msan/msan.h b/compiler-rt/lib/msan/msan.h
--- a/compiler-rt/lib/msan/msan.h
+++ b/compiler-rt/lib/msan/msan.h
@@ -181,6 +181,20 @@
 #define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x080000000000ULL)
 #define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x140000000000ULL)
 
+#elif SANITIZER_LINUX && SANITIZER_S390_64
+const MappingDesc kMemoryLayout[] = {
+    {0x000000000000ULL, 0x040000000000ULL, MappingDesc::APP, "low memory"},
+    {0x040000000000ULL, 0x080000000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x080000000000ULL, 0x180000000000ULL, MappingDesc::SHADOW, "shadow"},
+    {0x180000000000ULL, 0x1C0000000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x1C0000000000ULL, 0x2C0000000000ULL, MappingDesc::ORIGIN, "origin"},
+    {0x2C0000000000ULL, 0x440000000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x440000000000ULL, 0x500000000000ULL, MappingDesc::APP, "high memory"}};
+
+#define MEM_TO_SHADOW(mem) \
+  ((((uptr)(mem)) & ~0xC00000000000ULL) + 0x080000000000ULL)
+#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x140000000000ULL)
+
 #elif SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 64
 
 // Low memory: main binary, MAP_32BIT mappings and modules
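
To sanity-check the s390x mapping above: clearing bits 0xC00000000000 folds the
two application ranges onto a single window before the shadow base is added,
and SHADOW_TO_ORIGIN offsets the result into the origin range. A minimal
standalone sketch of that arithmetic (not part of the patch; the sample
addresses are arbitrary):

    #include <assert.h>
    #include <stdint.h>

    // Same arithmetic as the s390x MEM_TO_SHADOW / SHADOW_TO_ORIGIN macros.
    static uint64_t mem_to_shadow(uint64_t mem) {
      return (mem & ~0xC00000000000ULL) + 0x080000000000ULL;
    }
    static uint64_t shadow_to_origin(uint64_t shadow) {
      return shadow + 0x140000000000ULL;
    }

    int main(void) {
      // "low memory" app address: lands in the [0x08.., 0x18..) shadow
      // range and the [0x1C.., 0x2C..) origin range of kMemoryLayout.
      assert(mem_to_shadow(0x000000001000ULL) == 0x080000001000ULL);
      assert(shadow_to_origin(0x080000001000ULL) == 0x1C0000001000ULL);
      // "high memory" app address: 0x44.. folds onto 0x04.. first.
      assert(mem_to_shadow(0x440000001000ULL) == 0x0C0000001000ULL);
      assert(shadow_to_origin(0x0C0000001000ULL) == 0x200000001000ULL);
      return 0;
    }
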
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -92,6 +92,20 @@
   using AddressSpaceView = LocalAddressSpaceView;
 };
 
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#elif defined(__s390x__)
+static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G
+
+struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
+  static const uptr kSpaceBeg = 0x440000000000;
+  static const uptr kSpaceSize = 0x020000000000;  // 2T.
+  static const uptr kMetadataSize = sizeof(Metadata);
+  typedef DefaultSizeClassMap SizeClassMap;
+  typedef MsanMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+  using AddressSpaceView = LocalAddressSpaceView;
+};
+
 typedef SizeClassAllocator64<AP64> PrimaryAllocator;
 #elif defined(__aarch64__)
 static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G
diff --git a/compiler-rt/lib/msan/msan_interceptors.cpp b/compiler-rt/lib/msan/msan_interceptors.cpp
--- a/compiler-rt/lib/msan/msan_interceptors.cpp
+++ b/compiler-rt/lib/msan/msan_interceptors.cpp
@@ -953,7 +953,9 @@
 template <class Mmap>
 static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length,
                               int prot, int flags, int fd, OFF64_T offset) {
-  if (addr && !MEM_IS_APP(addr)) {
+  SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+  void *end_addr = (char *)addr + rounded_length - 1;
+  if (addr && (!MEM_IS_APP(addr) || !MEM_IS_APP(end_addr))) {
     if (flags & map_fixed) {
       errno = errno_EINVAL;
       return (void *)-1;
@@ -962,7 +964,18 @@
     }
   }
   void *res = real_mmap(addr, length, prot, flags, fd, offset);
-  if (res != (void *)-1) __msan_unpoison(res, RoundUpTo(length, GetPageSize()));
+  if (res != (void *)-1) {
+    void *end_res = (char *)res + rounded_length - 1;
+    if (MEM_IS_APP(res) && MEM_IS_APP(end_res)) {
+      __msan_unpoison(res, rounded_length);
+    } else {
+      // Application has attempted to map more memory than is supported by
+      // MSAN. Act as if we ran out of memory.
+      internal_munmap(res, length);
+      errno = errno_ENOMEM;
+      return (void *)-1;
+    }
+  }
   return res;
 }
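
The interceptor change above checks both ends of the rounded-up range because
on s390x a mapping that starts in an application region can spill into the
invalid gap that follows it; checking addr alone no longer suffices. A
standalone illustration (mem_is_app is a stand-in derived from the layout in
msan.h, and the 4 KiB page size is an assumption):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    // Stand-in for MEM_IS_APP under the s390x layout: app memory is
    // [0, 0x040000000000) and [0x440000000000, 0x500000000000).
    static int mem_is_app(uint64_t a) {
      return a < 0x040000000000ULL ||
             (a >= 0x440000000000ULL && a < 0x500000000000ULL);
    }

    int main(void) {
      uint64_t addr = 0x03fffffff000ULL; // last app page of "low memory"
      uint64_t len = 2 * PAGE_SIZE;      // rounded length runs past the end
      uint64_t end = addr + len - 1;
      // Start is app memory but the end is not, so the interceptor must
      // refuse: EINVAL for MAP_FIXED, otherwise drop the hint.
      printf("start ok: %d, end ok: %d\n", mem_is_app(addr), mem_is_app(end));
      return 0;
    }
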
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp
@@ -15,14 +15,15 @@
 
 #if SANITIZER_LINUX && SANITIZER_S390
 
-#include "sanitizer_libc.h"
-#include "sanitizer_linux.h"
-
+#include <dlfcn.h>
 #include <errno.h>
 #include <sys/syscall.h>
 #include <sys/utsname.h>
 #include <unistd.h>
 
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+
 namespace __sanitizer {
 
 // --------------- sanitizer_libc.h
@@ -122,8 +123,11 @@
   // adjust this for their own kernels.
   struct utsname buf;
   unsigned int major, minor, patch = 0;
+  // uname may or may not be intercepted. Make sure we use the libc version.
+  using Uname = int (*)(struct utsname *);
+  Uname uname = reinterpret_cast<Uname>(dlsym(RTLD_NEXT, "uname"));
   // This should never fail, but just in case...
-  if (uname(&buf))
+  if (uname == nullptr || uname(&buf))
     return false;
   const char *ptr = buf.release;
   major = internal_simple_strtoll(ptr, &ptr, 10);
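
The hunk above resolves uname through dlsym(RTLD_NEXT, ...) so that the libc
implementation is called even when uname itself is intercepted. A standalone
sketch of that pattern (the real_uname wrapper and main are illustrative, not
from the patch; link with -ldl on older glibc):

    #define _GNU_SOURCE // for RTLD_NEXT
    #include <dlfcn.h>
    #include <stdio.h>
    #include <sys/utsname.h>

    // Resolve the next definition of uname() in the dynamic lookup order,
    // skipping any interposed definition in the current module.
    typedef int (*uname_fn)(struct utsname *);

    static int real_uname(struct utsname *buf) {
      uname_fn next = (uname_fn)dlsym(RTLD_NEXT, "uname");
      return next ? next(buf) : -1;
    }

    int main(void) {
      struct utsname buf;
      if (real_uname(&buf) == 0)
        printf("release: %s\n", buf.release);
      return 0;
    }
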
diff --git a/compiler-rt/test/msan/backtrace.cpp b/compiler-rt/test/msan/backtrace.cpp
--- a/compiler-rt/test/msan/backtrace.cpp
+++ b/compiler-rt/test/msan/backtrace.cpp
@@ -12,8 +12,14 @@
   int sz = backtrace(buf, sizeof(buf) / sizeof(*buf));
   assert(sz > 0);
   for (int i = 0; i < sz; ++i)
-    if (!buf[i])
+    if (!buf[i]) {
+#if defined(__s390x__)
+      // backtrace() may return a bogus trailing NULL on s390x.
+      if (i == sz - 1)
+        continue;
+#endif
       exit(1);
+    }
   char **s = backtrace_symbols(buf, sz);
   assert(s != 0);
   for (int i = 0; i < sz; ++i)
diff --git a/compiler-rt/test/msan/dso-origin.cpp b/compiler-rt/test/msan/dso-origin.cpp
--- a/compiler-rt/test/msan/dso-origin.cpp
+++ b/compiler-rt/test/msan/dso-origin.cpp
@@ -38,9 +38,10 @@
   // CHECK: {{#0 0x.* in my_access .*dso-origin.cpp:}}
   // CHECK: {{#1 0x.* in main .*dso-origin.cpp:}}[[@LINE-5]]
   // CHECK: Uninitialized value was created by a heap allocation
-  // CHECK: {{#0 0x.* in .*malloc}}
-  // CHECK: {{#1 0x.* in my_alloc .*dso-origin.cpp:}}
-  // CHECK: {{#2 0x.* in main .*dso-origin.cpp:}}[[@LINE-10]]
+  // CHECK-FULL-STACK: {{#0 0x.* in .*malloc}}
+  // CHECK-FULL-STACK: {{#1 0x.* in my_alloc .*dso-origin.cpp:}}
+  // CHECK-FULL-STACK: {{#2 0x.* in main .*dso-origin.cpp:}}[[@LINE-10]]
+  // CHECK-SHORT-STACK: {{#0 0x.* in .*malloc}}
   // CHECK: SUMMARY: MemorySanitizer: use-of-uninitialized-value {{.*dso-origin.cpp:.* my_access}}
   return 0;
 }
diff --git a/compiler-rt/test/msan/lit.cfg.py b/compiler-rt/test/msan/lit.cfg.py
--- a/compiler-rt/test/msan/lit.cfg.py
+++ b/compiler-rt/test/msan/lit.cfg.py
@@ -41,7 +41,9 @@
 # For mips64, mips64el we have forced store_context_size to 1 because these
 # archs use slow unwinder which is not async signal safe. Therefore we only
 # check the first frame since store_context size is 1.
-if config.host_arch in ['mips64', 'mips64el']:
+# On s390x BufferedStackTrace::UnwindFast can reliably provide only the top
+# frame.
+if config.host_arch in ['mips64', 'mips64el', 's390x']:
   config.substitutions.append( ('CHECK-%short-stack', 'CHECK-SHORT-STACK'))
 else:
   config.substitutions.append( ('CHECK-%short-stack', 'CHECK-FULL-STACK'))
diff --git a/compiler-rt/test/msan/mmap.cpp b/compiler-rt/test/msan/mmap.cpp
--- a/compiler-rt/test/msan/mmap.cpp
+++ b/compiler-rt/test/msan/mmap.cpp
@@ -24,6 +24,9 @@
          addr >= 0xe200000000ULL;
 #elif defined(__powerpc64__)
   return addr < 0x000100000000ULL || addr >= 0x300000000000ULL;
+#elif defined(__s390x__)
+  return addr < 0x040000000000ULL ||
+         (addr >= 0x440000000000ULL && addr < 0x500000000000ULL);
 #elif defined(__aarch64__)
   struct AddrMapping {
diff --git a/compiler-rt/test/msan/mmap_below_shadow.cpp b/compiler-rt/test/msan/mmap_below_shadow.cpp
--- a/compiler-rt/test/msan/mmap_below_shadow.cpp
+++ b/compiler-rt/test/msan/mmap_below_shadow.cpp
@@ -27,6 +27,9 @@
 #elif defined (__powerpc64__)
   uintptr_t hint = 0x2f0000000000ULL;
   const uintptr_t app_start = 0x300000000000ULL;
+#elif defined(__s390x__)
+  uintptr_t hint = 0x07f000000000ULL;
+  const uintptr_t app_start = 0x020000000000ULL;
 #elif defined (__aarch64__)
   uintptr_t hint = 0x4f0000000ULL;
   const uintptr_t app_start = 0x7000000000ULL;
diff --git a/compiler-rt/test/msan/param_tls_limit.cpp b/compiler-rt/test/msan/param_tls_limit.cpp
--- a/compiler-rt/test/msan/param_tls_limit.cpp
+++ b/compiler-rt/test/msan/param_tls_limit.cpp
@@ -8,6 +8,12 @@
 // AArch64 fails with:
 // void f801(S<801>): Assertion `__msan_test_shadow(&s, sizeof(s)) == -1' failed
 // XFAIL: aarch64
+// When passing huge structs by value, SystemZ uses pointers, therefore this
+// test in its present form is unfortunately not applicable.
+// ABI says: "A struct or union of any other size <...>. Replace such an
+// argument by a pointer to the object, or to a copy where necessary to enforce
+// call-by-value semantics."
+// XFAIL: s390x
 
 #include <sanitizer/msan_interface.h>
 #include <assert.h>
diff --git a/compiler-rt/test/msan/strlen_of_shadow.cpp b/compiler-rt/test/msan/strlen_of_shadow.cpp
--- a/compiler-rt/test/msan/strlen_of_shadow.cpp
+++ b/compiler-rt/test/msan/strlen_of_shadow.cpp
@@ -21,6 +21,8 @@
 #define LINEARIZE_MEM(mem) \
   (((uintptr_t)(mem) & ~0x200000000000ULL) ^ 0x100000000000ULL)
   return (char *)(LINEARIZE_MEM(p) + 0x080000000000ULL);
+#elif defined(__s390x__)
+  return (char *)(((uintptr_t)p & ~0xC00000000000ULL) + 0x080000000000ULL);
 #elif defined(__aarch64__)
   return (char *)((uintptr_t)p ^ 0x6000000000ULL);
 #endif
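
The s390x XFAIL in param_tls_limit.cpp above follows from the quoted ABI rule:
an aggregate that does not fit in a register is passed as a pointer (possibly
to a copy), so the aggregate's bytes never travel through ParamTLS and the
test's shadow-size assumptions do not hold. A hypothetical illustration of
what the rule means for a caller (not from the test suite):

    #include <stdio.h>

    // Per the quoted rule, s390x passes this struct by hidden pointer:
    // the caller makes a copy and passes its address in a register, so
    // only an 8-byte pointer shadow crosses ParamTLS for this argument.
    struct Big { char bytes[800]; };

    void callee(struct Big b) { // receives a reference under the hood
      printf("%d\n", b.bytes[0]);
    }

    int main(void) {
      struct Big b = {{42}};
      callee(b); // compiles to: copy b, pass &copy
      return 0;
    }
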
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -392,6 +392,14 @@
     0x1C0000000000,  // OriginBase
 };
 
+// s390x Linux
+static const MemoryMapParams Linux_S390X_MemoryMapParams = {
+    0xC00000000000,  // AndMask
+    0,               // XorMask (not used)
+    0x080000000000,  // ShadowBase
+    0x1C0000000000,  // OriginBase
+};
+
 // aarch64 Linux
 static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
     0,               // AndMask (not used)
@@ -439,6 +447,11 @@
   &Linux_PowerPC64_MemoryMapParams,
 };
 
+static const PlatformMemoryMapParams Linux_S390_MemoryMapParams = {
+    nullptr,
+    &Linux_S390X_MemoryMapParams,
+};
+
 static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
     nullptr,
     &Linux_AArch64_MemoryMapParams,
@@ -484,6 +497,7 @@
   friend struct VarArgMIPS64Helper;
   friend struct VarArgAArch64Helper;
   friend struct VarArgPowerPC64Helper;
+  friend struct VarArgSystemZHelper;
 
   void initializeModule(Module &M);
   void initializeCallbacks(Module &M);
@@ -796,14 +810,25 @@
        AccessSizeIndex++) {
     unsigned AccessSize = 1 << AccessSizeIndex;
     std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
+    SmallVector<std::pair<unsigned, Attribute>, 2> MaybeWarningFnAttrs;
+    MaybeWarningFnAttrs.push_back(std::make_pair(
+        AttributeList::FirstArgIndex, Attribute::get(*C, Attribute::ZExt)));
+    MaybeWarningFnAttrs.push_back(std::make_pair(
+        AttributeList::FirstArgIndex + 1, Attribute::get(*C, Attribute::ZExt)));
     MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
-        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
-        IRB.getInt32Ty());
+        FunctionName, AttributeList::get(*C, MaybeWarningFnAttrs),
+        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
 
     FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
+    SmallVector<std::pair<unsigned, Attribute>, 2> MaybeStoreOriginFnAttrs;
+    MaybeStoreOriginFnAttrs.push_back(std::make_pair(
+        AttributeList::FirstArgIndex, Attribute::get(*C, Attribute::ZExt)));
+    MaybeStoreOriginFnAttrs.push_back(std::make_pair(
+        AttributeList::FirstArgIndex + 2, Attribute::get(*C, Attribute::ZExt)));
     MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
-        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
-        IRB.getInt8PtrTy(), IRB.getInt32Ty());
+        FunctionName, AttributeList::get(*C, MaybeStoreOriginFnAttrs),
+        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt8PtrTy(),
+        IRB.getInt32Ty());
   }
 
   MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
@@ -924,6 +949,9 @@
       case Triple::ppc64le:
         MapParams = Linux_PowerPC_MemoryMapParams.bits64;
         break;
+      case Triple::systemz:
+        MapParams = Linux_S390_MemoryMapParams.bits64;
+        break;
       case Triple::aarch64:
       case Triple::aarch64_be:
         MapParams = Linux_ARM_MemoryMapParams.bits64;
@@ -4600,6 +4628,251 @@
   }
 };
 
+/// SystemZ-specific implementation of VarArgHelper.
+struct VarArgSystemZHelper : public VarArgHelper {
+  static const unsigned SystemZGpOffset = 16;
+  static const unsigned SystemZGpEndOffset = 56;
+  static const unsigned SystemZFpOffset = 128;
+  static const unsigned SystemZFpEndOffset = 160;
+  static const unsigned SystemZRegSaveAreaSize = 160;
+  static const unsigned SystemZOverflowOffset = 160;
+  static const unsigned SystemZVAListTagSize = 32;
+  static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
+  static const unsigned SystemZRegSaveAreaPtrOffset = 24;
+
+  Function &F;
+  MemorySanitizer &MS;
+  MemorySanitizerVisitor &MSV;
+  Value *VAArgTLSCopy = nullptr;
+  Value *VAArgTLSOriginCopy = nullptr;
+  Value *VAArgOverflowSize = nullptr;
+
+  SmallVector<CallInst *, 16> VAStartInstrumentationList;
+
+  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
+
+  VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
+                      MemorySanitizerVisitor &MSV)
+      : F(F), MS(MS), MSV(MSV) {}
+
+  ArgKind classifyArgument(Value *arg) {
+    Type *T = arg->getType();
+    // T is a SystemZABIInfo output, and there are only a few possibilities
+    // of what it can be. In particular, single element structs and large
+    // types have already been taken care of.
+    if (T->isFloatingPointTy())
+      return AK_FloatingPoint;
+    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
+      return AK_GeneralPurpose;
+    if (T->isPointerTy())
+      return AK_GeneralPurpose;
+    return AK_Memory;
+  }
+
+  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
+    unsigned GpOffset = SystemZGpOffset;
+    unsigned FpOffset = SystemZFpOffset;
+    unsigned OverflowOffset = SystemZOverflowOffset;
+    const DataLayout &DL = F.getParent()->getDataLayout();
+    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
+         ArgIt != End; ++ArgIt) {
+      Value *A = *ArgIt;
+      unsigned ArgNo = CS.getArgumentNo(ArgIt);
+      bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
+      // SystemZABIInfo does not produce ByVal parameters.
+      assert(!CS.paramHasAttr(ArgNo, Attribute::ByVal));
+      ArgKind AK = classifyArgument(A);
+      if (AK == AK_GeneralPurpose && GpOffset >= SystemZGpEndOffset)
+        AK = AK_Memory;
+      if (AK == AK_FloatingPoint && FpOffset >= SystemZFpEndOffset)
+        AK = AK_Memory;
+      Value *ShadowBase = nullptr, *OriginBase = nullptr;
+      switch (AK) {
+      case AK_GeneralPurpose: {
+        // Always keep track of GpOffset, but store shadow only for varargs.
+        uint64_t ArgSize = 8;
+        if (GpOffset + ArgSize <= kParamTLSSize) {
+          if (!IsFixed) {
+            uint64_t ArgAllocSize = DL.getTypeAllocSize(A->getType());
+            ShadowBase = getShadowPtrForVAArgument(
+                A->getType(), IRB, GpOffset + (ArgSize - ArgAllocSize));
+            if (MS.TrackOrigins)
+              OriginBase = getOriginPtrForVAArgument(
+                  A->getType(), IRB, GpOffset + (ArgSize - ArgAllocSize));
+          }
+          GpOffset += ArgSize;
+        } else {
+          GpOffset = kParamTLSSize;
+        }
+        break;
+      }
+      case AK_FloatingPoint: {
+        // Always keep track of FpOffset, but store shadow only for varargs.
+        uint64_t ArgSize = 8;
+        if (FpOffset + ArgSize <= kParamTLSSize) {
+          if (!IsFixed) {
+            uint64_t ArgAllocSize = DL.getTypeAllocSize(A->getType());
+            ShadowBase = getShadowPtrForVAArgument(
+                A->getType(), IRB, FpOffset + (ArgSize - ArgAllocSize));
+            if (MS.TrackOrigins)
+              OriginBase = getOriginPtrForVAArgument(
+                  A->getType(), IRB, FpOffset + (ArgSize - ArgAllocSize));
+          }
+          FpOffset += ArgSize;
+        } else {
+          FpOffset = kParamTLSSize;
+        }
+        break;
+      }
+      case AK_Memory: {
+        // Keep track of OverflowOffset and store shadow only for varargs.
+        uint64_t ArgAllocSize = DL.getTypeAllocSize(A->getType());
+        uint64_t ArgSize = alignTo(ArgAllocSize, 8);
+        if (!IsFixed) {
+          if (OverflowOffset + ArgSize <= kParamTLSSize) {
+            ShadowBase = getShadowPtrForVAArgument(
+                A->getType(), IRB, OverflowOffset + (ArgSize - ArgAllocSize));
+            if (MS.TrackOrigins)
+              OriginBase = getOriginPtrForVAArgument(
+                  A->getType(), IRB, OverflowOffset + (ArgSize - ArgAllocSize));
+            OverflowOffset += ArgSize;
+          } else {
+            OverflowOffset = kParamTLSSize;
+          }
+        }
+        break;
+      }
+      }
+      if (!ShadowBase)
+        continue;
+      Value *Shadow = MSV.getShadow(A);
+      IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
+      if (MS.TrackOrigins) {
+        Value *Origin = MSV.getOrigin(A);
+        unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
+        MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
+                        std::max(kShadowTLSAlignment, kMinOriginAlignment));
+      }
+    }
+    Constant *OverflowSize = ConstantInt::get(
+        IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
+    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
+  }
+
+  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
+                                   unsigned ArgOffset) {
+    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
+    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
+                              "_msarg_va_s");
+  }
+
+  Value *getOriginPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) {
+    Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
+    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
+                              "_msarg_va_o");
+  }
+
+  void unpoisonVAListTagForInst(IntrinsicInst &I) {
+    IRBuilder<> IRB(&I);
+    Value *VAListTag = I.getArgOperand(0);
+    Value *ShadowPtr, *OriginPtr;
+    const Align Alignment = Align(8);
+    std::tie(ShadowPtr, OriginPtr) =
+        MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
+                               /*isStore*/ true);
+    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+                     SystemZVAListTagSize, Alignment, false);
+  }
+
+  void visitVAStartInst(VAStartInst &I) override {
+    VAStartInstrumentationList.push_back(&I);
+    unpoisonVAListTagForInst(I);
+  }
+
+  void visitVACopyInst(VACopyInst &I) override { unpoisonVAListTagForInst(I); }
+
+  void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
+    Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
+    Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
+        IRB.CreateAdd(
+            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
+            ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
+        PointerType::get(RegSaveAreaPtrTy, 0));
+    Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
+    Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
+    const Align Alignment = Align(8);
+    std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
+        MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
+                               /*isStore*/ true);
+    // TODO(iii): copy only fragments filled by visitCallSite()
+    IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
+                     SystemZRegSaveAreaSize);
+    if (MS.TrackOrigins)
+      IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
+                       Alignment, SystemZRegSaveAreaSize);
+  }
+
+  void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
+    Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
+    Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
+        IRB.CreateAdd(
+            IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
+            ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
+        PointerType::get(OverflowArgAreaPtrTy, 0));
+    Value *OverflowArgAreaPtr =
+        IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
+    Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
+    const Align Alignment = Align(8);
+    std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
+        MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
+                               Alignment, /*isStore*/ true);
+    Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
+                                           SystemZOverflowOffset);
+    IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
+                     VAArgOverflowSize);
+    if (MS.TrackOrigins) {
+      SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
+                                      SystemZOverflowOffset);
+      IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
+                       VAArgOverflowSize);
+    }
+  }
+
+  void finalizeInstrumentation() override {
+    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
+           "finalizeInstrumentation called twice");
+    if (!VAStartInstrumentationList.empty()) {
+      // If there is a va_start in this function, make a backup copy of
+      // va_arg_tls somewhere in the function entry block.
+      IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
+      VAArgOverflowSize =
+          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
+      Value *CopySize =
+          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
+                        VAArgOverflowSize);
+      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
+      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
+      if (MS.TrackOrigins) {
+        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
+        IRB.CreateMemCpy(VAArgTLSOriginCopy, Align(8), MS.VAArgOriginTLS,
+                         Align(8), CopySize);
+      }
+    }
+
+    // Instrument va_start.
+    // Copy va_list shadow from the backup copy of the TLS contents.
+    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
+      CallInst *OrigInst = VAStartInstrumentationList[i];
+      IRBuilder<> IRB(OrigInst->getNextNode());
+      Value *VAListTag = OrigInst->getArgOperand(0);
+      copyRegSaveArea(IRB, VAListTag);
+      copyOverflowArea(IRB, VAListTag);
+    }
+  }
+};
+
 /// A no-op implementation of VarArgHelper.
 struct VarArgNoOpHelper : public VarArgHelper {
   VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
@@ -4630,6 +4903,8 @@
   else if (TargetTriple.getArch() == Triple::ppc64 ||
            TargetTriple.getArch() == Triple::ppc64le)
     return new VarArgPowerPC64Helper(Func, Msan, Visitor);
+  else if (TargetTriple.getArch() == Triple::systemz)
+    return new VarArgSystemZHelper(Func, Msan, Visitor);
   else
     return new VarArgNoOpHelper(Func, Msan, Visitor);
 }
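
For reference, the layout that VarArgSystemZHelper mirrors: bytes 16..56 of
the register save area hold the GPR arguments r2-r6, bytes 128..160 hold the
FPR arguments f0/f2/f4/f6, and the overflow argument area starts at offset
160 (SystemZOverflowOffset). A plain C sketch of a variadic call that
exercises both regions (illustrative only, not part of the patch):

    #include <stdarg.h>
    #include <stdio.h>

    // On s390x the first five integer arguments travel in r2-r6 and the
    // first four double arguments in f0/f2/f4/f6; later varargs spill to
    // the overflow area. MSan copies vararg shadow for exactly these
    // regions in copyRegSaveArea() and copyOverflowArea().
    static long sum(int n, ...) {
      va_list ap;
      va_start(ap, n); // MSan unpoisons the 32-byte va_list tag here
      long total = 0;
      for (int i = 0; i < n; ++i)
        total += va_arg(ap, long);
      va_end(ap);
      return total;
    }

    int main(void) {
      // Six long varargs: four arrive in r3-r6 (r2 holds n), the last
      // two via the overflow argument area.
      printf("%ld\n", sum(6, 1L, 2L, 3L, 4L, 5L, 6L));
      return 0;
    }
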