diff --git a/compiler-rt/cmake/config-ix.cmake b/compiler-rt/cmake/config-ix.cmake
--- a/compiler-rt/cmake/config-ix.cmake
+++ b/compiler-rt/cmake/config-ix.cmake
@@ -324,7 +324,7 @@
 else()
   set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64} ${ARM32} ${PPC64} ${S390X} ${RISCV64})
 endif()
-set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64} ${S390X})
+set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64} ${S390X} ${RISCV64})
 set(ALL_HWASAN_SUPPORTED_ARCH ${X86_64} ${ARM64})
 set(ALL_MEMPROF_SUPPORTED_ARCH ${X86_64})
 set(ALL_PROFILE_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${PPC32} ${PPC64}
diff --git a/compiler-rt/lib/msan/msan.h b/compiler-rt/lib/msan/msan.h
--- a/compiler-rt/lib/msan/msan.h
+++ b/compiler-rt/lib/msan/msan.h
@@ -158,6 +158,35 @@
 # define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0x6000000000ULL)
 # define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x1000000000ULL)
 
+#elif SANITIZER_LINUX && SANITIZER_RISCV64
+
+// RISCV64 mapping:
+// - 0x0000000000-0x0200000000: the program's own segments
+// - 0x2a00000000-0x3000000000: PIE program segments
+// - 0x3a00000000-0x4000000000: library segments
+const MappingDesc kMemoryLayout[] = {
+    {0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "app-1"},
+    {0x000200000000ULL, 0x000a00000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x000a00000000ULL, 0x001000000000ULL, MappingDesc::SHADOW, "shadow-3"},
+    {0x001000000000ULL, 0x001600000000ULL, MappingDesc::ORIGIN, "origin-3"},
+    {0x001600000000ULL, 0x001a00000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x001a00000000ULL, 0x002000000000ULL, MappingDesc::SHADOW, "shadow-2"},
+    {0x002000000000ULL, 0x002600000000ULL, MappingDesc::ORIGIN, "origin-2"},
+    {0x002600000000ULL, 0x002a00000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x002a00000000ULL, 0x003000000000ULL, MappingDesc::APP, "app-2"},
+    {0x003000000000ULL, 0x003200000000ULL, MappingDesc::SHADOW, "shadow-1"},
+    {0x003200000000ULL, 0x003600000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x003600000000ULL, 0x003800000000ULL, MappingDesc::ORIGIN, "origin-1"},
+    {0x003800000000ULL, 0x003a00000000ULL, MappingDesc::INVALID, "invalid"},
+    {0x003a00000000ULL, 0x004000000000ULL, MappingDesc::APP, "app-3"},
+};
+
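+// The XOR mask below maps app-1 onto shadow-1, app-2 onto shadow-2 and app-3
+// onto shadow-3, and adding 0x0600000000 to a shadow address lands in the
+// matching origin-N range of the table above.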
+#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x3000000000ULL)
+#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x0600000000ULL)
+
 #elif SANITIZER_LINUX && SANITIZER_PPC64
 const MappingDesc kMemoryLayout[] = {
     {0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "low memory"},
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -110,6 +110,20 @@
 #elif defined(__aarch64__)
 static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G
 
+struct AP32 {
+  static const uptr kSpaceBeg = 0;
+  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+  static const uptr kMetadataSize = sizeof(Metadata);
+  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+  static const uptr kRegionSizeLog = 20;
+  using AddressSpaceView = LocalAddressSpaceView;
+  typedef MsanMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#elif SANITIZER_RISCV64
+static const uptr kMaxAllowedMallocSize = 2UL << 30;
+
 struct AP32 {
   static const uptr kSpaceBeg = 0;
   static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
diff --git a/compiler-rt/test/msan/allocator_mapping.cpp b/compiler-rt/test/msan/allocator_mapping.cpp
--- a/compiler-rt/test/msan/allocator_mapping.cpp
+++ b/compiler-rt/test/msan/allocator_mapping.cpp
@@ -8,7 +8,7 @@
 // This test only makes sense for the 64-bit allocator. The 32-bit allocator
 // does not have a fixed mapping. Exclude platforms that use the 32-bit
 // allocator.
-// UNSUPPORTED: target-is-mips64,target-is-mips64el,aarch64
+// UNSUPPORTED: target-is-mips64,target-is-mips64el,aarch64,riscv64
 
 #include
 #include
diff --git a/compiler-rt/test/msan/mmap.cpp b/compiler-rt/test/msan/mmap.cpp
--- a/compiler-rt/test/msan/mmap.cpp
+++ b/compiler-rt/test/msan/mmap.cpp
@@ -27,6 +27,12 @@
 #elif defined(__s390x__)
   return addr < 0x040000000000ULL ||
          (addr >= 0x440000000000ULL && addr < 0x500000000000);
+#elif defined(__riscv) && __riscv_xlen == 64
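+  // The three ranges below are the app-1, app-2 and app-3 ranges of the
+  // riscv64 layout in msan.h.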
+  return (addr < 0x000200000000ULL) ||
+         (addr >= 0x002a00000000ULL && addr < 0x003000000000ULL) ||
+         (addr >= 0x003a00000000ULL && addr < 0x004000000000ULL);
 #elif defined(__aarch64__)
 
 struct AddrMapping {
@@ -62,6 +68,8 @@
   // Large enough to quickly exhaust the entire address space.
 #if defined(__mips64) || defined(__aarch64__)
   const size_t kMapSize = 0x100000000ULL;
+#elif defined(__riscv) && __riscv_xlen == 64
+  const size_t kMapSize = 0x80000000ULL;
 #else
   const size_t kMapSize = 0x1000000000ULL;
 #endif
diff --git a/compiler-rt/test/msan/mmap_below_shadow.cpp b/compiler-rt/test/msan/mmap_below_shadow.cpp
--- a/compiler-rt/test/msan/mmap_below_shadow.cpp
+++ b/compiler-rt/test/msan/mmap_below_shadow.cpp
@@ -33,6 +33,9 @@
 #elif defined (__aarch64__)
   uintptr_t hint = 0x4f0000000ULL;
   const uintptr_t app_start = 0x7000000000ULL;
+#elif defined(__riscv) && __riscv_xlen == 64
+  uintptr_t hint = 0x000900000000ULL;
+  const uintptr_t app_start = 0x000000000000ULL;
 #endif
   uintptr_t p = (uintptr_t)mmap(
       (void *)hint, 4096, PROT_WRITE,
diff --git a/compiler-rt/test/msan/param_tls_limit.cpp b/compiler-rt/test/msan/param_tls_limit.cpp
--- a/compiler-rt/test/msan/param_tls_limit.cpp
+++ b/compiler-rt/test/msan/param_tls_limit.cpp
@@ -8,12 +8,19 @@
 // AArch64 fails with:
 // void f801(S<801>): Assertion `__msan_test_shadow(&s, sizeof(s)) == -1' failed
 // XFAIL: aarch64
+//
 // When passing huge structs by value, SystemZ uses pointers, therefore this
 // test in its present form is unfortunately not applicable.
 // ABI says: "A struct or union of any other size <snip>. Replace such an
 // argument by a pointer to the object, or to a copy where necessary to enforce
 // call-by-value semantics."
 // XFAIL: s390x
+//
+// RISC-V fails because Clang does not annotate struct parameters with
+// `byval`: the structs are passed by reference and never trigger the TLS
+// overflow this test relies on. An equivalent test with a large number of
+// scalar parameters passes but is extremely verbose.
+// XFAIL: riscv
 
 #include
 #include
diff --git a/compiler-rt/test/msan/strlen_of_shadow.cpp b/compiler-rt/test/msan/strlen_of_shadow.cpp
--- a/compiler-rt/test/msan/strlen_of_shadow.cpp
+++ b/compiler-rt/test/msan/strlen_of_shadow.cpp
@@ -25,6 +25,8 @@
   return (char *)(((uintptr_t)p & ~0xC00000000000ULL) + 0x080000000000ULL);
 #elif defined(__aarch64__)
   return (char *)((uintptr_t)p ^ 0x6000000000ULL);
+#elif defined(__riscv) && __riscv_xlen == 64
+  return (char *)((uintptr_t)p ^ 0x3000000000ULL);
 #endif
 }
 
diff --git a/compiler-rt/test/msan/vararg.cpp b/compiler-rt/test/msan/vararg.cpp
--- a/compiler-rt/test/msan/vararg.cpp
+++ b/compiler-rt/test/msan/vararg.cpp
@@ -16,10 +16,11 @@
 
 // Check that shadow and origin are passed through va_args.
 
-// Copying origins on AArch64, MIPS and PowerPC isn't supported yet.
+// Copying origins on AArch64, MIPS, PowerPC and RISC-V isn't supported yet.
 // XFAIL: aarch64
 // XFAIL: mips
 // XFAIL: powerpc64
+// XFAIL: riscv64
 
 #include
 #include
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -416,6 +416,14 @@
   0x01000000000,   // OriginBase
 };
 
+// riscv64 Linux
+static const MemoryMapParams Linux_RISCV64_MemoryMapParams = {
+  0,               // AndMask (not used)
+  0x03000000000,   // XorMask
+  0,               // ShadowBase (not used)
+  0x00600000000,   // OriginBase
+};
+
 // i386 FreeBSD
 static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
   0x000180000000,  // AndMask
@@ -465,6 +473,11 @@
   &Linux_AArch64_MemoryMapParams,
 };
 
+static const PlatformMemoryMapParams Linux_RISCV_MemoryMapParams = {
+    nullptr,
+    &Linux_RISCV64_MemoryMapParams,
+};
+
 static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
   &FreeBSD_I386_MemoryMapParams,
   &FreeBSD_X86_64_MemoryMapParams,
@@ -505,6 +518,7 @@
   friend struct VarArgMIPS64Helper;
   friend struct VarArgAArch64Helper;
   friend struct VarArgPowerPC64Helper;
+  friend struct VarArgRISCV64Helper;
   friend struct VarArgSystemZHelper;
 
   void initializeModule(Module &M);
@@ -959,6 +973,9 @@
       case Triple::aarch64_be:
         MapParams = Linux_ARM_MemoryMapParams.bits64;
         break;
+      case Triple::riscv64:
+        MapParams = Linux_RISCV_MemoryMapParams.bits64;
+        break;
       default:
         report_fatal_error("unsupported architecture");
       }
@@ -5264,6 +5281,152 @@
   }
 };
 
+struct VarArgRISCV64Helper : public VarArgHelper {
+  Function &F;
+  MemorySanitizer &MS;
+  MemorySanitizerVisitor &MSV;
+  Value *VAArgTLSCopy = nullptr;
+  Value *VAArgSize = nullptr;
+  static constexpr int XSize = 8;  // XLEN in bytes.
+  static constexpr int NumArgRegs = 8;
+
+  SmallVector<CallInst *, 16> VAStartInstrumentationList;
+
+  VarArgRISCV64Helper(Function &F, MemorySanitizer &MS,
+                      MemorySanitizerVisitor &MSV)
+      : F(F), MS(MS), MSV(MSV) {}
+
+  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
+    const DataLayout &DL = F.getParent()->getDataLayout();
+    unsigned NumParams = CB.getFunctionType()->getNumParams();
+    unsigned VAArgOffset = 0;
+    unsigned VAArgSkew = 0;
+    int ArgRegister = 0;
+    bool IsSkewed = false;
+
+    // Depending on the fixed params, VAArgOffset zero may point to an address
+    // that is either (2*XLEN)-aligned or just XLEN-aligned; in other words, it
+    // may be XLEN-skewed. We need to account for that when computing the
+    // alignment of the varargs, so we first compute the skew (in bytes)
+    // introduced by the fixed params.
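+    // E.g. a single fixed 'int' argument occupies one XLEN slot, so the first
+    // variadic slot starts one XLEN slot past a 2*XLEN boundary.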
+    for (auto ArgIt = CB.arg_begin(), End = CB.arg_begin() + NumParams;
+         ArgIt != End; ++ArgIt) {
+      Value *A = *ArgIt;
+      Align ArgAlign = DL.getABITypeAlign(A->getType());
+      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
+      assert(ArgSize <= 2 * XSize && "Unexpected argument size");
+      if (ArgSize == 2 * XSize) {
+        if (ArgRegister == NumArgRegs - 1)
+          IsSkewed = true;
+        else if (ArgRegister >= NumArgRegs && ArgAlign >= Align(2 * XSize))
+          IsSkewed = false;
+      } else {
+        IsSkewed = !IsSkewed;
+      }
+      ArgRegister += alignTo(ArgSize, XSize) / XSize;
+    }
+    if (IsSkewed)
+      VAArgSkew = XSize;
+    for (auto ArgIt = CB.arg_begin() + NumParams, End = CB.arg_end();
+         ArgIt != End; ++ArgIt) {
+      Value *A = *ArgIt;
+      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
+      assert(ArgSize <= 2 * XSize && "Unexpected argument size");
+      Align ArgAlign = DL.getABITypeAlign(A->getType());
+      if (ArgAlign < Align(XSize))
+        ArgAlign = Align(XSize);
+      VAArgOffset = alignTo(VAArgOffset, ArgAlign, VAArgSkew);
+      Value *Base =
+          getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset, ArgSize);
+      VAArgOffset += alignTo(ArgSize, XSize);
+      if (Base)  // Base is null if this arg would overflow __msan_va_arg_tls.
+        IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
+    }
+    Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
+    // We reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating another
+    // class member; here it holds the total size of all varargs.
+    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
+  }
+
+  /// Compute the shadow address for a given va_arg.
+  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
+                                   unsigned ArgOffset, unsigned ArgSize) {
+    // Make sure we don't overflow __msan_va_arg_tls.
+    if (ArgOffset + ArgSize > kParamTLSSize)
+      return nullptr;
+    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
+    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
+                              "_msarg");
+  }
+
+  void visitVAStartInst(VAStartInst &I) override {
+    IRBuilder<> IRB(&I);
+    VAStartInstrumentationList.push_back(&I);
+    Value *VAListTag = I.getArgOperand(0);
+    Value *ShadowPtr, *OriginPtr;
+    const Align Alignment = Align(XSize);
+    std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
+        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
+    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+                     /* size */ XSize, Alignment, false);
+  }
+
+  void visitVACopyInst(VACopyInst &I) override {
+    IRBuilder<> IRB(&I);
+    VAStartInstrumentationList.push_back(&I);
+    Value *VAListTag = I.getArgOperand(0);
+    Value *ShadowPtr, *OriginPtr;
+    const Align Alignment = Align(XSize);
+    std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
+        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
+    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+                     /* size */ XSize, Alignment, false);
+  }
+
+  void finalizeInstrumentation() override {
+    assert(!VAArgSize && !VAArgTLSCopy &&
+           "finalizeInstrumentation called twice");
+    IRBuilder<> IRB(MSV.FnPrologueEnd);
+    VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
+    Value *CopySize =
+        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize);
+
+    if (!VAStartInstrumentationList.empty()) {
+      // If there is a va_start in this function, make a backup copy of
+      // va_arg_tls somewhere in the function entry block.
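+      // Do this at function entry: calls made later in this function may
+      // overwrite __msan_va_arg_tls with shadow for their own variadic
+      // arguments.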
+      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
+      IRB.CreateMemCpy(VAArgTLSCopy, Align(XSize), MS.VAArgTLS, Align(XSize),
+                       CopySize);
+    }
+
+    // Instrument va_start.
+    // Copy va_list shadow from the backup copy of the TLS contents.
+    for (size_t I = 0, N = VAStartInstrumentationList.size(); I < N; I++) {
+      CallInst *OrigInst = VAStartInstrumentationList[I];
+      IRBuilder<> IRB(OrigInst->getNextNode());
+      Value *VAListTag = OrigInst->getArgOperand(0);
+      Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
+      Value *RegSaveAreaPtrPtr =
+          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
+                             PointerType::get(RegSaveAreaPtrTy, 0));
+      Value *RegSaveAreaPtr =
+          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
+      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
+      const Align Alignment = Align(XSize);
+      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
+          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
+                                 Alignment, /*isStore*/ true);
+      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
+                       Alignment, CopySize);
+    }
+  }
+};
+
 /// A no-op implementation of VarArgHelper.
 struct VarArgNoOpHelper : public VarArgHelper {
   VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
@@ -5294,6 +5457,8 @@
   else if (TargetTriple.getArch() == Triple::ppc64 ||
            TargetTriple.getArch() == Triple::ppc64le)
     return new VarArgPowerPC64Helper(Func, Msan, Visitor);
+  else if (TargetTriple.getArch() == Triple::riscv64)
+    return new VarArgRISCV64Helper(Func, Msan, Visitor);
   else if (TargetTriple.getArch() == Triple::systemz)
     return new VarArgSystemZHelper(Func, Msan, Visitor);
   else