Index: compiler-rt/lib/asan/asan_allocator.h
===================================================================
--- compiler-rt/lib/asan/asan_allocator.h
+++ compiler-rt/lib/asan/asan_allocator.h
@@ -133,11 +133,15 @@
 const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
 typedef VeryCompactSizeClassMap SizeClassMap;
 # elif defined(__aarch64__)
-// AArch64/SANITIZER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
+// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
 // so no need to different values for different VMA.
 const uptr kAllocatorSpace = 0x10000000000ULL;
 const uptr kAllocatorSize = 0x10000000000ULL;  // 3T.
 typedef DefaultSizeClassMap SizeClassMap;
+# elif defined(__sparc__)
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
+typedef DefaultSizeClassMap SizeClassMap;
 # elif SANITIZER_WINDOWS
 const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x8000000000ULL;  // 500G
Index: compiler-rt/lib/asan/asan_globals.cc
===================================================================
--- compiler-rt/lib/asan/asan_globals.cc
+++ compiler-rt/lib/asan/asan_globals.cc
@@ -115,7 +115,11 @@
     if (flags()->report_globals >= 2)
       ReportGlobal(g, "Search");
     if (IsAddressNearGlobal(addr, g)) {
+#if defined(__GNUC__) && defined(__sparc__)
+      internal_memcpy (&globals[res], &g, sizeof (Global));
+#else
       globals[res] = g;
+#endif
       if (reg_sites)
         reg_sites[res] = FindRegistrationSite(&g);
       res++;
Index: compiler-rt/lib/asan/asan_mapping.h
===================================================================
--- compiler-rt/lib/asan/asan_mapping.h
+++ compiler-rt/lib/asan/asan_mapping.h
@@ -100,6 +100,13 @@
 // || `[0x10000000000000, 0x11ffffffffffff]` || LowShadow  ||
 // || `[0x00000000000000, 0x0fffffffffffff]` || LowMem     ||
 //
+// Default Linux/SPARC64 (52-bit VMA) mapping:
+// || `[0x8000000000000, 0xfffffffffffff]` || HighMem    ||
+// || `[0x1080000000000, 0x207ffffffffff]` || HighShadow ||
+// || `[0x0090000000000, 0x107ffffffffff]` || ShadowGap  ||
+// || `[0x0080000000000, 0x008ffffffffff]` || LowShadow  ||
+// || `[0x0000000000000, 0x007ffffffffff]` || LowMem     ||
+//
 // Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
 // || `[0x500000000000, 0x7fffffffffff]` || HighMem    ||
 // || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
@@ -162,6 +169,7 @@
 static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
 static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
 static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
+static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43;  // 0x80000000000
 static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
 static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46;  // 0x400000000000
 static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
@@ -224,6 +232,8 @@
 #  define SHADOW_OFFSET kDefaultShadowOffset64
 # elif defined(__mips64)
 #  define SHADOW_OFFSET kMIPS64_ShadowOffset64
+# elif defined(__sparc__)
+#  define SHADOW_OFFSET kSPARC64_ShadowOffset64
 # elif SANITIZER_WINDOWS64
 #  define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
 # else
@@ -270,6 +280,8 @@
 
 #if SANITIZER_MYRIAD2
 #include "asan_mapping_myriad.h"
+#elif defined(__sparc__) && SANITIZER_WORDSIZE == 64
+#include "asan_mapping_sparc64.h"
 #else
 #define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
 
Index: compiler-rt/lib/asan/asan_mapping_sparc64.h
===================================================================
--- /dev/null
+++ compiler-rt/lib/asan/asan_mapping_sparc64.h
@@ -0,0 +1,100 @@
+//===-- asan_mapping_sparc64.h -----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// SPARC64-specific definitions for ASan memory mapping.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_MAPPING_SPARC64_H
+#define ASAN_MAPPING_SPARC64_H
+
+// This is tailored to the 52-bit VM layout on SPARC-T4 and later.
+// The VM space is split into two 51-bit halves at both ends: the low part
+// has all the bits above the 51st cleared, while the high part has them set.
+// 0xfff8000000000000 - 0xffffffffffffffff
+// 0x0000000000000000 - 0x0007ffffffffffff
+
+#define VMA_BITS 52
+#define HIGH_BITS (64 - VMA_BITS)
+
+// The idea is to chop the high bits before doing the scaling, so the two
+// parts become contiguous again and the usual scheme can be applied.
+
+#define MEM_TO_SHADOW(mem) \
+  ((((mem) << HIGH_BITS) >> (HIGH_BITS + (SHADOW_SCALE))) + (SHADOW_OFFSET))
+
+#define kLowMemBeg 0
+#define kLowMemEnd (SHADOW_OFFSET - 1)
+
+#define kLowShadowBeg SHADOW_OFFSET
+#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
+
+// But of course there is the huge hole between the high shadow memory,
+// which is in the low part, and the beginning of the high part.
+
+#define kHighMemBeg (-(1ULL << (VMA_BITS - 1)))
+
+#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
+#define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd)
+
+#define kMidShadowBeg 0
+#define kMidShadowEnd 0
+
+// With the zero shadow base we cannot actually map pages starting from 0.
+// This constant is somewhat arbitrary.
+#define kZeroBaseShadowStart 0
+#define kZeroBaseMaxShadowStart (1 << 18)
+
+#define kShadowGapBeg (kLowShadowEnd + 1)
+#define kShadowGapEnd (kHighShadowBeg - 1)
+
+#define kShadowGap2Beg 0
+#define kShadowGap2End 0
+
+#define kShadowGap3Beg 0
+#define kShadowGap3End 0
+
+namespace __asan {
+
+static inline bool AddrIsInLowMem(uptr a) {
+  PROFILE_ASAN_MAPPING();
+  return a <= kLowMemEnd;
+}
+
+static inline bool AddrIsInLowShadow(uptr a) {
+  PROFILE_ASAN_MAPPING();
+  return a >= kLowShadowBeg && a <= kLowShadowEnd;
+}
+
+static inline bool AddrIsInMidMem(uptr a) {
+  PROFILE_ASAN_MAPPING();
+  return false;
+}
+
+static inline bool AddrIsInMidShadow(uptr a) {
+  PROFILE_ASAN_MAPPING();
+  return false;
+}
+
+static inline bool AddrIsInHighMem(uptr a) {
+  PROFILE_ASAN_MAPPING();
+  return kHighMemBeg && a >= kHighMemBeg && a <= kHighMemEnd;
+}
+
+static inline bool AddrIsInHighShadow(uptr a) {
+  PROFILE_ASAN_MAPPING();
+  return kHighMemBeg && a >= kHighShadowBeg && a <= kHighShadowEnd;
+}
+
+static inline bool AddrIsInShadowGap(uptr a) {
+  PROFILE_ASAN_MAPPING();
+  return a >= kShadowGapBeg && a <= kShadowGapEnd;
+}
+
+} // namespace __asan
+
+#endif // ASAN_MAPPING_SPARC64_H
Index: compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -39,7 +39,9 @@
   using AddressSpaceView = LocalAddressSpaceView;
   using ByteMap = __sanitizer::ByteMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
-  static const uptr kFlags = 0;
+  static const uptr kFlags =
+      SANITIZER_SIGN_EXTENDED_ADDRESSES ?
+      SizeClassAllocator32FlagMasks::kSignExtendedAddresses : 0;
 };
 
 typedef SizeClassAllocator32<AP32> PrimaryInternalAllocator;
Index: compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -41,6 +41,7 @@
   enum {
     kRandomShuffleChunks = 1,
     kUseSeparateSizeClassForBatch = 2,
+    kSignExtendedAddresses = 4,
   };
 };
 
@@ -64,6 +65,9 @@
   static const bool kRandomShuffleChunks = Params::kFlags &
       SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
   static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
+  static const bool kSignExtendedAddresses = Params::kFlags &
+      SizeClassAllocator32FlagMasks::kSignExtendedAddresses;
+  COMPILER_CHECK(!(kSignExtendedAddresses && (kSpaceSize & (kSpaceSize - 1))));
   struct TransferBatch {
     static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
@@ -181,6 +185,8 @@
 
   bool PointerIsMine(const void *p) {
     uptr mem = reinterpret_cast<uptr>(p);
+    if (kSignExtendedAddresses)
+      mem &= (kSpaceSize - 1);
     if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
       return false;
     return GetSizeClass(p) != 0;
@@ -273,6 +279,8 @@
   COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);
 
   uptr ComputeRegionId(uptr mem) {
+    if (kSignExtendedAddresses)
+      mem &= (kSpaceSize - 1);
     const uptr res = mem >> kRegionSizeLog;
     CHECK_LT(res, kNumPossibleRegions);
     return res;
Index: compiler-rt/lib/sanitizer_common/sanitizer_linux.cc
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_linux.cc
+++ compiler-rt/lib/sanitizer_common/sanitizer_linux.cc
@@ -1054,6 +1054,8 @@
   return (1ULL << 40) - 1;  // 0x000000ffffffffffUL;
 # elif defined(__s390x__)
   return (1ULL << 53) - 1;  // 0x001fffffffffffffUL;
+# elif defined(__sparc__)
+  return ~(uptr)0;
 # else
   return (1ULL << 47) - 1;  // 0x00007fffffffffffUL;
 # endif
@@ -1845,10 +1847,20 @@
   u64 esr;
   if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN;
   return esr & ESR_ELx_WNR ? WRITE : READ;
-#elif SANITIZER_SOLARIS && defined(__sparc__)
+#elif defined(__sparc__)
   // Decode the instruction to determine the access type.
   // From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype).
+# if SANITIZER_SOLARIS
   uptr pc = ucontext->uc_mcontext.gregs[REG_PC];
+# else
+  // Historical BSDism here.
+  struct sigcontext *scontext = (struct sigcontext *)context;
+# if defined(__arch64__)
+  uptr pc = scontext->sigc_regs.tpc;
+# else
+  uptr pc = scontext->si_regs.pc;
+# endif
+# endif
   u32 instr = *(u32 *)pc;
   return (instr >> 21) & 1 ? WRITE: READ;
 #else
@@ -1939,28 +1951,27 @@
   // pointer, but GCC always uses r31 when we need a frame pointer.
   *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
 #elif defined(__sparc__)
-  ucontext_t *ucontext = (ucontext_t*)context;
-  uptr *stk_ptr;
-# if defined(__sparcv9) || defined (__arch64__)
-# ifndef MC_PC
-# define MC_PC REG_PC
-# endif
-# ifndef MC_O6
-# define MC_O6 REG_O6
+# if defined(__arch64__) || defined(__sparcv9)
+# define STACK_BIAS 2047
+# else
+# define STACK_BIAS 0
 # endif
 # if SANITIZER_SOLARIS
-# define mc_gregs gregs
-# endif
-  *pc = ucontext->uc_mcontext.mc_gregs[MC_PC];
-  *sp = ucontext->uc_mcontext.mc_gregs[MC_O6];
-  stk_ptr = (uptr *) (*sp + 2047);
-  *bp = stk_ptr[15];
-# else
+  ucontext_t *ucontext = (ucontext_t*)context;
   *pc = ucontext->uc_mcontext.gregs[REG_PC];
-  *sp = ucontext->uc_mcontext.gregs[REG_O6];
-  stk_ptr = (uptr *) *sp;
-  *bp = stk_ptr[15];
+  *sp = ucontext->uc_mcontext.gregs[REG_O6] + STACK_BIAS;
+# else
+  // Historical BSDism here.
+  struct sigcontext *scontext = (struct sigcontext *)context;
+# if defined(__arch64__)
+  *pc = scontext->sigc_regs.tpc;
+  *sp = scontext->sigc_regs.u_regs[14] + STACK_BIAS;
+# else
+  *pc = scontext->si_regs.pc;
+  *sp = scontext->si_regs.u_regs[14];
+# endif
+# endif
+  *bp = (uptr) ((uhwptr *) *sp)[14] + STACK_BIAS;
 #elif defined(__mips__)
   ucontext_t *ucontext = (ucontext_t*)context;
   *pc = ucontext->uc_mcontext.pc;
Index: compiler-rt/lib/sanitizer_common/sanitizer_platform.h
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_platform.h
+++ compiler-rt/lib/sanitizer_common/sanitizer_platform.h
@@ -240,10 +240,21 @@
 # else
 #  define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
 # endif
+#elif defined(__sparc__)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
 #else
 # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
 #endif
 
+// Whether the addresses are sign-extended from the VMA range to the word.
+// The SPARC64 Linux port implements this to split the VMA space into two
+// non-contiguous halves with a huge hole in the middle.
+#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
+# define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
+#else
+# define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
+#endif
+
 // The AArch64 linux port uses the canonical syscall set as mandated by
 // the upstream linux community for all new ports. Other ports may still
 // use legacy syscalls.
Index: compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
+++ compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
@@ -18,7 +18,7 @@
 
 static const u32 kStackTraceMax = 256;
 
-#if defined(__sparc__) || (SANITIZER_LINUX && defined(__mips__))
+#if SANITIZER_LINUX && defined(__mips__)
 # define SANITIZER_CAN_FAST_UNWIND 0
 #elif SANITIZER_WINDOWS
 # define SANITIZER_CAN_FAST_UNWIND 0
Index: compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cc
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cc
+++ compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cc
@@ -17,10 +17,9 @@
 namespace __sanitizer {
 
 uptr StackTrace::GetNextInstructionPc(uptr pc) {
-#if defined(__mips__)
+#if defined(__sparc__) || defined(__mips__)
   return pc + 8;
-#elif defined(__powerpc__) || defined(__sparc__) || defined(__arm__) || \
-    defined(__aarch64__)
+#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__)
   return pc + 4;
 #else
   return pc + 1;
Index: compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cc
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cc
+++ compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cc
@@ -12,9 +12,13 @@
 // Implemention of fast stack unwinding for Sparc.
 //===----------------------------------------------------------------------===//
 
-// This file is ported to Sparc v8, but it should be easy to port to
-// Sparc v9.
-#if defined(__sparcv8__) || defined(__sparcv8) || defined(__sparc_v8__)
+#if defined(__sparc__)
+
+#if defined(__arch64__) || defined(__sparcv9)
+#define STACK_BIAS 2047
+#else
+#define STACK_BIAS 0
+#endif
 
 #include "sanitizer_common.h"
 #include "sanitizer_stacktrace.h"
@@ -25,34 +29,59 @@
                                     uptr stack_bottom, u32 max_depth) {
   const uptr kPageSize = GetPageSizeCached();
   CHECK_GE(max_depth, 2);
+#if defined(__GNUC__)
+  // __builtin_return_address returns the address of the call instruction
+  // on the SPARC and not the return address, so we need to compensate.
+  trace_buffer[0] = GetNextInstructionPc(pc);
+#else
   trace_buffer[0] = pc;
+#endif
   size = 1;
   if (stack_top < 4096) return;  // Sanity check for stack top.
   // Flush register windows to memory
+#if defined(__sparc_v9__) || defined(__sparcv9__) || defined(__sparcv9)
+  asm volatile("flushw" ::: "memory");
+#else
   asm volatile("ta 3" ::: "memory");
-  uhwptr *frame = (uhwptr*)bp;
+#endif
+  // On the SPARC, the return address is not in the frame, it is in a
+  // register.  There is no way to access it off of the current frame
+  // pointer, but it can be accessed off the previous frame pointer by
+  // reading the value from the register window save area.
+  uptr prev_bp = GET_CURRENT_FRAME();
+  uptr next_bp = prev_bp;
+  unsigned int i = 0;
+  while (next_bp != bp &&
+         IsAligned(next_bp, sizeof(uhwptr)) &&
+         i++ < 8) {
+    prev_bp = next_bp;
+    next_bp = (uptr) ((uhwptr *) next_bp)[14] + STACK_BIAS;
+  }
+  if (next_bp == bp)
+    bp = prev_bp;
   // Lowest possible address that makes sense as the next frame pointer.
   // Goes up as we walk the stack.
   uptr bottom = stack_bottom;
   // Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
-  while (IsValidFrame((uptr)frame, stack_top, bottom) &&
-         IsAligned((uptr)frame, sizeof(*frame)) &&
+  while (IsValidFrame(bp, stack_top, bottom) &&
+         IsAligned(bp, sizeof(uhwptr)) &&
          size < max_depth) {
-    uhwptr pc1 = frame[15];
+    uhwptr pc1 = ((uhwptr *)bp)[15];
     // Let's assume that any pointer in the 0th page is invalid and
     // stop unwinding here.  If we're adding support for a platform
     // where this isn't true, we need to reconsider this check.
     if (pc1 < kPageSize)
       break;
     if (pc1 != pc) {
-      trace_buffer[size++] = (uptr) pc1;
+      // %o7 contains the address of the call instruction and not the
+      // return address, so we need to compensate.
+      trace_buffer[size++] = GetNextInstructionPc((uptr) pc1);
     }
-    bottom = (uptr)frame;
-    frame = (uhwptr*)frame[14];
+    bottom = bp;
+    bp = (uptr) ((uhwptr *) bp)[14] + STACK_BIAS;
   }
 }
 
 }  // namespace __sanitizer
 
-#endif  // !defined(__sparcv8__) && !defined(__sparcv8) &&
-        // !defined(__sparc_v8__)
+#endif  // !defined(__sparc__)
Index: compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cc
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cc
+++ compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cc
@@ -135,7 +135,13 @@
   if (to_pop == 0 && size > 1)
     to_pop = 1;
   PopStackFrames(to_pop);
+#if defined(__GNUC__) && defined(__sparc__)
+  // __builtin_return_address returns the address of the call instruction
+  // on the SPARC and not the return address, so we need to compensate.
+  trace_buffer[0] = GetNextInstructionPc(pc);
+#else
   trace_buffer[0] = pc;
+#endif
 }
 
 void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,