diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp --- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp @@ -154,9 +154,10 @@ for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) { const scudo::uptr Align = 1U << AlignLog; for (scudo::sptr Delta = -32; Delta <= 32; Delta++) { - if (static_cast<scudo::sptr>(1U << SizeLog) + Delta < 0) + if ((1LL << SizeLog) + Delta < 0) continue; - const scudo::uptr Size = (1U << SizeLog) + Delta; + const scudo::uptr Size = + static_cast<scudo::uptr>((1LL << SizeLog) + Delta); void *P = Allocator->allocate(Size, Origin, Align); EXPECT_NE(P, nullptr); EXPECT_TRUE(Allocator->isOwned(P)); @@ -333,7 +334,8 @@ const char Marker = 0xab; memset(P, Marker, ReallocSize); for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) { - const scudo::uptr NewSize = ReallocSize + Delta; + const scudo::uptr NewSize = + static_cast<scudo::uptr>(static_cast<scudo::sptr>(ReallocSize) + Delta); void *NewP = Allocator->reallocate(P, NewSize); EXPECT_EQ(NewP, P); for (scudo::uptr I = 0; I < ReallocSize - 32; I++) @@ -355,11 +357,13 @@ std::vector<void *> V; for (scudo::uptr I = 0; I < 64U; I++) V.push_back(Allocator->allocate( - rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin)); + static_cast<scudo::uptr>(std::rand()) % + (TypeParam::Primary::SizeClassMap::MaxSize / 2U), + Origin)); Allocator->disable(); Allocator->iterateOverChunks( 0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1), - [](uintptr_t Base, size_t Size, void *Arg) { + [](uintptr_t Base, UNUSED size_t Size, void *Arg) { std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg); void *P = reinterpret_cast<void *>(Base); EXPECT_NE(std::find(V->begin(), V->end(), P), V->end()); @@ -444,7 +448,9 @@ std::vector<void *> V; for (scudo::uptr I = 0; I < 64U; I++) V.push_back(Allocator->allocate( - rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin)); + static_cast<scudo::uptr>(std::rand()) % + 
(TypeParam::Primary::SizeClassMap::MaxSize / 2U), + Origin)); for (auto P : V) Allocator->deallocate(P, Origin); @@ -463,7 +469,9 @@ std::vector<void *> V; for (scudo::uptr I = 0; I < 64U; I++) V.push_back(Allocator->allocate( - rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin)); + static_cast<scudo::uptr>(std::rand()) % + (TypeParam::Primary::SizeClassMap::MaxSize / 2U), + Origin)); for (auto P : V) Allocator->deallocate(P, Origin); @@ -494,7 +502,7 @@ } std::vector<std::pair<void *, scudo::uptr>> V; for (scudo::uptr I = 0; I < 256U; I++) { - const scudo::uptr Size = std::rand() % 4096U; + const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % 4096U; void *P = Allocator->allocate(Size, Origin); // A region could have ran out of memory, resulting in a null P. if (P) @@ -727,17 +735,17 @@ // Regression test: make realloc-in-place happen at the very right end of a // mapped region. - constexpr int nPtrs = 10000; - for (int i = 1; i < 32; ++i) { + constexpr size_t nPtrs = 10000; + for (scudo::uptr i = 1; i < 32; ++i) { scudo::uptr Size = 16 * i - 1; std::vector<void *> Ptrs; - for (int i = 0; i < nPtrs; ++i) { + for (size_t i = 0; i < nPtrs; ++i) { void *P = Allocator->allocate(Size, Origin); P = Allocator->reallocate(P, Size + 1); Ptrs.push_back(P); } - for (int i = 0; i < nPtrs; ++i) + for (size_t i = 0; i < nPtrs; ++i) Allocator->deallocate(Ptrs[i], Origin); } } diff --git a/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp b/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp --- a/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp @@ -76,12 +76,16 @@ } TEST_F(MemtagTest, ExtractTag) { +// The test is already skipped on anything other than 64 bit. But +// compiling on 32 bit leads to warnings/errors, so skip compiling the test. +#if defined(__LP64__) uptr Tags = 0; // Try all value for the top byte and check the tags values are in the // expected range. 
for (u64 Top = 0; Top < 0x100; ++Top) Tags = Tags | (1u << extractTag(Addr | (Top << 56))); EXPECT_EQ(0xffffull, Tags); +#endif } TEST_F(MemtagDeathTest, AddFixedTag) { @@ -121,10 +125,14 @@ } TEST_F(MemtagTest, SelectRandomTagWithMask) { +// The test is already skipped on anything other than 64 bit. But +// compiling on 32 bit leads to warnings/errors, so skip compiling the test. +#if defined(__LP64__) for (uptr j = 0; j < 32; ++j) { for (uptr i = 0; i < 1000; ++i) EXPECT_NE(j, extractTag(selectRandomTag(Addr, 1ull << j))); } +#endif } TEST_F(MemtagDeathTest, SKIP_NO_DEBUG(LoadStoreTagUnaligned)) { @@ -158,6 +166,9 @@ } TEST_F(MemtagTest, StoreTags) { +// The test is already skipped on anything other than 64 bit. But +// compiling on 32 bit leads to warnings/errors, so skip compiling the test. +#if defined(__LP64__) const uptr MaxTaggedSize = 4 * archMemoryTagGranuleSize(); for (uptr Size = 0; Size <= MaxTaggedSize; ++Size) { uptr NoTagBegin = Addr + archMemoryTagGranuleSize(); @@ -186,6 +197,7 @@ // Reset tags without using StoreTags. 
MemMap.releasePagesToOS(Addr, BufferSize); } +#endif } } // namespace scudo diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp --- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp @@ -253,7 +253,8 @@ Cache.init(nullptr, Allocator.get()); std::vector<std::pair<scudo::uptr, void *>> V; for (scudo::uptr I = 0; I < 64U; I++) { - const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize; + const scudo::uptr Size = + static_cast<scudo::uptr>(std::rand()) % Primary::SizeClassMap::MaxSize; const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size); void *P = Cache.allocate(ClassId); V.push_back(std::make_pair(ClassId, P)); @@ -300,8 +301,8 @@ Cv.wait(Lock); } for (scudo::uptr I = 0; I < 256U; I++) { - const scudo::uptr Size = - std::rand() % Primary::SizeClassMap::MaxSize / 4; + const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % + Primary::SizeClassMap::MaxSize / 4; const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size); void *P = Cache.allocate(ClassId); diff --git a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp --- a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp @@ -134,8 +134,9 @@ // Strip trailing '.'-pages before comparing the results as they are not // going to be reported to range_recorder anyway. const char *LastX = strrchr(TestCase, 'x'); - std::string Expected(TestCase, - LastX == nullptr ? 0 : (LastX - TestCase + 1)); + std::string Expected( + TestCase, + LastX == nullptr ? 
0U : static_cast<size_t>(LastX - TestCase + 1)); EXPECT_STREQ(Expected.c_str(), Recorder.ReportedPages.c_str()); } } diff --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp --- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp @@ -136,10 +136,10 @@ AlignLog++) { const scudo::uptr Align = 1U << AlignLog; for (scudo::sptr Delta = -128; Delta <= 128; Delta += 8) { - if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0) + if ((1LL << SizeLog) + Delta <= 0) continue; - const scudo::uptr UserSize = - scudo::roundUp((1U << SizeLog) + Delta, MinAlign); + const scudo::uptr UserSize = scudo::roundUp( + static_cast<scudo::uptr>((1LL << SizeLog) + Delta), MinAlign); const scudo::uptr Size = HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0); void *P = Allocator->allocate(Options, Size, Align); @@ -160,7 +160,8 @@ std::vector<void *> V; const scudo::uptr PageSize = scudo::getPageSizeCached(); for (scudo::uptr I = 0; I < 32U; I++) - V.push_back(Allocator->allocate(Options, (std::rand() % 16) * PageSize)); + V.push_back(Allocator->allocate( + Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize)); auto Lambda = [&V](scudo::uptr Block) { EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)), V.end()); @@ -215,8 +216,9 @@ } for (scudo::uptr I = 0; I < 128U; I++) { // Deallocate 75% of the blocks. 
- const bool Deallocate = (rand() & 3) != 0; - void *P = Allocator->allocate(Options, (std::rand() % 16) * PageSize); + const bool Deallocate = (std::rand() & 3) != 0; + void *P = Allocator->allocate( + Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize); if (Deallocate) Allocator->deallocate(Options, P); else diff --git a/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp b/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp --- a/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp @@ -38,7 +38,7 @@ void unmapTestOnly() { TSDRegistry.unmapTestOnly(this); } void initCache(CacheT *Cache) { *Cache = {}; } - void commitBack(scudo::TSD<MockAllocator> *TSD) {} + void commitBack(UNUSED scudo::TSD<MockAllocator> *TSD) {} TSDRegistryT *getTSDRegistry() { return &TSDRegistry; } void callPostInitCallback() {} diff --git a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp --- a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp @@ -267,7 +267,7 @@ TEST(ScudoWrappersCTest, OtherAlloc) { #if HAVE_PVALLOC - const size_t PageSize = sysconf(_SC_PAGESIZE); + const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE)); void *P = pvalloc(Size); EXPECT_NE(P, nullptr); @@ -329,7 +329,7 @@ static uintptr_t BoundaryP; static size_t Count; -static void callback(uintptr_t Base, size_t Size, void *Arg) { +static void callback(uintptr_t Base, UNUSED size_t Size, UNUSED void *Arg) { if (scudo::archSupportsMemoryTagging()) { Base = scudo::untagPointer(Base); BoundaryP = scudo::untagPointer(BoundaryP); @@ -343,7 +343,7 @@ // aligned on a page, then run the malloc_iterate on both the pages that the // block is a boundary for. It must only be seen once by the callback function. 
TEST(ScudoWrappersCTest, MallocIterateBoundary) { - const size_t PageSize = sysconf(_SC_PAGESIZE); + const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE)); #if SCUDO_ANDROID // Android uses a 16 byte alignment for both 32 bit and 64 bit. const size_t BlockDelta = 16U; @@ -461,7 +461,7 @@ static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER; static bool Ready; -static void *enableMalloc(void *Unused) { +static void *enableMalloc(UNUSED void *Unused) { // Initialize the allocator for this thread. void *P = malloc(Size); EXPECT_NE(P, nullptr); diff --git a/compiler-rt/lib/scudo/standalone/tests/wrappers_cpp_test.cpp b/compiler-rt/lib/scudo/standalone/tests/wrappers_cpp_test.cpp --- a/compiler-rt/lib/scudo/standalone/tests/wrappers_cpp_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/wrappers_cpp_test.cpp @@ -103,7 +103,7 @@ Cv.wait(Lock); } for (size_t I = 0; I < 256U; I++) { - const size_t N = std::rand() % 128U; + const size_t N = static_cast<size_t>(std::rand()) % 128U; uintptr_t *P = new uintptr_t[N]; if (P) { memset(P, 0x42, sizeof(uintptr_t) * N);