Commit 12361241 authored by Qi Wang, committed by Facebook Github Bot

MemoryIdler: use mallctl directly for tcache.flush

Summary:
tcache.flush may fail if the tcache is disabled. Avoid mallctlCall, which
throws on error; call mallctl directly instead and ignore the return code.

Reviewed By: davidtgoldblatt

Differential Revision: D6115419

fbshipit-source-id: 39411c80af08dc7c855efd43297809b749f935bf
parent 5bb0e1da
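
For context, a minimal sketch of the behavioral difference, assuming folly's
MallctlHelper wrappers and a jemalloc build (the header path varies across
folly versions, and the flushTcache wrapper name here is hypothetical):

  #include <folly/memory/MallctlHelper.h>  // folly::mallctlCall
  #include <jemalloc/jemalloc.h>           // mallctl

  void flushTcache() {
    // mallctlCall wraps mallctl and throws std::runtime_error on any
    // nonzero return, which is what happens when the tcache is disabled:
    //   folly::mallctlCall("thread.tcache.flush");  // may throw
    // Calling mallctl directly just returns the error code, so a
    // disabled tcache becomes a silent no-op:
    mallctl("thread.tcache.flush", nullptr, nullptr, nullptr, 0);
  }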
@@ -46,7 +46,8 @@ void MemoryIdler::flushLocalMallocCaches() {
   }
 
   try {
-    mallctlCall("thread.tcache.flush");
+    // Not using mallctlCall as this will fail if tcache is disabled.
+    mallctl("thread.tcache.flush", nullptr, nullptr, nullptr, 0);
 
     // By default jemalloc has 4 arenas per cpu, and then assigns each
     // thread to one of those arenas.  This means that in any service
@@ -67,7 +68,7 @@ void MemoryIdler::flushLocalMallocCaches() {
     mallctlRead("thread.arena", &arenaForCurrent);
     if (narenas > 2 * CacheLocality::system().numCpus &&
         mallctlnametomib("arena.0.purge", mib, &miblen) == 0) {
-      mib[1] = size_t(arenaForCurrent);
+      mib[1] = static_cast<size_t>(arenaForCurrent);
       mallctlbymib(mib, miblen, nullptr, nullptr, nullptr, 0);
     }
   } catch (const std::runtime_error& ex) {
@@ -117,7 +118,7 @@ static void fetchStackLimits() {
   assert(rawSize > guardSize);
 
   // stack goes down, so guard page adds to the base addr
-  tls_stackLimit = uintptr_t(addr) + guardSize;
+  tls_stackLimit = reinterpret_cast<uintptr_t>(addr) + guardSize;
   tls_stackSize = rawSize - guardSize;
 
   assert((tls_stackLimit & (pageSize() - 1)) == 0);
@@ -125,7 +126,7 @@ static void fetchStackLimits() {
 
 FOLLY_NOINLINE static uintptr_t getStackPtr() {
   char marker;
-  auto rv = uintptr_t(&marker);
+  auto rv = reinterpret_cast<uintptr_t>(&marker);
   return rv;
 }
@@ -133,7 +134,7 @@ void MemoryIdler::unmapUnusedStack(size_t retain) {
   if (tls_stackSize == 0) {
     fetchStackLimits();
   }
-  if (tls_stackSize <= std::max(size_t(1), retain)) {
+  if (tls_stackSize <= std::max(static_cast<size_t>(1), retain)) {
     // covers both missing stack info, and impossibly large retain
     return;
   }
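
Aside from the cast cleanups, the arena-purge path above uses jemalloc's MIB
interface. A standalone sketch of that pattern, assuming a jemalloc build
(the purgeArena wrapper name is hypothetical):

  #include <jemalloc/jemalloc.h>

  // Purge unused dirty pages of a single arena. "arena.0.purge" is
  // translated to a numeric MIB once; mib[1] holds the arena index, so
  // it can be patched to target any arena without re-parsing the name.
  void purgeArena(unsigned arena) {
    size_t mib[3];
    size_t miblen = 3;
    if (mallctlnametomib("arena.0.purge", mib, &miblen) == 0) {
      mib[1] = static_cast<size_t>(arena);  // select the target arena
      mallctlbymib(mib, miblen, nullptr, nullptr, nullptr, 0);
    }
  }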