Commit ac354aba authored by Jason Evans's avatar Jason Evans Committed by Sara Golemon

Universally update to jemalloc 4.0.0.

Summary: Universally update to jemalloc 4.0.0.

Update MALLOC_CONF/malloc_conf configuration, and use the
"arena.<i>.purge" mallctl (replaces "arenas.purge").

Reviewed By: @bmaurer

Differential Revision: D2355602
parent 6dd4bc6a
......@@ -31,7 +31,8 @@ bool usingJEMallocSlow() {
// (!!). http://goo.gl/xpmctm
if (mallocx == nullptr || rallocx == nullptr || xallocx == nullptr
|| sallocx == nullptr || dallocx == nullptr || nallocx == nullptr
|| mallctl == nullptr) {
|| mallctl == nullptr || mallctlnametomib == nullptr
|| mallctlbymib == nullptr) {
return false;
}
......
......@@ -58,7 +58,7 @@ namespace folly {
#pragma GCC system_header
/**
* Declare *allocx() and mallctl() as weak symbols. These will be provided by
* Declare *allocx() and mallctl*() as weak symbols. These will be provided by
* jemalloc if we are using jemalloc, or will be NULL if we are using another
* malloc implementation.
*/
......@@ -76,6 +76,11 @@ extern "C" size_t nallocx(size_t, int)
__attribute__((__weak__));
extern "C" int mallctl(const char*, void*, size_t*, void*, size_t)
__attribute__((__weak__));
extern "C" int mallctlnametomib(const char*, size_t*, size_t*)
__attribute__((__weak__));
extern "C" int mallctlbymib(const size_t*, size_t, void*, size_t*, void*,
size_t)
__attribute__((__weak__));
#include <bits/functexcept.h>
#define FOLLY_HAVE_MALLOC_H 1
......
......@@ -32,6 +32,9 @@ void dallocx(void*, int) __attribute__((__weak__));
size_t nallocx(size_t, int) __attribute__((__weak__));
int mallctl(const char*, void*, size_t*, void*, size_t)
__attribute__((__weak__));
int mallctlnametomib(const char*, size_t*, size_t*) __attribute__((__weak__));
int mallctlbymib(const size_t*, size_t, void*, size_t*, void*, size_t)
__attribute__((__weak__));
#else
extern void* (*mallocx)(size_t, int);
extern void* (*rallocx)(void*, size_t, int);
......@@ -40,6 +43,9 @@ extern size_t (*sallocx)(const void*, int);
extern void (*dallocx)(void*, int);
extern size_t (*nallocx)(size_t, int);
extern int (*mallctl)(const char*, void*, size_t*, void*, size_t);
extern int (*mallctlnametomib)(const char*, size_t*, size_t*);
extern int (*mallctlbymib)(const size_t*, size_t, void*, size_t*, void*,
size_t);
#endif
}
......
......@@ -26,6 +26,9 @@ size_t (*sallocx)(const void*, int) = nullptr;
void (*dallocx)(void*, int) = nullptr;
size_t (*nallocx)(size_t, int) = nullptr;
int (*mallctl)(const char*, void*, size_t*, void*, size_t) = nullptr;
int (*mallctlnametomib)(const char*, size_t*, size_t*) = nullptr;
int (*mallctlbymib)(const size_t*, size_t, void*, size_t*, void*, size_t) =
nullptr;
#endif
}
......@@ -51,8 +51,8 @@ static unsigned mallctlWrapper(const char* cmd, const unsigned* in,
void MemoryIdler::flushLocalMallocCaches() {
if (usingJEMalloc()) {
if (!mallctl) {
FB_LOG_EVERY_MS(ERROR, 10000) << "mallctl weak link failed";
if (!mallctl || !mallctlnametomib || !mallctlbymib) {
FB_LOG_EVERY_MS(ERROR, 10000) << "mallctl* weak link failed";
return;
}
......@@ -69,12 +69,15 @@ void MemoryIdler::flushLocalMallocCaches() {
// purging the arenas is counter-productive. We use the heuristic
// that if narenas <= 2 * num_cpus then we shouldn't do anything here,
// which detects when the narenas has been reduced from the default
unsigned narenas;
unsigned arenaForCurrent;
if (mallctlWrapper("arenas.narenas", nullptr, &narenas) == 0 &&
unsigned narenas, arenaForCurrent;
size_t mib[3];
size_t miblen = 3;
if (mallctlWrapper("opt.narenas", nullptr, &narenas) == 0 &&
narenas > 2 * CacheLocality::system().numCpus &&
mallctlWrapper("thread.arena", nullptr, &arenaForCurrent) == 0) {
(void)mallctlWrapper("arenas.purge", &arenaForCurrent, nullptr);
mallctlWrapper("thread.arena", nullptr, &arenaForCurrent) == 0 &&
mallctlnametomib("arena.0.purge", mib, &miblen) == 0) {
mib[1] = size_t(arenaForCurrent);
mallctlbymib(mib, miblen, nullptr, nullptr, nullptr, 0);
}
}
}
......
......@@ -269,7 +269,7 @@ TEST(small_vector, BasicGuarantee) {
// Run this with.
// MALLOC_CONF=prof_leak:true
// LD_PRELOAD=${JEMALLOC_PATH}/lib/libjemalloc.so.1
// LD_PRELOAD=${JEMALLOC_PATH}/lib/libjemalloc.so.2
// LD_PRELOAD="$LD_PRELOAD:"${UNWIND_PATH}/lib/libunwind.so.7
TEST(small_vector, leak_test) {
for (int j = 0; j < 1000; ++j) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment