Commit 6c7906af authored by Yedidya Feldblum, committed by Facebook Github Bot

XLOG_EVERY_N_THREAD

Summary:
[Folly] `XLOG_EVERY_N_THREAD`.

Implemented with a single per-thread map shared by all `XLOG_EVERY_N_THREAD` call sites. This approach has lower TLS overhead than a separate `thread_local` counter per `XLOG_EVERY_N_THREAD` call site, at the cost of a map lookup keyed by the call-site address.

While the `thread_local` counter approach is twice as fast as this one with a no-op log handler, the runtime overhead of real log handlers is expected to make that saving immaterial.
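For comparison, here is a minimal, hypothetical sketch of the rejected per-call-site approach (not part of this commit; it assumes `<folly/logging/xlog.h>` is included for `XLOG_IF`, and omits details such as `FOLLY_EXPORT` on the lambda):

// Hypothetical alternative: one thread_local counter per macro expansion.
// The check is a plain TLS increment (no map lookup), but every call site
// adds its own thread_local slot to every thread that reaches it, which is
// the TLS overhead that the shared per-thread map avoids.
#define XLOG_EVERY_N_THREAD_ALT(level, n, ...) \
  XLOG_IF(                                     \
      level,                                   \
      []() -> bool {                           \
        thread_local size_t count = 0;         \
        return (count++ % (n)) == 0;           \
      }(),                                     \
      ##__VA_ARGS__)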

Reviewed By: simpkins, luciang

Differential Revision: D15293529

fbshipit-source-id: 7af535a0d329473126a0e4dcffc19d750e8b8b10
parent 8042bf57
@@ -19,6 +19,7 @@
#include <cstddef>
#include <thread>
#include <boost/preprocessor/repetition/repeat.hpp>
#include <boost/thread/barrier.hpp>
#include <folly/Benchmark.h>
@@ -114,6 +115,41 @@ BENCHMARK(xlog_every_n_exact, iters) {
XLOG_EVERY_N_EXACT(INFO, FLAGS_every_n, "hi");
});
}
BENCHMARK(xlog_every_n_thread, iters) {
runXlogEveryNBench(iters, [] { //
XLOG_EVERY_N_THREAD(INFO, FLAGS_every_n, "hi");
});
}
#define XLOG_EVERY_N_THREAD_BENCH_REP(z, n, text) \
XLOG_EVERY_N_THREAD(INFO, FLAGS_every_n, "hi");
BENCHMARK(xlog_every_n_thread_1_256, iters) {
runXlogEveryNBench(iters, [] { //
for (auto i = 0; i < 256; ++i) {
BOOST_PP_REPEAT(1, XLOG_EVERY_N_THREAD_BENCH_REP, unused)
}
});
}
BENCHMARK(xlog_every_n_thread_16_16, iters) {
runXlogEveryNBench(iters, [] { //
for (auto i = 0; i < 16; ++i) {
BOOST_PP_REPEAT(16, XLOG_EVERY_N_THREAD_BENCH_REP, unused)
}
});
}
BENCHMARK(xlog_every_n_thread_256_1, iters) {
runXlogEveryNBench(iters, [] { //
for (auto i = 0; i < 1; ++i) {
BOOST_PP_REPEAT(256, XLOG_EVERY_N_THREAD_BENCH_REP, unused)
}
});
}
#undef XLOG_EVERY_N_THREAD_BENCH_REP
} // namespace folly
FOLLY_INIT_LOGGING_CONFIG(".=INFO:default; default=null");
......
@@ -311,6 +311,23 @@ TEST_F(XlogTest, rateLimiting) {
"msg 49"));
handler->clearMessages();
// Test XLOG_EVERY_N_THREAD
for (size_t n = 0; n < 50; ++n) {
XLOG_EVERY_N_THREAD(DBG1, 7, "msg ", n);
}
EXPECT_THAT(
handler->getMessageValues(),
ElementsAre(
"msg 0",
"msg 7",
"msg 14",
"msg 21",
"msg 28",
"msg 35",
"msg 42",
"msg 49"));
handler->clearMessages();
// Test XLOG_EVERY_MS and XLOG_N_PER_MS
// We test these together to minimize the number of sleep operations.
for (size_t n = 0; n < 10; ++n) {
@@ -352,6 +369,50 @@ TEST_F(XlogTest, rateLimiting) {
handler->clearMessages();
}
TEST_F(XlogTest, rateLimitingEndOfThread) {
auto handler = make_shared<TestLogHandler>();
LoggerDB::get().getCategory("xlog_test")->addHandler(handler);
LoggerDB::get().setLevel("xlog_test", LogLevel::DBG1);
auto th = std::thread([&] {
auto enqueue = [](int num) {
pthread_key_t key;
pthread_key_create(&key, [](void* obj) {
auto* i = static_cast<int*>(obj);
XLOG_EVERY_N_THREAD(DBG1, 1, "dtor ", *i);
delete i;
});
pthread_setspecific(key, new int(num));
};
enqueue(100);
enqueue(101);
for (size_t n = 0; n < 50; ++n) {
XLOG_EVERY_N_THREAD(DBG1, 7, "msg ", n);
}
enqueue(102);
enqueue(103);
});
th.join();
EXPECT_THAT(
handler->getMessageValues(),
ElementsAreArray({
"msg 0",
"msg 7",
"msg 14",
"msg 21",
"msg 28",
"msg 35",
"msg 42",
"msg 49",
"dtor 100",
"dtor 101",
"dtor 102",
"dtor 103",
}));
handler->clearMessages();
}
TEST_F(XlogTest, getXlogCategoryName) {
EXPECT_EQ("foo.cpp", getXlogCategoryNameForFile("foo.cpp"));
EXPECT_EQ("foo.h", getXlogCategoryNameForFile("foo.h"));
......
@@ -14,12 +14,46 @@
* limitations under the License.
*/
#include <folly/logging/xlog.h>
#include <type_traits>
#include <unordered_map>
#include <folly/Synchronized.h>
#include <folly/portability/PThread.h>
using folly::StringPiece;
namespace folly {
namespace detail {
size_t& xlogEveryNThreadEntry(void const* const key) {
using Map = std::unordered_map<void const*, size_t>;
static auto pkey = [] {
pthread_key_t k;
pthread_key_create(&k, [](void* arg) {
auto& map = *static_cast<Map**>(arg);
delete map;
// This destructor occurs during some arbitrary stage of thread teardown.
// But some subsequent stage may invoke this function again! In which case
// the map, which has already been deleted, must be recreated and a fresh
// counter returned. Clearing the map pointer here signals that the map
// has been deleted and that the next call to this function in the same
// thread must recreate the map.
map = nullptr;
});
return k;
}();
thread_local Map* map;
if (!map) {
pthread_setspecific(pkey, &map);
map = new Map();
}
return (*map)[key];
}
} // namespace detail
namespace {
/**
* buck copies header files from their original location in the source tree
......
@@ -156,6 +156,41 @@
}(), \
##__VA_ARGS__)
namespace folly {
namespace detail {
size_t& xlogEveryNThreadEntry(void const* const key);
} // namespace detail
} // namespace folly
/**
* Similar to XLOG(...) except only log a message every @param n
* invocations per thread.
*
* The internal counter is thread-local, which avoids the
* contention that the XLOG_EVERY_N variations with a global
* counter may suffer. If a given XLOG_EVERY_N or variation
* expansion is encountered concurrently by multiple threads in
* a hot code path and the global counter in that expansion is
* observed to be contended, then switching to
* XLOG_EVERY_N_THREAD can help.
*
* Every thread that invokes this expansion has a counter for
* this expansion. The internal counters are all stored in a
* single thread-local map to control TLS overhead, at the cost
* of a small runtime performance hit.
*/
#define XLOG_EVERY_N_THREAD(level, n, ...) \
XLOG_IF( \
level, \
[]() FOLLY_EXPORT -> bool { \
static char folly_detail_xlog_key; \
auto& folly_detail_xlog_count = \
::folly::detail::xlogEveryNThreadEntry(&folly_detail_xlog_key); \
return FOLLY_UNLIKELY((folly_detail_xlog_count++ % (n)) == 0); \
}(), \
##__VA_ARGS__)
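As an illustration of the intended use (a hypothetical example, not part of this commit): each thread keeps its own counter for the call site, so every thread independently logs its 1st, 101st, 201st, ... invocation.

#include <folly/logging/xlog.h>

// Hypothetical call site: every thread that executes processItem() has its own
// counter for this expansion, so each thread logs on its 1st, 101st, 201st, ...
// call, regardless of how often other threads pass through the same line.
void processItem(int itemId) {
  XLOG_EVERY_N_THREAD(INFO, 100, "processed item ", itemId);
}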
/**
* Similar to XLOG(...) except only log at most @param count messages
* per @param ms millisecond interval.
......