Commit 1e877625 authored by Adam Simpkins's avatar Adam Simpkins Committed by Facebook Github Bot

logging: make IntervalRateLimiter constexpr-constructible

Summary:
This changes `IntervalRateLimiter` to allow it to be `constexpr`-constructible.

We now always initialize the `timestamp_` field to 0.  The very first call to
`check()` will now always call `checkSlow()` which will then initialize
`timestamp_` properly.

This also removes the pure virtual `RateLimiter` interface class.  At the
moment `IntervalRateLimiter` is the only implementation, and all call sites
use this class directly.  We can always add the `RateLimiter` interface back
in the future if we need it later.

Reviewed By: yfeldblum

Differential Revision: D8798167

fbshipit-source-id: 80885a16506a8daa67653bd0a92accae7a973289
parent 16ae8fd4
......@@ -17,12 +17,6 @@
namespace folly {
namespace logging {
IntervalRateLimiter::IntervalRateLimiter(
uint64_t maxPerInterval,
clock::duration interval)
: maxPerInterval_{maxPerInterval},
interval_{interval},
timestamp_{clock::now().time_since_epoch().count()} {}
bool IntervalRateLimiter::checkSlow() {
auto ts = timestamp_.load();
......@@ -38,6 +32,17 @@ bool IntervalRateLimiter::checkSlow() {
return false;
}
if (ts == 0) {
// If we initialized timestamp_ for the very first time increment count_ by
// one instead of setting it to 0. Our original increment made it roll over
// to 0, so other threads may have already incremented it again and passed
// the check.
auto origCount = count_.fetch_add(1, std::memory_order_acq_rel);
// Check to see if other threads already hit the rate limit cap before we
// finished checkSlow().
return (origCount < maxPerInterval_);
}
// In the future, if we wanted to return the number of dropped events we
// could use (count_.exchange(0) - maxPerInterval_) here.
count_.store(1, std::memory_order_release);
......
......@@ -24,15 +24,6 @@
namespace folly {
namespace logging {
/**
 * An interface for rate limiting checkers.
 *
 * Implementations decide, per event, whether the event is within the
 * configured rate limit.
 */
class RateLimiter {
 public:
  // Defaulted virtual destructor: required so deleting through a
  // RateLimiter* is well-defined; `= default` is preferred over an
  // empty body (it can be trivial and is implicitly noexcept).
  virtual ~RateLimiter() = default;

  /**
   * Check whether one more event is allowed right now.
   *
   * @return true if the event should proceed, false if it should be
   *         rate limited (dropped or deferred by the caller).
   */
  virtual bool check() = 0;
};
/**
* A rate limiter that can rate limit events to N events per M milliseconds.
*
......@@ -40,13 +31,16 @@ class RateLimiter {
* When messages are being rate limited it is slightly slower, as it has to
* check the clock each time check() is called in this case.
*/
class IntervalRateLimiter : public RateLimiter {
class IntervalRateLimiter {
public:
using clock = chrono::coarse_steady_clock;
IntervalRateLimiter(uint64_t maxPerInterval, clock::duration interval);
constexpr IntervalRateLimiter(
uint64_t maxPerInterval,
clock::duration interval)
: maxPerInterval_{maxPerInterval}, interval_{interval} {}
bool check() override final {
bool check() {
auto origCount = count_.fetch_add(1, std::memory_order_acq_rel);
if (origCount < maxPerInterval_) {
return true;
......@@ -60,11 +54,15 @@ class IntervalRateLimiter : public RateLimiter {
const uint64_t maxPerInterval_;
const clock::time_point::duration interval_;
std::atomic<uint64_t> count_{0};
// Initialize count_ to the maximum possible value so that the first
// call to check() will call checkSlow() to initialize timestamp_,
// but subsequent calls will hit the fast-path and avoid checkSlow()
std::atomic<uint64_t> count_{std::numeric_limits<uint64_t>::max()};
// Ideally timestamp_ would be a
// std::atomic<clock::time_point>, but this does not
// work since time_point's constructor is not noexcept
std::atomic<clock::rep> timestamp_;
std::atomic<clock::rep> timestamp_{0};
};
} // namespace logging
} // namespace folly
......@@ -15,6 +15,8 @@
*/
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <string>
#include <thread>
......@@ -68,3 +70,52 @@ TEST(RateLimiter, interval1per100ms) {
// Verify that the limiter admits at most 15 events per 150ms interval
// (delegates to the shared intervalTest() helper defined earlier in
// this file).
TEST(RateLimiter, interval15per150ms) {
intervalTest(15, 150ms);
}
// Hammer a single IntervalRateLimiter from many threads simultaneously
// and verify that exactly maxEvents check() calls succeed within one
// (long) interval -- i.e. the limit is enforced without over- or
// under-counting under contention.
TEST(RateLimiter, concurrentThreads) {
  constexpr uint64_t maxEvents = 20;
  constexpr uint64_t numThreads = 32;

  IntervalRateLimiter limiter{20, 10s};
  std::atomic<uint32_t> count{0};
  std::mutex m;
  std::condition_variable cv;
  bool go = false;

  auto worker = [&]() {
    // Park every worker on the condition variable first, so releasing
    // them all at once maximizes the chance of truly concurrent calls.
    {
      std::unique_lock<std::mutex> lock{m};
      cv.wait(lock, [&go] { return go; });
    }
    for (uint64_t i = 0; i < maxEvents * 2; ++i) {
      if (limiter.check()) {
        count.fetch_add(1, std::memory_order_relaxed);
      }
    }
  };

  // Spawn the workers; they all block until `go` flips.
  std::vector<std::thread> workers;
  workers.reserve(numThreads);
  for (uint64_t idx = 0; idx < numThreads; ++idx) {
    workers.emplace_back(worker);
  }

  // Release every worker at once.
  {
    std::lock_guard<std::mutex> guard(m);
    go = true;
  }
  cv.notify_all();

  // Wait for all workers to drain their iterations.
  for (auto& t : workers) {
    t.join();
  }

  // Exactly maxEvents calls should have been admitted.
  EXPECT_EQ(maxEvents, count.load(std::memory_order_relaxed));
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment