Commit 1e877625 authored by Adam Simpkins's avatar Adam Simpkins Committed by Facebook Github Bot

logging: make IntervalRateLimiter constexpr-constructible

Summary:
This changes `IntervalRateLimiter` to allow it to be `constexpr`-constructible.

We now always initialize the `timestamp_` field to 0.  The very first call to
`check()` will now always call `checkSlow()` which will then initialize
`timestamp_` properly.

This also removes the pure virtual `RateLimiter` interface class.  At the
moment `IntervalRateLimiter` is the only implementation, and all call sites
use this class directly.  We can always add the `RateLimiter` interface back
in the future if we need it.

Reviewed By: yfeldblum

Differential Revision: D8798167

fbshipit-source-id: 80885a16506a8daa67653bd0a92accae7a973289
parent 16ae8fd4
...@@ -17,12 +17,6 @@ ...@@ -17,12 +17,6 @@
namespace folly { namespace folly {
namespace logging { namespace logging {
IntervalRateLimiter::IntervalRateLimiter(
uint64_t maxPerInterval,
clock::duration interval)
: maxPerInterval_{maxPerInterval},
interval_{interval},
timestamp_{clock::now().time_since_epoch().count()} {}
bool IntervalRateLimiter::checkSlow() { bool IntervalRateLimiter::checkSlow() {
auto ts = timestamp_.load(); auto ts = timestamp_.load();
...@@ -38,6 +32,17 @@ bool IntervalRateLimiter::checkSlow() { ...@@ -38,6 +32,17 @@ bool IntervalRateLimiter::checkSlow() {
return false; return false;
} }
if (ts == 0) {
// If we initialized timestamp_ for the very first time increment count_ by
// one instead of setting it to 0. Our original increment made it roll over
// to 0, so other threads may have already incremented it again and passed
// the check.
auto origCount = count_.fetch_add(1, std::memory_order_acq_rel);
// Check to see if other threads already hit the rate limit cap before we
// finished checkSlow().
return (origCount < maxPerInterval_);
}
// In the future, if we wanted to return the number of dropped events we // In the future, if we wanted to return the number of dropped events we
// could use (count_.exchange(0) - maxPerInterval_) here. // could use (count_.exchange(0) - maxPerInterval_) here.
count_.store(1, std::memory_order_release); count_.store(1, std::memory_order_release);
......
...@@ -24,15 +24,6 @@ ...@@ -24,15 +24,6 @@
namespace folly { namespace folly {
namespace logging { namespace logging {
/**
 * An interface for rate limiting checkers.
 *
 * Implementations return true from check() when the caller is allowed to
 * proceed with the event, and false when the event should be dropped.
 */
class RateLimiter {
 public:
  // Polymorphic base: a defaulted virtual destructor is required so that
  // deleting through a RateLimiter* destroys the derived object correctly.
  virtual ~RateLimiter() = default;

  // Returns true if the event is within the rate limit and may proceed.
  virtual bool check() = 0;
};
/** /**
* A rate limiter that can rate limit events to N events per M milliseconds. * A rate limiter that can rate limit events to N events per M milliseconds.
* *
...@@ -40,13 +31,16 @@ class RateLimiter { ...@@ -40,13 +31,16 @@ class RateLimiter {
* When messages are being rate limited it is slightly slower, as it has to * When messages are being rate limited it is slightly slower, as it has to
* check the clock each time check() is called in this case. * check the clock each time check() is called in this case.
*/ */
class IntervalRateLimiter : public RateLimiter { class IntervalRateLimiter {
public: public:
using clock = chrono::coarse_steady_clock; using clock = chrono::coarse_steady_clock;
IntervalRateLimiter(uint64_t maxPerInterval, clock::duration interval); constexpr IntervalRateLimiter(
uint64_t maxPerInterval,
clock::duration interval)
: maxPerInterval_{maxPerInterval}, interval_{interval} {}
bool check() override final { bool check() {
auto origCount = count_.fetch_add(1, std::memory_order_acq_rel); auto origCount = count_.fetch_add(1, std::memory_order_acq_rel);
if (origCount < maxPerInterval_) { if (origCount < maxPerInterval_) {
return true; return true;
...@@ -60,11 +54,15 @@ class IntervalRateLimiter : public RateLimiter { ...@@ -60,11 +54,15 @@ class IntervalRateLimiter : public RateLimiter {
const uint64_t maxPerInterval_; const uint64_t maxPerInterval_;
const clock::time_point::duration interval_; const clock::time_point::duration interval_;
std::atomic<uint64_t> count_{0}; // Initialize count_ to the maximum possible value so that the first
// call to check() will call checkSlow() to initialize timestamp_,
// but subsequent calls will hit the fast-path and avoid checkSlow()
std::atomic<uint64_t> count_{std::numeric_limits<uint64_t>::max()};
// Ideally timestamp_ would be a // Ideally timestamp_ would be a
// std::atomic<clock::time_point>, but this does not // std::atomic<clock::time_point>, but this does not
// work since time_point's constructor is not noexcept // work since time_point's constructor is not noexcept
std::atomic<clock::rep> timestamp_; std::atomic<clock::rep> timestamp_{0};
}; };
} // namespace logging } // namespace logging
} // namespace folly } // namespace folly
...@@ -15,6 +15,8 @@ ...@@ -15,6 +15,8 @@
*/ */
#include <chrono> #include <chrono>
#include <condition_variable>
#include <mutex>
#include <string> #include <string>
#include <thread> #include <thread>
...@@ -68,3 +70,52 @@ TEST(RateLimiter, interval1per100ms) { ...@@ -68,3 +70,52 @@ TEST(RateLimiter, interval1per100ms) {
TEST(RateLimiter, interval15per150ms) { TEST(RateLimiter, interval15per150ms) {
intervalTest(15, 150ms); intervalTest(15, 150ms);
} }
TEST(RateLimiter, concurrentThreads) {
  constexpr uint64_t maxEvents = 20;
  constexpr uint64_t numThreads = 32;

  IntervalRateLimiter limiter{20, 10s};
  std::atomic<uint32_t> passCount{0};

  // Start gate: workers block on this condition variable until the main
  // thread flips `ready`, so all of them begin hitting the limiter at as
  // close to the same moment as we can arrange.
  std::mutex startMutex;
  std::condition_variable startCv;
  bool ready = false;

  auto worker = [&]() {
    {
      std::unique_lock<std::mutex> lock{startMutex};
      startCv.wait(lock, [&ready] { return ready; });
    }
    // Attempt twice as many events as the limit allows; count the passes.
    for (uint64_t attempt = 0; attempt < maxEvents * 2; ++attempt) {
      if (limiter.check()) {
        passCount.fetch_add(1, std::memory_order_relaxed);
      }
    }
  };

  // Spawn all of the worker threads; they immediately park at the gate.
  std::vector<std::thread> workers;
  workers.reserve(numThreads);
  for (uint64_t idx = 0; idx < numThreads; ++idx) {
    workers.emplace_back(worker);
  }

  // Open the gate and wake every worker.
  {
    std::lock_guard<std::mutex> guard(startMutex);
    ready = true;
  }
  startCv.notify_all();

  for (auto& t : workers) {
    t.join();
  }

  // Exactly maxEvents check() calls may succeed within the 10s interval,
  // no matter how the threads interleave.
  EXPECT_EQ(maxEvents, passCount.load(std::memory_order_relaxed));
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment