Commit 018a9487 authored by Andrii Grynenko, committed by Facebook GitHub Bot

Template AtomicNotificationQueue by Task and Consumer

Summary: This allows migrating projects that were creating custom NotificationQueues.

Differential Revision: D24580356

fbshipit-source-id: ce9a15c408224b8a471246fde2bd121eab4ae329
parent 2109302c
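
As an illustration of what this enables, here is a minimal, hypothetical sketch of a project-specific queue built on the templated interface below (MyTask and MyConsumer are made up for the example; the one requirement taken from the header is that the consumer's operator()(Task&&) be noexcept):

#include <folly/io/async/AtomicNotificationQueue.h>
#include <folly/io/async/EventBase.h>

// Hypothetical task type carried through the queue.
struct MyTask {
  int payload;
};

// Hypothetical consumer; its call operator must be noexcept, as enforced by
// the static_assert added in this diff.
struct MyConsumer {
  void operator()(MyTask&& task) noexcept { /* process task.payload */ }
};

int main() {
  folly::EventBase evb;
  folly::AtomicNotificationQueue<MyTask, MyConsumer> queue{MyConsumer{}};
  queue.startConsuming(&evb);    // a single EventBase thread drains the queue
  queue.putMessage(MyTask{42});  // may be called from any thread
  evb.loopOnce();                // MyConsumer::operator() runs on this thread
  return 0;
}

Teardown and detaching from the EventBase are elided here for brevity.
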
@@ -35,22 +35,29 @@ namespace folly {
* Tasks can be added to the queue from any thread. A single EventBase
* thread can be listening to the queue. Tasks are processed in FIFO order.
*/
template <typename Task, typename Consumer>
class AtomicNotificationQueue : private EventBase::LoopCallback,
private EventHandler {
class Task {
public:
Task(Func&& func, std::shared_ptr<RequestContext> rctx)
: func_(std::move(func)), rctx_(std::move(rctx)) {}
void execute() && noexcept;
private:
Func func_;
std::shared_ptr<RequestContext> rctx_;
};
static_assert(
noexcept(std::declval<Consumer>()(std::declval<Task&&>())),
"Consumer::operator()(Task&&) should be noexcept.");
class AtomicQueue;
class Queue {
public:
struct Node {
Task task;
std::shared_ptr<RequestContext> rctx{RequestContext::saveContext()};
private:
friend class AtomicNotificationQueue::AtomicQueue;
friend class Queue;
template <typename T>
explicit Node(T&& t) : task(std::forward<T>(t)) {}
Node* next{};
};
Queue() {}
Queue(Queue&& other) noexcept;
Queue& operator=(Queue&& other) noexcept;
@@ -60,23 +67,12 @@ class AtomicNotificationQueue : private EventBase::LoopCallback,
ssize_t size() const;
Task& front();
Node& front();
void pop();
void clear();
struct Node {
private:
friend class AtomicNotificationQueue::AtomicQueue;
friend class Queue;
explicit Node(Task&& t) : value(std::move(t)) {}
Task value;
Node* next{};
};
private:
friend class AtomicNotificationQueue::AtomicQueue;
@@ -139,7 +135,8 @@ class AtomicNotificationQueue : private EventBase::LoopCallback,
* Pushes a task into the queue. Returns true iff the queue was armed.
* Can be called from any thread.
*/
bool push(Task&& value);
template <typename T>
bool push(T&& value);
/*
* Returns true if the queue has tasks.
@@ -183,7 +180,8 @@ class AtomicNotificationQueue : private EventBase::LoopCallback,
}
private:
alignas(folly::cacheline_align_v) std::atomic<Queue::Node*> head_{};
alignas(
folly::cacheline_align_v) std::atomic<typename Queue::Node*> head_{};
std::atomic<ssize_t> pushCount_{0};
alignas(folly::cacheline_align_v) ssize_t successfulArmCount_{0};
ssize_t consumerDisarmCount_{0};
@@ -191,7 +189,7 @@ class AtomicNotificationQueue : private EventBase::LoopCallback,
};
public:
AtomicNotificationQueue();
explicit AtomicNotificationQueue(Consumer&& consumer = Consumer());
~AtomicNotificationQueue() override;
@@ -205,7 +203,7 @@ class AtomicNotificationQueue : private EventBase::LoopCallback,
* Returns the number of tasks in the queue.
* Can be called from any thread.
*/
int32_t size() const;
size_t size() const;
/*
* Checks if the queue is empty.
@@ -217,7 +215,8 @@ class AtomicNotificationQueue : private EventBase::LoopCallback,
* Adds a task into the queue.
* Can be called from any thread.
*/
void putMessage(Func&& func);
template <typename T>
void putMessage(T&& task);
/*
* Detaches the queue from an EventBase.
@@ -319,6 +318,9 @@ class AtomicNotificationQueue : private EventBase::LoopCallback,
ssize_t writesObserved_{0};
ssize_t writesLocal_{0};
const pid_t pid_;
Consumer consumer_;
};
} // namespace folly
#include <folly/io/async/AtomicNotificationQueue-inl.h>
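
A standalone illustration of what the static_assert added above checks: noexcept(std::declval<Consumer>()(std::declval<Task&&>())) is true only when the consumer's call operator taking Task&& is itself declared noexcept (the types below are hypothetical):

#include <utility>

struct GoodConsumer {
  void operator()(int&&) noexcept {}
};
struct BadConsumer {
  void operator()(int&&) {}
};

// Mirrors the queue's requirement: only GoodConsumer satisfies it.
static_assert(noexcept(std::declval<GoodConsumer>()(std::declval<int&&>())), "");
static_assert(!noexcept(std::declval<BadConsumer>()(std::declval<int&&>())), "");
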
@@ -26,6 +26,7 @@
#include <mutex>
#include <thread>
#include <folly/ExceptionString.h>
#include <folly/Memory.h>
#include <folly/String.h>
#include <folly/io/async/AtomicNotificationQueue.h>
@@ -132,6 +133,14 @@ EventBaseBackend::~EventBaseBackend() {
namespace folly {
class EventBase::FuncRunner {
public:
void operator()(Func&& func) noexcept {
func();
func = nullptr;
}
};
/*
* EventBase methods
*/
@@ -680,7 +689,7 @@ bool EventBase::runLoopCallbacks() {
void EventBase::initNotificationQueue() {
// Infinite size queue
queue_ = std::make_unique<AtomicNotificationQueue>();
queue_ = std::make_unique<AtomicNotificationQueue<Func, FuncRunner>>();
// Mark this as an internal event, so event_base_loop() will return if
// there are no other events besides this one installed.
......
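
From a caller's point of view the EventBase behavior is unchanged: a Func posted from any thread is still run on the EventBase thread, now delivered through AtomicNotificationQueue<Func, FuncRunner>. A hedged sketch of that ordinary usage (not code from this diff):

#include <folly/io/async/EventBase.h>
#include <thread>

int main() {
  folly::EventBase evb;
  std::thread producer([&] {
    // Enqueues a Func on the EventBase's internal notification queue;
    // FuncRunner::operator() invokes it on the EventBase thread.
    evb.runInEventBaseThread([&] { evb.terminateLoopSoon(); });
  });
  evb.loopForever();  // returns once terminateLoopSoon() has run
  producer.join();
  return 0;
}
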
@@ -55,6 +55,7 @@ class EventBaseBackendBase;
using Cob = Func; // defined in folly/Executor.h
template <typename Task, typename Consumer>
class AtomicNotificationQueue;
template <typename MessageT>
class NotificationQueue;
@@ -853,6 +854,8 @@ class EventBase : public TimeoutManager,
}
private:
class FuncRunner;
folly::VirtualEventBase* tryGetVirtualEventBase();
void applyLoopKeepAlive();
@@ -900,7 +903,7 @@ class EventBase : public TimeoutManager,
// A notification queue for runInEventBaseThread() to use
// to send function requests to the EventBase thread.
std::unique_ptr<AtomicNotificationQueue> queue_;
std::unique_ptr<AtomicNotificationQueue<Func, FuncRunner>> queue_;
ssize_t loopKeepAliveCount_{0};
std::atomic<ssize_t> loopKeepAliveCountAtomic_{0};
bool loopKeepAliveActive_{false};
......
@@ -29,30 +29,31 @@ static size_t constexpr kMaxRead = 20;
static size_t constexpr kProducerWarmup = 1000;
static size_t constexpr kBusyLoopSize = 0;
using Task = std::pair<Func, std::shared_ptr<RequestContext>>;
struct FuncRunner {
void operator()(Func&& message) noexcept {
message();
message = nullptr;
}
};
class MockConsumer : public NotificationQueue<Task>::Consumer {
class MockConsumer : public NotificationQueue<Func>::Consumer {
public:
void messageAvailable(Task&& message) noexcept override {
RequestContextScopeGuard rctx(std::move(message.second));
message.first();
void messageAvailable(Func&& message) noexcept override {
funcRunner_(std::move(message));
}
};
void putMessageHelper(NotificationQueue<Task>& q, Func f) {
q.putMessage(Task(std::move(f), RequestContext::saveContext()));
}
private:
FuncRunner funcRunner_;
};
struct AtomicNotificationQueueConsumerAdaptor {
void startConsuming(EventBase* evb, AtomicNotificationQueue* queue) {
void startConsuming(
EventBase* evb,
AtomicNotificationQueue<Func, FuncRunner>* queue) {
queue->startConsuming(evb);
}
};
void putMessageHelper(AtomicNotificationQueue& q, Func f) {
q.putMessage(std::move(f));
}
static void burn(size_t n) {
for (size_t i = 0; i < n; ++i) {
folly::doNotOptimizeAway(i);
@@ -107,7 +108,7 @@ void multiProducerMultiConsumer(
producersWarmedUp.fetch_add(1, std::memory_order_relaxed) + 1) {
warmUpBaton.post();
}
putMessageHelper(queue, [&itemsToProcess, &finishedBaton]() {
queue.putMessage([&itemsToProcess, &finishedBaton]() {
burn(kBusyLoopSize);
if (itemsToProcess.fetch_sub(1, std::memory_order_relaxed) == 0) {
finishedBaton.post();
@@ -141,7 +142,7 @@ void multiProducerMultiConsumerNQ(
int iters,
size_t numProducers,
size_t numConsumers) {
multiProducerMultiConsumer<NotificationQueue<Task>, MockConsumer>(
multiProducerMultiConsumer<NotificationQueue<Func>, MockConsumer>(
iters, numProducers, numConsumers);
}
@@ -151,7 +152,7 @@ void multiProducerMultiConsumerANQ(
size_t numConsumers) {
CHECK(numConsumers == 1);
multiProducerMultiConsumer<
AtomicNotificationQueue,
AtomicNotificationQueue<Func, FuncRunner>,
AtomicNotificationQueueConsumerAdaptor>(
iters, numProducers, numConsumers);
}
@@ -168,13 +169,13 @@ BENCHMARK(EnqueueBenchmark, n) {
BENCHMARK(DequeueBenchmark, n) {
BenchmarkSuspender suspender;
NotificationQueue<Task> queue;
NotificationQueue<Func> queue;
EventBase base;
MockConsumer consumer;
consumer.setMaxReadAtOnce(kMaxRead);
consumer.startConsumingInternal(&base, &queue);
for (unsigned int i = 0; i < n; ++i) {
putMessageHelper(queue, []() {});
queue.putMessage([]() {});
}
suspender.dismiss();
for (unsigned int i = 0; i <= (n / kMaxRead); ++i) {
......
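
Note that the benchmark no longer threads a RequestContext through each task by hand (previously Task = std::pair<Func, std::shared_ptr<RequestContext>>): with this diff the queue's Node captures RequestContext::saveContext() at enqueue time, so the consumer only sees the Func. A standalone sketch of that save/restore handoff (the helper name is illustrative, not part of folly):

#include <memory>

#include <folly/Executor.h>
#include <folly/io/async/Request.h>

// Save the producer's RequestContext alongside the work item, then restore it
// around the call; the queue performs an equivalent handoff with the rctx
// stored in each Node.
void runWithSavedContext(
    folly::Func func, std::shared_ptr<folly::RequestContext> rctx) {
  folly::RequestContextScopeGuard guard(std::move(rctx));
  func();
}
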