Commit 5bd275f0 authored by Michael Park, committed by Facebook GitHub Bot

Introduced `folly/executors/EDFThreadPoolExecutor`.

Summary: This patch introduces an earliest-deadline-first (EDF) executor: `folly/executors/EDFThreadPoolExecutor`.
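For context, a minimal usage sketch (not part of this commit), written only against the public API declared in `EDFThreadPoolExecutor.h` below. The numeric deadlines are arbitrary illustrative values; a smaller value means an earlier deadline.

#include <folly/executors/EDFThreadPoolExecutor.h>

int main() {
  folly::EDFThreadPoolExecutor executor(/* numThreads = */ 4);

  // Among pending tasks, the one with the smallest deadline is dequeued first,
  // regardless of enqueue order.
  executor.add([] { /* urgent work */ }, /* deadline = */ 100);
  executor.add([] { /* less urgent work */ }, /* deadline = */ 200);

  // Fan-out: the same Func is invoked `total` times (possibly in parallel),
  // with all repetitions sharing one deadline.
  executor.add([] { /* one shard of work */ }, /* total = */ 8, /* deadline = */ 150);

  // An Executor view that tags everything added through it with a fixed deadline.
  auto withDeadline = executor.deadlineExecutor(/* deadline = */ 100);
  withDeadline->add([] { /* work tagged with deadline 100 */ });

  executor.join();
}

Tasks added through plain `add(Func)` get `kLatestDeadline`, i.e. no particular urgency.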

Reviewed By: interwq

Differential Revision: D13983430

fbshipit-source-id: 0df9bc4a1cdbe9059489d1e1abad88b9e6d17ad9
parent 6eecca09
/*
* Copyright 2019-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <array>
#include <atomic>
#include <chrono>
#include <cstddef>
#include <exception>
#include <limits>
#include <memory>
#include <queue>
#include <utility>
#include <vector>
#include <folly/ScopeGuard.h>
#include <folly/executors/EDFThreadPoolExecutor.h>
namespace folly {
class EDFThreadPoolExecutor::Task {
public:
explicit Task(Func&& f, int repeat, uint64_t deadline)
: f_(std::move(f)), total_(repeat), deadline_(deadline) {}
explicit Task(std::vector<Func>&& fs, uint64_t deadline)
: fs_(std::move(fs)), total_(fs_.size()), deadline_(deadline) {}
uint64_t getDeadline() const {
return deadline_;
}
bool isDone() const {
return iter_.load(std::memory_order_relaxed) >= total_;
}
int next() {
if (isDone()) {
return -1;
}
int result = iter_.fetch_add(1, std::memory_order_relaxed);
return result < total_ ? result : -1;
}
void run(int i) {
folly::RequestContextScopeGuard guard(context_);
if (f_) {
f_();
if (i >= total_ - 1) {
std::exchange(f_, nullptr);
}
} else {
DCHECK(0 <= i && i < total_);
fs_[i]();
std::exchange(fs_[i], nullptr);
}
}
Func f_;
std::vector<Func> fs_;
std::atomic<int> iter_{0};
int total_;
uint64_t deadline_;
TaskStats stats_;
std::shared_ptr<RequestContext> context_ = RequestContext::saveContext();
std::chrono::steady_clock::time_point enqueueTime_ =
std::chrono::steady_clock::now();
};
class EDFThreadPoolExecutor::TaskQueue {
public:
using TaskPtr = std::shared_ptr<Task>;
// This is not a `Synchronized` because we perform a few "peek" operations.
struct Bucket {
SharedMutex mutex;
struct Compare {
bool operator()(const TaskPtr& lhs, const TaskPtr& rhs) const {
return lhs->getDeadline() > rhs->getDeadline();
}
};
std::priority_queue<TaskPtr, std::vector<TaskPtr>, Compare> tasks;
std::atomic<bool> empty{true};
};
static constexpr std::size_t kNumBuckets = 2 << 5;
explicit TaskQueue()
: buckets_{}, curDeadline_(kLatestDeadline), numItems_(0) {}
void push(TaskPtr task) {
auto deadline = task->getDeadline();
auto& bucket = getBucket(deadline);
{
SharedMutex::WriteHolder guard(&bucket.mutex);
bucket.tasks.push(std::move(task));
bucket.empty.store(bucket.tasks.empty(), std::memory_order_relaxed);
}
numItems_.fetch_add(1, std::memory_order_seq_cst);
// Update current earliest deadline if necessary
uint64_t curDeadline = curDeadline_.load(std::memory_order_relaxed);
do {
if (curDeadline <= deadline) {
break;
}
} while (!curDeadline_.compare_exchange_weak(
curDeadline, deadline, std::memory_order_relaxed));
}
TaskPtr pop() {
bool needDeadlineUpdate = false;
for (;;) {
if (numItems_.load(std::memory_order_seq_cst) == 0) {
return nullptr;
}
auto curDeadline = curDeadline_.load(std::memory_order_relaxed);
auto& bucket = getBucket(curDeadline);
if (needDeadlineUpdate || bucket.empty.load(std::memory_order_relaxed)) {
// Try to advance to the next earliest deadline. The CAS result does not
// need to be enforced, since a concurrent insertion may legitimately win
// the race and set an earlier deadline.
// If there is no next deadline, we set deadline to `kLatestDeadline`.
curDeadline_.compare_exchange_weak(
curDeadline,
findNextDeadline(curDeadline),
std::memory_order_relaxed);
needDeadlineUpdate = false;
continue;
}
{
// Fast path. Take bucket reader lock.
SharedMutex::ReadHolder guard(&bucket.mutex);
if (bucket.tasks.empty()) {
continue;
}
const auto& task = bucket.tasks.top();
if (!task->isDone() && task->getDeadline() == curDeadline) {
return task;
}
// If the task is finished already, fall through to remove it.
}
{
// Take the writer lock to clean up the finished task.
SharedMutex::WriteHolder guard(&bucket.mutex);
if (bucket.tasks.empty()) {
continue;
}
const auto& task = bucket.tasks.top();
if (task->isDone()) {
// Current task finished. Remove from the queue.
bucket.tasks.pop();
bucket.empty.store(bucket.tasks.empty(), std::memory_order_relaxed);
numItems_.fetch_sub(1, std::memory_order_seq_cst);
}
}
// We may have finished processing the current task / bucket. Go back to
// the top of the loop to find the next bucket.
needDeadlineUpdate = true;
}
}
std::size_t size() const {
return numItems_.load(std::memory_order_seq_cst);
}
private:
Bucket& getBucket(uint64_t deadline) {
return buckets_[deadline % kNumBuckets];
}
uint64_t findNextDeadline(uint64_t prevDeadline) {
auto begin = prevDeadline % kNumBuckets;
uint64_t earliestDeadline = kLatestDeadline;
for (std::size_t i = 0; i < kNumBuckets; ++i) {
auto& bucket = buckets_[(begin + i) % kNumBuckets];
// Peek without locking first.
if (bucket.empty.load(std::memory_order_relaxed)) {
continue;
}
SharedMutex::ReadHolder guard(&bucket.mutex);
auto curDeadline = curDeadline_.load(std::memory_order_relaxed);
if (prevDeadline != curDeadline) {
// Bail out early if another thread has already changed the current deadline.
return curDeadline;
}
// Verify again after locking
if (bucket.tasks.empty()) {
continue;
}
const auto& task = bucket.tasks.top();
auto deadline = task->getDeadline();
if (deadline < earliestDeadline) {
earliestDeadline = deadline;
}
if ((deadline <= prevDeadline) ||
(deadline - prevDeadline < kNumBuckets)) {
// Found the next highest priority, or new tasks were added.
// No need to scan anymore.
break;
}
}
return earliestDeadline;
}
std::array<Bucket, kNumBuckets> buckets_;
std::atomic<uint64_t> curDeadline_;
// All operations performed on `numItems_` explicitly specify
// `std::memory_order_seq_cst` ordering, because `numItems_` takes part in
// Dekker's algorithm with `numIdleThreads_` before consumer threads
// (workers) wait on `sem_`: either the producer observes the idle count and
// posts `sem_`, or the about-to-sleep worker observes the new item, so a
// pushed task is never missed while every worker sleeps.
std::atomic<std::size_t> numItems_;
};
EDFThreadPoolExecutor::EDFThreadPoolExecutor(
std::size_t numThreads,
std::shared_ptr<ThreadFactory> threadFactory)
: ThreadPoolExecutor(numThreads, numThreads, std::move(threadFactory)),
taskQueue_(std::make_unique<TaskQueue>()) {
setNumThreads(numThreads);
}
EDFThreadPoolExecutor::~EDFThreadPoolExecutor() {
stop();
}
void EDFThreadPoolExecutor::add(Func f) {
add(std::move(f), kLatestDeadline);
}
void EDFThreadPoolExecutor::add(Func f, uint64_t deadline) {
add(std::move(f), 1, deadline);
}
void EDFThreadPoolExecutor::add(Func f, std::size_t total, uint64_t deadline) {
if (UNLIKELY(isJoin_.load(std::memory_order_relaxed) || total == 0)) {
return;
}
taskQueue_->push(std::make_shared<Task>(std::move(f), total, deadline));
auto numIdleThreads = numIdleThreads_.load(std::memory_order_seq_cst);
if (numIdleThreads > 0) {
// If idle threads are available notify them, otherwise all worker threads
// are running and will get around to this task in time.
sem_.post(std::min(total, numIdleThreads));
}
}
void EDFThreadPoolExecutor::add(std::vector<Func> fs, uint64_t deadline) {
if (UNLIKELY(fs.empty())) {
return;
}
auto total = fs.size();
taskQueue_->push(std::make_shared<Task>(std::move(fs), deadline));
auto numIdleThreads = numIdleThreads_.load(std::memory_order_seq_cst);
if (numIdleThreads > 0) {
// If idle threads are available notify them, otherwise all worker threads
// are running and will get around to this task in time.
sem_.post(std::min(total, numIdleThreads));
}
}
folly::Executor::KeepAlive<> EDFThreadPoolExecutor::deadlineExecutor(
uint64_t deadline) {
class DeadlineExecutor : public folly::Executor {
public:
static KeepAlive<> create(
uint64_t deadline,
KeepAlive<EDFThreadPoolExecutor> executor) {
return makeKeepAlive(new DeadlineExecutor(deadline, std::move(executor)));
}
void add(folly::Func f) override {
executor_->add(std::move(f), deadline_);
}
bool keepAliveAcquire() override {
const auto count =
keepAliveCount_.fetch_add(1, std::memory_order_relaxed);
DCHECK_GT(count, 0);
return true;
}
void keepAliveRelease() override {
const auto count =
keepAliveCount_.fetch_sub(1, std::memory_order_acq_rel);
DCHECK_GT(count, 0);
if (count == 1) {
delete this;
}
}
private:
DeadlineExecutor(
uint64_t deadline,
KeepAlive<EDFThreadPoolExecutor> executor)
: deadline_(deadline), executor_(std::move(executor)) {}
std::atomic<size_t> keepAliveCount_{1};
uint64_t deadline_;
KeepAlive<EDFThreadPoolExecutor> executor_;
};
return DeadlineExecutor::create(deadline, getKeepAliveToken(this));
}
void EDFThreadPoolExecutor::threadRun(ThreadPtr thread) {
this->threadPoolHook_.registerThread();
thread->startupBaton.post();
for (;;) {
auto task = take();
// Handle thread stopping
if (UNLIKELY(!task)) {
// Actually remove the thread from the list.
SharedMutex::WriteHolder w{&threadListLock_};
for (auto& o : observers_) {
o->threadStopped(thread.get());
}
threadList_.remove(thread);
stoppedThreads_.add(thread);
return;
}
int iter = task->next();
if (UNLIKELY(iter < 0)) {
// This task is already finished
continue;
}
thread->idle = false;
auto startTime = std::chrono::steady_clock::now();
task->stats_.waitTime = startTime - task->enqueueTime_;
try {
task->run(iter);
} catch (const std::exception& e) {
LOG(ERROR) << "EDFThreadPoolExecutor: func threw unhandled "
<< typeid(e).name() << " exception: " << e.what();
} catch (...) {
LOG(ERROR)
<< "EDFThreadPoolExecutor: func threw unhandled non-exception object";
}
task->stats_.runTime = std::chrono::steady_clock::now() - startTime;
thread->idle = true;
thread->lastActiveTime = std::chrono::steady_clock::now();
thread->taskStatsCallbacks->callbackList.withRLock([&](auto& callbacks) {
*thread->taskStatsCallbacks->inCallback = true;
SCOPE_EXIT {
*thread->taskStatsCallbacks->inCallback = false;
};
try {
for (auto& callback : callbacks) {
callback(task->stats_);
}
} catch (const std::exception& e) {
LOG(ERROR) << "EDFThreadPoolExecutor: task stats callback threw "
"unhandled "
<< typeid(e).name() << " exception: " << e.what();
} catch (...) {
LOG(ERROR) << "EDFThreadPoolExecutor: task stats callback threw "
"unhandled non-exception object";
}
});
}
}
// threadListLock_ is writelocked.
void EDFThreadPoolExecutor::stopThreads(std::size_t numThreads) {
threadsToStop_.fetch_add(numThreads, std::memory_order_relaxed);
sem_.post(numThreads);
}
// threadListLock_ is read (or write) locked.
std::size_t EDFThreadPoolExecutor::getPendingTaskCountImpl() const {
return taskQueue_->size();
}
bool EDFThreadPoolExecutor::shouldStop() {
// in normal cases, only do a read (prevents cache line bounces)
if (threadsToStop_.load(std::memory_order_relaxed) <= 0 ||
isJoin_.load(std::memory_order_relaxed)) {
return false;
}
// modify only if needed
if (threadsToStop_.fetch_sub(1, std::memory_order_relaxed) > 0) {
return true;
} else {
threadsToStop_.fetch_add(1, std::memory_order_relaxed);
return false;
}
}
std::shared_ptr<EDFThreadPoolExecutor::Task> EDFThreadPoolExecutor::take() {
if (UNLIKELY(shouldStop())) {
return nullptr;
}
if (auto task = taskQueue_->pop()) {
return task;
}
if (UNLIKELY(isJoin_.load(std::memory_order_relaxed))) {
return nullptr;
}
// No tasks on the horizon, so go sleep
numIdleThreads_.fetch_add(1, std::memory_order_seq_cst);
SCOPE_EXIT {
numIdleThreads_.fetch_sub(1, std::memory_order_seq_cst);
};
for (;;) {
if (UNLIKELY(shouldStop())) {
return nullptr;
}
if (auto task = taskQueue_->pop()) {
// It's possible to return a finished task here, in which case
// the worker will call this function again.
return task;
}
if (UNLIKELY(isJoin_.load(std::memory_order_relaxed))) {
return nullptr;
}
sem_.wait();
}
}
} // namespace folly
/*
* Copyright 2019-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <vector>
#include <folly/executors/SoftRealTimeExecutor.h>
#include <folly/executors/ThreadPoolExecutor.h>
#include <folly/synchronization/LifoSem.h>
namespace folly {
/**
* `EDFThreadPoolExecutor` is a `SoftRealTimeExecutor` that implements
* the earliest-deadline-first scheduling policy.
*/
class EDFThreadPoolExecutor : public SoftRealTimeExecutor,
public ThreadPoolExecutor {
public:
class Task;
class TaskQueue;
static constexpr uint64_t kEarliestDeadline = 0;
static constexpr uint64_t kLatestDeadline =
std::numeric_limits<uint64_t>::max();
explicit EDFThreadPoolExecutor(
std::size_t numThreads,
std::shared_ptr<ThreadFactory> threadFactory =
std::make_shared<NamedThreadFactory>("EDFThreadPool"));
~EDFThreadPoolExecutor() override;
using ThreadPoolExecutor::add;
void add(Func f) override;
void add(Func f, uint64_t deadline) override;
void add(Func f, std::size_t total, uint64_t deadline);
void add(std::vector<Func> fs, uint64_t deadline);
folly::Executor::KeepAlive<> deadlineExecutor(uint64_t deadline);
private:
void threadRun(ThreadPtr thread) override;
void stopThreads(std::size_t numThreads) override;
std::size_t getPendingTaskCountImpl() const override;
bool shouldStop();
std::shared_ptr<Task> take();
std::unique_ptr<TaskQueue> taskQueue_;
LifoSem sem_;
std::atomic<int> threadsToStop_{0};
// All operations performed on `numIdleThreads_` explicitly specify
// `std::memory_order_seq_cst` ordering, because `numIdleThreads_` takes
// part in Dekker's algorithm with `numItems_` before consumer threads
// (workers) wait on `sem_`.
std::atomic<std::size_t> numIdleThreads_{0};
};
} // namespace folly
/*
* Copyright 2019-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <folly/Executor.h>
namespace folly {
// `SoftRealTimeExecutor` is an executor that performs some priority-based
// scheduling with a deadline assigned to each task. __Soft__ real-time
// means that not every deadline is guaranteed to be met.
class SoftRealTimeExecutor : public virtual Executor {
public:
void add(Func) override = 0;
// Add a task with an assigned abstract deadline.
//
// NOTE: The type of `deadline` was chosen to be an integral rather than
// a typed time point or duration (e.g., `std::chrono::time_point`) to allow
// for flexibility. While the deadline for a task may be a time point,
// it could also be a duration or the size of the task, which emulates
// rate-monotonic scheduling that prioritizes small tasks. It also enables,
// for example, tiered scheduling (strictly prioritizing a category of tasks)
// by assigning the high bit of the deadline.
virtual void add(Func, uint64_t deadline) = 0;
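// Illustrative only (not part of this interface): a few ways a caller might
// encode `deadline`; the names and values below are hypothetical.
//   add(fn, nowMicros + budgetMicros);      // time-point-style deadline
//   add(fn, estimatedTaskCost);             // size-based, rate-monotonic-like
//   add(fn, (1ULL << 63) | innerDeadline);  // strictly lower tier via the high bit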
};
} // namespace folly
folly/executors/ThreadPoolExecutor.cpp
@@ -113,6 +113,11 @@ void ThreadPoolExecutor::runTask(const ThreadPtr& thread, Task&& task) {
   });
 }
 
+void ThreadPoolExecutor::add(Func, std::chrono::milliseconds, Func) {
+  throw std::runtime_error(
+      "add() with expiration is not implemented for this Executor");
+}
+
 size_t ThreadPoolExecutor::numThreads() const {
   return maxThreads_.load(std::memory_order_relaxed);
 }
folly/executors/ThreadPoolExecutor.h
@@ -14,6 +14,11 @@
  * limitations under the License.
  */
 
 #pragma once
 
+#include <algorithm>
+#include <mutex>
+#include <queue>
+
 #include <folly/DefaultKeepAliveExecutor.h>
 #include <folly/Memory.h>
 #include <folly/SharedMutex.h>
@@ -24,10 +29,6 @@
 #include <folly/portability/GFlags.h>
 #include <folly/synchronization/Baton.h>
 
-#include <algorithm>
-#include <mutex>
-#include <queue>
-
 #include <glog/logging.h>
 
 namespace folly {
@@ -64,7 +65,7 @@ class ThreadPoolExecutor : public DefaultKeepAliveExecutor {
   void add(Func func) override = 0;
   virtual void
-  add(Func func, std::chrono::milliseconds expiration, Func expireCallback) = 0;
+  add(Func func, std::chrono::milliseconds expiration, Func expireCallback);
 
   void setThreadFactory(std::shared_ptr<ThreadFactory> threadFactory) {
     CHECK(numThreads() == 0);
folly/executors/test/ThreadPoolExecutorTest.cpp
@@ -23,6 +23,7 @@
 #include <folly/Exception.h>
 #include <folly/VirtualExecutor.h>
 #include <folly/executors/CPUThreadPoolExecutor.h>
+#include <folly/executors/EDFThreadPoolExecutor.h>
 #include <folly/executors/FutureExecutor.h>
 #include <folly/executors/IOThreadPoolExecutor.h>
 #include <folly/executors/ThreadPoolExecutor.h>
@@ -50,10 +51,14 @@ TEST(ThreadPoolExecutorTest, CPUBasic) {
   basic<CPUThreadPoolExecutor>();
 }
 
-TEST(IOThreadPoolExecutorTest, IOBasic) {
+TEST(ThreadPoolExecutorTest, IOBasic) {
   basic<IOThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, EDFBasic) {
+  basic<EDFThreadPoolExecutor>();
+}
+
 template <class TPE>
 static void resize() {
   TPE tpe(100);
@@ -72,6 +77,10 @@ TEST(ThreadPoolExecutorTest, IOResize) {
   resize<IOThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, EDFResize) {
+  resize<EDFThreadPoolExecutor>();
+}
+
 template <class TPE>
 static void stop() {
   TPE tpe(1);
@@ -113,6 +122,10 @@ TEST(ThreadPoolExecutorTest, IOStop) {
   stop<IOThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, EDFStop) {
+  stop<EDFThreadPoolExecutor>();
+}
+
 template <class TPE>
 static void join() {
   TPE tpe(10);
@@ -136,6 +149,10 @@ TEST(ThreadPoolExecutorTest, IOJoin) {
   join<IOThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, EDFJoin) {
+  join<EDFThreadPoolExecutor>();
+}
+
 template <class TPE>
 static void destroy() {
   TPE tpe(1);
@@ -177,6 +194,10 @@ TEST(ThreadPoolExecutorTest, IODestroy) {
   destroy<IOThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, EDFDestroy) {
+  destroy<EDFThreadPoolExecutor>();
+}
+
 template <class TPE>
 static void resizeUnderLoad() {
   TPE tpe(10);
@@ -202,6 +223,10 @@ TEST(ThreadPoolExecutorTest, IOResizeUnderLoad) {
   resizeUnderLoad<IOThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, EDFResizeUnderLoad) {
+  resizeUnderLoad<EDFThreadPoolExecutor>();
+}
+
 template <class TPE>
 static void poolStats() {
   folly::Baton<> startBaton, endBaton;
@@ -262,6 +287,10 @@ TEST(ThreadPoolExecutorTest, IOTaskStats) {
   taskStats<IOThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, EDFTaskStats) {
+  taskStats<EDFThreadPoolExecutor>();
+}
+
 template <class TPE>
 static void expiration() {
   TPE tpe(1);
@@ -347,6 +376,10 @@ TEST(ThreadPoolExecutorTest, IOFuturePool) {
   futureExecutor<IOThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, EDFFuturePool) {
+  futureExecutor<EDFThreadPoolExecutor>();
+}
+
 TEST(ThreadPoolExecutorTest, PriorityPreemptionTest) {
   bool tookLopri = false;
   auto completed = 0;
@@ -394,11 +427,12 @@ class TestObserver : public ThreadPoolExecutor::Observer {
   std::atomic<int> threads_{0};
 };
 
-TEST(ThreadPoolExecutorTest, IOObserver) {
+template <typename TPE>
+static void testObserver() {
   auto observer = std::make_shared<TestObserver>();
 
   {
-    IOThreadPoolExecutor exe(10);
+    TPE exe(10);
     exe.addObserver(observer);
     exe.setNumThreads(3);
     exe.setNumThreads(0);
@@ -410,20 +444,16 @@ TEST(ThreadPoolExecutorTest, IOObserver) {
   observer->checkCalls();
 }
 
-TEST(ThreadPoolExecutorTest, CPUObserver) {
-  auto observer = std::make_shared<TestObserver>();
-
-  {
-    CPUThreadPoolExecutor exe(10);
-    exe.addObserver(observer);
-    exe.setNumThreads(3);
-    exe.setNumThreads(0);
-    exe.setNumThreads(7);
-    exe.removeObserver(observer);
-    exe.setNumThreads(10);
-  }
-
-  observer->checkCalls();
+TEST(ThreadPoolExecutorTest, IOObserver) {
+  testObserver<IOThreadPoolExecutor>();
+}
+
+TEST(ThreadPoolExecutorTest, CPUObserver) {
+  testObserver<CPUThreadPoolExecutor>();
+}
+
+TEST(ThreadPoolExecutorTest, EDFObserver) {
+  testObserver<EDFThreadPoolExecutor>();
 }
 
 TEST(ThreadPoolExecutorTest, AddWithPriority) {
@@ -434,6 +464,10 @@ TEST(ThreadPoolExecutorTest, AddWithPriority) {
   IOThreadPoolExecutor ioExe(10);
   EXPECT_THROW(ioExe.addWithPriority(f, 0), std::runtime_error);
 
+  // EDF exe doesn't support priorities
+  EDFThreadPoolExecutor edfExe(10);
+  EXPECT_THROW(edfExe.addWithPriority(f, 0), std::runtime_error);
+
   CPUThreadPoolExecutor cpuExe(10, 3);
   cpuExe.addWithPriority(f, -1);
   cpuExe.addWithPriority(f, 0);
@@ -715,6 +749,10 @@ TEST(ThreadPoolExecutorTest, RemoveThreadTestCPU) {
   removeThreadTest<CPUThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, RemoveThreadTestEDF) {
+  removeThreadTest<EDFThreadPoolExecutor>();
+}
+
 template <typename TPE>
 static void resizeThreadWhileExecutingTest() {
   TPE tpe(10);
@@ -746,6 +784,10 @@ TEST(ThreadPoolExecutorTest, resizeThreadWhileExecutingTestCPU) {
   resizeThreadWhileExecutingTest<CPUThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, resizeThreadWhileExecutingTestEDF) {
+  resizeThreadWhileExecutingTest<EDFThreadPoolExecutor>();
+}
+
 template <typename TPE>
 void keepAliveTest() {
   auto executor = std::make_unique<TPE>(4);
@@ -770,6 +812,10 @@ TEST(ThreadPoolExecutorTest, KeepAliveTestCPU) {
   keepAliveTest<CPUThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, KeepAliveTestEDF) {
+  keepAliveTest<EDFThreadPoolExecutor>();
+}
+
 int getNumThreadPoolExecutors() {
   int count = 0;
   ThreadPoolExecutor::withAll([&count](ThreadPoolExecutor&) { count++; });
@@ -799,6 +845,10 @@ TEST(ThreadPoolExecutorTest, registersToExecutorListTestCPU) {
   registersToExecutorListTest<CPUThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, registersToExecutorListTestEDF) {
+  registersToExecutorListTest<EDFThreadPoolExecutor>();
+}
+
 template <typename TPE>
 static void testUsesNameFromNamedThreadFactory() {
   auto ntf = std::make_shared<NamedThreadFactory>("my_executor");
@@ -814,6 +864,10 @@ TEST(ThreadPoolExecutorTest, testUsesNameFromNamedThreadFactoryCPU) {
   testUsesNameFromNamedThreadFactory<CPUThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, testUsesNameFromNamedThreadFactoryEDF) {
+  testUsesNameFromNamedThreadFactory<EDFThreadPoolExecutor>();
+}
+
 TEST(ThreadPoolExecutorTest, DynamicThreadsTest) {
   boost::barrier barrier{3};
   auto twice_waiting_task = [&] { barrier.wait(), barrier.wait(); };
@@ -933,6 +987,10 @@ TEST(ThreadPoolExecutorTest, WeakRefTestCPU) {
   WeakRefTest<CPUThreadPoolExecutor>();
 }
 
+TEST(ThreadPoolExecutorTest, WeakRefTestEDF) {
+  WeakRefTest<EDFThreadPoolExecutor>();
+}
+
 TEST(ThreadPoolExecutorTest, VirtualExecutorTestIO) {
   virtualExecutorTest<IOThreadPoolExecutor>();
 }
@@ -940,3 +998,7 @@ TEST(ThreadPoolExecutorTest, VirtualExecutorTestIO) {
 TEST(ThreadPoolExecutorTest, VirtualExecutorTestCPU) {
   virtualExecutorTest<CPUThreadPoolExecutor>();
 }
+
+TEST(ThreadPoolExecutorTest, VirtualExecutorTestEDF) {
+  virtualExecutorTest<EDFThreadPoolExecutor>();
+}