Commit cd3552c1 authored by Sam Akhavan, committed by Facebook GitHub Bot

Back out "Fix a shutdown race in CPUThreadPoolExecutor::add" (#1561)

Summary:
Pull Request resolved: https://github.com/facebook/folly/pull/1561

As title.

Reviewed By: jdq

Differential Revision: D27736665

fbshipit-source-id: 2f09ea4dad25389a46faccca4a5803ea9e077e0d
parent d31902b9
@@ -14,10 +14,8 @@
  * limitations under the License.
  */
-#include <folly/Executor.h>
 #include <folly/executors/CPUThreadPoolExecutor.h>
-#include <atomic>
 #include <folly/Memory.h>
 #include <folly/concurrency/QueueObserver.h>
 #include <folly/executors/task_queue/PriorityLifoSemMPMCQueue.h>
@@ -52,7 +50,7 @@ CPUThreadPoolExecutor::CPUThreadPoolExecutor(
     std::shared_ptr<ThreadFactory> threadFactory)
     : ThreadPoolExecutor(
           numThreads,
-          FLAGS_dynamic_cputhreadpoolexecutor ? 1 : numThreads,
+          FLAGS_dynamic_cputhreadpoolexecutor ? 0 : numThreads,
           std::move(threadFactory)),
       taskQueue_(taskQueue.release()) {
   setNumThreads(numThreads);
@@ -74,7 +72,7 @@ CPUThreadPoolExecutor::CPUThreadPoolExecutor(
     size_t numThreads, std::shared_ptr<ThreadFactory> threadFactory)
     : ThreadPoolExecutor(
           numThreads,
-          FLAGS_dynamic_cputhreadpoolexecutor ? 1 : numThreads,
+          FLAGS_dynamic_cputhreadpoolexecutor ? 0 : numThreads,
           std::move(threadFactory)),
       taskQueue_(std::allocate_shared<default_queue>(default_queue_alloc{})) {
   setNumThreads(numThreads);
@@ -85,9 +83,7 @@ CPUThreadPoolExecutor::CPUThreadPoolExecutor(
     std::pair<size_t, size_t> numThreads,
     std::shared_ptr<ThreadFactory> threadFactory)
     : ThreadPoolExecutor(
-          std::max(numThreads.first, size_t{1}),
-          numThreads.second,
-          std::move(threadFactory)),
+          numThreads.first, numThreads.second, std::move(threadFactory)),
       taskQueue_(std::allocate_shared<default_queue>(default_queue_alloc{})) {
   setNumThreads(numThreads.first);
   registerThreadPoolExecutor(this);
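Note (illustrative, not part of the diff): the pair constructor above passes {maxThreads, minThreads} straight through to ThreadPoolExecutor once the std::max clamp is backed out, so a dynamic pool can again be configured with a minimum of zero idle threads. A minimal usage sketch under that assumption (the thread-factory name is arbitrary):

#include <folly/executors/CPUThreadPoolExecutor.h>
#include <folly/executors/thread_factory/NamedThreadFactory.h>
#include <memory>

int main() {
  // {maxThreads, minThreads}: up to 8 workers, allowed to shrink to 0 when idle.
  folly::CPUThreadPoolExecutor pool(
      {8, 0}, std::make_shared<folly::NamedThreadFactory>("ExamplePool"));
  pool.add([] { /* work */ });
  pool.join();  // drain queued work and stop the workers
  return 0;
}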
@@ -163,7 +159,15 @@ void CPUThreadPoolExecutor::add(Func func) {
 void CPUThreadPoolExecutor::add(
     Func func, std::chrono::milliseconds expiration, Func expireCallback) {
-  addImpl<false>(std::move(func), 0, expiration, std::move(expireCallback));
+  CPUTask task{std::move(func), expiration, std::move(expireCallback), 0};
+  if (auto queueObserver = getQueueObserver(0)) {
+    task.queueObserverPayload() =
+        queueObserver->onEnqueued(task.context_.get());
+  }
+  auto result = taskQueue_->add(std::move(task));
+  if (!result.reusedThread) {
+    ensureActiveThreads();
+  }
 }
 void CPUThreadPoolExecutor::addWithPriority(Func func, int8_t priority) {
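Note (illustrative, not part of the diff): the overload restored above is the expiring-task entry point. Roughly, if the task sits in the queue longer than the given expiration before a worker dequeues it, the expire callback runs instead of the task; a minimal usage sketch under that assumption:

#include <folly/executors/CPUThreadPoolExecutor.h>
#include <chrono>
#include <cstdio>

int main() {
  folly::CPUThreadPoolExecutor pool(1);
  pool.add(
      [] { std::puts("task ran"); },
      std::chrono::milliseconds(50),  // maximum time the task may wait in the queue
      [] { std::puts("task expired before it could run"); });
  pool.join();
  return 0;
}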
@@ -175,43 +179,15 @@ void CPUThreadPoolExecutor::add(
     int8_t priority,
     std::chrono::milliseconds expiration,
     Func expireCallback) {
-  addImpl<true>(
-      std::move(func), priority, expiration, std::move(expireCallback));
-}
-template <bool withPriority>
-void CPUThreadPoolExecutor::addImpl(
-    Func func,
-    int8_t priority,
-    std::chrono::milliseconds expiration,
-    Func expireCallback) {
-  if (withPriority) {
-    CHECK(getNumPriorities() > 0);
-  }
+  CHECK(getNumPriorities() > 0);
   CPUTask task(
       std::move(func), expiration, std::move(expireCallback), priority);
   if (auto queueObserver = getQueueObserver(priority)) {
     task.queueObserverPayload() =
         queueObserver->onEnqueued(task.context_.get());
   }
-  // It's not safe to expect that the executor is alive after a task is added to
-  // the queue (this task could be holding the last KeepAlive and when finished
-  // - it may unblock the executor shutdown).
-  // If we need executor to be alive after adding into the queue, we have to
-  // acquire a KeepAlive.
-  bool mayNeedToAddThreads = minThreads_.load(std::memory_order_relaxed) == 0 ||
-      activeThreads_.load(std::memory_order_relaxed) <
-          maxThreads_.load(std::memory_order_relaxed);
-  folly::Executor::KeepAlive<> ka = mayNeedToAddThreads
-      ? getKeepAliveToken(this)
-      : folly::Executor::KeepAlive<>{};
-  auto result = withPriority
-      ? taskQueue_->addWithPriority(std::move(task), priority)
-      : taskQueue_->add(std::move(task));
-  if (mayNeedToAddThreads && !result.reusedThread) {
+  auto result = taskQueue_->addWithPriority(std::move(task), priority);
+  if (!result.reusedThread) {
     ensureActiveThreads();
   }
 }
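Note (illustrative, not part of the diff): the comment and KeepAlive logic deleted above handled the case where the enqueued task holds the caller's last KeepAlive, so the executor may begin shutting down as soon as the task reaches the queue. A minimal sketch of that guard pattern, using a hypothetical SketchExecutor rather than folly's actual implementation:

#include <folly/DefaultKeepAliveExecutor.h>
#include <folly/MPMCQueue.h>

// Hypothetical executor used only to illustrate the reverted pattern.
class SketchExecutor : public folly::DefaultKeepAliveExecutor {
 public:
  SketchExecutor() : queue_(1024) {}
  ~SketchExecutor() override { joinKeepAlive(); }  // wait for outstanding KeepAlives

  void add(folly::Func f) override {
    // Pin the executor before enqueueing: once the task is on the queue, a
    // consumer thread may run it and release the caller's last KeepAlive,
    // after which `this` could otherwise be destroyed mid-call.
    auto ka = getKeepAliveToken(this);
    queue_.blockingWrite(std::move(f));
    // Still safe to touch members here because `ka` keeps the executor alive.
    maybeAddThreads();
  }

 private:
  void maybeAddThreads() { /* a real executor would spawn workers on demand */ }
  folly::MPMCQueue<folly::Func> queue_;
};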
@@ -162,13 +162,6 @@ class CPUThreadPoolExecutor : public ThreadPoolExecutor {
   bool tryDecrToStop();
   bool taskShouldStop(folly::Optional<CPUTask>&);
-  template <bool withPriority>
-  void addImpl(
-      Func func,
-      int8_t priority,
-      std::chrono::milliseconds expiration,
-      Func expireCallback);
   std::unique_ptr<folly::QueueObserverFactory> createQueueObserverFactory();
   QueueObserver* FOLLY_NULLABLE getQueueObserver(int8_t pri);
@@ -92,38 +92,28 @@ TEST(CleanupTest, EnsureCleanupAfterTaskBasic) {
 }
 TEST(CleanupTest, Errors) {
-  auto runTest = [](auto releaseCleanedFunc) {
-    auto cleaned = std::make_unique<Cleaned>();
+  auto cleaned = std::make_unique<Cleaned>();
-    cleaned->addCleanup(folly::makeSemiFuture().deferValue(
-        [](folly::Unit) { EXPECT_TRUE(false); }));
+  cleaned->addCleanup(folly::makeSemiFuture().deferValue(
+      [](folly::Unit) { EXPECT_TRUE(false); }));
-    cleaned->addCleanup(folly::makeSemiFuture<folly::Unit>(
-        std::runtime_error("failed cleanup")));
-    releaseCleanedFunc(cleaned);
-  };
+  cleaned->addCleanup(
+      folly::makeSemiFuture<folly::Unit>(std::runtime_error("failed cleanup")));
+  folly::ManualExecutor exec;
   EXPECT_EXIT(
-      runTest([](auto& cleaned) {
-        folly::ManualExecutor exec;
-        cleaned->cleanup()
-            .within(1s)
-            .via(folly::getKeepAliveToken(exec))
-            .getVia(&exec);
-      }),
+      cleaned->cleanup()
+          .within(1s)
+          .via(folly::getKeepAliveToken(exec))
+          .getVia(&exec),
       testing::KilledBySignal(SIGABRT),
       ".*noexcept.*");
   EXPECT_EXIT(
-      runTest([](auto& cleaned) { cleaned.reset(); }),
-      testing::KilledBySignal(SIGABRT),
-      ".*destructed.*");
+      cleaned.reset(), testing::KilledBySignal(SIGABRT), ".*destructed.*");
-  runTest([](auto& cleaned) {
-    // must leak the Cleaned as its destructor will abort.
-    (void)cleaned.release();
-  });
+  // must leak the Cleaned as its destructor will abort.
+  (void)cleaned.release();
 }
 TEST(CleanupTest, Invariants) {