Commit 8045a2a0 authored by Kirk Shoop's avatar Kirk Shoop Committed by Facebook GitHub Bot

Add Support for nested MasterPtr and Cleanup tasks

Summary:
Allow a T that derives from EnableMasterFromThis<T> to use masterLockFromThis() to get a non-owning shared_ptr to this and to use masterRefFromThis() to get a MasterPtrRef<> from this.

Adds MasterPtr::cleanup() that returns SemiFuture<Unit>. join() just does a blocking wait on the SemiFuture returned from cleanup().

Allows a T to provide a T::cleanup() method that will be composed into the MasterPtr::cleanup() work.

MasterPtr now uses SemiFuture<Unit> instead of Baton. This allows users of MasterPtr::cleanup() to compose cleanup work with other tasks.

Andrii suggested that the cleanup feature be extracted out of MasterPtr.

Adds cleanup traits (that MasterPtr satisfies) and a Cleanup type that satisfies the cleanup traits and allows objects that are not heap-allocated to participate in structured concurrency by deriving from Cleanup.

Reviewed By: andriigrynenko

Differential Revision: D19584561

fbshipit-source-id: aa2d608effe613ec84b08f902a1c61561f3458bb
parent 4bce7f6d
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
#include <mutex>
#include <type_traits>
#include <utility>

#include <folly/futures/Future.h>

#include <glog/logging.h>
namespace folly {
// Structured Async Cleanup
//
// Structured Async Cleanup - traits
//
namespace detail {
// Invocable probe for the cleanup protocol: cleanup_fn is invocable with a T
// only when std::declval<T>().cleanup() is well-formed and returns exactly
// folly::SemiFuture<folly::Unit>. is_cleanup_v below is defined in terms of
// this invocability.
struct cleanup_fn {
  template <
      class T,
      class R = decltype(std::declval<T>().cleanup()),
      std::enable_if_t<std::is_same_v<R, folly::SemiFuture<folly::Unit>>, int> =
          0>
  R operator()(T&& t) const {
    // std::forward preserves the caller's value category (idiomatic
    // replacement for the original C-style (T&&) cast).
    return std::forward<T>(t).cleanup();
  }
};
} // namespace detail
// Trait (variable form): true when T provides a cleanup() callable on the
// given value category that returns folly::SemiFuture<folly::Unit>.
template <class T>
constexpr bool is_cleanup_v = folly::is_invocable_v<detail::cleanup_fn, T>;
// Trait (type form) wrapping is_cleanup_v, for tag dispatch and the like.
template <typename T>
using is_cleanup = std::bool_constant<is_cleanup_v<T>>;
// Structured Async Cleanup
//
// This implementation is a base class that collects a set of cleanup tasks
// and runs them in reverse order.
//
// A class derived from Cleanup
// - only allows cleanup to be run once
// - is required to complete cleanup before running the destructor
// - *should not* run cleanup tasks. Running the cleanup task should be
// delegated to the owner of the derived class
// - *should not* be owned by a shared_ptr. Cleanup is intended to remove
// shared ownership.
//
class Cleanup {
public:
Cleanup() : safe_to_destruct_(false), cleanup_(folly::makeSemiFuture()) {}
~Cleanup() {
if (!safe_to_destruct_) {
LOG(FATAL) << "Cleanup must complete before it is destructed.";
}
}
// Returns: a SemiFuture that, just like destructors, sequences the cleanup
// tasks added in reverse of the order they were added.
//
// calls to cleanup() do not mutate state. The returned SemiFuture, once
// it has been given an executor, does mutate state and must not overlap with
// any calls to addCleanup().
//
folly::SemiFuture<folly::Unit> cleanup() {
return folly::makeSemiFuture()
.deferValue([this](folly::Unit) {
if (!cleanup_.valid()) {
LOG(FATAL) << "cleanup already run - cleanup task invalid.";
}
return std::move(cleanup_);
})
.defer([this](folly::Try<folly::Unit> t) {
if (t.hasException()) {
LOG(FATAL) << "Cleanup actions must be noexcept.";
}
this->safe_to_destruct_ = true;
});
}
protected:
// includes the provided SemiFuture under the scope of this.
//
// when the cleanup() for this started it will get this SemiFuture first.
//
// order matters, just like destructors, cleanup tasks will be run in reverse
// of the order they were added.
//
// all gets will use the Executor provided to the SemiFuture returned by
// cleanup()
//
// calls to addCleanup() must not overlap with each other and must not overlap
// with a running SemiFuture returned from addCleanup().
//
void addCleanup(folly::SemiFuture<folly::Unit> c) {
if (!cleanup_.valid()) {
LOG(FATAL)
<< "Cleanup::addCleanup must not be called after Cleanup::cleanup.";
}
cleanup_ = std::move(c).deferValue(
[nested = std::move(cleanup_)](folly::Unit) mutable {
return std::move(nested);
});
}
// includes the provided model of Cleanup under the scope of this
//
// when the cleanup() for this started it will cleanup this first.
//
// order matters, just like destructors, cleanup tasks will be run in reverse
// of the order they were added.
//
// all gets will use the Executor provided to the SemiFuture returned by
// cleanup()
//
// calls to addCleanup() must not overlap with each other and must not overlap
// with a running SemiFuture returned from addCleanup().
//
template <
class OtherCleanup,
std::enable_if_t<is_cleanup_v<OtherCleanup>, int> = 0>
void addCleanup(OtherCleanup&& c) {
addCleanup(((OtherCleanup &&) c).cleanup());
}
private:
bool safe_to_destruct_;
folly::SemiFuture<folly::Unit> cleanup_;
};
} // namespace folly
......@@ -20,81 +20,106 @@
#include <mutex>
#include <folly/Function.h>
#include <folly/synchronization/Baton.h>
#include <folly/experimental/Cleanup.h>
#include <folly/futures/Future.h>
#include <glog/logging.h>
namespace folly {
template <typename T>
class EnableMasterFromThis;
template <typename T>
class MasterPtr;
template <typename T>
class MasterPtrRef;
namespace detail {
// Invocable probe: well-formed only when the argument can bind to a
// const EnableMasterFromThis<T>& - i.e. when the type publicly and
// unambiguously derives from EnableMasterFromThis.
// NOTE(review): "publically" is an existing misspelling in the identifier;
// kept as-is since renaming would touch the trait definition below.
struct publicallyDerivedFromEnableMasterFromThis_fn {
  template <class T>
  void operator()(const EnableMasterFromThis<T>&) const {}
};
} // namespace detail
// Trait (variable form): true when T publicly derives from
// EnableMasterFromThis (detected via the probe above).
template <class T>
constexpr bool is_enable_master_from_this_v = folly::
    is_invocable_v<detail::publicallyDerivedFromEnableMasterFromThis_fn, T>;
// Trait (type form) wrapping the variable template above.
template <typename T>
using is_enable_master_from_this =
    std::bool_constant<is_enable_master_from_this_v<T>>;
/**
* EnableMasterFromThis provides an object with appropriate access to the
* functionality of the MasterPtr holding this.
*/
template <typename T>
class EnableMasterFromThis {
  // NOTE(review): this class body appears to contain two generations of the
  // private set() hooks (a shared_ptr-taking pair and a Master&-taking pair),
  // as if an old/new diff was flattened into one file - confirm against the
  // repository before relying on either pair.

  // initializes outerPtrWeak_ (and the cached lockedPtrWeak_) from the
  // MasterPtr's outer shared pointer, for O deriving from this base.
  template <class O>
  static void set(
      EnableMasterFromThis<O>* that,
      const std::shared_ptr<std::shared_ptr<O>>& outerPtrShared) {
    that->outerPtrWeak_ = outerPtrShared;
    that->lockedPtrWeak_ = *outerPtrShared;
  }

  // no-op overload selected when O does not derive from EnableMasterFromThis.
  template <class O>
  static auto set(O*, const std::shared_ptr<std::shared_ptr<T>>&) ->
      typename std::enable_if<
          !std::is_base_of<EnableMasterFromThis<T>, O>::value>::type {}

  // initializes members when the MasterPtr for this is constructed
  //
  // used by the MasterPtr for this, to invoke the EnableMasterFromThis base of
  // T, if it exists.
  template <
      class O,
      class Master,
      std::enable_if_t<is_enable_master_from_this_v<O>, int> = 0>
  static void set(EnableMasterFromThis<O>* that, Master& m) {
    that->outerPtrWeak_ = m.outerPtrWeak_;
  }

  // no-op overload selected when O does not derive from EnableMasterFromThis.
  template <
      class O,
      class Master,
      std::enable_if_t<!is_enable_master_from_this_v<O>, int> = 0>
  static void set(O*, Master&) {}

 public:
  // Gets a non-owning reference to the pointer. MasterPtr::join() and the
  // MasterPtr::cleanup() work do *NOT* wait for outstanding MasterPtrRef
  // objects to be released.
  MasterPtrRef<T> masterRefFromThis() {
    return MasterPtrRef<T>(outerPtrWeak_);
  }

  // Gets a non-owning const reference to the pointer. MasterPtr::join() and
  // the MasterPtr::cleanup() work do *NOT* wait for outstanding MasterPtrRef
  // objects to be released.
  MasterPtrRef<const T> masterRefFromThis() const {
    return MasterPtrRef<const T>(outerPtrWeak_);
  }

  // Attempts to lock a pointer. Returns null if pointer is not set or if
  // MasterPtr::join() was called or the MasterPtr::cleanup() task was started
  // (even if the call to MasterPtr::join() hasn't returned yet and the
  // MasterPtr::cleanup() task has not completed yet).
  std::shared_ptr<T> masterLockFromThis() {
    if (auto outerPtr = outerPtrWeak_.lock()) {
      return *outerPtr;
    }
    return nullptr;
  }

  // const variant of masterLockFromThis(); same null conditions as above.
  // NOTE(review): `!*this` needs an operator! / operator bool that this class
  // does not declare in the visible code - it looks like a line carried over
  // from MasterPtr::lock(); confirm this compiles in the repository.
  std::shared_ptr<T const> masterLockFromThis() const {
    if (!*this) {
      return nullptr;
    }
    if (auto outerPtr = outerPtrWeak_.lock()) {
      return *outerPtr;
    }
    return nullptr;
  }

  // returns the cached weak_ptr<T>
  std::weak_ptr<T> masterWeakFromThis() noexcept {
    return lockedPtrWeak_;
  }

  // returns the cached weak_ptr<T const>
  std::weak_ptr<T const> masterWeakFromThis() const noexcept {
    return lockedPtrWeak_;
  }

 private:
  template <class>
  friend class MasterPtr;

  // weak handle to the MasterPtr's outer shared_ptr; lock()ing it yields the
  // inner shared_ptr while the MasterPtr is set and cleanup has not started.
  std::weak_ptr<std::shared_ptr<T>> outerPtrWeak_;
  // cached weak_ptr to the managed object itself (set by the first set()
  // overload above).
  std::weak_ptr<T> lockedPtrWeak_;
};
/**
......@@ -102,12 +127,26 @@ class EnableMasterFromThis {
* shared ownership.
* Once an object is managed by a MasterPtr, shared_ptrs can be obtained
* pointing to that object. However destroying those shared_ptrs will never call
* the object destructor inline. To destroy the object, join() method should be
* called on MasterPtr which will wait for all shared_ptrs to be released and
* then call the object destructor inline.
* the object destructor inline. To destroy the object, join() method must be
* called on MasterPtr or the task returned from cleanup() must be completed,
* which will wait for all shared_ptrs to be released and then call the object
* destructor on the caller supplied execution context.
*/
template <typename T>
class MasterPtr {
  // retrieves nested cleanup() work from innerPtr_. Called when the MasterPtr
  // cleanup() task has finished waiting for outstanding references.
  //
  // Overload selected when the managed type models the cleanup traits:
  // consume the object's own cleanup() task so it runs before destruction.
  template <class Cleanup, std::enable_if_t<is_cleanup_v<Cleanup>, int> = 0>
  static folly::SemiFuture<folly::Unit> getCleanup(Cleanup* cleanup) {
    return std::move(*cleanup).cleanup();
  }

  // Overload selected when the managed type has no cleanup(): nothing extra.
  template <class O, std::enable_if_t<!is_cleanup_v<O>, int> = 0>
  static folly::SemiFuture<folly::Unit> getCleanup(O*) {
    return folly::makeSemiFuture();
  }
public:
MasterPtr() = delete;
template <class T2, class Deleter>
......@@ -125,7 +164,8 @@ class MasterPtr {
}
// Attempts to lock a pointer. Returns null if pointer is not set or if join()
// was called (even if the call to join() hasn't returned yet).
// was called or the cleanup() task was started (even if the call to join()
// hasn't returned yet and the cleanup() task has not completed yet).
std::shared_ptr<T> lock() const {
if (!*this) {
return nullptr;
......@@ -143,14 +183,43 @@ class MasterPtr {
if (!*this) {
return;
}
this->cleanup().get();
}
outerPtrShared_.reset();
joinBaton_.wait();
innerPtr_.reset();
// Returns: a SemiFuture that waits until all the refereces obtained via
// lock() are released. Then destroys the object on the Executor provided to
// the SemiFuture.
//
// The returned SemiFuture must run to completion before calling set()
//
folly::SemiFuture<folly::Unit> cleanup() {
return folly::makeSemiFuture()
// clear outerPtrShared_ after cleanup is started
// to disable further calls to lock().
// then wait for outstanding references.
.deferValue([this](folly::Unit) {
if (!this->outerPtrShared_) {
LOG(FATAL)
<< "Cleanup already run - lock() was previouly disabled.";
}
this->outerPtrShared_.reset();
return std::move(this->unreferenced_);
})
// start cleanup tasks
.deferValue([this](folly::Unit) {
auto cleanup = getCleanup(innerPtr_.get());
return std::move(cleanup);
})
.defer([this](folly::Try<folly::Unit> r) {
if (r.hasException()) {
LOG(FATAL) << "Cleanup actions must be noexcept.";
}
this->innerPtr_.reset();
});
}
// Sets the pointer. Can not be called concurrently with lock() or join() or
// ref().
// ref() or while the SemiFuture returned from cleanup() is running.
template <class T2, class Deleter>
void set(std::unique_ptr<T2, Deleter> ptr) {
if (*this) {
......@@ -165,24 +234,35 @@ class MasterPtr {
innerPtr_ = std::unique_ptr<T, folly::Function<void(T*)>>{
ptr.release(),
[d = ptr.get_deleter(), rawPtr](T*) mutable { d(rawPtr); }};
joinBaton_.reset();
auto innerPtrShared =
std::shared_ptr<T>(rawPtr, [&](T*) { joinBaton_.post(); });
outerPtrShared_ =
std::make_shared<std::shared_ptr<T>>(std::move(innerPtrShared));
outerPtrWeak_ = outerPtrShared_;
EnableMasterFromThis<T>::set(rawPtr, outerPtrShared_);
auto referencesContract = folly::makePromiseContract<folly::Unit>();
unreferenced_ = std::move(std::get<1>(referencesContract));
auto innerPtrShared = std::shared_ptr<T>(
innerPtr_.get(),
[lastReference = std::move(std::get<0>(referencesContract))](
T*) mutable { lastReference.setValue(folly::Unit{}); });
outerPtrWeak_ = outerPtrShared_ =
std::make_shared<std::shared_ptr<T>>(innerPtrShared);
// attaches optional EnableMasterFromThis base of innerPtr_ to this
// MasterPtr
EnableMasterFromThis<T>::set(innerPtr_.get(), *this);
}
// Gets a non-owning reference to the pointer. join() does *NOT* wait for
// outstanding MasterPtrRef objects to be released.
// Gets a non-owning reference to the pointer. join() and the cleanup() work
// do *NOT* wait for outstanding MasterPtrRef objects to be released.
  // Constructs a non-owning MasterPtrRef from the cached weak outer pointer;
  // const because taking a ref does not affect the managed object's lifetime.
  MasterPtrRef<T> ref() const {
    return MasterPtrRef<T>(outerPtrWeak_);
  }
private:
template <class>
friend class EnableMasterFromThis;
friend class MasterPtrRef<T>;
folly::Baton<> joinBaton_;
folly::SemiFuture<folly::Unit> unreferenced_;
std::shared_ptr<std::shared_ptr<T>> outerPtrShared_;
std::weak_ptr<std::shared_ptr<T>> outerPtrWeak_;
std::unique_ptr<T, folly::Function<void(T*)>> innerPtr_;
......@@ -190,13 +270,15 @@ class MasterPtr {
/**
* MasterPtrRef is a non-owning reference to the pointer. MasterPtr::join()
* does *NOT* wait for outstanding MasterPtrRef objects to be released.
* and the MasterPtr::cleanup() work do *NOT* wait for outstanding MasterPtrRef
* objects to be released.
*/
template <typename T>
class MasterPtrRef {
public:
// Attempts to lock a pointer. Returns null if pointer is not set or if
// join() was called (even if the call to join() hasn't returned yet).
// join() was called or cleanup() work was started (even if the call to join()
// hasn't returned yet or the cleanup() work has not completed yet).
std::shared_ptr<T> lock() const {
if (auto outerPtr = outerPtrWeak_.lock()) {
return *outerPtr;
......
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/experimental/Cleanup.h>
#include <folly/executors/CPUThreadPoolExecutor.h>
#include <folly/executors/ManualExecutor.h>
#include <folly/portability/GTest.h>
using namespace std::literals::chrono_literals;
class Cleaned : public folly::Cleanup {
folly::CPUThreadPoolExecutor pool_;
public:
Cleaned() : pool_(4) {
addCleanup(
folly::makeSemiFuture().defer([this](auto&&) { this->pool_.join(); }));
}
using folly::Cleanup::addCleanup;
};
// Verifies the trait recognizes Cleaned, and that cleanup tasks run only
// while the returned SemiFuture is driven, in reverse registration order.
TEST(CleanupTest, Basic) {
  EXPECT_TRUE(folly::is_cleanup_v<Cleaned>);
  Cleaned cleaned;
  // phase 0: registration; phase 1: cleanup running; phase 2: done.
  int phase = 0;
  // index counts registrations up and is counted back down as tasks run;
  // each task captures the index it was registered with (expected).
  int index = 0;
  cleaned.addCleanup(
      folly::makeSemiFuture().deferValue([&, expected = index++](folly::Unit) {
        EXPECT_EQ(phase, 1);
        // reverse order: the task registered last runs first.
        EXPECT_EQ(--index, expected);
      }));
  cleaned.addCleanup(
      folly::makeSemiFuture().deferValue([&, expected = index++](folly::Unit) {
        EXPECT_EQ(phase, 1);
        EXPECT_EQ(--index, expected);
      }));
  // nothing has run yet - registration alone is inert.
  EXPECT_EQ(index, 2);
  folly::ManualExecutor exec;
  phase = 1;
  // drive the cleanup work to completion on the manual executor.
  cleaned.cleanup()
      .within(1s)
      .via(folly::getKeepAliveToken(exec))
      .getVia(&exec);
  phase = 2;
  // both tasks ran, counting index back to zero.
  EXPECT_EQ(index, 0);
}
// Verifies the fatal paths: a throwing cleanup task aborts (tasks must be
// noexcept), and destroying before cleanup completes aborts. EXPECT_EXIT
// forks, so each abort happens in a child process and the test continues.
TEST(CleanupTest, Errors) {
  auto cleaned = std::make_unique<Cleaned>();
  // never runs: the failing task below is registered later, so it runs first
  // and its exception propagates past this deferValue callback.
  cleaned->addCleanup(folly::makeSemiFuture().deferValue(
      [](folly::Unit) { EXPECT_TRUE(false); }));
  // a ready-with-exception task: trips the noexcept check in cleanup().
  cleaned->addCleanup(
      folly::makeSemiFuture<folly::Unit>(std::runtime_error("failed cleanup")));
  folly::ManualExecutor exec;
  EXPECT_EXIT(
      cleaned->cleanup()
          .within(1s)
          .via(folly::getKeepAliveToken(exec))
          .getVia(&exec),
      testing::KilledBySignal(SIGABRT),
      ".*noexcept.*");
  // cleanup never completed in this process, so the destructor must abort.
  EXPECT_EXIT(
      cleaned.reset(), testing::KilledBySignal(SIGABRT), ".*destructed.*");
  // must leak the Cleaned as its destructor will abort.
  (void)cleaned.release();
}
// Verifies one-shot semantics: tasks are inert until cleanup() is driven,
// addCleanup() aborts after cleanup has run, and a second cleanup() aborts.
TEST(CleanupTest, Invariants) {
  Cleaned cleaned;
  auto ranCleanup = false;
  cleaned.addCleanup(folly::makeSemiFuture().deferValue(
      [&](folly::Unit) { ranCleanup = true; }));
  // registration alone runs nothing.
  EXPECT_FALSE(ranCleanup);
  {
    folly::ManualExecutor exec;
    cleaned.cleanup()
        .within(1s)
        .via(folly::getKeepAliveToken(exec))
        .getVia(&exec);
  }
  EXPECT_TRUE(ranCleanup);
  // adding cleanup work after cleanup has completed is fatal.
  EXPECT_EXIT(
      cleaned.addCleanup(folly::makeSemiFuture().deferValue(
          [](folly::Unit) { EXPECT_TRUE(false); })),
      testing::KilledBySignal(SIGABRT),
      ".*addCleanup.*");
  {
    folly::ManualExecutor exec;
    // running cleanup a second time is fatal as well.
    EXPECT_EXIT(
        cleaned.cleanup().via(folly::getKeepAliveToken(exec)).getVia(&exec),
        testing::KilledBySignal(SIGABRT),
        ".*already.*");
  }
}
......@@ -16,16 +16,23 @@
#include <future>
#include <folly/executors/CPUThreadPoolExecutor.h>
#include <folly/executors/ManualExecutor.h>
#include <folly/experimental/MasterPtr.h>
#include <folly/portability/GTest.h>
#include <folly/synchronization/Baton.h>
using namespace std::literals::chrono_literals;
TEST(MasterPtrTest, Basic) {
EXPECT_TRUE(folly::is_cleanup_v<folly::MasterPtr<int>>);
auto ptr = std::make_unique<int>(42);
auto rawPtr = ptr.get();
folly::MasterPtr<int> masterPtr(std::move(ptr));
auto masterPtrRef = masterPtr.ref();
EXPECT_TRUE(!!masterPtr);
auto lockedPtr1 = masterPtr.lock();
auto lockedPtr2 = masterPtrRef.lock();
......@@ -35,7 +42,12 @@ TEST(MasterPtrTest, Basic) {
EXPECT_EQ(lockedPtr1.use_count(), 3);
EXPECT_EQ(lockedPtr2.use_count(), 3);
auto joinFuture = std::async(std::launch::async, [&] { masterPtr.join(); });
EXPECT_TRUE(!!masterPtr);
auto joinFuture = std::async(std::launch::async, [&] {
masterPtr.join();
EXPECT_TRUE(!masterPtr);
});
auto lockFailFuture = std::async(std::launch::async, [&] {
while (masterPtr.lock()) {
......@@ -63,14 +75,145 @@ TEST(MasterPtrTest, Basic) {
EXPECT_EQ(
joinFuture.wait_for(std::chrono::milliseconds{100}),
std::future_status::ready);
EXPECT_TRUE(!masterPtr);
ptr = std::make_unique<int>(42);
rawPtr = ptr.get();
masterPtr.set(std::move(ptr));
EXPECT_TRUE(!!masterPtr);
lockedPtr1 = masterPtr.lock();
EXPECT_EQ(lockedPtr1.get(), rawPtr);
lockedPtr1.reset();
masterPtr.join();
EXPECT_EQ(masterPtr.lock().get(), nullptr);
EXPECT_TRUE(!masterPtr);
}
struct Mastered : folly::EnableMasterFromThis<Mastered> {
// Test type exercising both mixins: structured cleanup (folly::Cleanup) and
// self-handle recovery (EnableMasterFromThis). Owns a nested MasterPtr and a
// thread pool, both registered as cleanup work.
struct Mastered : folly::Cleanup, folly::EnableMasterFromThis<Mastered> {
  folly::MasterPtr<int> nested_;
  folly::CPUThreadPoolExecutor pool_;
  Mastered() : nested_(std::make_unique<int>(42)), pool_(4) {
    // nested_ models the cleanup traits, so its cleanup() work is composed
    // into this object's cleanup.
    addCleanup(nested_);
    // the pool is joined only when the cleanup() work is driven.
    addCleanup(
        folly::makeSemiFuture().defer([this](auto&&) { this->pool_.join(); }));
  }
  // re-export the protected hook so tests can register tasks directly.
  using folly::Cleanup::addCleanup;
  // recovers an owning handle via the EnableMasterFromThis base.
  std::shared_ptr<Mastered> get_shared() {
    return masterLockFromThis();
  }
};
// Verifies that cleanup tasks registered on the managed object run in
// reverse registration order when MasterPtr::cleanup() is driven.
TEST(MasterPtrTest, BasicCleanup) {
  auto ptr = std::make_unique<Mastered>();
  folly::MasterPtr<Mastered> masterPtr(std::move(ptr));
  // phase 0: registration; phase 1: cleanup running; phase 2: done.
  int phase = 0;
  // index counts registrations up and executions back down; each task keeps
  // the index it was registered with (expected).
  int index = 0;
  masterPtr.lock()->addCleanup(
      folly::makeSemiFuture().deferValue([&, expected = index++](folly::Unit) {
        EXPECT_EQ(phase, 1);
        EXPECT_EQ(--index, expected);
      }));
  masterPtr.lock()->addCleanup(
      folly::makeSemiFuture().deferValue([&, expected = index++](folly::Unit) {
        EXPECT_EQ(phase, 1);
        EXPECT_EQ(--index, expected);
      }));
  // nothing has run yet.
  EXPECT_EQ(index, 2);
  folly::ManualExecutor exec;
  phase = 1;
  // drive MasterPtr::cleanup(), which composes the object's own cleanup work.
  masterPtr.cleanup()
      .within(1s)
      .via(folly::getKeepAliveToken(exec))
      .getVia(&exec);
  phase = 2;
  EXPECT_EQ(index, 0);
}
// Fatal paths of MasterPtr: set() while occupied, throwing cleanup task, and
// destruction before join()/cleanup(). The intentional leak below would trip
// ASAN, hence the guard.
// NOTE(review): compilers that do not define __has_feature (e.g. gcc) skip
// this test entirely, not just under ASAN - confirm that is intended.
#if defined(__has_feature)
#if !__has_feature(address_sanitizer)
TEST(MasterPtrTest, Errors) {
  auto ptr = std::make_unique<Mastered>();
  auto masterPtr = std::make_unique<folly::MasterPtr<Mastered>>(std::move(ptr));
  // never runs: the failing task below is registered later, runs first, and
  // its exception propagates past this deferValue callback.
  masterPtr->lock()->addCleanup(folly::makeSemiFuture().deferValue(
      [](folly::Unit) { EXPECT_TRUE(false); }));
  masterPtr->lock()->addCleanup(
      folly::makeSemiFuture<folly::Unit>(std::runtime_error("failed cleanup")));
  // set() requires the previous object to have been joined/cleaned up first.
  EXPECT_EXIT(
      masterPtr->set(std::unique_ptr<Mastered>{}),
      testing::KilledBySignal(SIGABRT),
      ".*joined before.*");
  folly::ManualExecutor exec;
  // the exceptional cleanup task aborts: cleanup actions must be noexcept.
  EXPECT_EXIT(
      masterPtr->cleanup()
          .within(1s)
          .via(folly::getKeepAliveToken(exec))
          .getVia(&exec),
      testing::KilledBySignal(SIGABRT),
      ".*noexcept.*");
  // cleanup never completed in this process, so the destructor aborts.
  EXPECT_EXIT(
      masterPtr.reset(), testing::KilledBySignal(SIGABRT), ".*MasterPtr.*");
  // must leak the MasterPtr as its destructor will abort.
  (void)masterPtr.release();
}
#endif
#endif
// Verifies MasterPtr cleanup invariants, including that by the time the
// managed object's destructor runs (as the last step of cleanup()) it can no
// longer register cleanup work.
TEST(MasterPtrTest, Invariants) {
  struct BadDerived : Mastered {
    ~BadDerived() {
      // cleanup has completed by destruction time, so addCleanup must abort.
      EXPECT_EXIT(
          addCleanup(folly::makeSemiFuture().deferValue(
              [](folly::Unit) { EXPECT_TRUE(false); })),
          testing::KilledBySignal(SIGABRT),
          ".*addCleanup.*");
      // and it stays fatal on a repeated attempt.
      EXPECT_EXIT(
          addCleanup(folly::makeSemiFuture().deferValue(
              [](folly::Unit) { EXPECT_TRUE(false); })),
          testing::KilledBySignal(SIGABRT),
          ".*addCleanup.*");
    }
  };
  auto ptr = std::make_unique<BadDerived>();
  folly::MasterPtr<Mastered> masterPtr(std::move(ptr));
  auto ranCleanup = false;
  masterPtr.lock()->addCleanup(folly::makeSemiFuture().deferValue(
      [&](folly::Unit) { ranCleanup = true; }));
  // registration alone runs nothing.
  EXPECT_FALSE(ranCleanup);
  {
    folly::ManualExecutor exec;
    masterPtr.cleanup()
        .within(1s)
        .via(folly::getKeepAliveToken(exec))
        .getVia(&exec);
  }
  EXPECT_TRUE(ranCleanup);
  {
    folly::ManualExecutor exec;
    // a second cleanup() is fatal - lock() was already disabled.
    EXPECT_EXIT(
        masterPtr.cleanup().via(folly::getKeepAliveToken(exec)).getVia(&exec),
        testing::KilledBySignal(SIGABRT),
        ".*already.*");
  }
}
struct Derived : Mastered {};
TEST(MasterPtrTest, EnableMasterFromThis) {
......@@ -92,17 +235,18 @@ TEST(MasterPtrTest, EnableMasterFromThis) {
EXPECT_EQ(lockedPtr3.use_count(), 4);
EXPECT_EQ(lockedPtr3.get(), rawPtr);
auto joinFuture = std::async(std::launch::async, [&] { masterPtr.join(); });
auto lockFailFuture = std::async(std::launch::async, [&] {
while (masterPtr.lock()) {
std::this_thread::yield();
}
auto cleanupFuture = std::async(std::launch::async, [&] {
folly::ManualExecutor exec;
masterPtr.cleanup()
.within(1s)
.via(folly::getKeepAliveToken(exec))
.getVia(&exec);
EXPECT_TRUE(!masterPtr);
});
EXPECT_EQ(
lockFailFuture.wait_for(std::chrono::milliseconds{100}),
std::future_status::ready);
cleanupFuture.wait_for(std::chrono::milliseconds{100}),
std::future_status::timeout);
EXPECT_EQ(lockedPtr1.use_count(), 3);
EXPECT_EQ(lockedPtr2.use_count(), 3);
......@@ -112,7 +256,7 @@ TEST(MasterPtrTest, EnableMasterFromThis) {
EXPECT_EQ(masterPtrRef.lock().get(), nullptr);
EXPECT_EQ(
joinFuture.wait_for(std::chrono::milliseconds{100}),
cleanupFuture.wait_for(std::chrono::milliseconds{100}),
std::future_status::timeout);
lockedPtr1.reset();
......@@ -120,6 +264,8 @@ TEST(MasterPtrTest, EnableMasterFromThis) {
lockedPtr3.reset();
EXPECT_EQ(
joinFuture.wait_for(std::chrono::milliseconds{100}),
cleanupFuture.wait_for(std::chrono::milliseconds{100}),
std::future_status::ready);
EXPECT_TRUE(!masterPtr);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment