Commit d42832d2 authored by Maged Michael, committed by Facebook Github Bot

Rewrite from experimental, deterministic schedule support, improvements

Summary:
For the history of the experimental version, see folly/experimental/hazptr

Added:
- Support for deterministic schedule testing.

Fixes:
- Eliminates pathological performance cases in hazptr_array and hazptr_local construction when the thread cache can only partially satisfy the needed number of hazptr_holder-s.
- try_protect now sets ptr to the value read from src even on failure. Otherwise, ptr may be used by the client code when it is invalid.

Code improvements, including:
- Using separate files for different components.
- Eliminated all macro definitions except one.
- Simplified thread local structures.

Reviewed By: davidtgoldblatt

Differential Revision: D7565357

fbshipit-source-id: fb00fad7395148e66d6231a374b5b717574ed270
parent 0e066dda
@@ -467,6 +467,13 @@ nobase_follyinclude_HEADERS = \
synchronization/AtomicStruct.h \
synchronization/Baton.h \
synchronization/CallOnce.h \
synchronization/Hazptr.h \
synchronization/Hazptr-fwd.h \
synchronization/HazptrDomain.h \
synchronization/HazptrHolder.h \
synchronization/HazptrObj.h \
synchronization/HazptrRec.h \
synchronization/HazptrThrLocal.h \
synchronization/LifoSem.h \
synchronization/MicroSpinLock.h \
synchronization/ParkingLot.h \
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <memory>
///
/// Forward declarations and implicit documentation of all hazptr
/// top-level classes, functions, macros, default values, and globals.
///
/** FOLLY_HAZPTR_THR_LOCAL */
#if FOLLY_MOBILE
#define FOLLY_HAZPTR_THR_LOCAL false
#else
#define FOLLY_HAZPTR_THR_LOCAL true
#endif
namespace folly {
///
/// Hazard pointer record.
/// Defined in HazptrRec.h
///
/** hazptr_rec */
template <template <typename> class Atom = std::atomic>
class hazptr_rec;
///
/// Classes related to objects protected by hazard pointers.
/// Defined in HazptrObj.h
///
/** hazptr_obj */
template <template <typename> class Atom = std::atomic>
class hazptr_obj;
/** hazptr_obj_base */
template <
typename T,
template <typename> class Atom = std::atomic,
typename Deleter = std::default_delete<T>>
class hazptr_obj_base;
/** hazptr_obj_base_refcounted */
template <
typename T,
template <typename> class Atom = std::atomic,
typename Deleter = std::default_delete<T>>
class hazptr_obj_base_refcounted;
///
/// Classes and functions related to thread local structures.
/// Defined in HazptrThrLocal.h
///
/** hazptr_tc_entry */
template <template <typename> class Atom = std::atomic>
class hazptr_tc_entry;
/** hazptr_tc */
template <template <typename> class Atom = std::atomic>
class hazptr_tc;
/** hazptr_tc_tls */
template <template <typename> class Atom = std::atomic>
hazptr_tc<Atom>& hazptr_tc_tls();
/** hazptr_priv */
template <template <typename> class Atom = std::atomic>
class hazptr_priv;
/** hazptr_priv_tls */
template <template <typename> class Atom = std::atomic>
hazptr_priv<Atom>& hazptr_priv_tls();
///
/// Hazard pointer domain
/// Defined in HazptrDomain.h
///
/** hazptr_domain */
template <template <typename> class Atom = std::atomic>
class hazptr_domain;
/** default_hazptr_domain */
template <template <typename> class Atom = std::atomic>
hazptr_domain<Atom>& default_hazptr_domain();
/** hazptr_domain_push_retired */
template <template <typename> class Atom = std::atomic>
void hazptr_domain_push_retired(
hazptr_obj<Atom>* head,
hazptr_obj<Atom>* tail,
int rcount,
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept;
/** hazptr_retire */
template <
template <typename> class Atom = std::atomic,
typename T,
typename D = std::default_delete<T>>
void hazptr_retire(T* obj, D reclaim = {});
/** hazptr_cleanup */
template <template <typename> class Atom = std::atomic>
void hazptr_cleanup(
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept;
/** Global default domain defined in Hazptr.cpp */
extern hazptr_domain<std::atomic> default_domain;
///
/// Classes related to hazard pointer holders.
/// Defined in HazptrHolder.h
///
/** hazptr_holder */
template <template <typename> class Atom = std::atomic>
class hazptr_holder;
/** Free function swap of hazptr_holder-s */
template <template <typename> class Atom = std::atomic>
void swap(hazptr_holder<Atom>&, hazptr_holder<Atom>&) noexcept;
/** hazptr_array */
template <uint8_t M = 1, template <typename> class Atom = std::atomic>
class hazptr_array;
/** hazptr_local */
template <uint8_t M = 1, template <typename> class Atom = std::atomic>
class hazptr_local;
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/synchronization/Hazptr.h>
#include <atomic>
namespace folly {
FOLLY_STATIC_CTOR_PRIORITY_MAX hazptr_domain<std::atomic> default_domain;
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/synchronization/HazptrDomain.h>
#include <folly/synchronization/HazptrHolder.h>
#include <folly/synchronization/HazptrObj.h>
#include <folly/synchronization/HazptrRec.h>
#include <folly/synchronization/HazptrThrLocal.h>
/// Hazard pointers is a safe reclamation method. It protects objects
/// from being reclaimed while being accessed by one or more threads, but
/// allows objects to be removed concurrently while being accessed.
///
/// What is a Hazard Pointer?
/// -------------------------
/// A hazard pointer is a single-writer multi-reader pointer that can
/// be owned by at most one thread at a time. To protect an object A
/// from being reclaimed while in use, a thread X sets one of its
/// owned hazard pointers, P, to the address of A. If P is set to &A
/// before A is removed (i.e., it becomes unreachable) then A will not be
/// reclaimed as long as P continues to hold the value &A.
///
/// Why use hazard pointers?
/// ------------------------
/// - Speed and scalability.
/// - Can be used while blocking.
///
/// When not to use hazard pointers?
/// --------------------------------
/// - When thread local data is not supported efficiently.
///
/// Basic Interface
/// ---------------
/// - In the hazptr library, raw hazard pointers are not exposed to
/// users. Instead, each instance of the class hazptr_holder owns
/// and manages at most one hazard pointer.
/// - Typically classes of objects protected by hazard pointers are
/// derived from a class template hazptr_obj_base that provides a
/// member function retire(). When an object A is removed,
/// A.retire() is called to pass responsibility for reclaiming A to
/// the hazptr library. A will be reclaimed only after it is not
/// protected by hazard pointers.
/// - The essential components of the hazptr API are:
/// o hazptr_holder: Class that owns and manages a hazard pointer.
/// o get_protected: Member function of hazptr_holder. Protects
/// an object pointed to by an atomic source (if not null).
/// T* get_protected(const atomic<T*>& src);
/// o hazptr_obj_base<T>: Base class for protected objects.
/// o retire: Member function of hazptr_obj_base that automatically
/// reclaims the object when safe.
/// void retire();
///
/// Default Domain and Default Deleters
/// -----------------------------------
/// - Most uses do not need to specify custom domains and custom
/// deleters, and by default use the default domain and default
/// deleters.
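/// - For example, retiring with a custom deleter (a sketch; the
///   deleter type D below is hypothetical):
///     struct D {
///       void operator()(Config* p); /* custom disposal of p */
///     };
///     class Config : public hazptr_obj_base<Config, std::atomic, D> {
///       /* ... */
///     };
///     config->retire(D{});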
///
/// Simple usage example
/// --------------------
/// class Config : public hazptr_obj_base<Config> {
/// /* ... details ... */
/// U get_config(V v);
/// };
///
/// std::atomic<Config*> config_;
///
/// // Called frequently
/// U get_config(V v) {
/// hazptr_holder h; /* h owns a hazard pointer */
/// Config* ptr = h.get_protected(config_);
/// /* safe to access *ptr as long as it is protected by h */
/// return ptr->get_config(v);
/// /* h dtor resets and releases the owned hazard pointer,
/// *ptr will be no longer protected by this hazard pointer */
/// }
///
/// // called rarely
/// void update_config(Config* new_config) {
/// Config* ptr = config_.exchange(new_config);
/// ptr->retire(); // Member function of hazptr_obj_base<Config>
/// }
///
/// Optimized Holders
/// -----------------
/// - The template hazptr_array<M> provides most of the functionality
/// of M hazptr_holder-s but with faster construction/destruction
/// (for M > 1), at the cost of restrictions (on move and swap).
/// - The template hazptr_local<M> provides greater speed even when
/// M=1 (~2 ns vs ~5 ns for construction/destruction) but it is
/// unsafe for the current thread to construct any other holder-type
/// objects (hazptr_holder, hazptr_array and other hazptr_local)
/// while the current instance exists.
/// - In the above example, if Config::get_config() and all of its
/// descendants are guaranteed not to use hazard pointers, then it
/// can be faster (by ~3 ns) to use
/// hazptr_local<1> h;
/// Config* ptr = h[0].get_protected(config_);
/// than
/// hazptr_holder h;
/// Config* ptr = h.get_protected(config_);
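/// - Similarly (a sketch; T, src0, and src1 stand for user code), a
///   hazptr_array<M> protects M pointers with one bulk
///   construction and destruction:
///     hazptr_array<2> h;
///     T* p0 = h[0].get_protected(src0);
///     T* p1 = h[1].get_protected(src1);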
///
/// Memory Usage
/// ------------
/// - The size of the metadata for the hazptr library is linear in the
/// number of threads using hazard pointers, assuming a constant
/// number of hazard pointers per thread, which is typical.
/// - The typical number of reclaimable but not yet reclaimed
/// objects is linear in the number of hazard pointers, which
/// typically is linear in the number of threads using hazard
/// pointers.
///
/// Alternative Safe Reclamation Methods
/// ------------------------------------
/// - Locking (exclusive or shared):
/// o Pros: simple to reason about.
/// o Cons: serialization, high reader overhead, high contention, deadlock.
/// o When to use: When speed and contention are not critical, and
/// when deadlock avoidance is simple.
/// - Reference counting (atomic shared_ptr):
/// o Pros: automatic reclamation, thread-anonymous, independent of
/// support for thread local data, immune to deadlock.
/// o Cons: high reader (and writer) overhead, high reader (and
/// writer) contention.
/// o When to use: When thread local support is lacking and deadlock
/// can be a problem, or automatic reclamation is needed.
/// - Read-copy-update (RCU):
/// o Pros: simple, fast, scalable.
/// o Cons: sensitive to blocking.
/// o When to use: When speed and scalability are important and
/// objects do not need to be protected while blocking.
///
/// Hazard Pointers vs RCU
/// ----------------------
/// - The differences between hazard pointers and RCU boil down to
/// this: hazard pointers protect specific objects, whereas RCU
/// sections protect all protectable objects.
/// - Both have comparably low overheads for protection (i.e., reading
/// or traversal), on the order of low nanoseconds.
/// - Both support effectively perfect scalability of object
/// protection by read-only operations (barring other factors).
/// - Both rely on thread local data for performance.
/// - Hazard pointers can protect objects while blocking
/// indefinitely. Hazard pointers only prevent the reclamation of
/// the objects they are protecting.
/// - RCU sections do not allow indefinite blocking, because RCU
/// prevents the reclamation of all protectable objects, which
/// otherwise would lead to deadlock and/or running out of memory.
/// - Hazard pointers can support end-to-end lock-free operations,
/// including updates (provided lock-free allocator), regardless of
/// thread delays and scheduling constraints.
/// - RCU can support wait-free read operations, but the reclamation
/// of an unbounded number of objects can be delayed for as long
/// as a single thread is delayed.
/// - The number of unreclaimed objects is bounded when protected by
/// hazard pointers, but is unbounded when protected by RCU.
/// - RCU is simpler to use than hazard pointers (except for the
/// blocking and deadlock issues mentioned above). Hazard pointers
/// need to identify protected objects, whereas RCU does not need to
/// because it protects all protectable objects.
/// - Both can protect linked structures. Hazard pointers need
/// additional link counting with low or moderate overhead for
/// update operations, and no overhead for readers. RCU protects
/// linked structures automatically, because it protects
/// everything.
///
/// Differences from the Standard Proposal
/// --------------------------------------
/// - The latest standard proposal is in wg21.link/p0566.
/// - This library's API differs from the standard proposal because:
/// (a) the standard proposal is changing based on committee
/// feedback, and (b) this library provides additional
/// fast-evolving features based on usage experience that do not
/// have corresponding proposed standard wording.
/// - The main differences are:
/// o This library uses an extra atomic template parameter for
/// testing and debugging.
/// o This library does not support a custom polymorphic allocator
/// (C++17) parameter for the hazptr_domain constructor, until
/// such support becomes widely available.
/// o The construction of empty and non-empty hazptr_holder-s is
/// reversed. This library will conform eventually.
/// o hazptr_holder member functions get_protected and reset are
/// called protect and reset_protected, respectively, in the
/// latest proposal. Will conform eventually.
/// o hazptr_array and hazptr_local are not part of the standard
/// proposal.
/// o Link counting support and protection of linked structures is
/// not part of the current standard proposal.
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/synchronization/HazptrObj.h>
#include <folly/synchronization/HazptrRec.h>
#include <folly/synchronization/HazptrThrLocal.h>
#include <folly/Portability.h>
#include <folly/Singleton.h>
#include <folly/synchronization/AsymmetricMemoryBarrier.h>
#include <atomic>
#include <chrono> // for steady_clock in try_timed_cleanup
#include <unordered_set> // for hash set in bulk_reclaim
///
/// Classes related to hazard pointer domains.
///
namespace folly {
/**
* hazptr_domain
*
* A domain manages a set of hazard pointers and a set of retired objects.
*
* Most user code need not specify any domains.
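 *
 * For code that does need a separate domain, a minimal sketch
 * (obj derives from hazptr_obj_base):
 *
 *   folly::hazptr_domain<> custom_domain;
 *   obj->retire({}, custom_domain);       // retire obj to this domain
 *   folly::hazptr_cleanup(custom_domain); // reclaim its retired objects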
*/
template <template <typename> class Atom>
class hazptr_domain {
static constexpr int kThreshold = 1000;
static constexpr int kMultiplier = 2;
static constexpr uint64_t kSyncTimePeriod{2000000000}; // nanoseconds
Atom<hazptr_rec<Atom>*> hazptrs_{nullptr};
Atom<hazptr_obj<Atom>*> retired_{nullptr};
Atom<uint64_t> sync_time_{0};
/* Using signed int for rcount_ because it may transiently be negative.
Using signed int for all integer variables that may be involved in
calculations related to the value of rcount_. */
Atom<int> hcount_{0};
Atom<int> rcount_{0};
public:
/** Constructor */
hazptr_domain() = default;
/** Destructor */
~hazptr_domain() {
reclaim_all_objects();
free_hazptr_recs();
}
hazptr_domain(const hazptr_domain&) = delete;
hazptr_domain(hazptr_domain&&) = delete;
hazptr_domain& operator=(const hazptr_domain&) = delete;
hazptr_domain& operator=(hazptr_domain&&) = delete;
public:
/** retire - nonintrusive - allocates memory */
template <typename T, typename D = std::default_delete<T>>
void retire(T* obj, D reclaim = {}) {
struct hazptr_retire_node : hazptr_obj<Atom> {
std::unique_ptr<T, D> obj_;
hazptr_retire_node(T* retireObj, D toReclaim)
: obj_{retireObj, std::move(toReclaim)} {}
};
auto node = new hazptr_retire_node(obj, std::move(reclaim));
node->reclaim_ = [](hazptr_obj<Atom>* p) {
delete static_cast<hazptr_retire_node*>(p);
};
push_retired(node, node, 1);
}
/** cleanup */
void cleanup() noexcept {
relaxed_cleanup();
}
private:
friend void hazptr_domain_push_retired<Atom>(
hazptr_obj<Atom>*,
hazptr_obj<Atom>*,
int,
hazptr_domain<Atom>&) noexcept;
friend class hazptr_holder<Atom>;
#if FOLLY_HAZPTR_THR_LOCAL
friend class hazptr_tc<Atom>;
#endif
/** hprec_acquire */
hazptr_rec<Atom>* hprec_acquire() {
auto rec = try_acquire_existing_hprec();
return rec != nullptr ? rec : acquire_new_hprec();
}
/** hprec_release */
void hprec_release(hazptr_rec<Atom>* hprec) noexcept {
hprec->release();
}
/** push_retired */
void push_retired(hazptr_obj<Atom>* head, hazptr_obj<Atom>* tail, int count) {
/*** Full fence ***/ asymmetricLightBarrier();
while (true) {
auto r = retired();
tail->set_next(r);
if (retired_.compare_exchange_weak(
r, head, std::memory_order_release, std::memory_order_acquire)) {
break;
}
}
rcount_.fetch_add(count, std::memory_order_release);
if (try_timed_cleanup()) {
return;
}
if (reached_threshold(rcount(), hcount())) {
try_bulk_reclaim();
}
}
hazptr_rec<Atom>* head() const noexcept {
return hazptrs_.load(std::memory_order_acquire);
}
hazptr_obj<Atom>* retired() const noexcept {
return retired_.load(std::memory_order_acquire);
}
int hcount() const noexcept {
return hcount_.load(std::memory_order_acquire);
}
int rcount() const noexcept {
return rcount_.load(std::memory_order_acquire);
}
bool reached_threshold(int rc, int hc) const noexcept {
return rc >= kThreshold && rc >= kMultiplier * hc;
}
void reclaim_all_objects() {
auto retired = retired_.exchange(nullptr);
while (retired) {
auto obj = retired;
while (obj) {
auto next = obj->next();
DCHECK(obj != next);
(*(obj->reclaim()))(obj);
obj = next;
}
retired = retired_.exchange(nullptr);
}
}
void free_hazptr_recs() {
/* Leak the hazard pointers for the default domain to avoid
destruction order issues with thread caches. */
if (this == &default_hazptr_domain<Atom>()) {
return;
}
auto rec = head();
while (rec) {
auto next = rec->next();
DCHECK(!rec->active());
delete rec;
rec = next;
}
}
void relaxed_cleanup() noexcept {
#if FOLLY_HAZPTR_THR_LOCAL
hazptr_obj<Atom>* h = nullptr;
hazptr_obj<Atom>* t = nullptr;
for (hazptr_priv<Atom>& priv :
hazptr_priv_singleton<Atom>::accessAllThreads()) {
priv.collect(h, t);
}
if (h) {
DCHECK(t);
push_retired(h, t, 0);
}
#endif
rcount_.store(0, std::memory_order_release);
bulk_reclaim();
}
void try_bulk_reclaim() {
auto hc = hcount();
auto rc = rcount();
if (!reached_threshold(rc, hc)) {
return;
}
rc = rcount_.exchange(0, std::memory_order_release);
if (!reached_threshold(rc, hc)) {
/* No need to add rc back to rcount_. At least one concurrent
try_bulk_reclaim will proceed to bulk_reclaim. */
return;
}
bulk_reclaim();
}
void bulk_reclaim() {
auto obj = retired_.exchange(nullptr, std::memory_order_acquire);
/*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
auto rec = hazptrs_.load(std::memory_order_acquire);
/* Part 1 - read hazard pointer values into private search structure */
std::unordered_set<const void*> hashset; // TODO: lock-free fixed hash set
for (; rec; rec = rec->next()) {
hashset.insert(rec->hazptr());
}
/* Part 2 - for each retired object, reclaim if no match */
bulk_lookup_and_reclaim(obj, hashset);
}
void bulk_lookup_and_reclaim(
hazptr_obj<Atom>* obj,
const std::unordered_set<const void*>& hashset) {
int rcount = 0;
hazptr_obj<Atom>* head = nullptr;
hazptr_obj<Atom>* tail = nullptr;
while (obj) {
auto next = obj->next();
DCHECK_NE(obj, next);
if (hashset.count(obj->raw_ptr()) == 0) {
(*(obj->reclaim()))(obj);
} else {
obj->set_next(head);
head = obj;
if (tail == nullptr) {
tail = obj;
}
++rcount;
}
obj = next;
}
if (tail) {
push_retired(head, tail, rcount);
}
}
bool try_timed_cleanup() {
uint64_t time = std::chrono::duration_cast<std::chrono::nanoseconds>(
std::chrono::steady_clock::now().time_since_epoch())
.count();
auto prevtime = sync_time_.load(std::memory_order_relaxed);
if (time < prevtime ||
!sync_time_.compare_exchange_strong(
prevtime, time + kSyncTimePeriod, std::memory_order_relaxed)) {
return false;
}
relaxed_cleanup(); // calling regular cleanup may self deadlock
return true;
}
hazptr_rec<Atom>* try_acquire_existing_hprec() {
auto rec = head();
while (rec) {
auto next = rec->next();
if (rec->try_acquire()) {
return rec;
}
rec = next;
}
return nullptr;
}
hazptr_rec<Atom>* acquire_new_hprec() {
auto rec = new hazptr_rec<Atom>;
rec->set_active();
while (true) {
auto h = head();
rec->set_next(h);
if (hazptrs_.compare_exchange_weak(
h, rec, std::memory_order_release, std::memory_order_acquire)) {
break;
}
}
hcount_.fetch_add(1);
return rec;
}
}; // hazptr_domain
/**
* Free functions related to hazptr domains
*/
/** default_hazptr_domain: Returns reference to the default domain */
template <template <typename> class Atom>
struct hazptr_default_domain_helper {
static FOLLY_ALWAYS_INLINE hazptr_domain<Atom>& get() {
static hazptr_domain<Atom> domain;
return domain;
}
};
template <>
struct hazptr_default_domain_helper<std::atomic> {
static FOLLY_ALWAYS_INLINE hazptr_domain<std::atomic>& get() {
return default_domain;
}
};
template <template <typename> class Atom>
FOLLY_ALWAYS_INLINE hazptr_domain<Atom>& default_hazptr_domain() {
return hazptr_default_domain_helper<Atom>::get();
}
/** hazptr_domain_push_retired: push a list of retired objects into a domain */
template <template <typename> class Atom>
void hazptr_domain_push_retired(
hazptr_obj<Atom>* head,
hazptr_obj<Atom>* tail,
int rcount,
hazptr_domain<Atom>& domain) noexcept {
domain.push_retired(head, tail, rcount);
}
template <template <typename> class Atom, typename T, typename D>
FOLLY_ALWAYS_INLINE void hazptr_retire(T* obj, D reclaim) {
default_hazptr_domain<Atom>().retire(obj, std::move(reclaim));
}
/** hazptr_cleanup: Reclaims all reclaimable objects retired to the domain */
template <template <typename> class Atom>
void hazptr_cleanup(hazptr_domain<Atom>& domain) noexcept {
domain.cleanup();
}
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/synchronization/HazptrDomain.h>
#include <folly/synchronization/HazptrRec.h>
#include <folly/synchronization/HazptrThrLocal.h>
#include <folly/synchronization/AsymmetricMemoryBarrier.h>
namespace folly {
///
/// Classes related to hazard pointer holders.
///
/**
* hazptr_holder
*
* Class for automatic acquisition and release of hazard pointers,
* and interface for hazard pointer operations.
*
* Usage example:
* T* ptr;
* {
* hazptr_holder h;
* ptr = h.get_protected(src);
* // ... *ptr is protected ...
* h.reset();
* // ... *ptr is not protected ...
* ptr = src.load();
* while (!h.try_protect(ptr, src)) {}
* // ... *ptr is protected ...
* }
* // ... *ptr is not protected
*/
template <template <typename> class Atom>
class hazptr_holder {
hazptr_rec<Atom>* hprec_;
hazptr_domain<Atom>* domain_;
public:
/** Constructor - automatically acquires a hazard pointer. */
FOLLY_ALWAYS_INLINE explicit hazptr_holder(
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>())
: domain_(&domain) {
#if FOLLY_HAZPTR_THR_LOCAL
if (LIKELY(&domain == &default_hazptr_domain<Atom>())) {
auto hprec = hazptr_tc_tls<Atom>().try_get();
if (LIKELY(hprec != nullptr)) {
hprec_ = hprec;
return;
}
}
#endif
hprec_ = domain_->hprec_acquire();
}
/** Empty constructor */
FOLLY_ALWAYS_INLINE explicit hazptr_holder(std::nullptr_t) noexcept
: hprec_(nullptr), domain_(nullptr) {}
/** Move constructor */
FOLLY_ALWAYS_INLINE hazptr_holder(hazptr_holder&& rhs) noexcept {
domain_ = rhs.domain_;
hprec_ = rhs.hprec_;
rhs.domain_ = nullptr;
rhs.hprec_ = nullptr;
}
hazptr_holder(const hazptr_holder&) = delete;
hazptr_holder& operator=(const hazptr_holder&) = delete;
/** Destructor */
FOLLY_ALWAYS_INLINE ~hazptr_holder() {
if (LIKELY(hprec_ != nullptr)) {
DCHECK(domain_ != nullptr);
hprec_->reset_hazptr();
#if FOLLY_HAZPTR_THR_LOCAL
if (LIKELY(domain_ == &default_hazptr_domain<Atom>())) {
if (LIKELY(hazptr_tc_tls<Atom>().try_put(hprec_))) {
return;
}
}
#endif
domain_->hprec_release(hprec_);
}
}
/** Move operator */
FOLLY_ALWAYS_INLINE hazptr_holder& operator=(hazptr_holder&& rhs) noexcept {
/* Self-move is a no-op. */
if (LIKELY(this != &rhs)) {
this->~hazptr_holder();
new (this) hazptr_holder(nullptr);
domain_ = rhs.domain_;
hprec_ = rhs.hprec_;
rhs.domain_ = nullptr;
rhs.hprec_ = nullptr;
}
return *this;
}
/** Hazard pointer operations */
/** try_protect */
template <typename T>
FOLLY_ALWAYS_INLINE bool try_protect(T*& ptr, const Atom<T*>& src) noexcept {
return try_protect(ptr, src, [](T* t) { return t; });
}
template <typename T, typename Func>
FOLLY_ALWAYS_INLINE bool
try_protect(T*& ptr, const Atom<T*>& src, Func f) noexcept {
/* Filtering the protected pointer through function Func is useful
for stealing bits of the pointer word */
auto p = ptr;
reset(f(p));
/*** Full fence ***/ folly::asymmetricLightBarrier();
ptr = src.load(std::memory_order_acquire);
if (UNLIKELY(p != ptr)) {
reset();
return false;
}
return true;
}
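/* A sketch of using a filter function, assuming the low bit of the
   pointer word is stolen as a mark bit; the filter clears the bit so
   that the hazard pointer holds the real address (h is a
   hazptr_holder, src an Atom<T*>):
     T* ptr = src.load(std::memory_order_relaxed);
     while (!h.try_protect(ptr, src, [](T* p) {
       return reinterpret_cast<T*>(
           reinterpret_cast<uintptr_t>(p) & ~uintptr_t(1));
     })) {
     }
*/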
/** get_protected */
template <typename T>
FOLLY_ALWAYS_INLINE T* get_protected(const Atom<T*>& src) noexcept {
return get_protected(src, [](T* t) { return t; });
}
template <typename T, typename Func>
FOLLY_ALWAYS_INLINE T* get_protected(const Atom<T*>& src, Func f) noexcept {
T* ptr = src.load(std::memory_order_relaxed);
while (!try_protect(ptr, src, f)) {
/* Keep trying */;
}
return ptr;
}
/** reset */
template <typename T>
FOLLY_ALWAYS_INLINE void reset(const T* ptr) noexcept {
auto p = static_cast<hazptr_obj<Atom>*>(const_cast<T*>(ptr));
DCHECK(hprec_); // UB if *this is empty
hprec_->reset_hazptr(p);
}
FOLLY_ALWAYS_INLINE void reset(std::nullptr_t = nullptr) noexcept {
DCHECK(hprec_); // UB if *this is empty
hprec_->reset_hazptr();
}
/* Swap ownership of hazard pointers between hazptr_holder-s. */
/* Note: The owned hazard pointers remain unmodified during the swap
* and continue to protect the respective objects that they were
* protecting before the swap, if any. */
FOLLY_ALWAYS_INLINE void swap(hazptr_holder<Atom>& rhs) noexcept {
std::swap(this->domain_, rhs.domain_);
std::swap(this->hprec_, rhs.hprec_);
}
/** Returns a pointer to the owned hazptr_rec */
FOLLY_ALWAYS_INLINE hazptr_rec<Atom>* hprec() const noexcept {
return hprec_;
}
/** Set the pointer to the owned hazptr_rec */
FOLLY_ALWAYS_INLINE void set_hprec(hazptr_rec<Atom>* hprec) noexcept {
hprec_ = hprec;
}
/** Set the pointer to the domain for the owned hazptr_rec */
void set_domain(hazptr_domain<Atom>* domain) noexcept {
domain_ = domain;
}
}; // hazptr_holder
/**
* Free function swap of hazptr_holder-s.
*/
template <template <typename> class Atom>
FOLLY_ALWAYS_INLINE void swap(
hazptr_holder<Atom>& lhs,
hazptr_holder<Atom>& rhs) noexcept {
lhs.swap(rhs);
}
/**
* Type used by hazptr_array and hazptr_local.
*/
template <template <typename> class Atom>
using aligned_hazptr_holder = typename std::aligned_storage<
sizeof(hazptr_holder<Atom>),
alignof(hazptr_holder<Atom>)>::type;
/**
* hazptr_array
*
* Optimized template for bulk construction and destruction of hazard
* pointers.
*
* WARNING: Do not move from or to individual hazptr_holder-s.
* Only move the whole hazptr_array.
*/
template <uint8_t M, template <typename> class Atom>
class hazptr_array {
static_assert(M > 0, "M must be a positive integer.");
aligned_hazptr_holder<Atom> raw_[M];
bool empty_{false};
public:
/** Constructor */
FOLLY_ALWAYS_INLINE hazptr_array() {
auto h = reinterpret_cast<hazptr_holder<Atom>*>(&raw_);
#if FOLLY_HAZPTR_THR_LOCAL
static_assert(
M <= hazptr_tc<Atom>::capacity(),
"M must be within the thread cache capacity.");
auto& tc = hazptr_tc_tls<Atom>();
auto count = tc.count();
if (UNLIKELY(M > count)) {
tc.fill(M - count);
count = M;
}
uint8_t offset = count - M;
for (uint8_t i = 0; i < M; ++i) {
auto hprec = tc[offset + i].get();
DCHECK(hprec != nullptr);
new (&h[i]) hazptr_holder<Atom>(nullptr);
h[i].set_hprec(hprec);
}
tc.set_count(offset);
#else
for (uint8_t i = 0; i < M; ++i) {
new (&h[i]) hazptr_holder<Atom>;
}
#endif
}
/** Empty constructor */
FOLLY_ALWAYS_INLINE hazptr_array(std::nullptr_t) noexcept {
auto h = reinterpret_cast<hazptr_holder<Atom>*>(&raw_);
for (uint8_t i = 0; i < M; ++i) {
new (&h[i]) hazptr_holder<Atom>(nullptr);
}
empty_ = true;
}
/** Move constructor */
FOLLY_ALWAYS_INLINE hazptr_array(hazptr_array&& other) noexcept {
auto h = reinterpret_cast<hazptr_holder<Atom>*>(&raw_);
auto hother = reinterpret_cast<hazptr_holder<Atom>*>(&other.raw_);
for (uint8_t i = 0; i < M; ++i) {
new (&h[i]) hazptr_holder<Atom>(std::move(hother[i]));
}
empty_ = other.empty_;
other.empty_ = true;
}
hazptr_array(const hazptr_array&) = delete;
hazptr_array& operator=(const hazptr_array&) = delete;
/** Destructor */
FOLLY_ALWAYS_INLINE ~hazptr_array() {
if (empty_) {
return;
}
auto h = reinterpret_cast<hazptr_holder<Atom>*>(&raw_);
#if FOLLY_HAZPTR_THR_LOCAL
auto& tc = hazptr_tc_tls<Atom>();
auto count = tc.count();
auto cap = hazptr_tc<Atom>::capacity();
if (UNLIKELY((M + count) > cap)) {
tc.evict((M + count) - cap);
count = cap - M;
}
for (uint8_t i = 0; i < M; ++i) {
h[i].reset();
tc[count + i].fill(h[i].hprec());
new (&h[i]) hazptr_holder<Atom>(nullptr);
}
tc.set_count(count + M);
#else
for (uint8_t i = 0; i < M; ++i) {
h[i].~hazptr_holder();
}
#endif
}
/** Move operator */
FOLLY_ALWAYS_INLINE hazptr_array& operator=(hazptr_array&& other) noexcept {
auto h = reinterpret_cast<hazptr_holder<Atom>*>(&raw_);
for (uint8_t i = 0; i < M; ++i) {
h[i] = std::move(other[i]);
}
empty_ = other.empty_;
other.empty_ = true;
return *this;
}
/** [] operator */
FOLLY_ALWAYS_INLINE hazptr_holder<Atom>& operator[](uint8_t i) noexcept {
auto h = reinterpret_cast<hazptr_holder<Atom>*>(&raw_);
DCHECK(i < M);
return h[i];
}
}; // hazptr_array
/**
* hazptr_local
*
* Optimized for construction and destruction of one or more
* hazptr_holder-s with local scope.
*
* WARNING 1: Do not move from or to individual hazptr_holder-s.
*
* WARNING 2: There can only be one hazptr_local active for the same
* thread at any time. This is not tracked and checked by the
* implementation (except in debug mode) because it would negate the
* performance gains of this class.
*/
template <uint8_t M, template <typename> class Atom>
class hazptr_local {
static_assert(M > 0, "M must be a positive integer.");
aligned_hazptr_holder<Atom> raw_[M];
public:
/** Constructor */
FOLLY_ALWAYS_INLINE hazptr_local() {
auto h = reinterpret_cast<hazptr_holder<Atom>*>(&raw_);
#if FOLLY_HAZPTR_THR_LOCAL
static_assert(
M <= hazptr_tc<Atom>::capacity(),
"M must be <= hazptr_tc::capacity().");
auto& tc = hazptr_tc_tls<Atom>();
auto count = tc.count();
if (UNLIKELY(M > count)) {
tc.fill(M - count);
}
if (kIsDebug) {
DCHECK(!tc.local());
tc.set_local(true);
}
for (uint8_t i = 0; i < M; ++i) {
auto hprec = tc[i].get();
DCHECK(hprec != nullptr);
new (&h[i]) hazptr_holder<Atom>(nullptr);
h[i].set_hprec(hprec);
}
#else
for (uint8_t i = 0; i < M; ++i) {
new (&h[i]) hazptr_holder<Atom>;
}
#endif
}
hazptr_local(const hazptr_local&) = delete;
hazptr_local& operator=(const hazptr_local&) = delete;
hazptr_local(hazptr_local&&) = delete;
hazptr_local& operator=(hazptr_local&&) = delete;
/** Destructor */
FOLLY_ALWAYS_INLINE ~hazptr_local() {
auto h = reinterpret_cast<hazptr_holder<Atom>*>(&raw_);
#if FOLLY_HAZPTR_THR_LOCAL
if (kIsDebug) {
auto& tc = hazptr_tc_tls<Atom>();
DCHECK(tc.local());
tc.set_local(false);
}
for (uint8_t i = 0; i < M; ++i) {
h[i].reset();
}
#else
for (uint8_t i = 0; i < M; ++i) {
h[i].~hazptr_holder();
}
#endif
}
/** [] operator */
FOLLY_ALWAYS_INLINE hazptr_holder<Atom>& operator[](uint8_t i) noexcept {
auto h = reinterpret_cast<hazptr_holder<Atom>*>(&raw_);
DCHECK(i < M);
return h[i];
}
}; // hazptr_local
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/CPortability.h>
#include <folly/Portability.h>
#include <glog/logging.h>
#include <atomic>
#include <memory>
///
/// Classes related to objects protected by hazard pointers.
///
namespace folly {
/**
* hazptr_obj
*
* Object protected by hazard pointers.
*/
template <template <typename> class Atom>
class hazptr_obj {
using ReclaimFnPtr = void (*)(hazptr_obj*);
ReclaimFnPtr reclaim_;
hazptr_obj<Atom>* next_;
public:
/** Constructors */
/* All constructors set next_ to this in order to catch misuse bugs
such as double retire. */
hazptr_obj() noexcept : next_(this) {}
hazptr_obj(const hazptr_obj<Atom>&) noexcept : next_(this) {}
hazptr_obj(hazptr_obj<Atom>&&) noexcept : next_(this) {}
/** Copy operator */
hazptr_obj<Atom>& operator=(const hazptr_obj<Atom>&) noexcept {
return *this;
}
/** Move operator */
hazptr_obj<Atom>& operator=(hazptr_obj<Atom>&&) noexcept {
return *this;
}
private:
friend class hazptr_domain<Atom>;
template <typename, template <typename> class, typename>
friend class hazptr_obj_base;
template <typename, template <typename> class, typename>
friend class hazptr_obj_base_refcounted;
friend class hazptr_priv<Atom>;
hazptr_obj<Atom>* next() const noexcept {
return next_;
}
void set_next(hazptr_obj* obj) noexcept {
next_ = obj;
}
ReclaimFnPtr reclaim() noexcept {
return reclaim_;
}
const void* raw_ptr() const {
return this;
}
void pre_retire_check() noexcept {
// Only for catching misuse bugs like double retire
if (next_ != this) {
pre_retire_check_fail();
}
}
void do_retire(hazptr_domain<Atom>& domain) {
#if FOLLY_HAZPTR_THR_LOCAL
if (&domain == &default_hazptr_domain<Atom>()) {
hazptr_priv_tls<Atom>().push(this);
return;
}
#endif
hazptr_domain_push_retired(this, this, 1, domain);
}
FOLLY_NOINLINE void pre_retire_check_fail() noexcept {
CHECK_EQ(next_, this);
}
}; // hazptr_obj
/**
* hazptr_obj_base
*
* Base template for objects protected by hazard pointers.
*/
template <typename T, template <typename> class Atom, typename D>
class hazptr_obj_base : public hazptr_obj<Atom> {
D deleter_; // TODO: EBO
public:
/* Retire a removed object and pass the responsibility for
* reclaiming it to the hazptr library */
void retire(
D deleter = {},
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) {
pre_retire(std::move(deleter));
set_reclaim();
this->do_retire(domain); // defined in hazptr_obj
}
void retire(hazptr_domain<Atom>& domain) {
retire({}, domain);
}
private:
void pre_retire(D deleter) {
this->pre_retire_check(); // defined in hazptr_obj
deleter_ = std::move(deleter);
}
void set_reclaim() {
this->reclaim_ = [](hazptr_obj<Atom>* p) {
auto hobp = static_cast<hazptr_obj_base<T, Atom, D>*>(p);
auto obj = static_cast<T*>(hobp);
hobp->deleter_(obj);
};
}
}; // hazptr_obj_base
/**
* hazptr_obj_base_refcounted
*
* Base template for reference counted objects protected by hazard
* pointers.
*/
template <typename T, template <typename> class Atom, typename D>
class hazptr_obj_base_refcounted : public hazptr_obj<Atom> {
Atom<uint32_t> refcount_{0};
D deleter_;
public:
/* Retire a removed object and pass the responsibility for
* reclaiming it to the hazptr library */
void retire(
D deleter = {},
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) {
this->pre_retire(std::move(deleter)); // defined in hazptr_obj
set_reclaim();
this->do_retire(domain); // defined in hazptr_obj
}
void retire(hazptr_domain<Atom>& domain) {
retire({}, domain);
}
/* Increments the reference count. */
void acquire_ref() noexcept {
refcount_.fetch_add(1u, std::memory_order_acq_rel);
}
/* The same as acquire_ref() except that in addition the caller
* guarantees that the call is made in a thread-safe context, e.g.,
* the object is not yet shared. This is just an optimization to
* save an atomic read-modify-write operation. */
void acquire_ref_safe() noexcept {
auto oldval = refcount_.load(std::memory_order_acquire);
refcount_.store(oldval + 1u, std::memory_order_release);
}
/* Decrements the reference count and returns true if the object is
* safe to reclaim. */
bool release_ref() noexcept {
auto oldval = refcount_.load(std::memory_order_acquire);
if (oldval > 0u) {
oldval = refcount_.fetch_sub(1u, std::memory_order_acq_rel);
} else {
if (kIsDebug) {
refcount_.store(~0u);
}
}
return oldval == 0;
}
private:
void pre_retire(D deleter) {
this->pre_retire_check(); // defined in hazptr_obj
deleter_ = std::move(deleter);
}
void set_reclaim() {
this->reclaim_ = [](hazptr_obj<Atom>* p) {
auto hrobp = static_cast<hazptr_obj_base_refcounted<T, Atom, D>*>(p);
if (hrobp->release_ref()) {
auto obj = static_cast<T*>(hrobp);
hrobp->deleter_(obj);
}
};
}
}; // hazptr_obj_base_refcounted
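/* A sketch of the intended lifecycle, with a hypothetical type
 * NodeRC derived from hazptr_obj_base_refcounted<NodeRC>:
 *   auto p = new NodeRC();
 *   p->acquire_ref_safe(); // before p is shared
 *   // ... shared use; threads pair acquire_ref()/release_ref() ...
 *   p->release_ref();      // drop the initial reference
 *   p->retire();           // reclaimed once unprotected and the
 *                          // reclaim function's release_ref()
 *                          // returns true
 */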
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/concurrency/CacheLocality.h>
#include <atomic>
namespace folly {
/**
* hazptr_rec:
*
* Contains the actual hazard pointer.
*/
template <template <typename> class Atom>
class alignas(hardware_destructive_interference_size) hazptr_rec {
Atom<const void*> hazptr_{nullptr}; // the hazard pointer
hazptr_rec* next_{nullptr};
Atom<bool> active_{false};
friend class hazptr_domain<Atom>;
friend class hazptr_holder<Atom>;
friend class hazptr_tc_entry<Atom>;
const void* hazptr() const noexcept {
return hazptr_.load(std::memory_order_acquire);
}
FOLLY_ALWAYS_INLINE void reset_hazptr(const void* p = nullptr) noexcept {
hazptr_.store(p, std::memory_order_release);
}
bool active() const noexcept {
return active_.load(std::memory_order_acquire);
}
void set_active() noexcept {
active_.store(true, std::memory_order_relaxed);
}
bool try_acquire() noexcept {
bool a = active();
return !a &&
active_.compare_exchange_strong(
a, true, std::memory_order_release, std::memory_order_relaxed);
}
void release() noexcept {
active_.store(false, std::memory_order_release);
}
hazptr_rec<Atom>* next() {
return next_;
}
void set_next(hazptr_rec<Atom>* rec) {
next_ = rec;
}
}; // hazptr_rec
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr-fwd.h>
#if FOLLY_HAZPTR_THR_LOCAL
#include <folly/synchronization/HazptrObj.h>
#include <folly/synchronization/HazptrRec.h>
#include <folly/SingletonThreadLocal.h>
#include <glog/logging.h>
#include <atomic>
/**
* Thread local classes and singletons
*/
namespace folly {
/**
* hazptr_tc_entry
*
* Thread cache entry.
*/
template <template <typename> class Atom>
class hazptr_tc_entry {
hazptr_rec<Atom>* hprec_;
template <uint8_t, template <typename> class>
friend class hazptr_array;
template <uint8_t, template <typename> class>
friend class hazptr_local;
friend class hazptr_tc<Atom>;
FOLLY_ALWAYS_INLINE void fill(hazptr_rec<Atom>* hprec) noexcept {
hprec_ = hprec;
}
FOLLY_ALWAYS_INLINE hazptr_rec<Atom>* get() const noexcept {
return hprec_;
}
void evict() {
hprec_->release();
}
}; // hazptr_tc_entry
/**
* hazptr_tc:
*
* Thread cache of hazptr_rec-s that belong to the default domain.
*/
template <template <typename> class Atom>
class hazptr_tc {
static constexpr uint8_t kCapacity = 3;
hazptr_tc_entry<Atom> entry_[kCapacity];
uint8_t count_{0};
bool local_{false}; // for debug mode only
public:
~hazptr_tc() {
for (uint8_t i = 0; i < count(); ++i) {
entry_[i].evict();
}
}
static constexpr uint8_t capacity() noexcept {
return kCapacity;
}
private:
template <uint8_t, template <typename> class>
friend class hazptr_array;
friend class hazptr_holder<Atom>;
template <uint8_t, template <typename> class>
friend class hazptr_local;
FOLLY_ALWAYS_INLINE
hazptr_tc_entry<Atom>& operator[](uint8_t i) noexcept {
DCHECK(i <= capacity());
return entry_[i];
}
FOLLY_ALWAYS_INLINE hazptr_rec<Atom>* try_get() noexcept {
if (LIKELY(count_ > 0)) {
auto hprec = entry_[--count_].get();
return hprec;
}
return nullptr;
}
FOLLY_ALWAYS_INLINE bool try_put(hazptr_rec<Atom>* hprec) noexcept {
if (LIKELY(count_ < capacity())) {
entry_[count_++].fill(hprec);
return true;
}
return false;
}
FOLLY_ALWAYS_INLINE uint8_t count() const noexcept {
return count_;
}
FOLLY_ALWAYS_INLINE void set_count(uint8_t val) noexcept {
count_ = val;
}
FOLLY_NOINLINE void fill(uint8_t num) {
DCHECK_LE(count_ + num, capacity());
auto& domain = default_hazptr_domain<Atom>();
for (uint8_t i = 0; i < num; ++i) {
auto hprec = domain.hprec_acquire();
entry_[count_++].fill(hprec);
}
}
FOLLY_NOINLINE void evict(uint8_t num) {
DCHECK_GE(count_, num);
for (uint8_t i = 0; i < num; ++i) {
entry_[--count_].evict();
}
}
bool local() const noexcept { // for debugging only
return local_;
}
void set_local(bool b) noexcept { // for debugging only
local_ = b;
}
}; // hazptr_tc
/** hazptr_tc_tls */
template <template <typename> class Atom>
FOLLY_ALWAYS_INLINE hazptr_tc<Atom>& hazptr_tc_tls() {
return folly::SingletonThreadLocal<hazptr_tc<Atom>, void>::get();
}
/**
* hazptr_priv
*
* Per-thread list of retired objects to be pushed in bulk to domain.
*/
template <template <typename> class Atom>
class hazptr_priv {
static constexpr int kThreshold = 20;
Atom<hazptr_obj<Atom>*> head_;
Atom<hazptr_obj<Atom>*> tail_;
int rcount_;
public:
hazptr_priv() : head_(nullptr), tail_(nullptr), rcount_(0) {}
~hazptr_priv() {
if (!empty()) {
push_all_to_domain();
}
}
private:
friend class hazptr_domain<Atom>;
friend class hazptr_obj<Atom>;
bool empty() const noexcept {
return head() == nullptr;
}
void push(hazptr_obj<Atom>* obj) {
while (true) {
if (tail()) {
if (push_in_non_empty_list(obj)) {
break;
}
} else {
if (push_in_empty_list(obj)) {
break;
}
}
}
if (++rcount_ >= kThreshold) {
push_all_to_domain();
}
}
void push_all_to_domain() {
hazptr_obj<Atom>* h = nullptr;
hazptr_obj<Atom>* t = nullptr;
collect(h, t);
if (h) {
DCHECK(t);
hazptr_domain_push_retired<Atom>(h, t, rcount_);
rcount_ = 0;
}
}
void collect(
hazptr_obj<Atom>*& colHead,
hazptr_obj<Atom>*& colTail) noexcept {
// This function doesn't change rcount_.
// The value rcount_ is accurate excluding the effects of calling collect().
auto h = exchange_head();
if (h) {
auto t = exchange_tail();
DCHECK(t);
if (colTail) {
colTail->set_next(h);
} else {
colHead = h;
}
colTail = t;
}
}
hazptr_obj<Atom>* head() const noexcept {
return head_.load(std::memory_order_acquire);
}
hazptr_obj<Atom>* tail() const noexcept {
return tail_.load(std::memory_order_acquire);
}
void set_head(hazptr_obj<Atom>* obj) noexcept {
head_.store(obj, std::memory_order_release);
}
bool cas_head(hazptr_obj<Atom>* expected, hazptr_obj<Atom>* obj) noexcept {
return head_.compare_exchange_weak(
expected, obj, std::memory_order_acq_rel, std::memory_order_relaxed);
}
bool cas_tail(hazptr_obj<Atom>* expected, hazptr_obj<Atom>* obj) noexcept {
return tail_.compare_exchange_weak(
expected, obj, std::memory_order_acq_rel, std::memory_order_relaxed);
}
hazptr_obj<Atom>* exchange_head() noexcept {
return head_.exchange(nullptr, std::memory_order_acq_rel);
}
hazptr_obj<Atom>* exchange_tail() noexcept {
return tail_.exchange(nullptr, std::memory_order_acq_rel);
}
bool push_in_non_empty_list(hazptr_obj<Atom>* obj) noexcept {
auto h = head();
if (h) {
obj->set_next(h);
if (cas_head(h, obj)) {
return true;
}
}
return false;
}
bool push_in_empty_list(hazptr_obj<Atom>* obj) noexcept {
hazptr_obj<Atom>* t = nullptr;
obj->set_next(nullptr);
if (cas_tail(t, obj)) {
set_head(obj);
return true;
}
return false;
}
}; // hazptr_priv
/** hazptr_priv_tls */
struct HazptrTag {};
template <template <typename> class Atom>
using hazptr_priv_singleton =
folly::SingletonThreadLocal<hazptr_priv<Atom>, HazptrTag>;
template <template <typename> class Atom>
FOLLY_ALWAYS_INLINE hazptr_priv<Atom>& hazptr_priv_tls() {
return hazptr_priv_singleton<Atom>::get();
}
} // namespace folly
#endif // FOLLY_HAZPTR_THR_LOCAL
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr.h>
namespace folly {
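/** Lock-free LIFO (a Treiber-style stack) protected by hazard pointers.
 *
 * Usage sketch:
 *   HazptrLockFreeLIFO<int> stack;
 *   stack.push(1);
 *   int v;
 *   bool nonempty = stack.pop(v); // false iff the stack was empty
 */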
template <typename T, template <typename> class Atom = std::atomic>
class HazptrLockFreeLIFO {
struct Node;
Atom<Node*> head_;
public:
HazptrLockFreeLIFO() : head_(nullptr) {}
~HazptrLockFreeLIFO() {
Node* next;
for (auto node = head(); node; node = next) {
next = node->next();
node->retire();
}
hazptr_cleanup<Atom>();
}
void push(T val) {
auto node = new Node(val, head());
while (!cas_head(node->next_, node)) {
/* try again */;
}
}
bool pop(T& val) {
hazptr_local<1, Atom> h;
hazptr_holder<Atom>& hptr = h[0];
Node* node;
while (true) {
node = hptr.get_protected(head_);
if (node == nullptr) {
return false;
}
auto next = node->next();
if (cas_head(node, next)) {
break;
}
}
hptr.reset();
val = node->value();
node->retire();
return true;
}
private:
Node* head() {
return head_.load(std::memory_order_acquire);
}
bool cas_head(Node*& expected, Node* newval) {
return head_.compare_exchange_weak(
expected, newval, std::memory_order_acq_rel, std::memory_order_acquire);
}
struct Node : public hazptr_obj_base<Node, Atom> {
T value_;
Node* next_;
Node(T v, Node* n) : value_(v), next_(n) {}
Node* next() {
return next_;
}
T value() {
return value_;
}
};
};
} // namespace folly
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr.h>
#include <atomic>
namespace folly {
/** Set implemented as an ordered singly-linked list.
*
* A single writer thread may add or remove elements. Multiple reader
* threads may search the set concurrently with each other and with
* the writer's operations.
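 *
 * Usage sketch:
 *   HazptrSWMRSet<int> s;
 *   s.add(10);                   // single writer thread only
 *   bool found = s.contains(10); // any reader thread
 *   s.remove(10);                // single writer thread only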
*/
template <typename T, template <typename> class Atom = std::atomic>
class HazptrSWMRSet {
template <typename Node>
struct Reclaimer {
void operator()(Node* p) {
delete p;
}
};
struct Node : public hazptr_obj_base<Node, Atom, Reclaimer<Node>> {
T elem_;
Atom<Node*> next_;
Node(T e, Node* n) : elem_(e), next_(n) {}
};
Atom<Node*> head_{nullptr};
public:
HazptrSWMRSet() : head_(nullptr) {}
~HazptrSWMRSet() {
auto p = head_.load();
while (p) {
auto next = p->next_.load();
delete p;
p = next;
}
}
bool add(T v) {
auto prev = &head_;
locate_lower_bound(v, prev);
auto curr = prev->load(std::memory_order_relaxed);
if (curr && curr->elem_ == v) {
return false;
}
prev->store(new Node(std::move(v), curr));
return true;
}
bool remove(const T& v) {
auto prev = &head_;
locate_lower_bound(v, prev);
auto curr = prev->load(std::memory_order_relaxed);
if (!curr || curr->elem_ != v) {
return false;
}
Node* curr_next = curr->next_.load();
// Patch up the actual list...
prev->store(curr_next, std::memory_order_release);
// ...and only then null out the removed node.
curr->next_.store(nullptr, std::memory_order_release);
curr->retire();
return true;
}
/* Used by readers */
bool contains(const T& val) const {
/* Two hazard pointers for hand-over-hand traversal. */
hazptr_local<2, Atom> hptr;
hazptr_holder<Atom>* hptr_prev = &hptr[0];
hazptr_holder<Atom>* hptr_curr = &hptr[1];
while (true) {
auto prev = &head_;
auto curr = prev->load(std::memory_order_acquire);
while (true) {
if (!curr) {
return false;
}
if (!hptr_curr->try_protect(curr, *prev)) {
break;
}
auto next = curr->next_.load(std::memory_order_acquire);
if (prev->load(std::memory_order_acquire) != curr) {
break;
}
if (curr->elem_ == val) {
return true;
} else if (!(curr->elem_ < val)) {
return false; // because the list is sorted
}
prev = &(curr->next_);
curr = next;
std::swap(hptr_curr, hptr_prev);
}
}
}
private:
/* Used by the single writer */
void locate_lower_bound(const T& v, Atom<Node*>*& prev) const {
auto curr = prev->load(std::memory_order_relaxed);
while (curr) {
if (curr->elem_ >= v) {
break;
}
prev = &(curr->next_);
curr = curr->next_.load(std::memory_order_relaxed);
}
return;
}
};
} // namespace folly
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr.h>
#include <string>
namespace folly {
/** Wide CAS.
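 *
 * Emulates a CAS on a value wider than the machine word by swapping
 * a pointer to a node holding the value. Usage sketch:
 *   HazptrWideCAS<std::string> wcas;
 *   std::string u = "";    // expected value (the node starts as T{})
 *   std::string v = "new"; // desired value
 *   bool ok = wcas.cas(u, v);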
*/
template <typename T, template <typename> class Atom = std::atomic>
class HazptrWideCAS {
struct Node : public hazptr_obj_base<Node, Atom> {
T val_;
explicit Node(T v = {}) : val_(v) {}
};
Atom<Node*> node_;
public:
HazptrWideCAS() : node_(new Node()) {}
~HazptrWideCAS() {
delete node_.load(std::memory_order_relaxed);
}
bool cas(T& u, T& v) {
Node* n = new Node(v);
hazptr_holder<Atom> hptr;
Node* p;
while (true) {
p = hptr.get_protected(node_);
if (p->val_ != u) {
delete n;
return false;
}
if (node_.compare_exchange_weak(
p, n, std::memory_order_release, std::memory_order_relaxed)) {
break;
}
}
hptr.reset();
p->retire();
return true;
}
};
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/synchronization/Hazptr.h>
#include <folly/synchronization/example/HazptrLockFreeLIFO.h>
#include <folly/synchronization/example/HazptrSWMRSet.h>
#include <folly/synchronization/example/HazptrWideCAS.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
#include <folly/test/DeterministicSchedule.h>
#include <atomic>
#include <thread>
DEFINE_bool(bench, false, "run benchmark");
DEFINE_int64(num_reps, 10, "Number of test reps");
DEFINE_int32(num_threads, 6, "Number of threads");
DEFINE_int64(num_ops, 1003, "Number of ops or pairs of ops per rep");
using folly::default_hazptr_domain;
using folly::hazptr_array;
using folly::hazptr_cleanup;
using folly::hazptr_domain;
using folly::hazptr_holder;
using folly::hazptr_local;
using folly::hazptr_obj_base;
using folly::hazptr_obj_base_refcounted;
using folly::hazptr_retire;
using folly::hazptr_tc;
using folly::HazptrLockFreeLIFO;
using folly::HazptrSWMRSet;
using folly::HazptrWideCAS;
using folly::test::DeterministicAtomic;
using DSched = folly::test::DeterministicSchedule;
// Structures
class Count {
std::atomic<int> ctors_{0};
std::atomic<int> dtors_{0};
std::atomic<int> retires_{0};
public:
void clear() noexcept {
ctors_.store(0);
dtors_.store(0);
retires_.store(0);
}
int ctors() const noexcept {
return ctors_.load();
}
int dtors() const noexcept {
return dtors_.load();
}
int retires() const noexcept {
return retires_.load();
}
void inc_ctors() noexcept {
ctors_.fetch_add(1);
}
void inc_dtors() noexcept {
dtors_.fetch_add(1);
}
void inc_retires() noexcept {
retires_.fetch_add(1);
}
};
static Count c_;
template <template <typename> class Atom = std::atomic>
class Node : public hazptr_obj_base<Node<Atom>, Atom> {
int val_;
Atom<Node<Atom>*> next_;
public:
explicit Node(int v = 0, Node* n = nullptr) noexcept : val_(v), next_(n) {
c_.inc_ctors();
}
~Node() {
c_.inc_dtors();
}
int value() const noexcept {
return val_;
}
Node<Atom>* next() const noexcept {
return next_.load(std::memory_order_acquire);
}
Atom<Node<Atom>*>* ptr_next() noexcept {
return &next_;
}
};
template <template <typename> class Atom = std::atomic>
class NodeRC : public hazptr_obj_base_refcounted<NodeRC<Atom>, Atom> {
int val_;
Atom<NodeRC<Atom>*> next_;
bool marked_;
public:
explicit NodeRC(int v = 0, NodeRC* n = nullptr) noexcept
: val_(v), next_(n), marked_(false) {
c_.inc_ctors();
this->acquire_ref_safe();
}
~NodeRC() {
c_.inc_dtors();
if (!marked_) {
auto n = next();
while (n) {
if (!n->release_ref()) {
return;
}
auto p = n;
n = p->next();
p->marked_ = true;
delete p;
}
}
}
int value() const noexcept {
return val_;
}
NodeRC<Atom>* next() const noexcept {
return next_.load(std::memory_order_acquire);
}
};
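/*
 * Note on ~NodeRC(): it reclaims the rest of the chain iteratively
 * rather than recursively. Letting each node's destructor delete its
 * successor would recurse once per link and could overflow the stack
 * on long chains; marking a node before deleting it keeps its own
 * destructor from re-walking the list.
 */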
template <typename T, template <typename> class Atom = std::atomic>
struct List {
Atom<T*> head_{nullptr};
public:
explicit List(int size) {
auto p = head_.load(std::memory_order_relaxed);
for (int i = 0; i < size; ++i) {
p = new T(i + 10000, p);
}
head_.store(p, std::memory_order_relaxed);
}
~List() {
auto curr = head_.load(std::memory_order_relaxed);
while (curr) {
auto next = curr->next();
curr->retire();
curr = next;
}
}
bool hand_over_hand(
int val,
hazptr_holder<Atom>* hptr_prev,
hazptr_holder<Atom>* hptr_curr) {
while (true) {
auto prev = &head_;
auto curr = prev->load(std::memory_order_acquire);
while (true) {
if (!curr) {
return false;
}
if (!hptr_curr->try_protect(curr, *prev)) {
break;
}
auto next = curr->next();
if (prev->load(std::memory_order_acquire) != curr) {
break;
}
if (curr->value() == val) {
return true;
}
prev = curr->ptr_next();
curr = next;
std::swap(hptr_curr, hptr_prev);
}
}
}
bool hand_over_hand(int val) {
hazptr_local<2, Atom> hptr;
return hand_over_hand(val, &hptr[0], &hptr[1]);
}
bool protect_all(int val, hazptr_holder<Atom>& hptr) {
auto curr = hptr.get_protected(head_);
while (curr) {
auto next = curr->next();
if (curr->value() == val) {
return true;
}
curr = next;
}
return false;
}
bool protect_all(int val) {
hazptr_local<1, Atom> hptr;
return protect_all(val, hptr[0]);
}
};
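/*
 * The two traversal strategies above trade protection granularity for
 * cost. hand_over_hand() alternates two hazard pointers so the node
 * being dereferenced is always protected while its successor is
 * acquired, revalidating prev after each try_protect. protect_all()
 * protects only the head via get_protected(); the benchmarks below
 * pair it with refcounted nodes (List<NodeRC<>>), whose reference
 * counts keep successors from being reclaimed while the head is
 * protected. A minimal sketch of the protect-and-validate read both
 * are built on:
 *
 *   hazptr_holder<> h;
 *   Node<>* n = h.get_protected(src); // retries until the hazard
 *                                     // pointer matches src
 */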
// Test Functions
template <template <typename> class Atom = std::atomic>
void basic_objects_test() {
c_.clear();
int num = 0;
{
++num;
auto obj = new Node<Atom>;
obj->retire();
}
{
++num;
auto obj = new NodeRC<Atom>;
obj->release_ref();
obj->retire();
}
  {
    ++num;
    auto obj = new NodeRC<Atom>; // ctor acquires a ref: count 1
    obj->acquire_ref();          // count 2
    obj->acquire_ref_safe();     // count 3
    obj->release_ref();          // count 2
    obj->retire();
    obj->release_ref();          // count 1
    obj->release_ref();          // count 0: reclaimable after cleanup
  }
ASSERT_EQ(c_.ctors(), num);
hazptr_cleanup<Atom>();
ASSERT_EQ(c_.dtors(), num);
}
template <template <typename> class Atom = std::atomic>
void copy_and_move_test() {
struct Obj : hazptr_obj_base<Obj, Atom> {
int a;
};
auto p1 = new Obj();
auto p2 = new Obj(*p1);
p1->retire();
p2->retire();
p1 = new Obj();
p2 = new Obj(std::move(*p1));
p1->retire();
p2->retire();
p1 = new Obj();
p2 = new Obj();
*p2 = *p1;
p1->retire();
p2->retire();
p1 = new Obj();
p2 = new Obj();
*p2 = std::move(*p1);
p1->retire();
p2->retire();
hazptr_cleanup<Atom>();
}
template <template <typename> class Atom = std::atomic>
void basic_holders_test() {
{ hazptr_holder<Atom> h; }
{ hazptr_array<2, Atom> h; }
{ hazptr_local<2, Atom> h; }
}
template <template <typename> class Atom = std::atomic>
void basic_protection_test() {
c_.clear();
auto obj = new Node<Atom>;
hazptr_holder<Atom> h;
h.reset(obj);
obj->retire();
ASSERT_EQ(c_.ctors(), 1);
hazptr_cleanup<Atom>();
ASSERT_EQ(c_.dtors(), 0);
h.reset();
hazptr_cleanup<Atom>();
ASSERT_EQ(c_.dtors(), 1);
}
template <template <typename> class Atom = std::atomic>
void virtual_test() {
struct Thing : public hazptr_obj_base<Thing, Atom> {
virtual ~Thing() {}
int a;
};
for (int i = 0; i < 100; i++) {
auto bar = new Thing;
bar->a = i;
hazptr_holder<Atom> hptr;
hptr.reset(bar);
bar->retire();
ASSERT_EQ(bar->a, i);
}
hazptr_cleanup<Atom>();
}
template <template <typename> class Atom = std::atomic>
void destruction_test(hazptr_domain<Atom>& domain) {
struct Thing : public hazptr_obj_base<Thing, Atom> {
Thing* next;
hazptr_domain<Atom>* domain;
int val;
Thing(int v, Thing* n, hazptr_domain<Atom>* d)
: next(n), domain(d), val(v) {}
~Thing() {
if (next) {
next->retire(*domain);
}
}
};
Thing* last{nullptr};
for (int i = 0; i < 2000; i++) {
last = new Thing(i, last, &domain);
}
last->retire(domain);
hazptr_cleanup<Atom>();
}
template <template <typename> class Atom = std::atomic>
void move_test() {
for (int i = 0; i < 100; ++i) {
auto x = new Node<Atom>(i);
hazptr_holder<Atom> hptr0;
// Protect object
hptr0.reset(x);
// Retire object
x->retire();
// Move constructor - still protected
hazptr_holder<Atom> hptr1(std::move(hptr0));
// Self move is no-op - still protected
auto phptr1 = &hptr1;
ASSERT_EQ(phptr1, &hptr1);
hptr1 = std::move(*phptr1);
// Empty constructor
hazptr_holder<Atom> hptr2(nullptr);
// Move assignment - still protected
hptr2 = std::move(hptr1);
// Access object
ASSERT_EQ(x->value(), i);
// Unprotect object - hptr2 is nonempty
hptr2.reset();
}
hazptr_cleanup<Atom>();
}
template <template <typename> class Atom = std::atomic>
void array_test() {
for (int i = 0; i < 100; ++i) {
auto x = new Node<Atom>(i);
hazptr_array<3, Atom> hptr;
// Protect object
hptr[2].reset(x);
// Empty array
hazptr_array<3, Atom> h(nullptr);
// Move assignment
h = std::move(hptr);
// Retire object
x->retire();
ASSERT_EQ(x->value(), i);
    // Unprotect object - h[2] is nonempty
h[2].reset();
}
hazptr_cleanup<Atom>();
}
template <template <typename> class Atom = std::atomic>
void array_dtor_full_tc_test() {
#if FOLLY_HAZPTR_THR_LOCAL
const uint8_t M = hazptr_tc<Atom>::capacity();
#else
const uint8_t M = 3;
#endif
{
// Fill the thread cache
hazptr_array<M, Atom> w;
}
{
// Empty array x
hazptr_array<M, Atom> x(nullptr);
{
// y ctor gets elements from the thread cache filled by w dtor.
hazptr_array<M, Atom> y;
// z ctor gets elements from the default domain.
hazptr_array<M, Atom> z;
// Elements of y are moved to x.
x = std::move(y);
// z dtor fills the thread cache.
}
// x dtor finds the thread cache full. It has to call
// ~hazptr_holder() for each of its elements, which were
// previously taken from the thread cache by y ctor.
}
}
template <template <typename> class Atom = std::atomic>
void local_test() {
for (int i = 0; i < 100; ++i) {
auto x = new Node<Atom>(i);
hazptr_local<3, Atom> hptr;
// Protect object
hptr[2].reset(x);
// Retire object
x->retire();
    // Unprotect object - hptr[2] is nonempty
hptr[2].reset();
}
hazptr_cleanup<Atom>();
}
template <template <typename> class Atom = std::atomic>
void refcount_test() {
c_.clear();
NodeRC<Atom>* p = nullptr;
int num = 193;
for (int i = 0; i < num; ++i) {
p = new NodeRC<Atom>(i, p);
}
hazptr_holder<Atom> hptr;
hptr.reset(p);
for (auto q = p->next(); q; q = q->next()) {
q->retire();
}
int v = num;
for (auto q = p; q; q = q->next()) {
ASSERT_GT(v, 0);
--v;
ASSERT_EQ(q->value(), v);
}
ASSERT_TRUE(!p->release_ref());
hazptr_cleanup<Atom>();
ASSERT_EQ(c_.ctors(), num);
ASSERT_EQ(c_.dtors(), 0);
p->retire();
hazptr_cleanup<Atom>();
ASSERT_EQ(c_.dtors(), 0);
hptr.reset();
hazptr_cleanup<Atom>();
ASSERT_EQ(c_.dtors(), num);
}
template <template <typename> class Atom = std::atomic>
void mt_refcount_test() {
c_.clear();
Atom<bool> ready(false);
Atom<int> setHazptrs(0);
  Atom<NodeRC<Atom>*> head{nullptr};
  int num = FLAGS_num_ops;
  int nthr = FLAGS_num_threads;
ASSERT_GT(FLAGS_num_threads, 0);
std::vector<std::thread> thr(nthr);
for (int i = 0; i < nthr; ++i) {
thr[i] = DSched::thread([&] {
while (!ready.load()) {
/* spin */
}
hazptr_holder<Atom> hptr;
auto p = hptr.get_protected(head);
++setHazptrs;
/* Concurrent with removal */
int v = num;
for (auto q = p; q; q = q->next()) {
ASSERT_GT(v, 0);
--v;
ASSERT_EQ(q->value(), v);
}
ASSERT_EQ(v, 0);
});
}
NodeRC<Atom>* p = nullptr;
for (int i = 0; i < num; ++i) {
p = new NodeRC<Atom>(i, p);
}
ASSERT_EQ(c_.ctors(), num);
head.store(p);
ready.store(true);
while (setHazptrs.load() < nthr) {
/* spin */
}
/* this is concurrent with traversal by reader */
head.store(nullptr);
for (auto q = p; q; q = q->next()) {
q->retire();
}
ASSERT_EQ(c_.dtors(), 0);
if (p->release_ref()) {
delete p;
}
for (auto& t : thr) {
DSched::join(t);
}
hazptr_cleanup<Atom>();
ASSERT_EQ(c_.dtors(), num);
}
template <template <typename> class Atom = std::atomic>
void free_function_retire_test() {
auto foo = new int;
hazptr_retire<Atom>(foo);
auto foo2 = new int;
hazptr_retire<Atom>(foo2, [](int* obj) { delete obj; });
bool retired = false;
{
hazptr_domain<Atom> myDomain0;
struct delret {
bool* retired_;
explicit delret(bool* retire) : retired_(retire) {}
~delret() {
*retired_ = true;
}
};
auto foo3 = new delret(&retired);
myDomain0.retire(foo3);
}
ASSERT_TRUE(retired);
}
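// The scoped block above checks that destroying a hazptr_domain
// reclaims objects still retired to it: foo3's deleter must have run
// by the time myDomain0 goes out of scope, which is what the final
// assertion verifies.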
template <template <typename> class Atom = std::atomic>
void cleanup_test() {
int threadOps = 1007;
int mainOps = 19;
c_.clear();
Atom<int> threadsDone{0};
Atom<bool> mainDone{false};
std::vector<std::thread> threads(FLAGS_num_threads);
for (int tid = 0; tid < FLAGS_num_threads; ++tid) {
threads[tid] = DSched::thread([&, tid]() {
for (int j = tid; j < threadOps; j += FLAGS_num_threads) {
auto p = new Node<Atom>;
p->retire();
}
threadsDone.fetch_add(1);
while (!mainDone.load()) {
/* spin */;
}
});
}
{ // include the main thread in the test
for (int i = 0; i < mainOps; ++i) {
auto p = new Node<Atom>;
p->retire();
}
}
while (threadsDone.load() < FLAGS_num_threads) {
/* spin */;
}
ASSERT_EQ(c_.ctors(), threadOps + mainOps);
hazptr_cleanup<Atom>();
ASSERT_EQ(c_.dtors(), threadOps + mainOps);
mainDone.store(true);
for (auto& t : threads) {
DSched::join(t);
}
{ // Cleanup after using array
c_.clear();
{ hazptr_array<2, Atom> h; }
{
hazptr_array<2, Atom> h;
auto p0 = new Node<Atom>;
auto p1 = new Node<Atom>;
h[0].reset(p0);
h[1].reset(p1);
p0->retire();
p1->retire();
}
ASSERT_EQ(c_.ctors(), 2);
hazptr_cleanup<Atom>();
ASSERT_EQ(c_.dtors(), 2);
}
{ // Cleanup after using local
c_.clear();
{ hazptr_local<2, Atom> h; }
{
hazptr_local<2, Atom> h;
auto p0 = new Node<Atom>;
auto p1 = new Node<Atom>;
h[0].reset(p0);
h[1].reset(p1);
p0->retire();
p1->retire();
}
ASSERT_EQ(c_.ctors(), 2);
hazptr_cleanup<Atom>();
ASSERT_EQ(c_.dtors(), 2);
}
}
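// Note: the main thread calls hazptr_cleanup() while the workers are
// still parked on mainDone, so the dtor assertions verify that cleanup
// reclaims objects retired by other threads, not just by the caller.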
template <template <typename> class Atom = std::atomic>
void lifo_test() {
for (int i = 0; i < FLAGS_num_reps; ++i) {
Atom<int> sum{0};
HazptrLockFreeLIFO<int, Atom> s;
std::vector<std::thread> threads(FLAGS_num_threads);
for (int tid = 0; tid < FLAGS_num_threads; ++tid) {
threads[tid] = DSched::thread([&, tid]() {
int local = 0;
for (int j = tid; j < FLAGS_num_ops; j += FLAGS_num_threads) {
s.push(j);
int v;
ASSERT_TRUE(s.pop(v));
local += v;
}
sum.fetch_add(local);
});
}
for (auto& t : threads) {
DSched::join(t);
}
hazptr_cleanup<Atom>();
int expected = FLAGS_num_ops * (FLAGS_num_ops - 1) / 2;
ASSERT_EQ(sum.load(), expected);
}
}
template <template <typename> class Atom = std::atomic>
void swmr_test() {
using T = uint64_t;
for (int i = 0; i < FLAGS_num_reps; ++i) {
HazptrSWMRSet<T, Atom> s;
std::vector<std::thread> threads(FLAGS_num_threads);
for (int tid = 0; tid < FLAGS_num_threads; ++tid) {
threads[tid] = DSched::thread([&s, tid]() {
for (int j = tid; j < FLAGS_num_ops; j += FLAGS_num_threads) {
s.contains(j);
}
});
}
for (int j = 0; j < 10; ++j) {
s.add(j);
}
for (int j = 0; j < 10; ++j) {
s.remove(j);
}
for (auto& t : threads) {
DSched::join(t);
}
hazptr_cleanup<Atom>();
}
}
template <template <typename> class Atom = std::atomic>
void wide_cas_test() {
HazptrWideCAS<std::string, Atom> s;
std::string u = "";
std::string v = "11112222";
auto ret = s.cas(u, v);
ASSERT_TRUE(ret);
u = "";
v = "11112222";
ret = s.cas(u, v);
ASSERT_FALSE(ret);
u = "11112222";
v = "22223333";
ret = s.cas(u, v);
ASSERT_TRUE(ret);
u = "22223333";
v = "333344445555";
ret = s.cas(u, v);
ASSERT_TRUE(ret);
hazptr_cleanup<Atom>();
}
// Tests
TEST(HazptrTest, basic_objects) {
basic_objects_test();
}
TEST(HazptrTest, dsched_basic_objects) {
DSched sched(DSched::uniform(0));
basic_objects_test<DeterministicAtomic>();
}
TEST(HazptrTest, copy_and_move) {
copy_and_move_test();
}
TEST(HazptrTest, dsched_copy_and_move) {
DSched sched(DSched::uniform(0));
copy_and_move_test<DeterministicAtomic>();
}
TEST(HazptrTest, basic_holders) {
basic_holders_test();
}
TEST(HazptrTest, dsched_basic_holders) {
DSched sched(DSched::uniform(0));
basic_holders_test<DeterministicAtomic>();
}
TEST(HazptrTest, basic_protection) {
basic_protection_test();
}
TEST(HazptrTest, dsched_basic_protection) {
DSched sched(DSched::uniform(0));
basic_protection_test<DeterministicAtomic>();
}
TEST(HazptrTest, virtual) {
virtual_test();
}
TEST(HazptrTest, dsched_virtual) {
DSched sched(DSched::uniform(0));
virtual_test<DeterministicAtomic>();
}
TEST(HazptrTest, destruction) {
{
hazptr_domain<> myDomain0;
destruction_test(myDomain0);
}
destruction_test(default_hazptr_domain<std::atomic>());
}
TEST(HazptrTest, dsched_destruction) {
DSched sched(DSched::uniform(0));
{
hazptr_domain<DeterministicAtomic> myDomain0;
destruction_test<DeterministicAtomic>(myDomain0);
}
destruction_test<DeterministicAtomic>(
default_hazptr_domain<DeterministicAtomic>());
}
TEST(HazptrTest, move) {
move_test();
}
TEST(HazptrTest, dsched_move) {
DSched sched(DSched::uniform(0));
move_test<DeterministicAtomic>();
}
TEST(HazptrTest, array) {
array_test();
}
TEST(HazptrTest, dsched_array) {
DSched sched(DSched::uniform(0));
array_test<DeterministicAtomic>();
}
TEST(HazptrTest, array_dtor_full_tc) {
array_dtor_full_tc_test();
}
TEST(HazptrTest, dsched_array_dtor_full_tc) {
DSched sched(DSched::uniform(0));
array_dtor_full_tc_test<DeterministicAtomic>();
}
TEST(HazptrTest, local) {
local_test();
}
TEST(HazptrTest, dsched_local) {
DSched sched(DSched::uniform(0));
local_test<DeterministicAtomic>();
}
TEST(HazptrTest, refcount) {
refcount_test();
}
TEST(HazptrTest, dsched_refcount) {
DSched sched(DSched::uniform(0));
refcount_test<DeterministicAtomic>();
}
TEST(HazptrTest, mt_refcount) {
mt_refcount_test();
}
TEST(HazptrTest, dsched_mt_refcount) {
DSched sched(DSched::uniform(0));
mt_refcount_test<DeterministicAtomic>();
}
TEST(HazptrTest, free_function_retire) {
free_function_retire_test();
}
TEST(HazptrTest, dsched_free_function_retire) {
DSched sched(DSched::uniform(0));
free_function_retire_test<DeterministicAtomic>();
}
TEST(HazptrTest, cleanup) {
cleanup_test();
}
TEST(HazptrTest, dsched_cleanup) {
DSched sched(DSched::uniform(0));
cleanup_test<DeterministicAtomic>();
}
TEST(HazptrTest, lifo) {
lifo_test();
}
TEST(HazptrTest, dsched_lifo) {
DSched sched(DSched::uniform(0));
lifo_test<DeterministicAtomic>();
}
TEST(HazptrTest, swmr) {
swmr_test();
}
TEST(HazptrTest, dsched_swmr) {
DSched sched(DSched::uniform(0));
swmr_test<DeterministicAtomic>();
}
TEST(HazptrTest, wide_cas) {
wide_cas_test();
}
TEST(HazptrTest, dsched_wide_cas) {
DSched sched(DSched::uniform(0));
wide_cas_test<DeterministicAtomic>();
}
// Benchmark drivers
template <
typename InitFunc,
typename Func,
typename EndFunc,
template <typename> class Atom = std::atomic>
uint64_t run_once(
int nthreads,
const InitFunc& init,
const Func& fn,
const EndFunc& endFn) {
Atom<bool> start{false};
Atom<int> started{0};
init();
std::vector<std::thread> threads(nthreads);
for (int tid = 0; tid < nthreads; ++tid) {
threads[tid] = DSched::thread([&, tid] {
started.fetch_add(1);
while (!start.load()) {
/* spin */;
}
fn(tid);
});
}
while (started.load() < nthreads) {
/* spin */;
}
// begin time measurement
auto tbegin = std::chrono::steady_clock::now();
start.store(true);
for (auto& t : threads) {
DSched::join(t);
}
hazptr_cleanup();
// end time measurement
auto tend = std::chrono::steady_clock::now();
endFn();
return std::chrono::duration_cast<std::chrono::nanoseconds>(tend - tbegin)
.count();
}
template <typename RepFunc>
uint64_t bench(std::string name, int ops, const RepFunc& repFn) {
int reps = 10;
uint64_t min = UINTMAX_MAX;
uint64_t max = 0;
uint64_t sum = 0;
  repFn(); // warm-up: the first run is often an outlier
for (int r = 0; r < reps; ++r) {
uint64_t dur = repFn();
sum += dur;
min = std::min(min, dur);
max = std::max(max, dur);
}
const std::string unit = " ns";
uint64_t avg = sum / reps;
uint64_t res = min;
std::cout << name;
std::cout << " " << std::setw(4) << max / ops << unit;
std::cout << " " << std::setw(4) << avg / ops << unit;
std::cout << " " << std::setw(4) << res / ops << unit;
std::cout << std::endl;
return res;
}
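// Each line printed by bench() shows max, average, and min ns per
// operation over the 10 measured reps (after the warm-up rep); the
// min is returned.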
//
// Benchmarks
//
const int ops = 1000000;
inline uint64_t holder_bench(std::string name, int nthreads) {
auto repFn = [&] {
auto init = [] {};
auto fn = [&](int tid) {
for (int j = tid; j < 10 * ops; j += nthreads) {
hazptr_holder<> h;
}
};
auto endFn = [] {};
return run_once(nthreads, init, fn, endFn);
};
return bench(name, ops, repFn);
}
template <size_t M>
inline uint64_t array_bench(std::string name, int nthreads) {
auto repFn = [&] {
auto init = [] {};
auto fn = [&](int tid) {
for (int j = tid; j < 10 * ops; j += nthreads) {
hazptr_array<M> a;
}
};
auto endFn = [] {};
return run_once(nthreads, init, fn, endFn);
};
return bench(name, ops, repFn);
}
template <size_t M>
inline uint64_t local_bench(std::string name, int nthreads) {
auto repFn = [&] {
auto init = [] {};
auto fn = [&](int tid) {
for (int j = tid; j < 10 * ops; j += nthreads) {
hazptr_local<M> a;
}
};
auto endFn = [] {};
return run_once(nthreads, init, fn, endFn);
};
return bench(name, ops, repFn);
}
inline uint64_t obj_bench(std::string name, int nthreads) {
struct Foo : public hazptr_obj_base<Foo> {};
auto repFn = [&] {
auto init = [] {};
auto fn = [&](int tid) {
for (int j = tid; j < ops; j += nthreads) {
auto p = new Foo;
p->retire();
}
};
auto endFn = [] {};
return run_once(nthreads, init, fn, endFn);
};
return bench(name, ops, repFn);
}
uint64_t list_hoh_bench(
std::string name,
int nthreads,
int size,
bool provided = false) {
auto repFn = [&] {
List<Node<>> l(size);
auto init = [&] {};
auto fn = [&](int tid) {
if (provided) {
hazptr_local<2> hptr;
for (int j = tid; j < ops; j += nthreads) {
l.hand_over_hand(size, &hptr[0], &hptr[1]);
}
} else {
for (int j = tid; j < ops; j += nthreads) {
l.hand_over_hand(size);
}
}
};
auto endFn = [] {};
return run_once(nthreads, init, fn, endFn);
};
return bench(name, ops, repFn);
}
uint64_t list_protect_all_bench(
std::string name,
int nthreads,
int size,
bool provided = false) {
auto repFn = [&] {
List<NodeRC<>> l(size);
auto init = [] {};
auto fn = [&](int tid) {
if (provided) {
hazptr_local<1> hptr;
for (int j = tid; j < ops; j += nthreads) {
l.protect_all(size, hptr[0]);
}
} else {
for (int j = tid; j < ops; j += nthreads) {
l.protect_all(size);
}
}
};
auto endFn = [] {};
return run_once(nthreads, init, fn, endFn);
};
return bench(name, ops, repFn);
}
const int nthr[] = {1, 10};
const int sizes[] = {10, 20};
void benches() {
for (int i : nthr) {
std::cout << "================================ " << std::setw(2) << i
<< " threads "
<< "================================" << std::endl;
std::cout << "10x construct/destruct hazptr_holder ";
holder_bench("", i);
std::cout << "10x construct/destruct hazptr_array<1> ";
array_bench<1>("", i);
std::cout << "10x construct/destruct hazptr_array<2> ";
array_bench<2>("", i);
std::cout << "10x construct/destruct hazptr_array<3> ";
array_bench<3>("", i);
std::cout << "10x construct/destruct hazptr_local<1> ";
local_bench<1>("", i);
std::cout << "10x construct/destruct hazptr_local<2> ";
local_bench<2>("", i);
std::cout << "10x construct/destruct hazptr_local<3> ";
local_bench<3>("", i);
std::cout << "allocate/retire/reclaim object ";
obj_bench("", i);
for (int j : sizes) {
std::cout << j << "-item list hand-over-hand - own hazptrs ";
list_hoh_bench("", i, j, true);
std::cout << j << "-item list hand-over-hand ";
list_hoh_bench("", i, j);
std::cout << j << "-item list protect all - own hazptr ";
list_protect_all_bench("", i, j, true);
std::cout << j << "-item list protect all ";
list_protect_all_bench("", i, j);
}
}
}
TEST(HazptrTest, bench) {
if (FLAGS_bench) {
benches();
}
}
/*
$ numactl -N 1 ./buck-out/gen/folly/synchronization/test/hazptr_test --bench
================================ 1 threads ================================
10x construct/destruct hazptr_holder 51 ns 51 ns 50 ns
10x construct/destruct hazptr_array<1> 54 ns 52 ns 52 ns
10x construct/destruct hazptr_array<2> 60 ns 59 ns 58 ns
10x construct/destruct hazptr_array<3> 141 ns 88 ns 82 ns
10x construct/destruct hazptr_local<1> 13 ns 12 ns 12 ns
10x construct/destruct hazptr_local<2> 15 ns 15 ns 15 ns
10x construct/destruct hazptr_local<3> 39 ns 39 ns 38 ns
allocate/retire/reclaim object 70 ns 68 ns 67 ns
10-item list hand-over-hand - own hazptrs 22 ns 20 ns 18 ns
10-item list hand-over-hand 28 ns 25 ns 22 ns
10-item list protect all - own hazptr 12 ns 11 ns 11 ns
10-item list protect all 22 ns 13 ns 12 ns
20-item list hand-over-hand - own hazptrs 42 ns 40 ns 38 ns
20-item list hand-over-hand 48 ns 43 ns 41 ns
20-item list protect all - own hazptr 28 ns 28 ns 28 ns
20-item list protect all 31 ns 29 ns 29 ns
================================ 10 threads ================================
10x construct/destruct hazptr_holder 11 ns 8 ns 8 ns
10x construct/destruct hazptr_array<1> 8 ns 7 ns 7 ns
10x construct/destruct hazptr_array<2> 9 ns 9 ns 9 ns
10x construct/destruct hazptr_array<3> 19 ns 17 ns 14 ns
10x construct/destruct hazptr_local<1> 8 ns 8 ns 8 ns
10x construct/destruct hazptr_local<2> 8 ns 8 ns 7 ns
10x construct/destruct hazptr_local<3> 11 ns 11 ns 10 ns
allocate/retire/reclaim object 20 ns 17 ns 16 ns
10-item list hand-over-hand - own hazptrs 3 ns 3 ns 3 ns
10-item list hand-over-hand 3 ns 3 ns 3 ns
10-item list protect all - own hazptr 2 ns 2 ns 2 ns
10-item list protect all 2 ns 2 ns 2 ns
20-item list hand-over-hand - own hazptrs 6 ns 6 ns 6 ns
20-item list hand-over-hand 6 ns 6 ns 6 ns
20-item list protect all - own hazptr 4 ns 4 ns 4 ns
20-item list protect all 5 ns 4 ns 4 ns
*/