Commit d42832d2 authored by Maged Michael, committed by Facebook Github Bot

Rewrite from experimental, use of deterministic schedule, improvements

Summary:
For the history of the experimental version, see folly/experimental/hazptr.

Added:
- Support for deterministic schedule testing.

Fixes:
- Eliminates pathological performance cases in the construction of hazptr_array and hazptr_local when the thread cache can only partially satisfy the needed number of hazptr_holder-s.
- try_protect now sets ptr to the value read from src on success; previously, client code could use ptr while it held an invalid value. A sketch of the corrected contract follows.
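
A hedged sketch of the corrected contract (Widget and src are
hypothetical; this shows the standard retry pattern, not library code):

    std::atomic<Widget*> src;
    folly::hazptr_holder<> h;
    Widget* ptr = src.load(std::memory_order_relaxed);
    while (!h.try_protect(ptr, src)) {
      /* on failure, try_protect updates ptr to the value last read from src */
    }
    /* on success, ptr holds the protected value read from src */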

Code improvements including:
- Separated components into different files.
- Eliminated all macro definitions except one.
- Simplified thread local structures.

Reviewed By: davidtgoldblatt

Differential Revision: D7565357

fbshipit-source-id: fb00fad7395148e66d6231a374b5b717574ed270
parent 0e066dda
@@ -467,6 +467,13 @@ nobase_follyinclude_HEADERS = \
synchronization/AtomicStruct.h \
synchronization/Baton.h \
synchronization/CallOnce.h \
synchronization/Hazptr.h \
synchronization/Hazptr-fwd.h \
synchronization/HazptrDomain.h \
synchronization/HazptrHolder.h \
synchronization/HazptrObj.h \
synchronization/HazptrRec.h \
synchronization/HazptrThrLocal.h \
synchronization/LifoSem.h \
synchronization/MicroSpinLock.h \
synchronization/ParkingLot.h \
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <memory>
///
/// Forward declarations and implicit documentation of all hazptr
/// top-level classes, functions, macros, default values, and globals.
///
/** FOLLY_HAZPTR_THR_LOCAL */
#if FOLLY_MOBILE
#define FOLLY_HAZPTR_THR_LOCAL false
#else
#define FOLLY_HAZPTR_THR_LOCAL true
#endif
namespace folly {
///
/// Hazard pointer record.
/// Defined in HazptrRec.h
///
/** hazptr_rec */
template <template <typename> class Atom = std::atomic>
class hazptr_rec;
///
/// Classes related to objects protected by hazard pointers.
/// Defined in HazptrObj.h
///
/** hazptr_obj */
template <template <typename> class Atom = std::atomic>
class hazptr_obj;
/** hazptr_obj_base */
template <
typename T,
template <typename> class Atom = std::atomic,
typename Deleter = std::default_delete<T>>
class hazptr_obj_base;
/** hazptr_obj_base_refcounted */
template <
typename T,
template <typename> class Atom = std::atomic,
typename Deleter = std::default_delete<T>>
class hazptr_obj_base_refcounted;
///
/// Classes and functions related to thread local structures.
/// Defined in HazptrThrLocal.h
///
/** hazptr_tc_entry */
template <template <typename> class Atom = std::atomic>
class hazptr_tc_entry;
/** hazptr_tc */
template <template <typename> class Atom = std::atomic>
class hazptr_tc;
/** hazptr_tc_tls */
template <template <typename> class Atom = std::atomic>
hazptr_tc<Atom>& hazptr_tc_tls();
/** hazptr_priv */
template <template <typename> class Atom = std::atomic>
class hazptr_priv;
/** hazptr_priv_tls */
template <template <typename> class Atom = std::atomic>
hazptr_priv<Atom>& hazptr_priv_tls();
///
/// Hazard pointer domain
/// Defined in HazptrDomain.h
///
/** hazptr_domain */
template <template <typename> class Atom = std::atomic>
class hazptr_domain;
/** default_hazptr_domain */
template <template <typename> class Atom = std::atomic>
hazptr_domain<Atom>& default_hazptr_domain();
/** hazptr_domain_push_retired */
template <template <typename> class Atom = std::atomic>
void hazptr_domain_push_retired(
hazptr_obj<Atom>* head,
hazptr_obj<Atom>* tail,
int rcount,
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept;
/** hazptr_retire */
template <
template <typename> class Atom = std::atomic,
typename T,
typename D = std::default_delete<T>>
void hazptr_retire(T* obj, D reclaim = {});
/** hazptr_cleanup */
template <template <typename> class Atom = std::atomic>
void hazptr_cleanup(
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept;
/** Global default domain defined in Hazptr.cpp */
extern hazptr_domain<std::atomic> default_domain;
///
/// Classes related to hazard pointer holders.
/// Defined in HazptrHolder.h
///
/** hazptr_holder */
template <template <typename> class Atom = std::atomic>
class hazptr_holder;
/** Free function swap of hazptr_holder-s */
template <template <typename> class Atom = std::atomic>
void swap(hazptr_holder<Atom>&, hazptr_holder<Atom>&) noexcept;
/** hazptr_array */
template <uint8_t M = 1, template <typename> class Atom = std::atomic>
class hazptr_array;
/** hazptr_local */
template <uint8_t M = 1, template <typename> class Atom = std::atomic>
class hazptr_local;
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/synchronization/Hazptr.h>
#include <atomic>
namespace folly {
FOLLY_STATIC_CTOR_PRIORITY_MAX hazptr_domain<std::atomic> default_domain;
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/synchronization/HazptrDomain.h>
#include <folly/synchronization/HazptrHolder.h>
#include <folly/synchronization/HazptrObj.h>
#include <folly/synchronization/HazptrRec.h>
#include <folly/synchronization/HazptrThrLocal.h>
/// Hazard pointers are a safe reclamation method. They protect objects
/// from being reclaimed while being accessed by one or more threads, but
/// allow objects to be removed concurrently while being accessed.
///
/// What is a Hazard Pointer?
/// -------------------------
/// A hazard pointer is a single-writer multi-reader pointer that can
/// be owned by at most one thread at a time. To protect an object A
/// from being reclaimed while in use, a thread X sets one of its
/// owned hazard pointers, P, to the address of A. If P is set to &A
/// before A is removed (i.e., it becomes unreachable) then A will not be
/// reclaimed as long as P continues to hold the value &A.
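///
/// A hedged pseudocode sketch of the protection protocol (the
/// library wraps this pattern in hazptr_holder; p, src, and the
/// hazard pointer P are illustrative):
///   p = src.load();
///   do {
///     P = p;          // publish p in the owned hazard pointer
///     p = src.load(); // re-read src to validate reachability
///   } while (P != p);
///   /* on exit, *p is protected: not reclaimed while P == p */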
///
/// Why use hazard pointers?
/// ------------------------
/// - Speed and scalability.
/// - Can be used while blocking.
///
/// When not to use hazard pointers?
/// --------------------------------
/// - When thread local data is not supported efficiently.
///
/// Basic Interface
/// ---------------
/// - In the hazptr library, raw hazard pointers are not exposed to
/// users. Instead, each instance of the class hazptr_holder owns
/// and manages at most one hazard pointer.
/// - Typically classes of objects protected by hazard pointers are
/// derived from a class template hazptr_obj_base that provides a
/// member function retire(). When an object A is removed,
/// A.retire() is called to pass responsibility for reclaiming A to
/// the hazptr library. A will be reclaimed only after it is not
/// protected by hazard pointers.
/// - The essential components of the hazptr API are:
/// o hazptr_holder: Class that owns and manages a hazard pointer.
/// o get_protected: Member function of hazptr_holder. Protects
/// an object pointed to by an atomic source (if not null).
/// T* get_protected(const atomic<T*>& src);
/// o hazptr_obj_base<T>: Base class for protected objects.
/// o retire: Member function of hazptr_obj_base that automatically
/// reclaims the object when safe.
/// void retire();
///
/// Default Domain and Default Deleters
/// -----------------------------------
/// - Most uses do not need to specify custom domains and custom
/// deleters, and by default use the default domain and default
/// deleters.
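///
/// A hedged sketch of specifying a custom deleter (Widget and
/// WidgetDeleter are illustrative):
///   struct WidgetDeleter {
///     void operator()(Widget* p) const { /* custom reclamation */ }
///   };
///   class Widget
///       : public hazptr_obj_base<Widget, std::atomic, WidgetDeleter> {};
///   /* ... */
///   widget->retire(); // reclaimed with WidgetDeleter{} when safe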
///
/// Simple usage example
/// --------------------
/// class Config : public hazptr_obj_base<Config> {
/// /* ... details ... */
/// U get_config(V v);
/// };
///
/// std::atomic<Config*> config_;
///
/// // Called frequently
/// U get_config(V v) {
/// hazptr_holder h; /* h owns a hazard pointer */
/// Config* ptr = h.get_protected(config_);
/// /* safe to access *ptr as long as it is protected by h */
/// return ptr->get_config(v);
/// /* h dtor resets and releases the owned hazard pointer;
/// *ptr will no longer be protected by this hazard pointer */
/// }
///
/// // Called rarely
/// void update_config(Config* new_config) {
/// Config* ptr = config_.exchange(new_config);
/// ptr->retire(); // Member function of hazptr_obj_base<Config>
/// }
///
/// Optimized Holders
/// -----------------
/// - The template hazptr_array<M> provides most of the functionality
/// of M hazptr_holder-s but with faster construction/destruction
/// (for M > 1), at the cost of restrictions (on move and swap).
/// - The template hazptr_local<M> provides greater speed even when
/// M=1 (~2 ns vs ~5 ns for construction/destruction) but it is
/// unsafe for the current thread to construct any other holder-type
/// objects (hazptr_holder, hazptr_array and other hazptr_local)
/// while the current instance exists.
/// - In the above example, if Config::get_config() and all of its
/// descendants are guaranteed not to use hazard pointers, then it
/// can be faster (by ~3 ns) to use
/// hazptr_local<1> h;
/// Config* ptr = h[0].get_protected(config_);
/// than
/// hazptr_holder h;
/// Config* ptr = h.get_protected(config_);
///
/// Memory Usage
/// ------------
/// - The size of the metadata for the hazptr library is linear in the
/// number of threads using hazard pointers, assuming a constant
/// number of hazard pointers per thread, which is typical.
/// - The typical number of reclaimable but not yet reclaimed
/// objects is linear in the number of hazard pointers, which
/// typically is linear in the number of threads using hazard
/// pointers.
///
/// Alternative Safe Reclamation Methods
/// ------------------------------------
/// - Locking (exclusive or shared):
/// o Pros: simple to reason about.
/// o Cons: serialization, high reader overhead, high contention, deadlock.
/// o When to use: When speed and contention are not critical, and
/// when deadlock avoidance is simple.
/// - Reference counting (atomic shared_ptr):
/// o Pros: automatic reclamation, thread-anonymous, independent of
/// support for thread local data, immune to deadlock.
/// o Cons: high reader (and writer) overhead, high reader (and
/// writer) contention.
/// o When to use: When thread local support is lacking and deadlock
/// can be a problem, or automatic reclamation is needed.
/// - Read-copy-update (RCU):
/// o Pros: simple, fast, scalable.
/// o Cons: sensitive to blocking.
/// o When to use: When speed and scalability are important and
/// objects do not need to be protected while blocking.
///
/// Hazard Pointers vs RCU
/// ----------------------
/// - The differences between hazard pointers and RCU boil down to
/// this: hazard pointers protect specific objects, whereas RCU
/// sections protect all protectable objects.
/// - Both have comparably low overheads for protection (i.e., reading
/// or traversal), on the order of low nanoseconds.
/// - Both support effectively perfect scalability of object
/// protection by read-only operations (barring other factors).
/// - Both rely on thread local data for performance.
/// - Hazard pointers can protect objects while blocking
/// indefinitely. Hazard pointers only prevent the reclamation of
/// the objects they are protecting.
/// - RCU sections do not allow indefinite blocking, because RCU
/// prevents the reclamation of all protectable objects, which
/// otherwise would lead to deadlock and/or running out of memory.
/// - Hazard pointers can support end-to-end lock-free operations,
/// including updates (given a lock-free allocator), regardless of
/// thread delays and scheduling constraints.
/// - RCU can support wait-free read operations, but reclamation of
/// an unbounded number of objects can be delayed for as long as a
/// single thread is delayed.
/// - The number of unreclaimed objects is bounded when protected by
/// hazard pointers, but is unbounded when protected by RCU.
/// - RCU is simpler to use than hazard pointers (except for the
/// blocking and deadlock issues mentioned above). Hazard pointers
/// need to identify protected objects, whereas RCU does not need to
/// because it protects all protectable objects.
/// - Both can protect linked structures. Hazard pointers need
/// additional link counting with low or moderate overhead for
/// update operations, and no overhead for readers. RCU protects
/// linked structures automatically, because it protects
/// everything.
///
/// Differences from the Standard Proposal
/// --------------------------------------
/// - The latest standard proposal is in wg21.link/p0566.
/// - This library's API differs from the standard proposal because:
/// (a) the standard proposal is changing based on committee
/// feedback, and (b) this library provides additional
/// fast-evolving features based on usage experience that do not
/// have corresponding proposed standard wording.
/// - The main differences are:
/// o This library uses an extra atomic template parameter for
/// testing and debugging (see the sketch at the end of this list).
/// o This library does not support a custom polymorphic allocator
/// (C++17) parameter for the hazptr_domain constructor, until
/// such support becomes widely available.
/// o The construction of empty and non-empty hazptr_holder-s is
/// reversed. This library will conform eventually.
/// o hazptr_holder member functions get_protected and reset are
/// called protect and reset_protected, respectively, in the
/// latest proposal. This library will conform eventually.
/// o hazptr_array and hazptr_local are not part of the standard
/// proposal.
/// o Link counting support and protection of linked structures are
/// not part of the current standard proposal.
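///
/// Deterministic testing sketch (hedged): the extra Atom template
/// parameter allows tests to substitute folly's
/// test::DeterministicAtomic (folly/test/DeterministicSchedule.h)
/// for std::atomic:
///   folly::hazptr_domain<test::DeterministicAtomic> domain;
///   folly::hazptr_holder<test::DeterministicAtomic> h;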
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/synchronization/HazptrObj.h>
#include <folly/synchronization/HazptrRec.h>
#include <folly/synchronization/HazptrThrLocal.h>
#include <folly/Portability.h>
#include <folly/Singleton.h>
#include <folly/synchronization/AsymmetricMemoryBarrier.h>
#include <atomic>
#include <chrono> // for steady_clock in try_timed_cleanup
#include <unordered_set> // for hash set in bulk_reclaim
///
/// Classes related to hazard pointer domains.
///
namespace folly {
/**
* hazptr_domain
*
* A domain manages a set of hazard pointers and a set of retired objects.
*
* Most user code need not specify any domains.
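 *
 * A hedged usage sketch for a custom domain (obj is a hypothetical
 * pointer to a hazptr_obj_base-derived object; most code can rely
 * on the default domain and needs none of this):
 *
 *   folly::hazptr_domain<> myDomain;
 *   obj->retire(myDomain);           // retire obj to myDomain
 *   folly::hazptr_cleanup(myDomain); // reclaim reclaimable objects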
*/
template <template <typename> class Atom>
class hazptr_domain {
static constexpr int kThreshold = 1000;
static constexpr int kMultiplier = 2;
static constexpr uint64_t kSyncTimePeriod{2000000000}; // nanoseconds
Atom<hazptr_rec<Atom>*> hazptrs_{nullptr};
Atom<hazptr_obj<Atom>*> retired_{nullptr};
Atom<uint64_t> sync_time_{0};
/* Using signed int for rcount_ because it may transiently be negative.
Using signed int for all integer variables that may be involved in
calculations related to the value of rcount_. */
Atom<int> hcount_{0};
Atom<int> rcount_{0};
public:
/** Constructor */
hazptr_domain() = default;
/** Destructor */
~hazptr_domain() {
reclaim_all_objects();
free_hazptr_recs();
}
hazptr_domain(const hazptr_domain&) = delete;
hazptr_domain(hazptr_domain&&) = delete;
hazptr_domain& operator=(const hazptr_domain&) = delete;
hazptr_domain& operator=(hazptr_domain&&) = delete;
public:
/** retire - nonintrusive - allocates memory */
template <typename T, typename D = std::default_delete<T>>
void retire(T* obj, D reclaim = {}) {
struct hazptr_retire_node : hazptr_obj<Atom> {
std::unique_ptr<T, D> obj_;
hazptr_retire_node(T* retireObj, D toReclaim)
: obj_{retireObj, std::move(toReclaim)} {}
};
auto node = new hazptr_retire_node(obj, std::move(reclaim));
node->reclaim_ = [](hazptr_obj<Atom>* p) {
delete static_cast<hazptr_retire_node*>(p);
};
push_retired(node, node, 1);
}
/** cleanup */
void cleanup() noexcept {
relaxed_cleanup();
}
private:
friend void hazptr_domain_push_retired<Atom>(
hazptr_obj<Atom>*,
hazptr_obj<Atom>*,
int,
hazptr_domain<Atom>&) noexcept;
friend class hazptr_holder<Atom>;
#if FOLLY_HAZPTR_THR_LOCAL
friend class hazptr_tc<Atom>;
#endif
/** hprec_acquire */
hazptr_rec<Atom>* hprec_acquire() {
auto rec = try_acquire_existing_hprec();
return rec != nullptr ? rec : acquire_new_hprec();
}
/** hprec_release */
void hprec_release(hazptr_rec<Atom>* hprec) noexcept {
hprec->release();
}
/** push_retired */
void push_retired(hazptr_obj<Atom>* head, hazptr_obj<Atom>* tail, int count) {
/*** Full fence ***/ asymmetricLightBarrier();
while (true) {
auto r = retired();
tail->set_next(r);
if (retired_.compare_exchange_weak(
r, head, std::memory_order_release, std::memory_order_acquire)) {
break;
}
}
rcount_.fetch_add(count, std::memory_order_release);
if (try_timed_cleanup()) {
return;
}
if (reached_threshold(rcount(), hcount())) {
try_bulk_reclaim();
}
}
hazptr_rec<Atom>* head() const noexcept {
return hazptrs_.load(std::memory_order_acquire);
}
hazptr_obj<Atom>* retired() const noexcept {
return retired_.load(std::memory_order_acquire);
}
int hcount() const noexcept {
return hcount_.load(std::memory_order_acquire);
}
int rcount() const noexcept {
return rcount_.load(std::memory_order_acquire);
}
bool reached_threshold(int rc, int hc) const noexcept {
return rc >= kThreshold && rc >= kMultiplier * hc;
}
void reclaim_all_objects() {
auto retired = retired_.exchange(nullptr);
while (retired) {
auto obj = retired;
while (obj) {
auto next = obj->next();
DCHECK(obj != next);
(*(obj->reclaim()))(obj);
obj = next;
}
retired = retired_.exchange(nullptr);
}
}
void free_hazptr_recs() {
/* Leak the hazard pointers for the default domain to avoid
destruction order issues with thread caches. */
if (this == &default_hazptr_domain<Atom>()) {
return;
}
auto rec = head();
while (rec) {
auto next = rec->next();
DCHECK(!rec->active());
delete rec;
rec = next;
}
}
void relaxed_cleanup() noexcept {
#if FOLLY_HAZPTR_THR_LOCAL
hazptr_obj<Atom>* h = nullptr;
hazptr_obj<Atom>* t = nullptr;
for (hazptr_priv<Atom>& priv :
hazptr_priv_singleton<Atom>::accessAllThreads()) {
priv.collect(h, t);
}
if (h) {
DCHECK(t);
push_retired(h, t, 0);
}
#endif
rcount_.store(0, std::memory_order_release);
bulk_reclaim();
}
void try_bulk_reclaim() {
auto hc = hcount();
auto rc = rcount();
if (!reached_threshold(rc, hc)) {
return;
}
rc = rcount_.exchange(0, std::memory_order_release);
if (!reached_threshold(rc, hc)) {
/* No need to add rc back to rcount_. At least one concurrent
try_bulk_reclaim will proceed to bulk_reclaim. */
return;
}
bulk_reclaim();
}
void bulk_reclaim() {
auto obj = retired_.exchange(nullptr, std::memory_order_acquire);
/*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
auto rec = hazptrs_.load(std::memory_order_acquire);
/* Part 1 - read hazard pointer values into private search structure */
std::unordered_set<const void*> hashset; // TODO: lock-free fixed hash set
for (; rec; rec = rec->next()) {
hashset.insert(rec->hazptr());
}
/* Part 2 - for each retired object, reclaim if no match */
bulk_lookup_and_reclaim(obj, hashset);
}
void bulk_lookup_and_reclaim(
hazptr_obj<Atom>* obj,
const std::unordered_set<const void*>& hashset) {
int rcount = 0;
hazptr_obj<Atom>* head = nullptr;
hazptr_obj<Atom>* tail = nullptr;
while (obj) {
auto next = obj->next();
DCHECK_NE(obj, next);
if (hashset.count(obj->raw_ptr()) == 0) {
(*(obj->reclaim()))(obj);
} else {
obj->set_next(head);
head = obj;
if (tail == nullptr) {
tail = obj;
}
++rcount;
}
obj = next;
}
if (tail) {
push_retired(head, tail, rcount);
}
}
bool try_timed_cleanup() {
uint64_t time = std::chrono::duration_cast<std::chrono::nanoseconds>(
std::chrono::steady_clock::now().time_since_epoch())
.count();
auto prevtime = sync_time_.load(std::memory_order_relaxed);
if (time < prevtime ||
!sync_time_.compare_exchange_strong(
prevtime, time + kSyncTimePeriod, std::memory_order_relaxed)) {
return false;
}
relaxed_cleanup(); // calling regular cleanup may self-deadlock
return true;
}
hazptr_rec<Atom>* try_acquire_existing_hprec() {
auto rec = head();
while (rec) {
auto next = rec->next();
if (rec->try_acquire()) {
return rec;
}
rec = next;
}
return nullptr;
}
hazptr_rec<Atom>* acquire_new_hprec() {
auto rec = new hazptr_rec<Atom>;
rec->set_active();
while (true) {
auto h = head();
rec->set_next(h);
if (hazptrs_.compare_exchange_weak(
h, rec, std::memory_order_release, std::memory_order_acquire)) {
break;
}
}
hcount_.fetch_add(1);
return rec;
}
}; // hazptr_domain
/**
* Free functions related to hazptr domains
*/
/** default_hazptr_domain: Returns reference to the default domain */
template <template <typename> class Atom>
struct hazptr_default_domain_helper {
static FOLLY_ALWAYS_INLINE hazptr_domain<Atom>& get() {
static hazptr_domain<Atom> domain;
return domain;
}
};
template <>
struct hazptr_default_domain_helper<std::atomic> {
static FOLLY_ALWAYS_INLINE hazptr_domain<std::atomic>& get() {
return default_domain;
}
};
template <template <typename> class Atom>
FOLLY_ALWAYS_INLINE hazptr_domain<Atom>& default_hazptr_domain() {
return hazptr_default_domain_helper<Atom>::get();
}
/** hazptr_domain_push_retired: push a list of retired objects into a domain */
template <template <typename> class Atom>
void hazptr_domain_push_retired(
hazptr_obj<Atom>* head,
hazptr_obj<Atom>* tail,
int rcount,
hazptr_domain<Atom>& domain) noexcept {
domain.push_retired(head, tail, rcount);
}
template <template <typename> class Atom, typename T, typename D>
FOLLY_ALWAYS_INLINE void hazptr_retire(T* obj, D reclaim) {
default_hazptr_domain<Atom>().retire(obj, std::move(reclaim));
}
/** hazptr_cleanup: Reclaims all reclaimable objects retired to the domain */
template <template <typename> class Atom>
void hazptr_cleanup(hazptr_domain<Atom>& domain) noexcept {
domain.cleanup();
}
} // namespace folly
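// A hedged usage sketch for the free functions above (obj is a
// hypothetical pointer to an object not derived from hazptr_obj):
//
//   folly::hazptr_retire(obj); // nonintrusive retire with default
//                              // deleter and default domain
//   folly::hazptr_cleanup();   // reclaim all reclaimable objects
//                              // retired to the default domain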
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/CPortability.h>
#include <folly/Portability.h>
#include <glog/logging.h>
#include <atomic>
#include <memory>
///
/// Classes related to objects protected by hazard pointers.
///
namespace folly {
/**
* hazptr_obj
*
* Object protected by hazard pointers.
*/
template <template <typename> class Atom>
class hazptr_obj {
using ReclaimFnPtr = void (*)(hazptr_obj*);
ReclaimFnPtr reclaim_;
hazptr_obj<Atom>* next_;
public:
/** Constructors */
/* All constructors set next_ to this in order to catch misuse bugs
such as double retire. */
hazptr_obj() noexcept : next_(this) {}
hazptr_obj(const hazptr_obj<Atom>&) noexcept : next_(this) {}
hazptr_obj(hazptr_obj<Atom>&&) noexcept : next_(this) {}
/** Copy operator */
hazptr_obj<Atom>& operator=(const hazptr_obj<Atom>&) noexcept {
return *this;
}
/** Move operator */
hazptr_obj<Atom>& operator=(hazptr_obj<Atom>&&) noexcept {
return *this;
}
private:
friend class hazptr_domain<Atom>;
template <typename, template <typename> class, typename>
friend class hazptr_obj_base;
template <typename, template <typename> class, typename>
friend class hazptr_obj_base_refcounted;
friend class hazptr_priv<Atom>;
hazptr_obj<Atom>* next() const noexcept {
return next_;
}
void set_next(hazptr_obj* obj) noexcept {
next_ = obj;
}
ReclaimFnPtr reclaim() noexcept {
return reclaim_;
}
const void* raw_ptr() const {
return this;
}
void pre_retire_check() noexcept {
// Only for catching misuse bugs like double retire
if (next_ != this) {
pre_retire_check_fail();
}
}
void do_retire(hazptr_domain<Atom>& domain) {
#if FOLLY_HAZPTR_THR_LOCAL
if (&domain == &default_hazptr_domain<Atom>()) {
hazptr_priv_tls<Atom>().push(this);
return;
}
#endif
hazptr_domain_push_retired(this, this, 1, domain);
}
FOLLY_NOINLINE void pre_retire_check_fail() noexcept {
CHECK_EQ(next_, this);
}
}; // hazptr_obj
/**
* hazptr_obj_base
*
* Base template for objects protected by hazard pointers.
*/
template <typename T, template <typename> class Atom, typename D>
class hazptr_obj_base : public hazptr_obj<Atom> {
D deleter_; // TODO: EBO
public:
/* Retire a removed object and pass the responsibility for
* reclaiming it to the hazptr library */
void retire(
D deleter = {},
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) {
pre_retire(std::move(deleter));
set_reclaim();
this->do_retire(domain); // defined in hazptr_obj
}
void retire(hazptr_domain<Atom>& domain) {
retire({}, domain);
}
private:
void pre_retire(D deleter) {
this->pre_retire_check(); // defined in hazptr_obj
deleter_ = std::move(deleter);
}
void set_reclaim() {
this->reclaim_ = [](hazptr_obj<Atom>* p) {
auto hobp = static_cast<hazptr_obj_base<T, Atom, D>*>(p);
auto obj = static_cast<T*>(hobp);
hobp->deleter_(obj);
};
}
}; // hazptr_obj_base
/**
* hazptr_obj_base_refcounted
*
* Base template for reference counted objects protected by hazard
* pointers.
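 *
 * A hedged sketch of the counting semantics (Node is hypothetical):
 *
 *   auto p = new Node();          // refcount_ == 0: one implicit ref
 *   p->acquire_ref();             // refcount_ == 1
 *   bool safe = p->release_ref(); // false: an extra ref was present
 *   safe = p->release_ref();      // true: no extra refs remained
 *
 * The reclamation function set by retire() itself calls
 * release_ref() and deletes the object only when that call returns
 * true.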
*/
template <typename T, template <typename> class Atom, typename D>
class hazptr_obj_base_refcounted : public hazptr_obj<Atom> {
Atom<uint32_t> refcount_{0};
D deleter_;
public:
/* Retire a removed object and pass the responsibility for
* reclaiming it to the hazptr library */
void retire(
D deleter = {},
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) {
this->pre_retire(std::move(deleter)); // defined in hazptr_obj
set_reclaim();
this->do_retire(domain); // defined in hazptr_obj
}
void retire(hazptr_domain<Atom>& domain) {
retire({}, domain);
}
/* Increments the reference count. */
void acquire_ref() noexcept {
refcount_.fetch_add(1u, std::memory_order_acq_rel);
}
/* The same as acquire_ref() except that in addition the caller
* guarantees that the call is made in a thread-safe context, e.g.,
* the object is not yet shared. This is just an optimization to
* save an atomic read-modify-write operation. */
void acquire_ref_safe() noexcept {
auto oldval = refcount_.load(std::memory_order_acquire);
refcount_.store(oldval + 1u, std::memory_order_release);
}
/* Decrements the reference count and returns true if the object is
* safe to reclaim. */
bool release_ref() noexcept {
auto oldval = refcount_.load(std::memory_order_acquire);
if (oldval > 0u) {
oldval = refcount_.fetch_sub(1u, std::memory_order_acq_rel);
} else {
if (kIsDebug) {
refcount_.store(~0u);
}
}
return oldval == 0;
}
private:
void pre_retire(D deleter) {
this->pre_retire_check(); // defined in hazptr_obj
deleter_ = std::move(deleter);
}
void set_reclaim() {
this->reclaim_ = [](hazptr_obj<Atom>* p) {
auto hrobp = static_cast<hazptr_obj_base_refcounted<T, Atom, D>*>(p);
if (hrobp->release_ref()) {
auto obj = static_cast<T*>(hrobp);
hrobp->deleter_(obj);
}
};
}
}; // hazptr_obj_base_refcounted
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/concurrency/CacheLocality.h>
#include <atomic>
namespace folly {
/**
* hazptr_rec:
*
* Contains the actual hazard pointer.
*/
template <template <typename> class Atom>
class alignas(hardware_destructive_interference_size) hazptr_rec {
Atom<const void*> hazptr_{nullptr}; // the hazard pointer
hazptr_rec* next_{nullptr};
Atom<bool> active_{false};
friend class hazptr_domain<Atom>;
friend class hazptr_holder<Atom>;
friend class hazptr_tc_entry<Atom>;
const void* hazptr() const noexcept {
return hazptr_.load(std::memory_order_acquire);
}
FOLLY_ALWAYS_INLINE void reset_hazptr(const void* p = nullptr) noexcept {
hazptr_.store(p, std::memory_order_release);
}
bool active() const noexcept {
return active_.load(std::memory_order_acquire);
}
void set_active() noexcept {
active_.store(true, std::memory_order_relaxed);
}
bool try_acquire() noexcept {
bool a = active();
return !a &&
active_.compare_exchange_strong(
a, true, std::memory_order_release, std::memory_order_relaxed);
}
void release() noexcept {
active_.store(false, std::memory_order_release);
}
hazptr_rec<Atom>* next() {
return next_;
}
void set_next(hazptr_rec<Atom>* rec) {
next_ = rec;
}
}; // hazptr_rec
} // namespace folly
/*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr-fwd.h>
#if FOLLY_HAZPTR_THR_LOCAL
#include <folly/synchronization/HazptrObj.h>
#include <folly/synchronization/HazptrRec.h>
#include <folly/SingletonThreadLocal.h>
#include <glog/logging.h>
#include <atomic>
/**
* Thread local classes and singletons
*/
namespace folly {
/**
* hazptr_tc_entry
*
* Thread cache entry.
*/
template <template <typename> class Atom>
class hazptr_tc_entry {
hazptr_rec<Atom>* hprec_;
template <uint8_t, template <typename> class>
friend class hazptr_array;
template <uint8_t, template <typename> class>
friend class hazptr_local;
friend class hazptr_tc<Atom>;
FOLLY_ALWAYS_INLINE void fill(hazptr_rec<Atom>* hprec) noexcept {
hprec_ = hprec;
}
FOLLY_ALWAYS_INLINE hazptr_rec<Atom>* get() const noexcept {
return hprec_;
}
void evict() {
hprec_->release();
}
}; // hazptr_tc_entry
/**
* hazptr_tc:
*
* Thread cache of hazptr_rec-s that belong to the default domain.
*/
template <template <typename> class Atom>
class hazptr_tc {
static constexpr uint8_t kCapacity = 3;
hazptr_tc_entry<Atom> entry_[kCapacity];
uint8_t count_{0};
bool local_{false}; // for debug mode only
public:
~hazptr_tc() {
for (uint8_t i = 0; i < count(); ++i) {
entry_[i].evict();
}
}
static constexpr uint8_t capacity() noexcept {
return kCapacity;
}
private:
template <uint8_t, template <typename> class>
friend class hazptr_array;
friend class hazptr_holder<Atom>;
template <uint8_t, template <typename> class>
friend class hazptr_local;
FOLLY_ALWAYS_INLINE
hazptr_tc_entry<Atom>& operator[](uint8_t i) noexcept {
DCHECK(i <= capacity());
return entry_[i];
}
FOLLY_ALWAYS_INLINE hazptr_rec<Atom>* try_get() noexcept {
if (LIKELY(count_ > 0)) {
auto hprec = entry_[--count_].get();
return hprec;
}
return nullptr;
}
FOLLY_ALWAYS_INLINE bool try_put(hazptr_rec<Atom>* hprec) noexcept {
if (LIKELY(count_ < capacity())) {
entry_[count_++].fill(hprec);
return true;
}
return false;
}
FOLLY_ALWAYS_INLINE uint8_t count() const noexcept {
return count_;
}
FOLLY_ALWAYS_INLINE void set_count(uint8_t val) noexcept {
count_ = val;
}
FOLLY_NOINLINE void fill(uint8_t num) {
DCHECK_LE(count_ + num, capacity());
auto& domain = default_hazptr_domain<Atom>();
for (uint8_t i = 0; i < num; ++i) {
auto hprec = domain.hprec_acquire();
entry_[count_++].fill(hprec);
}
}
FOLLY_NOINLINE void evict(uint8_t num) {
DCHECK_GE(count_, num);
for (uint8_t i = 0; i < num; ++i) {
entry_[--count_].evict();
}
}
bool local() const noexcept { // for debugging only
return local_;
}
void set_local(bool b) noexcept { // for debugging only
local_ = b;
}
}; // hazptr_tc
/** hazptr_tc_tls */
template <template <typename> class Atom>
FOLLY_ALWAYS_INLINE hazptr_tc<Atom>& hazptr_tc_tls() {
return folly::SingletonThreadLocal<hazptr_tc<Atom>, void>::get();
}
/**
* hazptr_priv
*
* Per-thread list of retired objects to be pushed in bulk to domain.
*/
template <template <typename> class Atom>
class hazptr_priv {
static constexpr int kThreshold = 20;
Atom<hazptr_obj<Atom>*> head_;
Atom<hazptr_obj<Atom>*> tail_;
int rcount_;
public:
hazptr_priv() : head_(nullptr), tail_(nullptr), rcount_(0) {}
~hazptr_priv() {
if (!empty()) {
push_all_to_domain();
}
}
private:
friend class hazptr_domain<Atom>;
friend class hazptr_obj<Atom>;
bool empty() const noexcept {
return head() == nullptr;
}
void push(hazptr_obj<Atom>* obj) {
while (true) {
if (tail()) {
if (push_in_non_empty_list(obj)) {
break;
}
} else {
if (push_in_empty_list(obj)) {
break;
}
}
}
if (++rcount_ >= kThreshold) {
push_all_to_domain();
}
}
void push_all_to_domain() {
hazptr_obj<Atom>* h = nullptr;
hazptr_obj<Atom>* t = nullptr;
collect(h, t);
if (h) {
DCHECK(t);
hazptr_domain_push_retired<Atom>(h, t, rcount_);
rcount_ = 0;
}
}
void collect(
hazptr_obj<Atom>*& colHead,
hazptr_obj<Atom>*& colTail) noexcept {
// This function doesn't change rcount_.
// The value rcount_ is accurate excluding the effects of calling collect().
auto h = exchange_head();
if (h) {
auto t = exchange_tail();
DCHECK(t);
if (colTail) {
colTail->set_next(h);
} else {
colHead = h;
}
colTail = t;
}
}
hazptr_obj<Atom>* head() const noexcept {
return head_.load(std::memory_order_acquire);
}
hazptr_obj<Atom>* tail() const noexcept {
return tail_.load(std::memory_order_acquire);
}
void set_head(hazptr_obj<Atom>* obj) noexcept {
head_.store(obj, std::memory_order_release);
}
bool cas_head(hazptr_obj<Atom>* expected, hazptr_obj<Atom>* obj) noexcept {
return head_.compare_exchange_weak(
expected, obj, std::memory_order_acq_rel, std::memory_order_relaxed);
}
bool cas_tail(hazptr_obj<Atom>* expected, hazptr_obj<Atom>* obj) noexcept {
return tail_.compare_exchange_weak(
expected, obj, std::memory_order_acq_rel, std::memory_order_relaxed);
}
hazptr_obj<Atom>* exchange_head() noexcept {
return head_.exchange(nullptr, std::memory_order_acq_rel);
}
hazptr_obj<Atom>* exchange_tail() noexcept {
return tail_.exchange(nullptr, std::memory_order_acq_rel);
}
bool push_in_non_empty_list(hazptr_obj<Atom>* obj) noexcept {
auto h = head();
if (h) {
obj->set_next(h);
if (cas_head(h, obj)) {
return true;
}
}
return false;
}
bool push_in_empty_list(hazptr_obj<Atom>* obj) noexcept {
hazptr_obj<Atom>* t = nullptr;
obj->set_next(nullptr);
if (cas_tail(t, obj)) {
set_head(obj);
return true;
}
return false;
}
}; // hazptr_priv
/** hazptr_priv_tls */
struct HazptrTag {};
template <template <typename> class Atom>
using hazptr_priv_singleton =
folly::SingletonThreadLocal<hazptr_priv<Atom>, HazptrTag>;
template <template <typename> class Atom>
FOLLY_ALWAYS_INLINE hazptr_priv<Atom>& hazptr_priv_tls() {
return hazptr_priv_singleton<Atom>::get();
}
} // namespace folly
#endif // FOLLY_HAZPTR_THR_LOCAL
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr.h>
namespace folly {
template <typename T, template <typename> class Atom = std::atomic>
class HazptrLockFreeLIFO {
struct Node;
Atom<Node*> head_;
public:
HazptrLockFreeLIFO() : head_(nullptr) {}
~HazptrLockFreeLIFO() {
Node* next;
for (auto node = head(); node; node = next) {
next = node->next();
node->retire();
}
hazptr_cleanup<Atom>();
}
void push(T val) {
auto node = new Node(val, head());
while (!cas_head(node->next_, node)) {
/* try again */;
}
}
bool pop(T& val) {
hazptr_local<1, Atom> h;
hazptr_holder<Atom>& hptr = h[0];
Node* node;
while (true) {
node = hptr.get_protected(head_);
if (node == nullptr) {
return false;
}
auto next = node->next();
if (cas_head(node, next)) {
break;
}
}
hptr.reset();
val = node->value();
node->retire();
return true;
}
private:
Node* head() {
return head_.load(std::memory_order_acquire);
}
bool cas_head(Node*& expected, Node* newval) {
return head_.compare_exchange_weak(
expected, newval, std::memory_order_acq_rel, std::memory_order_acquire);
}
struct Node : public hazptr_obj_base<Node, Atom> {
T value_;
Node* next_;
Node(T v, Node* n) : value_(v), next_(n) {}
Node* next() {
return next_;
}
T value() {
return value_;
}
};
};
} // namespace folly
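// A hedged usage sketch for HazptrLockFreeLIFO (illustrative only):
//
//   folly::HazptrLockFreeLIFO<int> stack;
//   stack.push(42);
//   int v;
//   if (stack.pop(v)) {
//     /* v == 42; the popped node was retired through the hazptr
//        library and is reclaimed only when unprotected */
//   }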
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr.h>
#include <atomic>
namespace folly {
/** Set implemented as an ordered singly-linked list.
*
* A single writer thread may add or remove elements. Multiple reader
* threads may search the set concurrently with each other and with
* the writer's operations.
*/
template <typename T, template <typename> class Atom = std::atomic>
class HazptrSWMRSet {
template <typename Node>
struct Reclaimer {
void operator()(Node* p) {
delete p;
}
};
struct Node : public hazptr_obj_base<Node, Atom, Reclaimer<Node>> {
T elem_;
Atom<Node*> next_;
Node(T e, Node* n) : elem_(e), next_(n) {}
};
Atom<Node*> head_{nullptr};
public:
HazptrSWMRSet() : head_(nullptr) {}
~HazptrSWMRSet() {
auto p = head_.load();
while (p) {
auto next = p->next_.load();
delete p;
p = next;
}
}
bool add(T v) {
auto prev = &head_;
locate_lower_bound(v, prev);
auto curr = prev->load(std::memory_order_relaxed);
if (curr && curr->elem_ == v) {
return false;
}
prev->store(new Node(std::move(v), curr));
return true;
}
bool remove(const T& v) {
auto prev = &head_;
locate_lower_bound(v, prev);
auto curr = prev->load(std::memory_order_relaxed);
if (!curr || curr->elem_ != v) {
return false;
}
Node* curr_next = curr->next_.load();
// Patch up the actual list...
prev->store(curr_next, std::memory_order_release);
// ...and only then null out the removed node.
curr->next_.store(nullptr, std::memory_order_release);
curr->retire();
return true;
}
/* Used by readers */
bool contains(const T& val) const {
/* Two hazard pointers for hand-over-hand traversal. */
hazptr_local<2, Atom> hptr;
hazptr_holder<Atom>* hptr_prev = &hptr[0];
hazptr_holder<Atom>* hptr_curr = &hptr[1];
while (true) {
auto prev = &head_;
auto curr = prev->load(std::memory_order_acquire);
while (true) {
if (!curr) {
return false;
}
if (!hptr_curr->try_protect(curr, *prev)) {
break;
}
auto next = curr->next_.load(std::memory_order_acquire);
if (prev->load(std::memory_order_acquire) != curr) {
break;
}
if (curr->elem_ == val) {
return true;
} else if (!(curr->elem_ < val)) {
return false; // because the list is sorted
}
prev = &(curr->next_);
curr = next;
std::swap(hptr_curr, hptr_prev);
}
}
}
private:
/* Used by the single writer */
void locate_lower_bound(const T& v, Atom<Node*>*& prev) const {
auto curr = prev->load(std::memory_order_relaxed);
while (curr) {
if (curr->elem_ >= v) {
break;
}
prev = &(curr->next_);
curr = curr->next_.load(std::memory_order_relaxed);
}
return;
}
};
} // namespace folly
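// A hedged usage sketch for HazptrSWMRSet (illustrative only):
//
//   folly::HazptrSWMRSet<int> set;
//   set.add(3);    // writer thread only
//   set.remove(3); // writer thread only
//   /* contains() may run in any reader thread, concurrently with
//      the writer and with other readers: */
//   bool found = set.contains(3);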
/*
* Copyright 2016-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/synchronization/Hazptr.h>
#include <string>
namespace folly {
/** Wide CAS.
*/
template <typename T, template <typename> class Atom = std::atomic>
class HazptrWideCAS {
struct Node : public hazptr_obj_base<Node, Atom> {
T val_;
explicit Node(T v = {}) : val_(v) {}
};
Atom<Node*> node_;
public:
HazptrWideCAS() : node_(new Node()) {}
~HazptrWideCAS() {
delete node_.load(std::memory_order_relaxed);
}
bool cas(T& u, T& v) {
Node* n = new Node(v);
hazptr_holder<Atom> hptr;
Node* p;
while (true) {
p = hptr.get_protected(node_);
if (p->val_ != u) {
delete n;
return false;
}
if (node_.compare_exchange_weak(
p, n, std::memory_order_release, std::memory_order_relaxed)) {
break;
}
}
hptr.reset();
p->retire();
return true;
}
};
} // namespace folly
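// A hedged usage sketch for HazptrWideCAS (illustrative only):
//
//   folly::HazptrWideCAS<std::string> wcas;
//   std::string u = "";       // expected current value (the initial
//                             // Node holds a value-initialized T)
//   std::string v = "new";
//   bool ok = wcas.cas(u, v); // true iff the value matched u; the
//                             // replaced node is retired via hazptr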