Commit 47ee6e69 authored by Maged Michael, committed by Facebook GitHub Bot

hazptr: Use WG21 P1121 function names protect and reset_protection

Summary:
Change the hazptr_holder member function names get_protected and reset to protect and reset_protection, respectively, matching the names in [WG21 P1121](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p1121r3.pdf).

The primary change is in folly/synchronization/HazptrHolder.h
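
For context, a minimal before/after sketch of the renamed calls, modeled on the get_config example in the Hazptr.h docs (the Config type and config_ source here are hypothetical):

#include <atomic>
#include <folly/synchronization/Hazptr.h>

// Hypothetical protected object, for illustration only.
struct Config : folly::hazptr_obj_base<Config> {
  int get_config(int v) const { return v; }
};

std::atomic<Config*> config_{nullptr};

int get_config(int v) {
  folly::hazptr_holder<> h; // h owns a hazard pointer
  // Before this change: Config* ptr = h.get_protected(config_);
  Config* ptr = h.protect(config_); // *ptr is protected while h holds it
  int result = ptr ? ptr->get_config(v) : -1;
  // Before this change: h.reset();
  h.reset_protection(); // *ptr is no longer protected
  return result;
}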

Reviewed By: yfeldblum

Differential Revision: D28662903

fbshipit-source-id: d074c718514716229daaf8dae14b5c275942b38a
parent 5e9db683
@@ -130,7 +130,7 @@ class AtomicCoreCachedSharedPtr {
  std::shared_ptr<T> get() const {
    folly::hazptr_local<1> hazptr;
-    auto slots = hazptr[0].get_protected(slots_);
+    auto slots = hazptr[0].protect(slots_);
    if (!slots) {
      return nullptr;
    }
......
@@ -362,7 +362,7 @@ class UnboundedQueue {
      // Using hazptr_holder instead of hazptr_local because it is
      // possible that the T ctor happens to use hazard pointers.
      hazptr_holder<Atom> hptr;
-      Segment* s = hptr.get_protected(p_.tail);
+      Segment* s = hptr.protect(p_.tail);
      enqueueCommon(s, std::forward<Arg>(arg));
    }
  }
@@ -397,7 +397,7 @@ class UnboundedQueue {
      // possible to call the T dtor and it may happen to use hazard
      // pointers.
      hazptr_holder<Atom> hptr;
-      Segment* s = hptr.get_protected(c_.head);
+      Segment* s = hptr.protect(c_.head);
      return dequeueCommon(s);
    }
  }
@@ -428,7 +428,7 @@ class UnboundedQueue {
      // Using hazptr_holder instead of hazptr_local because it is
      // possible to call ~T() and it may happen to use hazard pointers.
      hazptr_holder<Atom> hptr;
-      Segment* s = hptr.get_protected(c_.head);
+      Segment* s = hptr.protect(c_.head);
      return tryDequeueUntilMC(s, deadline);
    }
  }
......
@@ -353,13 +353,13 @@ class alignas(64) BucketTable {
    auto idx = getIdx(bcount, h);
    auto prev = &buckets->buckets_[idx]();
-    auto node = hazcurr.get_protected(*prev);
+    auto node = hazcurr.protect(*prev);
    while (node) {
      if (KeyEqual()(k, node->getItem().first)) {
        res.setNode(node, buckets, bcount, idx);
        return true;
      }
-      node = haznext.get_protected(node->next_);
+      node = haznext.protect(node->next_);
      hazcurr.swap(haznext);
    }
    return false;
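
(The loop above is hand-over-hand protection: the next node is protected before protection of the current one is dropped. A minimal standalone sketch, with a hypothetical Node type:)

#include <atomic>
#include <folly/synchronization/Hazptr.h>

struct Node : folly::hazptr_obj_base<Node> {
  int key{0};
  std::atomic<Node*> next_{nullptr};
};

// Hand-over-hand traversal: every node visited is covered by one of the
// two hazard pointers at all times.
bool list_contains(const std::atomic<Node*>& head, int k) {
  folly::hazptr_array<2> h;
  auto& hazcurr = h[0];
  auto& haznext = h[1];
  Node* node = hazcurr.protect(head);
  while (node) {
    if (node->key == k) {
      return true;
    }
    node = haznext.protect(node->next_);
    hazcurr.swap(haznext); // protection of the new node moves to hazcurr
  }
  return false;
}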
@@ -396,7 +396,7 @@ class alignas(64) BucketTable {
    }
    if (iter) {
-      iter->hazptrs_[0].reset(buckets);
+      iter->hazptrs_[0].reset_protection(buckets);
      iter->setNode(
          node->next_.load(std::memory_order_acquire),
          buckets,
@@ -534,7 +534,7 @@ class alignas(64) BucketTable {
    const Iterator& operator++() {
      DCHECK(node_);
-      node_ = hazptrs_[2].get_protected(node_->next_);
+      node_ = hazptrs_[2].protect(node_->next_);
      hazptrs_[1].swap(hazptrs_[2]);
      if (!node_) {
        ++idx_;
@@ -549,7 +549,7 @@ class alignas(64) BucketTable {
          break;
        }
        DCHECK(buckets_);
-        node_ = hazptrs_[1].get_protected(buckets_->buckets_[idx_]());
+        node_ = hazptrs_[1].protect(buckets_->buckets_[idx_]());
        if (node_) {
          break;
        }
@@ -604,7 +604,7 @@ class alignas(64) BucketTable {
    while (true) {
      auto seqlock = seqlock_.load(std::memory_order_acquire);
      bcount = bucket_count_.load(std::memory_order_acquire);
-      buckets = hazptr.get_protected(buckets_);
+      buckets = hazptr.protect(buckets_);
      auto seqlock2 = seqlock_.load(std::memory_order_acquire);
      if (!(seqlock & 1) && (seqlock == seqlock2)) {
        break;
@@ -646,12 +646,12 @@ class alignas(64) BucketTable {
    auto prev = head;
    auto& hazbuckets = it.hazptrs_[0];
    auto& haznode = it.hazptrs_[1];
-    hazbuckets.reset(buckets);
+    hazbuckets.reset_protection(buckets);
    while (node) {
      // Is the key found?
      if (KeyEqual()(k, node->getItem().first)) {
        it.setNode(node, buckets, bcount, idx);
-        haznode.reset(node);
+        haznode.reset_protection(node);
        if (type == InsertType::MATCH) {
          if (!match(node->getItem().second)) {
            return false;
@@ -682,8 +682,8 @@ class alignas(64) BucketTable {
      node = node->next_.load(std::memory_order_relaxed);
    }
    if (type != InsertType::DOES_NOT_EXIST && type != InsertType::ANY) {
-      haznode.reset();
-      hazbuckets.reset();
+      haznode.reset_protection();
+      hazbuckets.reset_protection();
      return false;
    }
    // Node not found, check for rehash on ANY
@@ -698,7 +698,7 @@ class alignas(64) BucketTable {
      buckets = buckets_.load(std::memory_order_relaxed);
      DCHECK(buckets); // Use-after-destruction by user.
      bcount <<= 1;
-      hazbuckets.reset(buckets);
+      hazbuckets.reset_protection(buckets);
      idx = getIdx(bcount, h);
      head = &buckets->buckets_[idx]();
      headnode = head->load(std::memory_order_relaxed);
@@ -716,7 +716,7 @@ class alignas(64) BucketTable {
    cur->next_.store(headnode, std::memory_order_relaxed);
    head->store(cur, std::memory_order_release);
    it.setNode(cur, buckets, bcount, idx);
-    haznode.reset(cur);
+    haznode.reset_protection(cur);
    return true;
  }
@@ -1096,7 +1096,7 @@ class alignas(64) SIMDTable {
      }
      DCHECK(chunks_);
      // Note that iteration could also be implemented with tag filtering
-      node_ = hazptrs_[1].get_protected(
+      node_ = hazptrs_[1].protect(
          chunks_->getChunk(chunk_idx_, chunk_count_)->item(tag_idx_));
      if (node_) {
        break;
@@ -1284,13 +1284,13 @@ class alignas(64) SIMDTable {
      auto hits = chunk->tagMatchIter(hp.second);
      while (hits.hasNext()) {
        size_t tag_idx = hits.next();
-        Node* node = hazz.get_protected(chunk->item(tag_idx));
+        Node* node = hazz.protect(chunk->item(tag_idx));
        if (LIKELY(node && KeyEqual()(k, node->getItem().first))) {
          chunk_idx = chunk_idx & (ccount - 1);
          res.setNode(node, chunks, ccount, chunk_idx, tag_idx);
          return true;
        }
-        hazz.reset();
+        hazz.reset_protection();
      }
      if (LIKELY(chunk->outboundOverflowCount() == 0)) {
@@ -1348,7 +1348,7 @@ class alignas(64) SIMDTable {
    decSize();
    if (iter) {
-      iter->hazptrs_[0].reset(chunks);
+      iter->hazptrs_[0].reset_protection(chunks);
      iter->setNode(nullptr, chunks, ccount, chunk_idx, tag_idx + 1);
      iter->next();
    }
@@ -1467,9 +1467,9 @@ class alignas(64) SIMDTable {
    DCHECK(chunks); // Use-after-destruction by user.
    node = find_internal(k, hp, chunks, ccount, chunk_idx, tag_idx);
-    it.hazptrs_[0].reset(chunks);
+    it.hazptrs_[0].reset_protection(chunks);
    if (node) {
-      it.hazptrs_[1].reset(node);
+      it.hazptrs_[1].reset_protection(node);
      it.setNode(node, chunks, ccount, chunk_idx, tag_idx);
      if (type == InsertType::MATCH) {
        if (!match(node->getItem().second)) {
@@ -1480,7 +1480,7 @@ class alignas(64) SIMDTable {
      }
    } else {
      if (type != InsertType::DOES_NOT_EXIST && type != InsertType::ANY) {
-        it.hazptrs_[0].reset();
+        it.hazptrs_[0].reset_protection();
        return false;
      }
      // Already checked for rehash on DOES_NOT_EXIST, now check on ANY
@@ -1493,7 +1493,7 @@ class alignas(64) SIMDTable {
      ccount = chunk_count_.load(std::memory_order_relaxed);
      chunks = chunks_.load(std::memory_order_relaxed);
      DCHECK(chunks); // Use-after-destruction by user.
-      it.hazptrs_[0].reset(chunks);
+      it.hazptrs_[0].reset_protection(chunks);
    }
  }
  return true;
@@ -1542,7 +1542,7 @@ class alignas(64) SIMDTable {
    while (true) {
      auto seqlock = seqlock_.load(std::memory_order_acquire);
      ccount = chunk_count_.load(std::memory_order_acquire);
-      chunks = hazptr.get_protected(chunks_);
+      chunks = hazptr.protect(chunks_);
      auto seqlock2 = seqlock_.load(std::memory_order_acquire);
      if (!(seqlock & 1) && (seqlock == seqlock2)) {
        break;
......
@@ -468,7 +468,7 @@ class RelaxedConcurrentPriorityQueue {
  FOLLY_ALWAYS_INLINE T
  optimisticReadValue(const Position& pos, folly::hazptr_holder<Atom>& hptr) {
-    Node* tmp = hptr.get_protected(levels_[pos.level][pos.index].head);
+    Node* tmp = hptr.protect(levels_[pos.level][pos.index].head);
    return (tmp == nullptr) ? MIN_VALUE : tmp->val;
  }
......
@@ -405,7 +405,7 @@ class HazptrObserver {
  struct HazptrSnapshot {
    template <typename State>
    explicit HazptrSnapshot(const std::atomic<State*>& state)
-        : holder_(), ptr_(get(holder_).get_protected(state)->snapshot_.get()) {}
+        : holder_(), ptr_(get(holder_).protect(state)->snapshot_.get()) {}
    const T& operator*() const { return *get(); }
    const T* operator->() const { return get(); }
......
@@ -375,7 +375,7 @@ RequestContext::State::insertNewData(
FOLLY_ALWAYS_INLINE
bool RequestContext::State::hasContextData(const RequestToken& token) const {
  hazptr_local<1> h;
-  Combined* combined = h[0].get_protected(combined_);
+  Combined* combined = h[0].protect(combined_);
  return combined ? combined->requestData_.contains(token) : false;
}
@@ -383,7 +383,7 @@ FOLLY_ALWAYS_INLINE
RequestData* FOLLY_NULLABLE
RequestContext::State::getContextData(const RequestToken& token) {
  hazptr_local<1> h;
-  Combined* combined = h[0].get_protected(combined_);
+  Combined* combined = h[0].protect(combined_);
  if (!combined) {
    return nullptr;
  }
@@ -396,7 +396,7 @@ FOLLY_ALWAYS_INLINE
const RequestData* FOLLY_NULLABLE
RequestContext::State::getContextData(const RequestToken& token) const {
  hazptr_local<1> h;
-  Combined* combined = h[0].get_protected(combined_);
+  Combined* combined = h[0].protect(combined_);
  if (!combined) {
    return nullptr;
  }
@@ -409,7 +409,7 @@ FOLLY_ALWAYS_INLINE
void RequestContext::State::onSet() {
  // Don't use hazptr_local because callback may use hazptr
  hazptr_holder<> h;
-  Combined* combined = h.get_protected(combined_);
+  Combined* combined = h.protect(combined_);
  if (!combined) {
    return;
  }
@@ -423,7 +423,7 @@ FOLLY_ALWAYS_INLINE
void RequestContext::State::onUnset() {
  // Don't use hazptr_local because callback may use hazptr
  hazptr_holder<> h;
-  Combined* combined = h.get_protected(combined_);
+  Combined* combined = h.protect(combined_);
  if (!combined) {
    return;
  }
@@ -576,8 +576,8 @@ void RequestContext::clearContextData(const RequestToken& val) {
  bool checkNew = newCtx && newCtx->state_.combined();
  if (checkCur && checkNew) {
    hazptr_array<2> h;
-    auto curc = h[0].get_protected(curCtx->state_.combined_);
-    auto newc = h[1].get_protected(newCtx->state_.combined_);
+    auto curc = h[0].protect(curCtx->state_.combined_);
+    auto newc = h[1].protect(newCtx->state_.combined_);
    auto& curcb = curc->callbackData_;
    auto& newcb = newc->callbackData_;
    for (auto it = curcb.begin(); it != curcb.end(); ++it) {
......
@@ -59,9 +59,9 @@
/// protected by hazard pointers.
/// - The essential components of the hazptr API are:
///   o hazptr_holder: Class that owns and manages a hazard pointer.
-///   o get_protected: Member function of hazptr_holder. Protects
+///   o protect: Member function of hazptr_holder. Protects
///     an object pointed to by an atomic source (if not null).
-///       T* get_protected(const atomic<T*>& src);
+///       T* protect(const atomic<T*>& src);
///   o hazptr_obj_base<T>: Base class for protected objects.
///   o retire: Member function of hazptr_obj_base that automatically
///     reclaims the object when safe.
@@ -85,7 +85,7 @@
///   // Called frequently
///   U get_config(V v) {
///     hazptr_holder h; /* h owns a hazard pointer */
-///     Config* ptr = h.get_protected(config_);
+///     Config* ptr = h.protect(config_);
///     /* safe to access *ptr as long as it is protected by h */
///     return ptr->get_config(v);
///     /* h dtor resets and releases the owned hazard pointer,
@@ -112,10 +112,10 @@
/// descendants are guaranteed not to use hazard pointers, then it
/// can be faster (by ~3 ns.) to use
///   hazptr_local<1> h;
-///   Config* ptr = h[0].get_protected(config_);
+///   Config* ptr = h[0].protect(config_);
/// than
///   hazptr_holder h;
-///   Config* ptr = h.get_protected(config_);
+///   Config* ptr = h.protect(config_);
///
/// Memory Usage
/// ------------
@@ -203,9 +203,6 @@
///   such support becomes widely available.
/// o The construction of empty and non-empty hazptr_holder-s are
///   reversed. This library will conform eventually.
-/// o hazptr_holder member functions get_protected and reset are
-///   called protect and reset_protected, respectively, in the
-///   latest proposal. Will conform eventually.
/// o hazptr_array and hazptr_local are not part of the standard
///   proposal.
/// o Link counting support and protection of linked structures is
......
@@ -39,9 +39,9 @@ namespace folly {
 *   T* ptr;
 *   {
 *     hazptr_holder h;
-*     ptr = h.get_protected(src);
+*     ptr = h.protect(src);
 *     // ... *ptr is protected ...
-*     h.reset();
+*     h.reset_protection();
 *     // ... *ptr is not protected ...
 *     ptr = src.load();
 *     while (!h.try_protect(ptr, src)) {}
@@ -124,24 +124,24 @@ class hazptr_holder {
    /* Filtering the protected pointer through function Func is useful
       for stealing bits of the pointer word */
    auto p = ptr;
-    reset(f(p));
+    reset_protection(f(p));
    /*** Full fence ***/ folly::asymmetricLightBarrier();
    ptr = src.load(std::memory_order_acquire);
    if (UNLIKELY(p != ptr)) {
-      reset();
+      reset_protection();
      return false;
    }
    return true;
  }

-  /** get_protected */
+  /** protect */
  template <typename T>
-  FOLLY_ALWAYS_INLINE T* get_protected(const Atom<T*>& src) noexcept {
-    return get_protected(src, [](T* t) { return t; });
+  FOLLY_ALWAYS_INLINE T* protect(const Atom<T*>& src) noexcept {
+    return protect(src, [](T* t) { return t; });
  }

  template <typename T, typename Func>
-  FOLLY_ALWAYS_INLINE T* get_protected(const Atom<T*>& src, Func f) noexcept {
+  FOLLY_ALWAYS_INLINE T* protect(const Atom<T*>& src, Func f) noexcept {
    T* ptr = src.load(std::memory_order_relaxed);
    while (!try_protect(ptr, src, f)) {
      /* Keep trying */;
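
(A hedged sketch of the Func overload above: the filter maps the loaded pointer word to the address actually recorded in the hazard pointer, e.g. stripping a stolen low bit. The Node type and mark scheme here are hypothetical:)

#include <atomic>
#include <cstdint>
#include <folly/synchronization/Hazptr.h>

struct Node : folly::hazptr_obj_base<Node> {
  int value{0};
};

// Hypothetical mark scheme: the low pointer bit may carry a deletion mark.
Node* strip_mark(Node* n) {
  return reinterpret_cast<Node*>(
      reinterpret_cast<std::uintptr_t>(n) & ~std::uintptr_t(1));
}

int read_marked(const std::atomic<Node*>& src) {
  folly::hazptr_holder<> h;
  // The filter is applied to the value stored in the hazard pointer, so
  // protection covers the real object even when the source word is marked.
  Node* raw = h.protect(src, strip_mark);
  // protect returns the raw loaded word; strip the mark again before use.
  Node* p = strip_mark(raw);
  return p ? p->value : -1;
}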
@@ -151,13 +151,13 @@ class hazptr_holder {
  /** reset */
  template <typename T>
-  FOLLY_ALWAYS_INLINE void reset(const T* ptr) noexcept {
+  FOLLY_ALWAYS_INLINE void reset_protection(const T* ptr) noexcept {
    auto p = static_cast<hazptr_obj<Atom>*>(const_cast<T*>(ptr));
    DCHECK(hprec_); // UB if *this is empty
    hprec_->reset_hazptr(p);
  }

-  FOLLY_ALWAYS_INLINE void reset(std::nullptr_t = nullptr) noexcept {
+  FOLLY_ALWAYS_INLINE void reset_protection(std::nullptr_t = nullptr) noexcept {
    DCHECK(hprec_); // UB if *this is empty
    hprec_->reset_hazptr();
  }
@@ -285,7 +285,7 @@ class hazptr_array {
      count = cap - M;
    }
    for (uint8_t i = 0; i < M; ++i) {
-      h[i].reset();
+      h[i].reset_protection();
      tc[count + i].fill(h[i].hprec());
      new (&h[i]) hazptr_holder<Atom>(nullptr);
    }
@@ -380,7 +380,7 @@ class hazptr_local {
      tc.set_local(false);
    }
    for (uint8_t i = 0; i < M; ++i) {
-      h[i].reset();
+      h[i].reset_protection();
    }
#else
    for (uint8_t i = 0; i < M; ++i) {
......
@@ -50,7 +50,7 @@ class HazptrLockFreeLIFO {
    hazptr_holder<Atom>& hptr = h[0];
    Node* node;
    while (true) {
-      node = hptr.get_protected(head_);
+      node = hptr.protect(head_);
      if (node == nullptr) {
        return false;
      }
@@ -59,7 +59,7 @@ class HazptrLockFreeLIFO {
        break;
      }
    }
-    hptr.reset();
+    hptr.reset_protection();
    val = node->value();
    node->retire();
    return true;
......
@@ -43,7 +43,7 @@ class HazptrWideCAS {
    hazptr_holder<Atom> hptr;
    Node* p;
    while (true) {
-      p = hptr.get_protected(node_);
+      p = hptr.protect(node_);
      if (p->val_ != u) {
        delete n;
        return false;
@@ -53,7 +53,7 @@ class HazptrWideCAS {
        break;
      }
    }
-    hptr.reset();
+    hptr.reset_protection();
    p->retire();
    return true;
  }
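
(HazptrWideCAS emulates a wide CAS on T by installing a new Node and retiring the old one. A hypothetical usage sketch, assuming the two-argument cas shown above:)

#include <cstdint>
#include <folly/synchronization/example/HazptrWideCAS.h>

// Hypothetical multi-word value; operator!= is what cas compares with.
struct Wide {
  std::uint64_t a{0};
  std::uint64_t b{0};
  bool operator!=(const Wide& o) const { return a != o.a || b != o.b; }
};

void wide_cas_example() {
  folly::HazptrWideCAS<Wide> w;
  Wide expected{};    // matches the default-constructed initial value
  Wide desired{1, 2};
  bool ok = w.cas(expected, desired); // true iff current value == expected
  (void)ok;
}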
......
@@ -202,7 +202,7 @@ struct List {
  }

  bool protect_all(int val, hazptr_holder<Atom>& hptr) {
-    auto curr = hptr.get_protected(head_);
+    auto curr = hptr.protect(head_);
    while (curr) {
      auto next = curr->next();
      if (curr->value() == val) {
@@ -332,12 +332,12 @@ void basic_protection_test() {
  c_.clear();
  auto obj = new Node<Atom>;
  hazptr_holder<Atom> h;
-  h.reset(obj);
+  h.reset_protection(obj);
  obj->retire();
  ASSERT_EQ(c_.ctors(), 1);
  hazptr_cleanup<Atom>();
  ASSERT_EQ(c_.dtors(), 0);
-  h.reset();
+  h.reset_protection();
  hazptr_cleanup<Atom>();
  ASSERT_EQ(c_.dtors(), 1);
}
@@ -353,7 +353,7 @@ void virtual_test() {
    bar->a = i;
    hazptr_holder<Atom> hptr;
-    hptr.reset(bar);
+    hptr.reset_protection(bar);
    bar->retire();
    ASSERT_EQ(bar->a, i);
  }
@@ -388,7 +388,7 @@ void move_test() {
    auto x = new Node<Atom>(i);
    hazptr_holder<Atom> hptr0;
    // Protect object
-    hptr0.reset(x);
+    hptr0.reset_protection(x);
    // Retire object
    x->retire();
    // Move constructor - still protected
@@ -404,7 +404,7 @@ void move_test() {
    // Access object
    ASSERT_EQ(x->value(), i);
    // Unprotect object - hptr2 is nonempty
-    hptr2.reset();
+    hptr2.reset_protection();
  }
  hazptr_cleanup<Atom>();
}
@@ -415,7 +415,7 @@ void array_test() {
    auto x = new Node<Atom>(i);
    hazptr_array<3, Atom> hptr;
    // Protect object
-    hptr[2].reset(x);
+    hptr[2].reset_protection(x);
    // Empty array
    hazptr_array<3, Atom> h(nullptr);
    // Move assignment
@@ -424,7 +424,7 @@ void array_test() {
    x->retire();
    ASSERT_EQ(x->value(), i);
    // Unprotect object - hptr2 is nonempty
-    h[2].reset();
+    h[2].reset_protection();
  }
  hazptr_cleanup<Atom>();
}
@@ -464,11 +464,11 @@ void local_test() {
    auto x = new Node<Atom>(i);
    hazptr_local<3, Atom> hptr;
    // Protect object
-    hptr[2].reset(x);
+    hptr[2].reset_protection(x);
    // Retire object
    x->retire();
    // Unprotect object - hptr2 is nonempty
-    hptr[2].reset();
+    hptr[2].reset_protection();
  }
  hazptr_cleanup<Atom>();
}
@@ -483,7 +483,7 @@ void linked_test() {
  }
  p = new NodeRC<Mutable, Atom>(num - 1, p, Mutable);
  hazptr_holder<Atom> hptr;
-  hptr.reset(p);
+  hptr.reset_protection(p);
  if (!Mutable) {
    for (auto q = p->next(); q; q = q->next()) {
      q->retire();
@@ -508,7 +508,7 @@ void linked_test() {
  hazptr_cleanup<Atom>();
  ASSERT_EQ(c_.dtors(), 0);

-  hptr.reset();
+  hptr.reset_protection();
  hazptr_cleanup<Atom>();
  ASSERT_EQ(c_.dtors(), num);
}
@@ -532,7 +532,7 @@ void mt_linked_test() {
        /* spin */
      }
      hazptr_holder<Atom> hptr;
-      auto p = hptr.get_protected(head());
+      auto p = hptr.protect(head());
      ++setHazptrs;
      /* Concurrent with removal */
      int v = num;
@@ -601,7 +601,7 @@ void auto_retire_test() {
        root-->a a-->b a-->c b-->d c-->d
        a(1,0) b(1,0) c(1,0) d(2,0)
   */
-  h.reset(c); /* h protects c */
+  h.reset_protection(c); /* h protects c */
  hazptr_cleanup<Atom>();
  ASSERT_EQ(c_.dtors(), 0);
  /* Nothing is retired or reclaimed yet */
@@ -653,7 +653,7 @@ void auto_retire_test() {
        bulk_reclaim-ed (i.e., found not protected): d
   */
  ASSERT_EQ(c_.dtors(), 2);
-  h.reset(); /* c is now no longer protected */
+  h.reset_protection(); /* c is now no longer protected */
  hazptr_cleanup<Atom>();
  /* hazptr_cleanup calls bulk_reclaim which finds c unprotected,
     which triggers a call to c->release_ref.
@@ -734,8 +734,8 @@ void cleanup_test() {
    hazptr_array<2, Atom> h;
    auto p0 = new Node<Atom>;
    auto p1 = new Node<Atom>;
-    h[0].reset(p0);
-    h[1].reset(p1);
+    h[0].reset_protection(p0);
+    h[1].reset_protection(p1);
    p0->retire();
    p1->retire();
  }
@@ -750,8 +750,8 @@ void cleanup_test() {
    hazptr_local<2, Atom> h;
    auto p0 = new Node<Atom>;
    auto p1 = new Node<Atom>;
-    h[0].reset(p0);
-    h[1].reset(p1);
+    h[0].reset_protection(p0);
+    h[1].reset_protection(p1);
    p0->retire();
    p1->retire();
  }
......