Commit 8ecee58a, authored by Maged Michael and committed by Facebook GitHub Bot

hazard pointers: Extend cleanup to cover cohort objects

Summary:
Extend cleanup to cover retired cohort objects.

The change includes incrementing num_bulk_reclaims_ before invoking do_reclamation either directly or in an executor. The corresponding decrement is done at the completion of do_reclamation, which may happen on a different thread if reclamation is done in an executor.

Reviewed By: davidtgoldblatt

Differential Revision: D30513628

fbshipit-source-id: 80227e85301274c66cc200ee5228654f1ca07d08
parent 58f83287
...@@ -180,6 +180,8 @@ class hazptr_domain { ...@@ -180,6 +180,8 @@ class hazptr_domain {
/** cleanup */ /** cleanup */
void cleanup() noexcept { void cleanup() noexcept {
relaxed_cleanup(); relaxed_cleanup();
inc_num_bulk_reclaims();
do_reclamation(0);
wait_for_zero_bulk_reclaims(); // wait for concurrent bulk_reclaim-s wait_for_zero_bulk_reclaims(); // wait for concurrent bulk_reclaim-s
} }
...@@ -251,6 +253,19 @@ class hazptr_domain { ...@@ -251,6 +253,19 @@ class hazptr_domain {
expected, newval, std::memory_order_acq_rel, std::memory_order_relaxed); expected, newval, std::memory_order_acq_rel, std::memory_order_relaxed);
} }
/** Current count of in-flight bulk reclamation passes. The acquire
    load pairs with the release fetch_sub in dec_num_bulk_reclaims, so
    a thread that observes the counter at zero also observes the memory
    effects of the reclamation passes that completed before it. */
uint16_t load_num_bulk_reclaims() {
return num_bulk_reclaims_.load(std::memory_order_acquire);
}
/** Mark the start of a bulk reclamation pass. Called before invoking
    do_reclamation, either directly or via an executor; the matching
    dec_num_bulk_reclaims runs at the end of do_reclamation, possibly
    on a different thread when reclamation is offloaded to an executor.
    NOTE(review): the increment uses release (not acq_rel) ordering —
    presumably waiters only need to see decrements; confirm intended. */
void inc_num_bulk_reclaims() {
num_bulk_reclaims_.fetch_add(1, std::memory_order_release);
}
/** Mark the end of a bulk reclamation pass. The release fetch_sub
    publishes the pass's reclamation work to any thread that later
    reads the counter as zero via the acquire load above. The DCHECK
    guards against an unmatched decrement (counter underflow). */
void dec_num_bulk_reclaims() {
DCHECK_GT(load_num_bulk_reclaims(), 0);
num_bulk_reclaims_.fetch_sub(1, std::memory_order_release);
}
/** hprec_acquire */ /** hprec_acquire */
hazptr_rec<Atom>* hprec_acquire() { hazptr_rec<Atom>* hprec_acquire() {
auto rec = try_acquire_existing_hprec(); auto rec = try_acquire_existing_hprec();
...@@ -312,6 +327,7 @@ class hazptr_domain { ...@@ -312,6 +327,7 @@ class hazptr_domain {
if (rcount == 0) if (rcount == 0)
return; return;
} }
inc_num_bulk_reclaims();
if (std::is_same<Atom<int>, std::atomic<int>>{} && if (std::is_same<Atom<int>, std::atomic<int>>{} &&
this == &default_hazptr_domain<Atom>() && hazptr_use_executor()) { this == &default_hazptr_domain<Atom>() && hazptr_use_executor()) {
invoke_reclamation_in_executor(rcount); invoke_reclamation_in_executor(rcount);
...@@ -453,8 +469,9 @@ class hazptr_domain { ...@@ -453,8 +469,9 @@ class hazptr_domain {
} }
rcount = check_count_threshold(); rcount = check_count_threshold();
if (rcount == 0) if (rcount == 0)
return; break;
} }
dec_num_bulk_reclaims();
} }
/** lookup_and_reclaim */ /** lookup_and_reclaim */
...@@ -584,7 +601,7 @@ class hazptr_domain { ...@@ -584,7 +601,7 @@ class hazptr_domain {
} }
void wait_for_zero_bulk_reclaims() { void wait_for_zero_bulk_reclaims() {
while (num_bulk_reclaims_.load(std::memory_order_acquire) > 0) { while (load_num_bulk_reclaims() > 0) {
std::this_thread::yield(); std::this_thread::yield();
} }
} }
...@@ -605,7 +622,7 @@ class hazptr_domain { ...@@ -605,7 +622,7 @@ class hazptr_domain {
} }
void bulk_reclaim(bool transitive = false) { void bulk_reclaim(bool transitive = false) {
num_bulk_reclaims_.fetch_add(1, std::memory_order_acquire); inc_num_bulk_reclaims();
while (true) { while (true) {
auto obj = retired_.exchange(nullptr, std::memory_order_acquire); auto obj = retired_.exchange(nullptr, std::memory_order_acquire);
/*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED); /*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
...@@ -620,7 +637,7 @@ class hazptr_domain { ...@@ -620,7 +637,7 @@ class hazptr_domain {
break; break;
} }
} }
num_bulk_reclaims_.fetch_sub(1, std::memory_order_release); dec_num_bulk_reclaims();
} }
bool bulk_lookup_and_reclaim( bool bulk_lookup_and_reclaim(
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.