Commit b73bf8c1 authored by Maged Michael, committed by Facebook GitHub Bot

hazptr: Avoid unnecessary access to domain tagged list

Summary:
This diff avoids locking and traversing the domain's tagged list on shutdown of batches that never pushed objects to the list.

This change required batches to keep track of pushing objects to the domain's tagged list.
This in turn required reclamation of tagged objects to be invoked by the batch rather than using the free function hazptr_cleanup_batch_tag.
In order to avoid batches keeping track of the associated domain (and adding an extra field), batches are used only with the default domain.

Reviewed By: davidtgoldblatt

Differential Revision: D17416380

fbshipit-source-id: e5a3cd1492c1f1daf499b44fa1cfb4bb822062f7
parent a8f674ce
......@@ -594,7 +594,6 @@ class ConcurrentHashMap {
if (batch_.compare_exchange_strong(b, newbatch)) {
b = newbatch;
} else {
newbatch->shutdown_and_reclaim();
newbatch->~hazptr_obj_batch<Atom>();
Allocator().deallocate(storage, sizeof(hazptr_obj_batch<Atom>));
}
......@@ -605,8 +604,6 @@ class ConcurrentHashMap {
void batch_shutdown_cleanup() {
auto b = batch();
if (b) {
b->shutdown_and_reclaim();
hazptr_cleanup_batch_tag(b);
b->~hazptr_obj_batch<Atom>();
Allocator().deallocate((uint8_t*)b, sizeof(hazptr_obj_batch<Atom>));
}
......
......@@ -259,7 +259,6 @@ class UnboundedQueue {
/** destructor */
~UnboundedQueue() {
cleanUpRemainingItems();
c_.batch.shutdown_and_reclaim();
reclaimRemainingSegments();
}
......
......@@ -156,12 +156,6 @@ template <template <typename> class Atom = std::atomic>
void hazptr_cleanup(
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept;
/** hazptr_cleanup_batch_tag */
template <template <typename> class Atom = std::atomic>
void hazptr_cleanup_batch_tag(
const hazptr_obj_batch<Atom>* batch,
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept;
/** Global default domain defined in Hazptr.cpp */
extern hazptr_domain<std::atomic> default_domain;
......
......@@ -639,12 +639,4 @@ void hazptr_cleanup(hazptr_domain<Atom>& domain) noexcept {
domain.cleanup();
}
/** hazptr_cleanup_batch_tag: Reclaims objects associated with a tag */
template <template <typename> class Atom>
void hazptr_cleanup_batch_tag(
const hazptr_obj_batch<Atom>* batch,
hazptr_domain<Atom>& domain) noexcept {
domain.cleanup_batch_tag(batch);
}
} // namespace folly
......@@ -139,6 +139,11 @@ class hazptr_obj {
return reinterpret_cast<hazptr_obj_batch<Atom>*>(btag);
}
/** tagged: Returns true iff kTagBit is set in batch_tag_, i.e., this
 *  object was tagged via set_batch_tag (which stores the batch pointer
 *  with kTagBit added). */
bool tagged() {
return (batch_tag_ & kTagBit) == kTagBit;
}
/** set_batch_tag: Set batch and make object tagged. */
void set_batch_tag(hazptr_obj_batch<Atom>* batch) {
batch_tag_ = reinterpret_cast<uintptr_t>(batch) + kTagBit;
......@@ -184,7 +189,8 @@ class hazptr_obj {
void push_obj(hazptr_domain<Atom>& domain) {
auto b = batch();
if (b) {
b->push_obj(this, domain);
DCHECK_EQ(&domain, &default_hazptr_domain<Atom>());
b->push_obj(this);
} else {
push_to_retired(domain);
}
......@@ -278,6 +284,8 @@ class hazptr_obj_list {
*
* See description of hazptr_obj for notes on batches, tags, and
* tagged and untagged objects.
*
* [Note: For now supports only the default domain.]
*/
template <template <typename> class Atom>
class hazptr_obj_batch {
......@@ -290,10 +298,12 @@ class hazptr_obj_batch {
SharedList l_;
Atom<int> count_;
bool active_;
Atom<bool> pushed_to_domain_tagged_;
public:
/** Constructor */
hazptr_obj_batch() noexcept : l_(), count_(0), active_(true) {}
hazptr_obj_batch() noexcept
: l_(), count_(0), active_(true), pushed_to_domain_tagged_{false} {}
/** Not copyable or moveable */
hazptr_obj_batch(const hazptr_obj_batch& o) = delete;
......@@ -303,11 +313,14 @@ class hazptr_obj_batch {
/** Destructor */
~hazptr_obj_batch() {
if (active_) {
shutdown_and_reclaim();
}
DCHECK(!active_);
DCHECK(l_.empty());
}
/** shutdown */
/** shutdown_and_reclaim */
void shutdown_and_reclaim() {
DCHECK(active_);
active_ = false;
......@@ -317,6 +330,10 @@ class hazptr_obj_batch {
Obj* obj = l.head();
reclaim_list(obj);
}
if (pushed_to_domain_tagged_.load(std::memory_order_relaxed)) {
default_hazptr_domain<Atom>().cleanup_batch_tag(this);
}
DCHECK(l_.empty());
}
private:
......@@ -340,13 +357,11 @@ class hazptr_obj_batch {
}
/** push_obj */
void push_obj(
Obj* obj,
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) {
void push_obj(Obj* obj) {
if (active_) {
l_.push(obj);
inc_count();
check_threshold_push(domain);
check_threshold_push();
} else {
obj->set_next(nullptr);
reclaim_list(obj);
......@@ -367,11 +382,14 @@ class hazptr_obj_batch {
}
/** check_threshold_push */
void check_threshold_push(hazptr_domain<Atom>& domain) {
void check_threshold_push() {
auto c = count();
while (c >= kThreshold) {
if (cas_count(c, 0)) {
List ll = l_.pop_all();
if (ll.head()->tagged()) {
pushed_to_domain_tagged_.store(true, std::memory_order_relaxed);
}
if (kIsDebug) {
Obj* p = ll.head();
for (int i = 1; p; ++i, p = p->next()) {
......@@ -379,7 +397,7 @@ class hazptr_obj_batch {
}
}
hazptr_obj_list<Atom> l(ll.head(), ll.tail(), c);
hazptr_domain_push_list<Atom>(l, domain);
hazptr_domain_push_list<Atom>(l);
return;
}
}
......
......@@ -35,7 +35,6 @@ DEFINE_int64(num_ops, 1003, "Number of ops or pairs of ops per rep");
using folly::default_hazptr_domain;
using folly::hazptr_array;
using folly::hazptr_cleanup;
using folly::hazptr_cleanup_batch_tag;
using folly::hazptr_domain;
using folly::hazptr_holder;
using folly::hazptr_local;
......@@ -826,7 +825,6 @@ void batch_test() {
}
});
DSched::join(thr);
batch.shutdown_and_reclaim();
}
ASSERT_EQ(c_.ctors(), num);
// ASSERT_GT(c_.dtors(), 0);
......@@ -842,8 +840,6 @@ void batch_test() {
}
});
DSched::join(thr);
batch.shutdown_and_reclaim();
hazptr_cleanup_batch_tag<Atom>(&batch);
}
ASSERT_EQ(c_.ctors(), num);
ASSERT_GT(c_.dtors(), 0);
......@@ -861,8 +857,6 @@ void recursive_destruction_test() {
}
~Foo() {
set(nullptr);
batch_.shutdown_and_reclaim();
hazptr_cleanup_batch_tag<Atom>(&batch_);
c_.inc_dtors();
}
void set(Foo* foo) {
......@@ -893,7 +887,6 @@ void recursive_destruction_test() {
}
}
foo0->retire();
b0.shutdown_and_reclaim();
});
}
for (auto& t : threads) {
......@@ -1428,8 +1421,6 @@ uint64_t batch_bench(std::string name, int nthreads) {
auto fn = [&](int tid) {
for (int j = tid; j < ops; j += nthreads) {
hazptr_obj_batch b;
b.shutdown_and_reclaim();
hazptr_cleanup_batch_tag<std::atomic>(&b);
}
};
auto endFn = [] {};
......@@ -1506,7 +1497,7 @@ allocate/retire/reclaim object 70 ns 68 ns 67 ns
20-item list protect all - own hazptr 28 ns 28 ns 28 ns
20-item list protect all 31 ns 29 ns 29 ns
1/1000 hazptr_cleanup 2 ns 1 ns 1 ns
Life cycle of unused tagged obj batch 1577 ns 1547 ns 1511 ns
Life cycle of unused tagged obj batch 1 ns 1 ns 1 ns
================================ 10 threads ================================
10x construct/destruct hazptr_holder 11 ns 8 ns 8 ns
10x construct/destruct hazptr_array<1> 8 ns 7 ns 7 ns
......@@ -1525,5 +1516,5 @@ allocate/retire/reclaim object 20 ns 17 ns 16 ns
20-item list protect all - own hazptr 4 ns 4 ns 4 ns
20-item list protect all 5 ns 4 ns 4 ns
1/1000 hazptr_cleanup 119 ns 113 ns 97 ns
Life cycle of unused tagged obj batch 1985 ns 1918 ns 1853 ns
Life cycle of unused tagged obj batch 0 ns 0 ns 0 ns
*/
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment