Commit 852d07b4 authored by Maged Michael, committed by Facebook GitHub Bot

hazptr: Change hazard pointer construction to be consistent with WG21 P1121

Summary:
Change the constructors for hazptr_holder and add the function make_hazard_pointer to be consistent with [WG21 P1121](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p1121r3.pdf).

Change the constructors for hazptr_array and add the function make_hazard_pointer_array to be consistent with the changes to hazptr_holder.

Now the default constructors construct empty holders and arrays.
The free functions make_hazard_pointer and make_hazard_pointer_array are used to construct nonempty holders and arrays.
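
For example, a minimal sketch of the new usage pattern (the `Widget` type, `src` pointers, and function names below are hypothetical, for illustration only):

```cpp
#include <atomic>
#include <folly/synchronization/Hazptr.h>

// Hypothetical protected type and source pointers, for illustration only.
struct Widget : folly::hazptr_obj_base<Widget> {
  int value{0};
};

int read_value(std::atomic<Widget*>& src) {
  // Before this change, `folly::hazptr_holder<> h;` acquired a hazard pointer.
  // Now the default constructor is empty; acquire via the free function.
  folly::hazptr_holder<> h = folly::make_hazard_pointer<>();
  Widget* p = h.protect(src); // p stays protected while h owns the hazard pointer
  return p ? p->value : 0;
}

void read_pair(std::atomic<Widget*>& a, std::atomic<Widget*>& b) {
  // Arrays follow the same pattern: default construction is empty;
  // make_hazard_pointer_array<N>() returns a nonempty array.
  folly::hazptr_array<2> harr = folly::make_hazard_pointer_array<2>();
  Widget* pa = harr[0].protect(a);
  Widget* pb = harr[1].protect(b);
  (void)pa;
  (void)pb;
}
```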

Fix a missing barrier bug in HazptrTest.cpp.

Reviewed By: yfeldblum

Differential Revision: D28731269

fbshipit-source-id: 4132b2d612dbe1e8867c07d619efaba40be83785
parent a1bd849e
......@@ -361,7 +361,7 @@ class UnboundedQueue {
} else {
// Using hazptr_holder instead of hazptr_local because it is
// possible that the T ctor happens to use hazard pointers.
hazptr_holder<Atom> hptr;
hazptr_holder<Atom> hptr = make_hazard_pointer<Atom>();
Segment* s = hptr.protect(p_.tail);
enqueueCommon(s, std::forward<Arg>(arg));
}
......@@ -396,7 +396,7 @@ class UnboundedQueue {
// Using hazptr_holder instead of hazptr_local because it is
// possible to call the T dtor and it may happen to use hazard
// pointers.
hazptr_holder<Atom> hptr;
hazptr_holder<Atom> hptr = make_hazard_pointer<Atom>();
Segment* s = hptr.protect(c_.head);
return dequeueCommon(s);
}
......@@ -427,7 +427,7 @@ class UnboundedQueue {
} else {
// Using hazptr_holder instead of hazptr_local because it is
// possible to call ~T() and it may happen to use hazard pointers.
hazptr_holder<Atom> hptr;
hazptr_holder<Atom> hptr = make_hazard_pointer<Atom>();
Segment* s = hptr.protect(c_.head);
return tryDequeueUntilMC(s, deadline);
}
......
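
The comments in the hunks above prefer hazptr_holder over hazptr_local because the protected T constructor or destructor may itself use hazard pointers, which a hazptr_local scope must not do. A minimal sketch of that distinction, with hypothetical `Item` and `consume` names:

```cpp
#include <atomic>
#include <folly/synchronization/Hazptr.h>

struct Item {
  int payload{0};
};

// Fine for hazptr_local: nothing in the protected scope uses hazard pointers.
int peek_payload(std::atomic<Item*>& src) {
  folly::hazptr_local<1> h; // draws directly on thread-cached hazard pointers
  Item* p = h[0].protect(src);
  return p ? p->payload : 0;
}

// Needs hazptr_holder: the callback may construct or destroy objects whose
// constructors/destructors themselves use hazard pointers, which the comments
// above note is not allowed within a hazptr_local scope.
template <typename F>
void peek_and_consume(std::atomic<Item*>& src, F&& consume) {
  folly::hazptr_holder<> h = folly::make_hazard_pointer<>();
  if (Item* p = h.protect(src)) {
    consume(*p);
  }
}
```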
......@@ -510,8 +510,9 @@ class alignas(64) BucketTable {
public:
class Iterator {
public:
FOLLY_ALWAYS_INLINE Iterator() {}
FOLLY_ALWAYS_INLINE explicit Iterator(std::nullptr_t) : hazptrs_(nullptr) {}
FOLLY_ALWAYS_INLINE Iterator()
: hazptrs_(make_hazard_pointer_array<3, Atom>()) {}
FOLLY_ALWAYS_INLINE explicit Iterator(std::nullptr_t) : hazptrs_() {}
FOLLY_ALWAYS_INLINE ~Iterator() {}
void setNode(
......@@ -1009,8 +1010,9 @@ class alignas(64) SIMDTable {
class Iterator {
public:
FOLLY_ALWAYS_INLINE Iterator() {}
FOLLY_ALWAYS_INLINE explicit Iterator(std::nullptr_t) : hazptrs_(nullptr) {}
FOLLY_ALWAYS_INLINE Iterator()
: hazptrs_(make_hazard_pointer_array<2, Atom>()) {}
FOLLY_ALWAYS_INLINE explicit Iterator(std::nullptr_t) : hazptrs_() {}
FOLLY_ALWAYS_INLINE ~Iterator() {}
void setNode(
......
......@@ -830,7 +830,7 @@ class RelaxedConcurrentPriorityQueue {
// The push keeps the length of each element stable
void moundPush(const T& val) {
Position cur;
folly::hazptr_holder<Atom> hptr;
folly::hazptr_holder<Atom> hptr = folly::make_hazard_pointer<Atom>();
Node* newNode = new Node;
newNode->val = val;
uint32_t seed = folly::Random::rand32() % (1 << 21);
......
......@@ -404,14 +404,20 @@ class HazptrObserver {
template <typename Holder>
struct HazptrSnapshot {
template <typename State>
explicit HazptrSnapshot(const std::atomic<State*>& state)
: holder_(), ptr_(get(holder_).protect(state)->snapshot_.get()) {}
explicit HazptrSnapshot(const std::atomic<State*>& state) : holder_() {
make(holder_);
ptr_ = get(holder_).protect(state)->snapshot_.get();
}
const T& operator*() const { return *get(); }
const T* operator->() const { return get(); }
const T* get() const { return ptr_; }
private:
static void make(hazptr_holder<>& holder) {
holder = folly::make_hazard_pointer<>();
}
static void make(hazptr_local<1>&) {}
static hazptr_holder<>& get(hazptr_holder<>& holder) { return holder; }
static hazptr_holder<>& get(hazptr_local<1>& holder) { return holder[0]; }
......
......@@ -408,7 +408,7 @@ RequestContext::State::getContextData(const RequestToken& token) const {
FOLLY_ALWAYS_INLINE
void RequestContext::State::onSet() {
// Don't use hazptr_local because callback may use hazptr
hazptr_holder<> h;
hazptr_holder<> h = make_hazard_pointer<>();
Combined* combined = h.protect(combined_);
if (!combined) {
return;
......@@ -422,7 +422,7 @@ void RequestContext::State::onSet() {
FOLLY_ALWAYS_INLINE
void RequestContext::State::onUnset() {
// Don't use hazptr_local because callback may use hazptr
hazptr_holder<> h;
hazptr_holder<> h = make_hazard_pointer<>();
Combined* combined = h.protect(combined_);
if (!combined) {
return;
......@@ -575,7 +575,7 @@ void RequestContext::clearContextData(const RequestToken& val) {
bool checkCur = curCtx && curCtx->state_.combined();
bool checkNew = newCtx && newCtx->state_.combined();
if (checkCur && checkNew) {
hazptr_array<2> h;
hazptr_array<2> h = make_hazard_pointer_array<2>();
auto curc = h[0].protect(curCtx->state_.combined_);
auto newc = h[1].protect(newCtx->state_.combined_);
auto& curcb = curc->callbackData_;
......
......@@ -173,6 +173,11 @@ extern hazptr_domain<std::atomic> default_domain;
template <template <typename> class Atom = std::atomic>
class hazptr_holder;
/** Free function make_hazard_pointer constructs nonempty holder */
template <template <typename> class Atom = std::atomic>
hazptr_holder<Atom> make_hazard_pointer(
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>());
/** Free function swap of hazptr_holder-s */
template <template <typename> class Atom = std::atomic>
void swap(hazptr_holder<Atom>&, hazptr_holder<Atom>&) noexcept;
......@@ -181,6 +186,10 @@ void swap(hazptr_holder<Atom>&, hazptr_holder<Atom>&) noexcept;
template <uint8_t M = 1, template <typename> class Atom = std::atomic>
class hazptr_array;
/** Free function make_hazard_pointer_array constructs nonempty array */
template <uint8_t M = 1, template <typename> class Atom = std::atomic>
hazptr_array<M, Atom> make_hazard_pointer_array();
/** hazptr_local */
template <uint8_t M = 1, template <typename> class Atom = std::atomic>
class hazptr_local;
......
......@@ -84,7 +84,7 @@
///
/// // Called frequently
/// U get_config(V v) {
/// hazptr_holder h; /* h owns a hazard pointer */
/// hazptr_holder h = make_hazard_pointer();
/// Config* ptr = h.protect(config_);
/// /* safe to access *ptr as long as it is protected by h */
/// return ptr->get_config(v);
......@@ -201,8 +201,6 @@
/// o This library does not support a custom polymorphic allocator
/// (C++17) parameter for the hazptr_domain constructor, until
/// such support becomes widely available.
/// o The construction of empty and non-empty hazptr_holder-s are
/// reversed. This library will conform eventually.
/// o hazptr_array and hazptr_local are not part of the standard
/// proposal.
/// o Link counting support and protection of linked structures is
......
......@@ -218,6 +218,7 @@ class hazptr_domain {
hazptr_obj_list<Atom>&, hazptr_domain<Atom>&) noexcept;
friend void hazptr_domain_push_retired<Atom>(
hazptr_obj_list<Atom>&, bool check, hazptr_domain<Atom>&) noexcept;
friend hazptr_holder<Atom> make_hazard_pointer<Atom>(hazptr_domain<Atom>&);
friend class hazptr_holder<Atom>;
friend class hazptr_obj<Atom>;
friend class hazptr_obj_cohort<Atom>;
......
......@@ -38,7 +38,7 @@ namespace folly {
* Usage example:
* T* ptr;
* {
* hazptr_holder h;
* hazptr_holder h = make_hazard_pointer();
* ptr = h.protect(src);
* // ... *ptr is protected ...
* h.reset_protection();
......@@ -53,31 +53,26 @@ template <template <typename> class Atom>
class hazptr_holder {
hazptr_rec<Atom>* hprec_;
template <uint8_t M, template <typename> class A>
friend class hazptr_local;
friend hazptr_holder<Atom> make_hazard_pointer<Atom>(hazptr_domain<Atom>&);
template <uint8_t M, template <typename> class A>
friend hazptr_array<M, A> make_hazard_pointer_array();
/** Private constructor used by make_hazard_pointer and
make_hazard_pointer_array */
FOLLY_ALWAYS_INLINE explicit hazptr_holder(hazptr_rec<Atom>* hprec)
: hprec_(hprec) {}
public:
/** Constructor - automatically acquires a hazard pointer. */
FOLLY_ALWAYS_INLINE explicit hazptr_holder(
hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) {
#if FOLLY_HAZPTR_THR_LOCAL
if (LIKELY(&domain == &default_hazptr_domain<Atom>())) {
auto hprec = hazptr_tc_tls<Atom>().try_get();
if (LIKELY(hprec != nullptr)) {
hprec_ = hprec;
return;
}
}
#endif
hprec_ = domain.hprec_acquire();
}
/** Default empty constructor */
FOLLY_ALWAYS_INLINE hazptr_holder() noexcept : hprec_(nullptr) {}
/** Empty constructor */
FOLLY_ALWAYS_INLINE explicit hazptr_holder(std::nullptr_t) noexcept
: hprec_(nullptr) {}
/** For nonempty construction use make_hazard_pointer. */
/** Move constructor */
FOLLY_ALWAYS_INLINE hazptr_holder(hazptr_holder&& rhs) noexcept {
hprec_ = rhs.hprec_;
rhs.hprec_ = nullptr;
}
FOLLY_ALWAYS_INLINE hazptr_holder(hazptr_holder&& rhs) noexcept
: hprec_(std::exchange(rhs.hprec_, nullptr)) {}
hazptr_holder(const hazptr_holder&) = delete;
hazptr_holder& operator=(const hazptr_holder&) = delete;
......@@ -103,8 +98,7 @@ class hazptr_holder {
/* Self-move is a no-op. */
if (LIKELY(this != &rhs)) {
this->~hazptr_holder();
new (this) hazptr_holder(nullptr);
hprec_ = rhs.hprec_;
new (this) hazptr_holder(rhs.hprec_);
rhs.hprec_ = nullptr;
}
return *this;
......@@ -149,7 +143,7 @@ class hazptr_holder {
return ptr;
}
/** reset */
/** reset_protection */
template <typename T>
FOLLY_ALWAYS_INLINE void reset_protection(const T* ptr) noexcept {
auto p = static_cast<hazptr_obj<Atom>*>(const_cast<T*>(ptr));
......@@ -182,7 +176,25 @@ class hazptr_holder {
}; // hazptr_holder
/**
* Free function swap of hazptr_holder-s.
* Free function make_hazard_pointer constructs nonempty holder
*/
template <template <typename> class Atom>
FOLLY_ALWAYS_INLINE hazptr_holder<Atom> make_hazard_pointer(
hazptr_domain<Atom>& domain) {
#if FOLLY_HAZPTR_THR_LOCAL
if (LIKELY(&domain == &default_hazptr_domain<Atom>())) {
auto hprec = hazptr_tc_tls<Atom>().try_get();
if (LIKELY(hprec != nullptr)) {
return hazptr_holder<Atom>(hprec);
}
}
#endif
auto hprec = domain.hprec_acquire();
return hazptr_holder<Atom>(hprec);
}
/**
* Free function. Swaps hazptr_holder-s.
*/
template <template <typename> class Atom>
FOLLY_ALWAYS_INLINE void swap(
......@@ -216,45 +228,23 @@ class hazptr_array {
static_assert(M > 0, "M must be a positive integer.");
aligned_hazptr_holder<Atom> raw_[M];
bool empty_{false};
bool empty_;
friend hazptr_array<M, Atom> make_hazard_pointer_array<M, Atom>();
/** Private constructor used by make_hazard_pointer_array */
FOLLY_ALWAYS_INLINE explicit hazptr_array(std::nullptr_t) noexcept {}
public:
/** Constructor */
FOLLY_ALWAYS_INLINE hazptr_array() {
/** Default empty constructor */
FOLLY_ALWAYS_INLINE hazptr_array() noexcept : empty_(true) {
auto h = reinterpret_cast<hazptr_holder<Atom>*>(&raw_);
#if FOLLY_HAZPTR_THR_LOCAL
static_assert(
M <= hazptr_tc<Atom>::capacity(),
"M must be within the thread cache capacity.");
auto& tc = hazptr_tc_tls<Atom>();
auto count = tc.count();
if (UNLIKELY(M > count)) {
tc.fill(M - count);
count = M;
}
uint8_t offset = count - M;
for (uint8_t i = 0; i < M; ++i) {
auto hprec = tc[offset + i].get();
DCHECK(hprec != nullptr);
new (&h[i]) hazptr_holder<Atom>(nullptr);
h[i].set_hprec(hprec);
}
tc.set_count(offset);
#else
for (uint8_t i = 0; i < M; ++i) {
new (&h[i]) hazptr_holder<Atom>;
new (&h[i]) hazptr_holder<Atom>();
}
#endif
}
/** Empty constructor */
FOLLY_ALWAYS_INLINE hazptr_array(std::nullptr_t) noexcept {
auto h = reinterpret_cast<hazptr_holder<Atom>*>(&raw_);
for (uint8_t i = 0; i < M; ++i) {
new (&h[i]) hazptr_holder<Atom>(nullptr);
}
empty_ = true;
}
/** For nonempty construction use make_hazard_pointer_array. */
/** Move constructor */
FOLLY_ALWAYS_INLINE hazptr_array(hazptr_array&& other) noexcept {
......@@ -287,7 +277,7 @@ class hazptr_array {
for (uint8_t i = 0; i < M; ++i) {
h[i].reset_protection();
tc[count + i].fill(h[i].hprec());
new (&h[i]) hazptr_holder<Atom>(nullptr);
h[i].set_hprec(nullptr);
}
tc.set_count(count + M);
#else
......@@ -316,11 +306,44 @@ class hazptr_array {
}
}; // hazptr_array
/**
* Free function make_hazard_pointer_array constructs nonempty array
*/
template <uint8_t M, template <typename> class Atom>
FOLLY_ALWAYS_INLINE hazptr_array<M, Atom> make_hazard_pointer_array() {
hazptr_array<M, Atom> a(nullptr);
auto h = reinterpret_cast<hazptr_holder<Atom>*>(&a.raw_);
#if FOLLY_HAZPTR_THR_LOCAL
static_assert(
M <= hazptr_tc<Atom>::capacity(),
"M must be within the thread cache capacity.");
auto& tc = hazptr_tc_tls<Atom>();
auto count = tc.count();
if (UNLIKELY(M > count)) {
tc.fill(M - count);
count = M;
}
uint8_t offset = count - M;
for (uint8_t i = 0; i < M; ++i) {
auto hprec = tc[offset + i].get();
DCHECK(hprec != nullptr);
new (&h[i]) hazptr_holder<Atom>(hprec);
}
tc.set_count(offset);
#else
for (uint8_t i = 0; i < M; ++i) {
new (&h[i]) hazptr_holder<Atom>(make_hazard_pointer<Atom>());
}
#endif
a.empty_ = false;
return a;
}
/**
* hazptr_local
*
* Optimized for construction and destruction of one or more
* hazptr_holder-s with local scope.
* nonempty hazptr_holder-s with local scope.
*
* WARNING 1: Do not move from or to individual hazptr_holder-s.
*
......@@ -355,12 +378,11 @@ class hazptr_local {
for (uint8_t i = 0; i < M; ++i) {
auto hprec = tc[i].get();
DCHECK(hprec != nullptr);
new (&h[i]) hazptr_holder<Atom>(nullptr);
h[i].set_hprec(hprec);
new (&h[i]) hazptr_holder<Atom>(hprec);
}
#else
for (uint8_t i = 0; i < M; ++i) {
new (&h[i]) hazptr_holder<Atom>;
new (&h[i]) hazptr_holder<Atom>(make_hazard_pointer<Atom>());
}
#endif
}
......
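
The `make_hazard_pointer` definition above takes the thread-cache fast path only for the default domain; any other domain acquires a record directly from that domain. A hypothetical sketch of the domain parameter (the `Rec` type and function name are assumptions for illustration):

```cpp
#include <atomic>
#include <folly/synchronization/Hazptr.h>

struct Rec {
  int v{0};
};

int read_from_domains(std::atomic<Rec*>& src, folly::hazptr_domain<>& custom) {
  auto h1 = folly::make_hazard_pointer();         // default domain; may hit the thread cache
  auto h2 = folly::make_hazard_pointer<>(custom); // custom domain; no thread-cache fast path
  Rec* p = h2.protect(src);
  (void)h1;
  return p ? p->v : 0;
}
```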
......@@ -48,6 +48,8 @@ class hazptr_tc_entry {
template <uint8_t, template <typename> class>
friend class hazptr_local;
friend class hazptr_tc<Atom>;
template <uint8_t M, template <typename> class A>
friend hazptr_array<M, A> make_hazard_pointer_array();
FOLLY_ALWAYS_INLINE void fill(hazptr_rec<Atom>* hprec) noexcept {
hprec_ = hprec;
......@@ -86,6 +88,9 @@ class hazptr_tc {
friend class hazptr_holder<Atom>;
template <uint8_t, template <typename> class>
friend class hazptr_local;
friend hazptr_holder<Atom> make_hazard_pointer<Atom>(hazptr_domain<Atom>&);
template <uint8_t M, template <typename> class A>
friend hazptr_array<M, A> make_hazard_pointer_array();
FOLLY_ALWAYS_INLINE
hazptr_tc_entry<Atom>& operator[](uint8_t i) noexcept {
......
......@@ -40,7 +40,7 @@ class HazptrWideCAS {
bool cas(T& u, T& v) {
Node* n = new Node(v);
hazptr_holder<Atom> hptr;
hazptr_holder<Atom> hptr = make_hazard_pointer<Atom>();
Node* p;
while (true) {
p = hptr.protect(node_);
......
......@@ -49,6 +49,8 @@ using folly::hazptr_tc;
using folly::HazptrLockFreeLIFO;
using folly::HazptrSWMRSet;
using folly::HazptrWideCAS;
using folly::make_hazard_pointer;
using folly::make_hazard_pointer_array;
using folly::test::Barrier;
using folly::test::DeterministicAtomic;
......@@ -322,8 +324,8 @@ void copy_and_move_test() {
template <template <typename> class Atom = std::atomic>
void basic_holders_test() {
{ hazptr_holder<Atom> h; }
{ hazptr_array<2, Atom> h; }
{ hazptr_holder<Atom> h = make_hazard_pointer<Atom>(); }
{ hazptr_array<2, Atom> h = make_hazard_pointer_array<2, Atom>(); }
{ hazptr_local<2, Atom> h; }
}
......@@ -331,7 +333,7 @@ template <template <typename> class Atom = std::atomic>
void basic_protection_test() {
c_.clear();
auto obj = new Node<Atom>;
hazptr_holder<Atom> h;
hazptr_holder<Atom> h = make_hazard_pointer<Atom>();
h.reset_protection(obj);
obj->retire();
ASSERT_EQ(c_.ctors(), 1);
......@@ -352,7 +354,7 @@ void virtual_test() {
auto bar = new Thing;
bar->a = i;
hazptr_holder<Atom> hptr;
hazptr_holder<Atom> hptr = make_hazard_pointer<Atom>();
hptr.reset_protection(bar);
bar->retire();
ASSERT_EQ(bar->a, i);
......@@ -386,7 +388,7 @@ template <template <typename> class Atom = std::atomic>
void move_test() {
for (int i = 0; i < 100; ++i) {
auto x = new Node<Atom>(i);
hazptr_holder<Atom> hptr0;
hazptr_holder<Atom> hptr0 = make_hazard_pointer<Atom>();
// Protect object
hptr0.reset_protection(x);
// Retire object
......@@ -398,7 +400,7 @@ void move_test() {
ASSERT_EQ(phptr1, &hptr1);
hptr1 = std::move(*phptr1);
// Empty constructor
hazptr_holder<Atom> hptr2(nullptr);
hazptr_holder<Atom> hptr2;
// Move assignment - still protected
hptr2 = std::move(hptr1);
// Access object
......@@ -413,11 +415,11 @@ template <template <typename> class Atom = std::atomic>
void array_test() {
for (int i = 0; i < 100; ++i) {
auto x = new Node<Atom>(i);
hazptr_array<3, Atom> hptr;
hazptr_array<3, Atom> hptr = make_hazard_pointer_array<3, Atom>();
// Protect object
hptr[2].reset_protection(x);
// Empty array
hazptr_array<3, Atom> h(nullptr);
hazptr_array<3, Atom> h;
// Move assignment
h = std::move(hptr);
// Retire object
......@@ -438,16 +440,16 @@ void array_dtor_full_tc_test() {
#endif
{
// Fill the thread cache
hazptr_array<M, Atom> w;
hazptr_array<M, Atom> w = make_hazard_pointer_array<M, Atom>();
}
{
// Empty array x
hazptr_array<M, Atom> x(nullptr);
hazptr_array<M, Atom> x;
{
// y ctor gets elements from the thread cache filled by w dtor.
hazptr_array<M, Atom> y;
hazptr_array<M, Atom> y = make_hazard_pointer_array<M, Atom>();
// z ctor gets elements from the default domain.
hazptr_array<M, Atom> z;
hazptr_array<M, Atom> z = make_hazard_pointer_array<M, Atom>();
// Elements of y are moved to x.
x = std::move(y);
// z dtor fills the thread cache.
......@@ -482,7 +484,7 @@ void linked_test() {
p = new NodeRC<Mutable, Atom>(i, p, true);
}
p = new NodeRC<Mutable, Atom>(num - 1, p, Mutable);
hazptr_holder<Atom> hptr;
hazptr_holder<Atom> hptr = make_hazard_pointer<Atom>();
hptr.reset_protection(p);
if (!Mutable) {
for (auto q = p->next(); q; q = q->next()) {
......@@ -531,7 +533,7 @@ void mt_linked_test() {
while (!ready.load()) {
/* spin */
}
hazptr_holder<Atom> hptr;
hazptr_holder<Atom> hptr = make_hazard_pointer<Atom>();
auto p = hptr.protect(head());
++setHazptrs;
/* Concurrent with removal */
......@@ -591,7 +593,7 @@ void auto_retire_test() {
c->acquire_link_safe();
b->acquire_link_safe();
auto a = new NodeAuto<Atom>(b, c);
hazptr_holder<Atom> h;
hazptr_holder<Atom> h = make_hazard_pointer<Atom>();
{
hazptr_root<NodeAuto<Atom>> root;
a->acquire_link_safe();
......@@ -729,9 +731,9 @@ void cleanup_test() {
}
{ // Cleanup after using array
c_.clear();
{ hazptr_array<2, Atom> h; }
{ hazptr_array<2, Atom> h = make_hazard_pointer_array<2, Atom>(); }
{
hazptr_array<2, Atom> h;
hazptr_array<2, Atom> h = make_hazard_pointer_array<2, Atom>();
auto p0 = new Node<Atom>;
auto p1 = new Node<Atom>;
h[0].reset_protection(p0);
......@@ -1229,12 +1231,12 @@ TEST(HazptrTest, reclamation_without_calling_cleanup) {
template <typename InitFunc, typename Func, typename EndFunc>
uint64_t run_once(
int nthreads, const InitFunc& init, const Func& fn, const EndFunc& endFn) {
std::atomic<bool> start{false};
Barrier b(nthreads + 1);
init();
std::vector<std::thread> threads(nthreads);
for (int tid = 0; tid < nthreads; ++tid) {
threads[tid] = std::thread([&, tid] {
b.wait();
b.wait();
fn(tid);
});
......@@ -1242,7 +1244,7 @@ uint64_t run_once(
b.wait();
// begin time measurement
auto tbegin = std::chrono::steady_clock::now();
start.store(true);
b.wait();
for (auto& t : threads) {
t.join();
}
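
The change above replaces the unused `start` flag with a second barrier round, so no worker begins `fn` before the start time is recorded. A minimal sketch of that two-phase start, assuming the `folly::test::Barrier` helper used in this test (constructor takes the number of parties, `wait()` blocks until all arrive):

```cpp
#include <chrono>
#include <thread>
#include <vector>
#include <folly/test/Barrier.h>

std::chrono::nanoseconds timed_run(int nthreads) {
  folly::test::Barrier b(nthreads + 1);
  std::vector<std::thread> threads(nthreads);
  for (int tid = 0; tid < nthreads; ++tid) {
    threads[tid] = std::thread([&, tid] {
      b.wait(); // signal readiness
      b.wait(); // wait for the start signal
      /* ... benchmark body for thread tid ... */
    });
  }
  b.wait(); // all workers are ready
  auto tbegin = std::chrono::steady_clock::now();
  b.wait(); // release the workers; measurement has begun
  for (auto& t : threads) {
    t.join();
  }
  return std::chrono::steady_clock::now() - tbegin;
}
```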
......@@ -1283,7 +1285,6 @@ uint64_t bench(std::string name, int ops, const RepFunc& repFn) {
//
// Benchmarks
//
// const int ops = 1000000;
const int ops = 1000000;
inline uint64_t holder_bench(std::string name, int nthreads) {
......@@ -1291,7 +1292,7 @@ inline uint64_t holder_bench(std::string name, int nthreads) {
auto init = [] {};
auto fn = [&](int tid) {
for (int j = tid; j < 10 * ops; j += nthreads) {
hazptr_holder<> h;
hazptr_holder<> h = make_hazard_pointer<>();
}
};
auto endFn = [] {};
......@@ -1306,7 +1307,7 @@ inline uint64_t array_bench(std::string name, int nthreads) {
auto init = [] {};
auto fn = [&](int tid) {
for (int j = tid; j < 10 * ops; j += nthreads) {
hazptr_array<M> a;
hazptr_array<M> a = make_hazard_pointer_array<M>();
}
};
auto endFn = [] {};
......@@ -1396,7 +1397,7 @@ uint64_t cleanup_bench(std::string name, int nthreads) {
auto repFn = [&] {
auto init = [] {};
auto fn = [&](int) {
hazptr_holder<std::atomic> hptr;
hazptr_holder<> hptr = make_hazard_pointer<>();
for (int i = 0; i < ops / 1000; i++) {
hazptr_cleanup();
}
......