Commit d60894bf authored by Yedidya Feldblum, committed by Facebook GitHub Bot

condense the atomic-fetch-bit-op implementations

Summary: Coalesce the inline-asm. Use invokers.

Reviewed By: Gownta

Differential Revision: D32302511

fbshipit-source-id: 6edf75c6ef820ed80e751d965174e5acfaac957e
parent 80c05933
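
Note on the approach: the "invokers" mentioned in the summary are function-object types with a templated operator(), each exposed as a FOLLY_INLINE_VARIABLE constexpr instance. Unlike a function template, such an object can be copied into a local (as the benchmarks and tests below do with auto op = folly::atomic_fetch_set;), which is what lets this diff delete the FOLLY_CREATE_FREE_INVOKER_SUITE wrappers. A minimal sketch of the pattern, using hypothetical names rather than the actual folly definitions:

#include <atomic>
#include <cstddef>

// Sketch only: hypothetical names illustrating the function-object style.
struct example_fetch_set_fn {
  template <typename Atomic>
  bool operator()(Atomic& atomic, std::size_t bit) const {
    using Integer = decltype(atomic.load());
    auto mask = Integer(Integer{1} << bit);
    // fetch_or returns the previous value; masking it out tells whether the
    // bit was already set before this call
    return (atomic.fetch_or(mask) & mask);
  }
};
// A single program-wide instance; callers may copy it freely,
// e.g. auto op = example_fetch_set;
inline constexpr example_fetch_set_fn example_fetch_set{};
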
@@ -22,8 +22,6 @@
#include <tuple>
#include <type_traits>
#include <folly/Portability.h>
#ifdef _WIN32
#include <intrin.h>
#endif
@@ -111,29 +109,41 @@ namespace detail {
// At the time of writing, it seems that gcc 7 and greater can make this
// optimization while clang cannot - https://gcc.godbolt.org/z/Q83rxX
template <typename Atomic>
bool atomic_fetch_set_fallback(
Atomic& atomic, std::size_t bit, std::memory_order order) {
using Integer = decltype(atomic.load());
auto mask = Integer(Integer{0b1} << bit);
return (atomic.fetch_or(mask, order) & mask);
}
template <typename Atomic>
bool atomic_fetch_reset_fallback(
Atomic& atomic, std::size_t bit, std::memory_order order) {
using Integer = decltype(atomic.load());
auto mask = Integer(Integer{0b1} << bit);
return (atomic.fetch_and(Integer(~mask), order) & mask);
}
template <typename Atomic>
bool atomic_fetch_flip_fallback(
Atomic& atomic, std::size_t bit, std::memory_order order) {
using Integer = decltype(atomic.load());
auto mask = Integer(Integer{0b1} << bit);
return (atomic.fetch_xor(mask, order) & mask);
}
struct atomic_fetch_set_fallback_fn {
template <typename Atomic>
bool operator()(
Atomic& atomic, std::size_t bit, std::memory_order order) const {
using Integer = decltype(atomic.load());
auto mask = Integer(Integer{0b1} << bit);
return (atomic.fetch_or(mask, order) & mask);
}
};
FOLLY_INLINE_VARIABLE constexpr atomic_fetch_set_fallback_fn
atomic_fetch_set_fallback{};
struct atomic_fetch_reset_fallback_fn {
template <typename Atomic>
bool operator()(
Atomic& atomic, std::size_t bit, std::memory_order order) const {
using Integer = decltype(atomic.load());
auto mask = Integer(Integer{0b1} << bit);
return (atomic.fetch_and(Integer(~mask), order) & mask);
}
};
FOLLY_INLINE_VARIABLE constexpr atomic_fetch_reset_fallback_fn
atomic_fetch_reset_fallback{};
struct atomic_fetch_flip_fallback_fn {
template <typename Atomic>
bool operator()(
Atomic& atomic, std::size_t bit, std::memory_order order) const {
using Integer = decltype(atomic.load());
auto mask = Integer(Integer{0b1} << bit);
return (atomic.fetch_xor(mask, order) & mask);
}
};
FOLLY_INLINE_VARIABLE constexpr atomic_fetch_flip_fallback_fn
atomic_fetch_flip_fallback{};
/**
* A simple trait to determine if the given type is an instantiation of
@@ -214,35 +224,64 @@ inline bool atomic_fetch_flip_native(
#else
#define FOLLY_DETAIL_ATOMIC_BIT_OP_DEFINE(instr) \
struct atomic_fetch_bit_op_native_##instr##_fn { \
template <typename I> \
FOLLY_ERASE bool operator()(I* ptr, I bit) const { \
bool out = false; \
if (sizeof(I) == 2) { \
asm volatile("lock " #instr "w %1, (%2); setc %0" \
: "=r"(out) \
: "ri"(bit), "r"(ptr) \
: "memory", "flags"); \
} \
if (sizeof(I) == 4) { \
asm volatile("lock " #instr "l %1, (%2); setc %0" \
: "=r"(out) \
: "ri"(bit), "r"(ptr) \
: "memory", "flags"); \
} \
if (sizeof(I) == 8) { \
asm volatile("lock " #instr "q %1, (%2); setc %0" \
: "=r"(out) \
: "ri"(bit), "r"(ptr) \
: "memory", "flags"); \
} \
return out; \
} \
}; \
FOLLY_INLINE_VARIABLE constexpr atomic_fetch_bit_op_native_##instr##_fn \
atomic_fetch_bit_op_native_##instr
FOLLY_DETAIL_ATOMIC_BIT_OP_DEFINE(bts);
FOLLY_DETAIL_ATOMIC_BIT_OP_DEFINE(btr);
FOLLY_DETAIL_ATOMIC_BIT_OP_DEFINE(btc);
#undef FOLLY_DETAIL_ATOMIC_BIT_OP_DEFINE
template <typename Integer, typename Op, typename Fb>
FOLLY_ERASE bool atomic_fetch_bit_op_native_(
std::atomic<Integer>& atomic,
std::size_t bit,
std::memory_order order,
Op op,
Fb fb) {
constexpr auto atomic_size = sizeof(Integer);
constexpr auto lo_size = std::size_t(2);
constexpr auto hi_size = std::size_t(8);
// some versions of TSAN do not properly instrument the inline assembly
if (atomic_size < lo_size || atomic_size > hi_size || folly::kIsSanitize) {
return fb(atomic, bit, order);
}
return op(reinterpret_cast<Integer*>(&atomic), Integer(bit));
}
template <typename Integer>
inline bool atomic_fetch_set_native(
std::atomic<Integer>& atomic, std::size_t bit, std::memory_order order) {
auto previous = false;
if /* constexpr */ (sizeof(Integer) == 2) {
auto pointer = reinterpret_cast<std::uint16_t*>(&atomic);
asm volatile("lock; btsw %1, (%2); setc %0"
: "=r"(previous)
: "ri"(static_cast<std::uint16_t>(bit)), "r"(pointer)
: "memory", "flags");
} else if /* constexpr */ (sizeof(Integer) == 4) {
auto pointer = reinterpret_cast<std::uint32_t*>(&atomic);
asm volatile("lock; btsl %1, (%2); setc %0"
: "=r"(previous)
: "ri"(static_cast<std::uint32_t>(bit)), "r"(pointer)
: "memory", "flags");
} else if /* constexpr */ (sizeof(Integer) == 8) {
auto pointer = reinterpret_cast<std::uint64_t*>(&atomic);
asm volatile("lock; btsq %1, (%2); setc %0"
: "=r"(previous)
: "ri"(static_cast<std::uint64_t>(bit)), "r"(pointer)
: "memory", "flags");
} else {
assert(sizeof(Integer) == 1);
return atomic_fetch_set_fallback(atomic, bit, order);
}
return previous;
auto op = atomic_fetch_bit_op_native_bts;
auto fb = atomic_fetch_set_fallback;
return atomic_fetch_bit_op_native_(atomic, bit, order, op, fb);
}
template <typename Atomic>
@@ -255,32 +294,9 @@ inline bool atomic_fetch_set_native(
template <typename Integer>
inline bool atomic_fetch_reset_native(
std::atomic<Integer>& atomic, std::size_t bit, std::memory_order order) {
auto previous = false;
if /* constexpr */ (sizeof(Integer) == 2) {
auto pointer = reinterpret_cast<std::uint16_t*>(&atomic);
asm volatile("lock; btrw %1, (%2); setc %0"
: "=r"(previous)
: "ri"(static_cast<std::uint16_t>(bit)), "r"(pointer)
: "memory", "flags");
} else if /* constexpr */ (sizeof(Integer) == 4) {
auto pointer = reinterpret_cast<std::uint32_t*>(&atomic);
asm volatile("lock; btrl %1, (%2); setc %0"
: "=r"(previous)
: "ri"(static_cast<std::uint32_t>(bit)), "r"(pointer)
: "memory", "flags");
} else if /* constexpr */ (sizeof(Integer) == 8) {
auto pointer = reinterpret_cast<std::uint64_t*>(&atomic);
asm volatile("lock; btrq %1, (%2); setc %0"
: "=r"(previous)
: "ri"(static_cast<std::uint64_t>(bit)), "r"(pointer)
: "memory", "flags");
} else {
assert(sizeof(Integer) == 1);
return atomic_fetch_reset_fallback(atomic, bit, order);
}
return previous;
auto op = atomic_fetch_bit_op_native_btr;
auto fb = atomic_fetch_reset_fallback;
return atomic_fetch_bit_op_native_(atomic, bit, order, op, fb);
}
template <typename Atomic>
@@ -293,32 +309,9 @@ bool atomic_fetch_reset_native(
template <typename Integer>
inline bool atomic_fetch_flip_native(
std::atomic<Integer>& atomic, std::size_t bit, std::memory_order order) {
auto previous = false;
if /* constexpr */ (sizeof(Integer) == 2) {
auto pointer = reinterpret_cast<std::uint16_t*>(&atomic);
asm volatile("lock; btcw %1, (%2); setc %0"
: "=r"(previous)
: "ri"(static_cast<std::uint16_t>(bit)), "r"(pointer)
: "memory", "flags");
} else if /* constexpr */ (sizeof(Integer) == 4) {
auto pointer = reinterpret_cast<std::uint32_t*>(&atomic);
asm volatile("lock; btcl %1, (%2); setc %0"
: "=r"(previous)
: "ri"(static_cast<std::uint32_t>(bit)), "r"(pointer)
: "memory", "flags");
} else if /* constexpr */ (sizeof(Integer) == 8) {
auto pointer = reinterpret_cast<std::uint64_t*>(&atomic);
asm volatile("lock; btcq %1, (%2); setc %0"
: "=r"(previous)
: "ri"(static_cast<std::uint64_t>(bit)), "r"(pointer)
: "memory", "flags");
} else {
assert(sizeof(Integer) == 1);
return atomic_fetch_flip_fallback(atomic, bit, order);
}
return previous;
auto op = atomic_fetch_bit_op_native_btc;
auto fb = atomic_fetch_flip_fallback;
return atomic_fetch_bit_op_native_(atomic, bit, order, op, fb);
}
template <typename Atomic>
@@ -332,77 +325,50 @@ bool atomic_fetch_flip_native(
#else
template <typename Atomic>
bool atomic_fetch_set_native(Atomic&, std::size_t, std::memory_order) noexcept {
// This should never be called on non x86_64 platforms.
std::terminate();
}
template <typename Atomic>
bool atomic_fetch_reset_native(
Atomic&, std::size_t, std::memory_order) noexcept {
// This should never be called on non x86_64 platforms.
std::terminate();
}
template <typename Atomic>
bool atomic_fetch_flip_native(
Atomic&, std::size_t, std::memory_order) noexcept {
// This should never be called on non x86_64 platforms.
std::terminate();
}
using atomic_fetch_set_native_fn = detail::atomic_fetch_set_fallback_fn;
FOLLY_INLINE_VARIABLE constexpr atomic_fetch_set_native_fn
atomic_fetch_set_native{};
#endif
using atomic_fetch_reset_native_fn = detail::atomic_fetch_reset_fallback_fn;
FOLLY_INLINE_VARIABLE constexpr atomic_fetch_reset_native_fn
atomic_fetch_reset_native{};
} // namespace detail
using atomic_fetch_flip_native_fn = detail::atomic_fetch_flip_fallback_fn;
FOLLY_INLINE_VARIABLE constexpr atomic_fetch_flip_native_fn
atomic_fetch_flip_native{};
#endif
template <typename Atomic>
bool atomic_fetch_set(Atomic& atomic, std::size_t bit, std::memory_order mo) {
void atomic_fetch_bit_op_check_(Atomic& atomic, std::size_t bit) {
using Integer = decltype(atomic.load());
static_assert(std::is_unsigned<Integer>{}, "");
static_assert(!std::is_const<Atomic>{}, "");
assert(bit < (sizeof(Integer) * 8));
// do the optimized thing on x86 builds. Also, some versions of TSAN do not
// properly instrument the inline assembly, so avoid it when TSAN is enabled
if (folly::kIsArchAmd64 && !folly::kIsSanitizeThread) {
return detail::atomic_fetch_set_native(atomic, bit, mo);
} else {
// otherwise default to the default implementation using fetch_or()
return detail::atomic_fetch_set_fallback(atomic, bit, mo);
}
(void)bit;
}
template <typename Atomic>
bool atomic_fetch_reset(Atomic& atomic, std::size_t bit, std::memory_order mo) {
using Integer = decltype(atomic.load());
static_assert(std::is_unsigned<Integer>{}, "");
static_assert(!std::is_const<Atomic>{}, "");
assert(bit < (sizeof(Integer) * 8));
} // namespace detail
// do the optimized thing on x86 builds. Also, some versions of TSAN do not
// properly instrument the inline assembly, so avoid it when TSAN is enabled
if (folly::kIsArchAmd64 && !folly::kIsSanitizeThread) {
return detail::atomic_fetch_reset_native(atomic, bit, mo);
} else {
// otherwise default to the default implementation using fetch_and()
return detail::atomic_fetch_reset_fallback(atomic, bit, mo);
}
template <typename Atomic>
bool atomic_fetch_set_fn::operator()(
Atomic& atomic, std::size_t bit, std::memory_order mo) const {
detail::atomic_fetch_bit_op_check_(atomic, bit);
return detail::atomic_fetch_set_native(atomic, bit, mo);
}
template <typename Atomic>
bool atomic_fetch_flip(Atomic& atomic, std::size_t bit, std::memory_order mo) {
using Integer = decltype(atomic.load());
static_assert(std::is_unsigned<Integer>{}, "");
static_assert(!std::is_const<Atomic>{}, "");
assert(bit < (sizeof(Integer) * 8));
bool atomic_fetch_reset_fn::operator()(
Atomic& atomic, std::size_t bit, std::memory_order mo) const {
detail::atomic_fetch_bit_op_check_(atomic, bit);
return detail::atomic_fetch_reset_native(atomic, bit, mo);
}
// do the optimized thing on x86 builds. Also, some versions of TSAN do not
// properly instrument the inline assembly, so avoid it when TSAN is enabled
if (folly::kIsArchAmd64 && !folly::kIsSanitizeThread) {
return detail::atomic_fetch_flip_native(atomic, bit, mo);
} else {
// otherwise default to the default implementation using fetch_and()
return detail::atomic_fetch_flip_fallback(atomic, bit, mo);
}
template <typename Atomic>
bool atomic_fetch_flip_fn::operator()(
Atomic& atomic, std::size_t bit, std::memory_order mo) const {
detail::atomic_fetch_bit_op_check_(atomic, bit);
return detail::atomic_fetch_flip_native(atomic, bit, mo);
}
} // namespace folly
@@ -19,6 +19,7 @@
#include <atomic>
#include <cstdint>
#include <folly/Portability.h>
#include <folly/Traits.h>
namespace folly {
@@ -64,11 +65,14 @@ bool atomic_compare_exchange_strong_explicit(
// Uses an optimized implementation when available, otherwise falling back to
// Atomic::fetch_or with mask. The optimization is currently available for
// std::atomic on x86, using the bts instruction.
template <typename Atomic>
bool atomic_fetch_set(
Atomic& atomic,
std::size_t bit,
std::memory_order order = std::memory_order_seq_cst);
struct atomic_fetch_set_fn {
template <typename Atomic>
bool operator()(
Atomic& atomic,
std::size_t bit,
std::memory_order order = std::memory_order_seq_cst) const;
};
FOLLY_INLINE_VARIABLE constexpr atomic_fetch_set_fn atomic_fetch_set{};
// atomic_fetch_reset
//
@@ -83,11 +87,14 @@ bool atomic_fetch_set(
// Uses an optimized implementation when available, otherwise falling back to
// Atomic::fetch_and with mask. The optimization is currently available for
// std::atomic on x86, using the btr instruction.
template <typename Atomic>
bool atomic_fetch_reset(
Atomic& atomic,
std::size_t bit,
std::memory_order order = std::memory_order_seq_cst);
struct atomic_fetch_reset_fn {
template <typename Atomic>
bool operator()(
Atomic& atomic,
std::size_t bit,
std::memory_order order = std::memory_order_seq_cst) const;
};
FOLLY_INLINE_VARIABLE constexpr atomic_fetch_reset_fn atomic_fetch_reset{};
// atomic_fetch_flip
//
@@ -101,11 +108,14 @@ bool atomic_fetch_reset(
// Uses an optimized implementation when available, otherwise falling back to
// Atomic::fetch_xor with mask. The optimization is currently available for
// std::atomic on x86, using the btc instruction.
template <typename Atomic>
bool atomic_fetch_flip(
Atomic& atomic,
std::size_t bit,
std::memory_order order = std::memory_order_seq_cst);
struct atomic_fetch_flip_fn {
template <typename Atomic>
bool operator()(
Atomic& atomic,
std::size_t bit,
std::memory_order order = std::memory_order_seq_cst) const;
};
FOLLY_INLINE_VARIABLE constexpr atomic_fetch_flip_fn atomic_fetch_flip{};
} // namespace folly
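
Call sites do not change with the move from function templates to these function objects; a brief usage sketch against the API documented above (illustrative only, assuming the usual folly/synchronization/AtomicUtil.h include):

#include <atomic>
#include <cstdint>
#include <folly/synchronization/AtomicUtil.h>

void example() {
  std::atomic<std::uint32_t> flags{0};
  // set bit 3; returns the previous value of that bit (false here)
  bool was_set = folly::atomic_fetch_set(flags, 3);
  // clear bit 3; returns true because the bit was set above
  bool was_on = folly::atomic_fetch_reset(flags, 3);
  // toggle bit 0 with an explicit memory order
  bool prior = folly::atomic_fetch_flip(flags, 0, std::memory_order_relaxed);
  (void)was_set;
  (void)was_on;
  (void)prior;
}
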
......
@@ -119,15 +119,6 @@ FOLLY_ATOMIC_FETCH_BIT_OP_CHECK_FIX(flip, 16, 11)
#undef FOLLY_ATOMIC_FETCH_BIT_OP_CHECK_FIX
#undef FOLLY_ATOMIC_FETCH_BIT_OP_CHECK_VAR
namespace atomic_util_access {
FOLLY_CREATE_FREE_INVOKER_SUITE(atomic_fetch_set, folly);
FOLLY_CREATE_FREE_INVOKER_SUITE(atomic_fetch_reset, folly);
FOLLY_CREATE_FREE_INVOKER_SUITE(atomic_fetch_flip, folly);
FOLLY_CREATE_FREE_INVOKER_SUITE(atomic_fetch_set_fallback, folly::detail);
FOLLY_CREATE_FREE_INVOKER_SUITE(atomic_fetch_reset_fallback, folly::detail);
FOLLY_CREATE_FREE_INVOKER_SUITE(atomic_fetch_flip_fallback, folly::detail);
} // namespace atomic_util_access
namespace {
enum class what { drop, keep, cond };
@@ -247,12 +238,12 @@ BENCHMARK(atomic_fetch_set_u8_var_drop_monopoly, iters) {
}
BENCHMARK(atomic_fetch_set_u8_var_drop_fallback, iters) {
auto op = atomic_util_access::atomic_fetch_set_fallback;
auto op = folly::detail::atomic_fetch_set_fallback;
atomic_fetch_op_var_(uint8_t(0), op, what_constant<what::drop>{}, iters);
}
BENCHMARK(atomic_fetch_set_u8_var_drop_native, iters) {
auto op = atomic_util_access::atomic_fetch_set;
auto op = folly::atomic_fetch_set;
atomic_fetch_op_var_(uint8_t(0), op, what_constant<what::drop>{}, iters);
}
@@ -262,12 +253,12 @@ BENCHMARK(atomic_fetch_set_u8_var_keep_monopoly, iters) {
}
BENCHMARK(atomic_fetch_set_u8_var_keep_fallback, iters) {
auto op = atomic_util_access::atomic_fetch_set_fallback;
auto op = folly::detail::atomic_fetch_set_fallback;
atomic_fetch_op_var_(uint8_t(0), op, what_constant<what::keep>{}, iters);
}
BENCHMARK(atomic_fetch_set_u8_var_keep_native, iters) {
auto op = atomic_util_access::atomic_fetch_set;
auto op = folly::atomic_fetch_set;
atomic_fetch_op_var_(uint8_t(0), op, what_constant<what::keep>{}, iters);
}
@@ -277,12 +268,12 @@ BENCHMARK(atomic_fetch_set_u8_var_cond_monopoly, iters) {
}
BENCHMARK(atomic_fetch_set_u8_var_cond_fallback, iters) {
auto op = atomic_util_access::atomic_fetch_set_fallback;
auto op = folly::detail::atomic_fetch_set_fallback;
atomic_fetch_op_var_(uint8_t(0), op, what_constant<what::cond>{}, iters);
}
BENCHMARK(atomic_fetch_set_u8_var_cond_native, iters) {
auto op = atomic_util_access::atomic_fetch_set;
auto op = folly::atomic_fetch_set;
atomic_fetch_op_var_(uint8_t(0), op, what_constant<what::cond>{}, iters);
}
@@ -294,12 +285,12 @@ BENCHMARK(atomic_fetch_set_u8_fix_drop_monopoly, iters) {
}
BENCHMARK(atomic_fetch_set_u8_fix_drop_fallback, iters) {
auto op = atomic_util_access::atomic_fetch_set_fallback;
auto op = folly::detail::atomic_fetch_set_fallback;
atomic_fetch_op_fix_(uint8_t(0), op, what_constant<what::drop>{}, iters);
}
BENCHMARK(atomic_fetch_set_u8_fix_drop_native, iters) {
auto op = atomic_util_access::atomic_fetch_set;
auto op = folly::atomic_fetch_set;
atomic_fetch_op_fix_(uint8_t(0), op, what_constant<what::drop>{}, iters);
}
@@ -309,12 +300,12 @@ BENCHMARK(atomic_fetch_set_u8_fix_keep_monopoly, iters) {
}
BENCHMARK(atomic_fetch_set_u8_fix_keep_fallback, iters) {
auto op = atomic_util_access::atomic_fetch_set_fallback;
auto op = folly::detail::atomic_fetch_set_fallback;
atomic_fetch_op_fix_(uint8_t(0), op, what_constant<what::keep>{}, iters);
}
BENCHMARK(atomic_fetch_set_u8_fix_keep_native, iters) {
auto op = atomic_util_access::atomic_fetch_set;
auto op = folly::atomic_fetch_set;
atomic_fetch_op_fix_(uint8_t(0), op, what_constant<what::keep>{}, iters);
}
@@ -324,12 +315,12 @@ BENCHMARK(atomic_fetch_set_u8_fix_cond_monopoly, iters) {
}
BENCHMARK(atomic_fetch_set_u8_fix_cond_fallback, iters) {
auto op = atomic_util_access::atomic_fetch_set_fallback;
auto op = folly::detail::atomic_fetch_set_fallback;
atomic_fetch_op_fix_(uint8_t(0), op, what_constant<what::cond>{}, iters);
}
BENCHMARK(atomic_fetch_set_u8_fix_cond_native, iters) {
auto op = atomic_util_access::atomic_fetch_set;
auto op = folly::atomic_fetch_set;
atomic_fetch_op_fix_(uint8_t(0), op, what_constant<what::cond>{}, iters);
}
@@ -341,12 +332,12 @@ BENCHMARK(atomic_fetch_set_u16_var_drop_monopoly, iters) {
}
BENCHMARK(atomic_fetch_set_u16_var_drop_fallback, iters) {
auto op = atomic_util_access::atomic_fetch_set_fallback;
auto op = folly::detail::atomic_fetch_set_fallback;
atomic_fetch_op_var_(uint16_t(0), op, what_constant<what::drop>{}, iters);
}
BENCHMARK(atomic_fetch_set_u16_var_drop_native, iters) {
auto op = atomic_util_access::atomic_fetch_set;
auto op = folly::atomic_fetch_set;
atomic_fetch_op_var_(uint16_t(0), op, what_constant<what::drop>{}, iters);
}
@@ -356,12 +347,12 @@ BENCHMARK(atomic_fetch_set_u16_var_keep_monopoly, iters) {
}
BENCHMARK(atomic_fetch_set_u16_var_keep_fallback, iters) {
auto op = atomic_util_access::atomic_fetch_set_fallback;
auto op = folly::detail::atomic_fetch_set_fallback;
atomic_fetch_op_var_(uint16_t(0), op, what_constant<what::keep>{}, iters);
}
BENCHMARK(atomic_fetch_set_u16_var_keep_native, iters) {
auto op = atomic_util_access::atomic_fetch_set;
auto op = folly::atomic_fetch_set;
atomic_fetch_op_var_(uint16_t(0), op, what_constant<what::keep>{}, iters);
}
@@ -371,12 +362,12 @@ BENCHMARK(atomic_fetch_set_u16_var_cond_monopoly, iters) {
}
BENCHMARK(atomic_fetch_set_u16_var_cond_fallback, iters) {
auto op = atomic_util_access::atomic_fetch_set_fallback;
auto op = folly::detail::atomic_fetch_set_fallback;
atomic_fetch_op_var_(uint16_t(0), op, what_constant<what::cond>{}, iters);
}
BENCHMARK(atomic_fetch_set_u16_var_cond_native, iters) {
auto op = atomic_util_access::atomic_fetch_set;
auto op = folly::atomic_fetch_set;
atomic_fetch_op_var_(uint16_t(0), op, what_constant<what::cond>{}, iters);
}
@@ -388,12 +379,12 @@ BENCHMARK(atomic_fetch_set_u16_fix_drop_monopoly, iters) {
}
BENCHMARK(atomic_fetch_set_u16_fix_drop_fallback, iters) {
auto op = atomic_util_access::atomic_fetch_set_fallback;
auto op = folly::detail::atomic_fetch_set_fallback;
atomic_fetch_op_fix_(uint16_t(0), op, what_constant<what::drop>{}, iters);
}
BENCHMARK(atomic_fetch_set_u16_fix_drop_native, iters) {
auto op = atomic_util_access::atomic_fetch_set;
auto op = folly::atomic_fetch_set;
atomic_fetch_op_fix_(uint16_t(0), op, what_constant<what::drop>{}, iters);
}
@@ -403,12 +394,12 @@ BENCHMARK(atomic_fetch_set_u16_fix_keep_monopoly, iters) {
}
BENCHMARK(atomic_fetch_set_u16_fix_keep_fallback, iters) {
auto op = atomic_util_access::atomic_fetch_set_fallback;
auto op = folly::detail::atomic_fetch_set_fallback;
atomic_fetch_op_fix_(uint16_t(0), op, what_constant<what::keep>{}, iters);
}
BENCHMARK(atomic_fetch_set_u16_fix_keep_native, iters) {
auto op = atomic_util_access::atomic_fetch_set;
auto op = folly::atomic_fetch_set;
atomic_fetch_op_fix_(uint16_t(0), op, what_constant<what::keep>{}, iters);
}
@@ -418,12 +409,12 @@ BENCHMARK(atomic_fetch_set_u16_fix_cond_monopoly, iters) {
}
BENCHMARK(atomic_fetch_set_u16_fix_cond_fallback, iters) {
auto op = atomic_util_access::atomic_fetch_set_fallback;
auto op = folly::detail::atomic_fetch_set_fallback;
atomic_fetch_op_fix_(uint16_t(0), op, what_constant<what::cond>{}, iters);
}
BENCHMARK(atomic_fetch_set_u16_fix_cond_native, iters) {
auto op = atomic_util_access::atomic_fetch_set;
auto op = folly::atomic_fetch_set;
atomic_fetch_op_fix_(uint16_t(0), op, what_constant<what::cond>{}, iters);
}
......
@@ -105,15 +105,6 @@ TEST_F(AtomicCompareExchangeSuccTest, examples) {
}
}
namespace access {
FOLLY_CREATE_FREE_INVOKER_SUITE(atomic_fetch_set, folly);
FOLLY_CREATE_FREE_INVOKER_SUITE(atomic_fetch_reset, folly);
FOLLY_CREATE_FREE_INVOKER_SUITE(atomic_fetch_flip, folly);
FOLLY_CREATE_FREE_INVOKER_SUITE(atomic_fetch_set_fallback, folly::detail);
FOLLY_CREATE_FREE_INVOKER_SUITE(atomic_fetch_reset_fallback, folly::detail);
FOLLY_CREATE_FREE_INVOKER_SUITE(atomic_fetch_flip_fallback, folly::detail);
} // namespace access
namespace {
template <typename I>
@@ -125,7 +116,7 @@ struct with_seq_cst : private I {
}
};
template <typename Integer, typename Op = access::atomic_fetch_set_fn>
template <typename Integer, typename Op = folly::atomic_fetch_set_fn>
void atomic_fetch_set_basic(Op fetch_set = {}) {
{
auto&& atomic = std::atomic<Integer>{0};
@@ -165,7 +156,7 @@ void atomic_fetch_set_basic(Op fetch_set = {}) {
}
}
template <typename Integer, typename Op = access::atomic_fetch_reset_fn>
template <typename Integer, typename Op = folly::atomic_fetch_reset_fn>
void atomic_fetch_reset_basic(Op fetch_reset = {}) {
{
auto&& atomic = std::atomic<Integer>{0};
@@ -188,7 +179,7 @@ void atomic_fetch_reset_basic(Op fetch_reset = {}) {
}
}
template <typename Integer, typename Op = access::atomic_fetch_flip_fn>
template <typename Integer, typename Op = folly::atomic_fetch_flip_fn>
void atomic_fetch_flip_basic(Op fetch_flip = {}) {
{
auto&& atomic = std::atomic<Integer>{0};
@@ -214,6 +205,8 @@ void atomic_fetch_flip_basic(Op fetch_flip = {}) {
template <typename Integer>
class Atomic {
public:
using value_type = Integer;
Integer fetch_or(
Integer value, std::memory_order = std::memory_order_seq_cst) {
++counts.set;
@@ -244,7 +237,7 @@ class Atomic {
counts_ counts;
};
template <typename Integer, typename Op = access::atomic_fetch_set_fn>
template <typename Integer, typename Op = folly::atomic_fetch_set_fn>
void atomic_fetch_set_non_std_atomic(Op fetch_set = {}) {
auto atomic = Atomic<Integer>{};
auto& sets = atomic.counts.set;
@@ -264,7 +257,7 @@ void atomic_fetch_set_non_std_atomic(Op fetch_set = {}) {
EXPECT_EQ(atomic.integer_, 0b101);
}
template <typename Integer, typename Op = access::atomic_fetch_reset_fn>
template <typename Integer, typename Op = folly::atomic_fetch_reset_fn>
void atomic_fetch_reset_non_std_atomic(Op fetch_reset = {}) {
auto atomic = Atomic<Integer>{};
auto& sets = atomic.counts.set;
@@ -285,7 +278,7 @@ void atomic_fetch_reset_non_std_atomic(Op fetch_reset = {}) {
EXPECT_EQ(atomic.integer_, 0b010);
}
template <typename Integer, typename Op = access::atomic_fetch_flip_fn>
template <typename Integer, typename Op = folly::atomic_fetch_flip_fn>
void atomic_fetch_flip_non_std_atomic(Op fetch_flip = {}) {
auto atomic = Atomic<Integer>{};
auto& sets = atomic.counts.set;
@@ -354,7 +347,7 @@ TEST_F(AtomicFetchFlipTest, EnsureFetchXorUsed) {
}
TEST_F(AtomicFetchSetTest, FetchSetFallback) {
auto fetch_set = with_seq_cst{access::atomic_fetch_set_fallback};
auto fetch_set = with_seq_cst{folly::detail::atomic_fetch_set_fallback};
atomic_fetch_set_basic<std::uint16_t>(fetch_set);
atomic_fetch_set_basic<std::uint32_t>(fetch_set);
@@ -368,7 +361,7 @@ TEST_F(AtomicFetchSetTest, FetchSetFallback) {
}
TEST_F(AtomicFetchResetTest, FetchResetFallback) {
auto fetch_reset = with_seq_cst{access::atomic_fetch_reset_fallback};
auto fetch_reset = with_seq_cst{folly::detail::atomic_fetch_reset_fallback};
atomic_fetch_reset_basic<std::uint16_t>(fetch_reset);
atomic_fetch_reset_basic<std::uint32_t>(fetch_reset);
@@ -382,7 +375,7 @@ TEST_F(AtomicFetchResetTest, FetchResetFallback) {
}
TEST_F(AtomicFetchFlipTest, FetchFlipFallback) {
auto fetch_flip = with_seq_cst{access::atomic_fetch_flip_fallback};
auto fetch_flip = with_seq_cst{folly::detail::atomic_fetch_flip_fallback};
atomic_fetch_flip_basic<std::uint16_t>(fetch_flip);
atomic_fetch_flip_basic<std::uint32_t>(fetch_flip);
@@ -396,7 +389,7 @@ TEST_F(AtomicFetchFlipTest, FetchFlipFallback) {
}
TEST_F(AtomicFetchSetTest, FetchSetDefault) {
auto fetch_set = access::atomic_fetch_set;
auto fetch_set = folly::atomic_fetch_set;
atomic_fetch_set_basic<std::uint16_t>(fetch_set);
atomic_fetch_set_basic<std::uint32_t>(fetch_set);
@@ -410,7 +403,7 @@ TEST_F(AtomicFetchSetTest, FetchSetDefault) {
}
TEST_F(AtomicFetchResetTest, FetchResetDefault) {
auto fetch_reset = access::atomic_fetch_reset;
auto fetch_reset = folly::atomic_fetch_reset;
atomic_fetch_reset_basic<std::uint16_t>(fetch_reset);
atomic_fetch_reset_basic<std::uint32_t>(fetch_reset);
@@ -424,7 +417,7 @@ TEST_F(AtomicFetchResetTest, FetchResetDefault) {
}
TEST_F(AtomicFetchFlipTest, FetchFlipDefault) {
auto fetch_flip = access::atomic_fetch_flip;
auto fetch_flip = folly::atomic_fetch_flip;
atomic_fetch_flip_basic<std::uint16_t>(fetch_flip);
atomic_fetch_flip_basic<std::uint32_t>(fetch_flip);
......