Commit 39e19a49 authored by Denis Dydychkin's avatar Denis Dydychkin Committed by Facebook GitHub Bot

ConcurrentHashMap: Fix race condition in clear()

Summary: ConcurrentHashMap's clear() implementation had a race condition where bucket_count_ / chunk_count_ might be changed after they were read, resulting in a smaller-than-required replacement buffer being created. This change addresses the race condition and adds a test to reproduce the issue.

Reviewed By: magedm

Differential Revision: D34022761

fbshipit-source-id: 1c8e8c382adf3839cbb4031e85314a02d963a762
parent ad51d2b1
......@@ -462,11 +462,12 @@ class alignas(64) BucketTable {
}
void clear(hazptr_obj_cohort<Atom>* cohort) {
size_t bcount = bucket_count_.load(std::memory_order_relaxed);
size_t bcount;
Buckets* buckets;
auto newbuckets = Buckets::create(bcount, cohort);
{
std::lock_guard<Mutex> g(m_);
bcount = bucket_count_.load(std::memory_order_relaxed);
auto newbuckets = Buckets::create(bcount, cohort);
buckets = buckets_.load(std::memory_order_relaxed);
buckets_.store(newbuckets, std::memory_order_release);
clearSize();
......@@ -1405,11 +1406,12 @@ class alignas(64) SIMDTable {
}
void clear(hazptr_obj_cohort<Atom>* cohort) {
size_t ccount = chunk_count_.load(std::memory_order_relaxed);
size_t ccount;
Chunks* chunks;
auto newchunks = Chunks::create(ccount, cohort);
{
std::lock_guard<Mutex> g(m_);
ccount = chunk_count_.load(std::memory_order_relaxed);
auto newchunks = Chunks::create(ccount, cohort);
chunks = chunks_.load(std::memory_order_relaxed);
chunks_.store(newchunks, std::memory_order_release);
clearSize();
......
......@@ -1047,6 +1047,57 @@ TYPED_TEST_P(ConcurrentHashMapTest, EraseClonedNonCopyable) {
EXPECT_EQ(iter->first, 256 * cloned);
}
TYPED_TEST_P(ConcurrentHashMapTest, ConcurrentInsertClear) {
  // Stress test: many threads concurrently insert while also triggering
  // clear() when the map grows past chm_max_size. Reproduces the race in
  // clear() where bucket_count_/chunk_count_ could change between being
  // read and the replacement buffer being created.
  DeterministicSchedule sched(DeterministicSchedule::uniform(FLAGS_seed));
  /* 8192 and 8x / 9x multipliers are values that tend to reproduce
   * race condition (fixed by this change) more frequently.
   * Test keeps CHM size limited to chm_max_size and clear() if it exceeds.
   * Key space for CHM is limited to chm_max_key and exceeds max size by
   * small margin.
   * Test imitates serial traversal over key space by multiple threads,
   * triggering clear() by multiple threads at once.
   */
  constexpr unsigned long chm_base_size = 8192;
  constexpr unsigned long chm_max_size = chm_base_size * 8;
  constexpr unsigned long chm_max_key = chm_base_size * 9;
  CHM<unsigned long,
      unsigned long,
      std::hash<unsigned long>,
      std::equal_to<unsigned long>,
      std::allocator<uint8_t>,
      8,
      Atom,
      Mutex>
      m(chm_base_size);

  std::vector<std::thread> threads;
  constexpr size_t num_threads = 32;
  constexpr size_t rounds_per_thread = 100000;
  threads.reserve(num_threads);
  for (size_t t = 0; t < num_threads; t++) {
    threads.emplace_back([&, t]() {
      for (size_t i = 0; i < rounds_per_thread; ++i) {
        /* Code simulates traversal over whole key space by all threads
         * combined, with each thread contributing to semi-unique set of keys.
         * Each thread is assigned its own key sequence,
         * with thread_X traversing values X, 32+X, 32*2+X, 32*3+X, ...
         * 32*N+X mod chm_max_key.
         */
        const unsigned long k = (i * num_threads + t) % chm_max_key;
        if (m.size() >= chm_max_size) {
          // Concurrent clear() from multiple threads is the interesting
          // case: it races with in-flight inserts and other clear() calls.
          m.clear();
        }
        m.insert_or_assign(k, k);
      }
    });
  }
  // Wait for all worker threads before the test (and the map) goes away.
  for (auto& t : threads) {
    t.join();
  }
}
REGISTER_TYPED_TEST_SUITE_P(
ConcurrentHashMapTest,
MapTest,
......@@ -1088,7 +1139,8 @@ REGISTER_TYPED_TEST_SUITE_P(
HeterogeneousLookup,
HeterogeneousInsert,
InsertOrAssignIterator,
EraseClonedNonCopyable);
EraseClonedNonCopyable,
ConcurrentInsertClear);
using folly::detail::concurrenthashmap::bucket::BucketTable;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment