Commit a9a04804 authored by Yedidya Feldblum, committed by Chip Turner

Fix a folly build failure under clang: MPMCQueueTest.cpp.

Summary:
[Folly] Fix a folly build failure under clang: MPMCQueueTest.cpp.

clang-v3.4 has a bug triggered by the combination of a lambda expression inside a function template that takes a template name (rather than a type name) as a template parameter.
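As a minimal sketch of the shape that trips the bug (hypothetical names, not the folly code; assuming clang-v3.4 with -std=c++11):

#include <atomic>

// A function template whose template parameter is itself a template
// name (a template template parameter), not an ordinary type.
template <template<typename> class Atom>
void spinUntilZero(Atom<int>& counter) {
  // The lambda expression below, inside such a function template,
  // is the combination that clang-v3.4 mishandles.
  auto isDone = [&counter] { return counter.load() == 0; };
  while (!isDone()) {
  }
}

int main() {
  std::atomic<int> counter(0);
  spinUntilZero<std::atomic>(counter);  // std::atomic passed as a template name
  return 0;
}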

In the interest of building folly under clang-v3.4, this diff extracts each such lambda expression into a separate function template, so that the function templates taking template names as template parameters no longer contain lambda expressions.
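In miniature, the workaround looks like this (hypothetical names, not the folly code): the lambda's body becomes a standalone function template, and the call site builds its callable with std::bind and std::ref instead:

#include <atomic>
#include <functional>

// Extracted thread body: a plain function template, no lambda anywhere.
template <template<typename> class Atom>
void spinThread(Atom<int>& counter) {
  while (counter.load() != 0) {
  }
}

template <template<typename> class Atom>
void runSpin(Atom<int>& counter) {
  // This function template still takes a template name as a template
  // parameter, but it no longer contains a lambda expression.
  auto task = std::bind(spinThread<Atom>, std::ref(counter));
  task();
}

int main() {
  std::atomic<int> counter(0);
  runSpin<std::atomic>(counter);
  return 0;
}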

Test Plan: Build folly/test/MPMCQueueTest.cpp under clang.

Reviewed By: njormrod@fb.com

Subscribers: mathieubaudet, dougw

FB internal diff: D1446279

Tasks: 4723132
parent 3c001a31
@@ -39,6 +39,22 @@ using namespace test;
 
 typedef DeterministicSchedule DSched;
 
+template <template<typename> class Atom>
+void run_mt_sequencer_thread(
+    int numThreads,
+    int numOps,
+    uint32_t init,
+    TurnSequencer<Atom>& seq,
+    Atom<int>& spinThreshold,
+    int& prev,
+    int i) {
+  for (int op = i; op < numOps; op += numThreads) {
+    seq.waitForTurn(init + op, spinThreshold, (op % 32) == 0);
+    EXPECT_EQ(prev, op - 1);
+    prev = op;
+    seq.completeTurn(init + op);
+  }
+}
+
 template <template<typename> class Atom>
 void run_mt_sequencer_test(int numThreads, int numOps, uint32_t init) {
@@ -48,14 +64,9 @@ void run_mt_sequencer_test(int numThreads, int numOps, uint32_t init) {
   int prev = -1;
   std::vector<std::thread> threads(numThreads);
   for (int i = 0; i < numThreads; ++i) {
-    threads[i] = DSched::thread([&, i]{
-      for (int op = i; op < numOps; op += numThreads) {
-        seq.waitForTurn(init + op, spinThreshold, (op % 32) == 0);
-        EXPECT_EQ(prev, op - 1);
-        prev = op;
-        seq.completeTurn(init + op);
-      }
-    });
+    threads[i] = DSched::thread(std::bind(run_mt_sequencer_thread<Atom>,
+        numThreads, numOps, init, std::ref(seq), std::ref(spinThreshold),
+        std::ref(prev), i));
   }
   for (auto& thr : threads) {
@@ -175,6 +186,33 @@ TEST(MPMCQueue, enq_capacity_test) {
   }
 }
 
+template <template<typename> class Atom>
+void runTryEnqDeqThread(
+    int numThreads,
+    int n, /*numOps*/
+    MPMCQueue<int, Atom>& cq,
+    std::atomic<uint64_t>& sum,
+    int t) {
+  uint64_t threadSum = 0;
+  int src = t;
+  // received doesn't reflect any actual values, we just start with
+  // t and increment by numThreads to get the rounding of termination
+  // correct if numThreads doesn't evenly divide numOps
+  int received = t;
+  while (src < n || received < n) {
+    if (src < n && cq.write(src)) {
+      src += numThreads;
+    }
+    int dst;
+    if (received < n && cq.read(dst)) {
+      received += numThreads;
+      threadSum += dst;
+    }
+  }
+  sum += threadSum;
+}
+
 template <template<typename> class Atom>
 void runTryEnqDeqTest(int numThreads, int numOps) {
   // write and read aren't linearizable, so we don't have
@@ -186,26 +224,8 @@ void runTryEnqDeqTest(int numThreads, int numOps) {
   std::vector<std::thread> threads(numThreads);
   std::atomic<uint64_t> sum(0);
   for (int t = 0; t < numThreads; ++t) {
-    threads[t] = DSched::thread([&,t]{
-      uint64_t threadSum = 0;
-      int src = t;
-      // received doesn't reflect any actual values, we just start with
-      // t and increment by numThreads to get the rounding of termination
-      // correct if numThreads doesn't evenly divide numOps
-      int received = t;
-      while (src < n || received < n) {
-        if (src < n && cq.write(src)) {
-          src += numThreads;
-        }
-        int dst;
-        if (received < n && cq.read(dst)) {
-          received += numThreads;
-          threadSum += dst;
-        }
-      }
-      sum += threadSum;
-    });
+    threads[t] = DSched::thread(std::bind(runTryEnqDeqThread<Atom>,
+        numThreads, n, std::ref(cq), std::ref(sum), t));
   }
   for (auto& t : threads) {
     DSched::join(t);
@@ -342,6 +362,26 @@ TEST(MPMCQueue, mt_prod_cons) {
   LOG(INFO) << PC_BENCH(MPMCQueue<int>(100000), 32, 100, n);
 }
 
+template <template<typename> class Atom>
+void runNeverFailThread(
+    int numThreads,
+    int n, /*numOps*/
+    MPMCQueue<int, Atom>& cq,
+    std::atomic<uint64_t>& sum,
+    int t) {
+  uint64_t threadSum = 0;
+  for (int i = t; i < n; i += numThreads) {
+    // enq + deq
+    EXPECT_TRUE(cq.writeIfNotFull(i));
+    int dest = -1;
+    EXPECT_TRUE(cq.readIfNotEmpty(dest));
+    EXPECT_TRUE(dest >= 0);
+    threadSum += dest;
+  }
+  sum += threadSum;
+}
+
 template <template<typename> class Atom>
 uint64_t runNeverFailTest(int numThreads, int numOps) {
   // always #enq >= #deq
@@ -353,19 +393,8 @@ uint64_t runNeverFailTest(int numThreads, int numOps) {
   std::vector<std::thread> threads(numThreads);
   std::atomic<uint64_t> sum(0);
   for (int t = 0; t < numThreads; ++t) {
-    threads[t] = DSched::thread([&,t]{
-      uint64_t threadSum = 0;
-      for (int i = t; i < n; i += numThreads) {
-        // enq + deq
-        EXPECT_TRUE(cq.writeIfNotFull(i));
-        int dest = -1;
-        EXPECT_TRUE(cq.readIfNotEmpty(dest));
-        EXPECT_TRUE(dest >= 0);
-        threadSum += dest;
-      }
-      sum += threadSum;
-    });
+    threads[t] = DSched::thread(std::bind(runNeverFailThread<Atom>,
+        numThreads, n, std::ref(cq), std::ref(sum), t));
   }
   for (auto& t : threads) {
     DSched::join(t);
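One detail of the replacement call sites above is worth spelling out: std::bind stores its bound arguments by value, so the shared state (seq, spinThreshold, prev, cq, sum) must be wrapped in std::ref to keep the by-reference semantics that the lambdas' reference captures provided. A minimal illustration of the difference (hypothetical names, not the folly code):

#include <functional>
#include <iostream>

void bump(int& x) { ++x; }

int main() {
  int n = 0;
  auto boundByValue = std::bind(bump, n);          // bump gets a stored copy of n
  auto boundByRef = std::bind(bump, std::ref(n));  // bump gets a reference to n
  boundByValue();
  std::cout << n << std::endl;  // prints 0: only the stored copy was incremented
  boundByRef();
  std::cout << n << std::endl;  // prints 1: the original was incremented
  return 0;
}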