Commit a9a04804 authored by Yedidya Feldblum, committed by Chip Turner

Fix a folly build failure under clang: MPMCQueueTest.cpp.

Summary:
[Folly] Fix a folly build failure under clang: MPMCQueueTest.cpp.

clang-v3.4 has a bug that is triggered when a lambda expression appears inside a function template that takes a template name (rather than a type name) as a template argument.

In the interest of building folly under clang-v3.4, this diff extracts each such lambda expression into a separate function, so that the function templates that take a template name as a template argument no longer contain lambda expressions.
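For illustration only, here is a minimal sketch of the workaround pattern; the names runLoopThread and runLoopTest are hypothetical stand-ins for the real helpers in MPMCQueueTest.cpp, not code from this diff:

    #include <functional>
    #include <thread>

    // Hypothetical example of the workaround, not the actual test code.
    //
    // Problematic under clang-v3.4: a lambda defined directly inside a
    // function template whose parameter is a template name, e.g.
    //
    //   template <template <typename> class Atom>
    //   void runLoopTest(int n) {
    //     std::thread t([&] { /* work using Atom<int> */ });  // trips the bug
    //     ...
    //   }

    // Workaround: hoist the lambda body into its own function template ...
    template <template <typename> class Atom>
    void runLoopThread(Atom<int>& counter, int n) {
      for (int i = 0; i < n; ++i) {
        ++counter;  // stand-in for the real per-thread work
      }
    }

    // ... and bind it to its arguments rather than capturing them in a lambda.
    template <template <typename> class Atom>
    void runLoopTest(int n) {
      Atom<int> counter(0);
      std::thread t(std::bind(runLoopThread<Atom>, std::ref(counter), n));
      t.join();
    }

    // Usage: runLoopTest<std::atomic>(1000);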

Test Plan: Build folly/test/MPMCQueueTest.cpp under clang.

Reviewed By: njormrod@fb.com

Subscribers: mathieubaudet, dougw

FB internal diff: D1446279

Tasks: 4723132
parent 3c001a31
@@ -39,6 +39,22 @@ using namespace test;
typedef DeterministicSchedule DSched;
template <template<typename> class Atom>
void run_mt_sequencer_thread(
int numThreads,
int numOps,
uint32_t init,
TurnSequencer<Atom>& seq,
Atom<int>& spinThreshold,
int& prev,
int i) {
for (int op = i; op < numOps; op += numThreads) {
seq.waitForTurn(init + op, spinThreshold, (op % 32) == 0);
EXPECT_EQ(prev, op - 1);
prev = op;
seq.completeTurn(init + op);
}
}
template <template<typename> class Atom>
void run_mt_sequencer_test(int numThreads, int numOps, uint32_t init) {
@@ -48,14 +64,9 @@ void run_mt_sequencer_test(int numThreads, int numOps, uint32_t init) {
int prev = -1;
std::vector<std::thread> threads(numThreads);
for (int i = 0; i < numThreads; ++i) {
threads[i] = DSched::thread([&, i]{
for (int op = i; op < numOps; op += numThreads) {
seq.waitForTurn(init + op, spinThreshold, (op % 32) == 0);
EXPECT_EQ(prev, op - 1);
prev = op;
seq.completeTurn(init + op);
}
});
threads[i] = DSched::thread(std::bind(run_mt_sequencer_thread<Atom>,
numThreads, numOps, init, std::ref(seq), std::ref(spinThreshold),
std::ref(prev), i));
}
for (auto& thr : threads) {
@@ -176,17 +187,12 @@ TEST(MPMCQueue, enq_capacity_test) {
}
template <template<typename> class Atom>
void runTryEnqDeqTest(int numThreads, int numOps) {
// write and read aren't linearizable, so we don't have
// hard guarantees on their individual behavior. We can still test
// correctness in aggregate
MPMCQueue<int,Atom> cq(numThreads);
uint64_t n = numOps;
std::vector<std::thread> threads(numThreads);
std::atomic<uint64_t> sum(0);
for (int t = 0; t < numThreads; ++t) {
threads[t] = DSched::thread([&,t]{
void runTryEnqDeqThread(
int numThreads,
int n, /*numOps*/
MPMCQueue<int, Atom>& cq,
std::atomic<uint64_t>& sum,
int t) {
uint64_t threadSum = 0;
int src = t;
// received doesn't reflect any actual values, we just start with
@@ -205,7 +211,21 @@ void runTryEnqDeqTest(int numThreads, int numOps) {
}
}
sum += threadSum;
});
}
template <template<typename> class Atom>
void runTryEnqDeqTest(int numThreads, int numOps) {
// write and read aren't linearizable, so we don't have
// hard guarantees on their individual behavior. We can still test
// correctness in aggregate
MPMCQueue<int,Atom> cq(numThreads);
uint64_t n = numOps;
std::vector<std::thread> threads(numThreads);
std::atomic<uint64_t> sum(0);
for (int t = 0; t < numThreads; ++t) {
threads[t] = DSched::thread(std::bind(runTryEnqDeqThread<Atom>,
numThreads, n, std::ref(cq), std::ref(sum), t));
}
for (auto& t : threads) {
DSched::join(t);
@@ -343,17 +363,12 @@ TEST(MPMCQueue, mt_prod_cons) {
}
template <template<typename> class Atom>
uint64_t runNeverFailTest(int numThreads, int numOps) {
// always #enq >= #deq
MPMCQueue<int,Atom> cq(numThreads);
uint64_t n = numOps;
auto beginMicro = nowMicro();
std::vector<std::thread> threads(numThreads);
std::atomic<uint64_t> sum(0);
for (int t = 0; t < numThreads; ++t) {
threads[t] = DSched::thread([&,t]{
void runNeverFailThread(
int numThreads,
int n, /*numOps*/
MPMCQueue<int, Atom>& cq,
std::atomic<uint64_t>& sum,
int t) {
uint64_t threadSum = 0;
for (int i = t; i < n; i += numThreads) {
// enq + deq
@@ -365,7 +380,21 @@ uint64_t runNeverFailTest(int numThreads, int numOps) {
threadSum += dest;
}
sum += threadSum;
});
}
template <template<typename> class Atom>
uint64_t runNeverFailTest(int numThreads, int numOps) {
// always #enq >= #deq
MPMCQueue<int,Atom> cq(numThreads);
uint64_t n = numOps;
auto beginMicro = nowMicro();
std::vector<std::thread> threads(numThreads);
std::atomic<uint64_t> sum(0);
for (int t = 0; t < numThreads; ++t) {
threads[t] = DSched::thread(std::bind(runNeverFailThread<Atom>,
numThreads, n, std::ref(cq), std::ref(sum), t));
}
for (auto& t : threads) {
DSched::join(t);