Commit 2750533d authored by Yedidya Feldblum, committed by Facebook Github Bot

Fix violations of unused-lambda-capture

Summary: [Folly] Fix violations of `unused-lambda-capture`.

Reviewed By: Orvid

Differential Revision: D5060391

fbshipit-source-id: 34aabc847719603d13da8f2b52a7ec187d803a4a
parent 5bd3bab0
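
For context: clang's -Wunused-lambda-capture warns when a lambda explicitly captures a variable that its body never actually needs. Constants that are usable without capture (e.g. constexpr locals) also trigger it, which is why captures such as numIters, kNumThreads, or numIterations in the hunks below can simply be dropped even though the bodies still name them. A minimal standalone sketch of the warning and the drop-the-capture fix (illustrative only, not part of this commit):

// Illustrative sketch, not from this commit. Compile with clang and
// -Wunused-lambda-capture (often combined with -Werror).
#include <cstdio>

int main() {
  int used = 1;
  int unused = 2;
  constexpr int limit = 3; // usable inside a lambda without being captured

  // warning: lambda capture 'unused' is not used [-Wunused-lambda-capture]
  auto warns = [used, unused] { return used; };

  // Fix: drop the capture the body never needs; 'limit' needs no capture at
  // all because it is a constant expression.
  auto fixed = [used] { return used + limit; };

  std::printf("%d %d\n", warns(), fixed());
  return 0;
}
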
......
@@ -156,7 +156,7 @@ TEST(AtomicSharedPtr, DeterministicTest) {
       fooptr(foo);
   std::vector<std::thread> threads(FLAGS_num_threads);
   for (int tid = 0; tid < FLAGS_num_threads; ++tid) {
-    threads[tid] = DSched::thread([&, tid]() {
+    threads[tid] = DSched::thread([&]() {
       for (int i = 0; i < 1000; i++) {
         auto l = fooptr.load();
         EXPECT_TRUE(l.get() != nullptr);
......
......
@@ -36,29 +36,29 @@ void basicTest() {
   std::vector<std::thread> ts;
   folly::Baton<> threadBatons[numThreads];
   for (size_t t = 0; t < numThreads; ++t) {
-    ts.emplace_back([&count, &b, &got0, numIters, t, &threadBatons]() {
-      for (size_t i = 0; i < numIters; ++i) {
-        auto ret = ++count;
-        EXPECT_TRUE(ret > 1);
-        if (i == 0) {
-          threadBatons[t].post();
-        }
-      }
-      if (t == 0) {
-        b.post();
-      }
-      for (size_t i = 0; i < numIters; ++i) {
-        auto ret = --count;
-        if (ret == 0) {
-          ++got0;
-          EXPECT_EQ(numIters - 1, i);
-        }
-      }
-    });
+    ts.emplace_back([&count, &b, &got0, t, &threadBatons] {
+      for (size_t i = 0; i < numIters; ++i) {
+        auto ret = ++count;
+        EXPECT_TRUE(ret > 1);
+        if (i == 0) {
+          threadBatons[t].post();
+        }
+      }
+      if (t == 0) {
+        b.post();
+      }
+      for (size_t i = 0; i < numIters; ++i) {
+        auto ret = --count;
+        if (ret == 0) {
+          ++got0;
+          EXPECT_EQ(numIters - 1, i);
+        }
+      }
+    });
   }
   for (size_t t = 0; t < numThreads; ++t) {
......
......
@@ -414,7 +414,7 @@ TEST(FiberManager, addTasksVoid) {
   manager.addTask([&]() {
     std::vector<std::function<void()>> funcs;
     for (size_t i = 0; i < 3; ++i) {
-      funcs.push_back([i, &pendingFibers]() {
+      funcs.push_back([&pendingFibers]() {
         await([&pendingFibers](Promise<int> promise) {
           pendingFibers.push_back(std::move(promise));
         });
......
@@ -679,7 +679,7 @@ TEST(FiberManager, collectNThrow) {
   manager.addTask([&]() {
     std::vector<std::function<int()>> funcs;
     for (size_t i = 0; i < 3; ++i) {
-      funcs.push_back([i, &pendingFibers]() -> size_t {
+      funcs.push_back([&pendingFibers]() -> size_t {
         await([&pendingFibers](Promise<int> promise) {
           pendingFibers.push_back(std::move(promise));
         });
......
@@ -718,7 +718,7 @@ TEST(FiberManager, collectNVoid) {
   manager.addTask([&]() {
     std::vector<std::function<void()>> funcs;
     for (size_t i = 0; i < 3; ++i) {
-      funcs.push_back([i, &pendingFibers]() {
+      funcs.push_back([&pendingFibers]() {
         await([&pendingFibers](Promise<int> promise) {
           pendingFibers.push_back(std::move(promise));
         });
......
@@ -754,7 +754,7 @@ TEST(FiberManager, collectNVoidThrow) {
   manager.addTask([&]() {
     std::vector<std::function<void()>> funcs;
     for (size_t i = 0; i < 3; ++i) {
-      funcs.push_back([i, &pendingFibers]() {
+      funcs.push_back([&pendingFibers]() {
         await([&pendingFibers](Promise<int> promise) {
           pendingFibers.push_back(std::move(promise));
         });
......
@@ -832,7 +832,7 @@ TEST(FiberManager, collectAllVoid) {
   manager.addTask([&]() {
     std::vector<std::function<void()>> funcs;
     for (size_t i = 0; i < 3; ++i) {
-      funcs.push_back([i, &pendingFibers]() {
+      funcs.push_back([&pendingFibers]() {
         await([&pendingFibers](Promise<int> promise) {
           pendingFibers.push_back(std::move(promise));
         });
......
@@ -1553,8 +1553,7 @@ TEST(FiberManager, semaphore) {
   int counterA = 0;
   int counterB = 0;
-  auto task = [&sem, kTasks, kIterations, kNumTokens](
-      int& counter, folly::fibers::Baton& baton) {
+  auto task = [&sem, kNumTokens](int& counter, folly::fibers::Baton& baton) {
     FiberManager manager(std::make_unique<EventBaseLoopController>());
     folly::EventBase evb;
     dynamic_cast<EventBaseLoopController&>(manager.loopController())
......
@@ -1733,7 +1732,7 @@ TEST(FiberManager, doubleBatchDispatchTest) {
 template <typename ExecutorT>
 void batchDispatchExceptionHandling(ExecutorT& executor, int i) {
   thread_local BatchDispatcher<int, int, ExecutorT> batchDispatcher(
-      executor, [=, &executor](std::vector<int> &&) -> std::vector<int> {
+      executor, [](std::vector<int> &&) -> std::vector<int> {
         throw std::runtime_error("Surprise!!");
       });
......
......
@@ -137,23 +137,21 @@ TEST(BarrierTest, Random) {
   for (auto& tinfo : threads) {
     auto pinfo = &tinfo;
-    tinfo.thread = std::thread(
-        [numIterations, pinfo, &barrier] () {
-          std::vector<folly::Future<bool>> futures;
-          futures.reserve(pinfo->numFutures);
-          for (uint32_t i = 0; i < numIterations; ++i, ++pinfo->iteration) {
-            futures.clear();
-            for (uint32_t j = 0; j < pinfo->numFutures; ++j) {
-              futures.push_back(barrier.wait());
-              auto nanos = folly::Random::rand32(10 * 1000 * 1000);
-              /* sleep override */
-              std::this_thread::sleep_for(std::chrono::nanoseconds(nanos));
-            }
-            auto results = folly::collect(futures).get();
-            pinfo->trueSeen[i] =
-                std::count(results.begin(), results.end(), true);
-          }
-        });
+    tinfo.thread = std::thread([pinfo, &barrier] {
+      std::vector<folly::Future<bool>> futures;
+      futures.reserve(pinfo->numFutures);
+      for (uint32_t i = 0; i < numIterations; ++i, ++pinfo->iteration) {
+        futures.clear();
+        for (uint32_t j = 0; j < pinfo->numFutures; ++j) {
+          futures.push_back(barrier.wait());
+          auto nanos = folly::Random::rand32(10 * 1000 * 1000);
+          /* sleep override */
+          std::this_thread::sleep_for(std::chrono::nanoseconds(nanos));
+        }
+        auto results = folly::collect(futures).get();
+        pinfo->trueSeen[i] = std::count(results.begin(), results.end(), true);
+      }
+    });
   }
   for (auto& tinfo : threads) {
......
......
@@ -618,7 +618,10 @@ TEST(Future, finishBigLambda) {
   EXPECT_EQ(bulk_data[0], 0);
   Promise<int> p;
-  auto f = p.getFuture().then([x, bulk_data](Try<int>&& t) { *x = t.value(); });
+  auto f = p.getFuture().then([x, bulk_data](Try<int>&& t) {
+    (void)bulk_data;
+    *x = t.value();
+  });
   // The callback hasn't executed
   EXPECT_EQ(0, *x);
......
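
Note on the hunk above: here the capture is kept on purpose. The test name (finishBigLambda) and the (void)bulk_data reference suggest the closure's size is the point of the test, so rather than dropping the capture, the new code touches it with a (void) cast to satisfy the warning; the Function.NonCopyableLambda hunk further down uses the same idiom for fooData. A minimal sketch of that pattern (illustrative only, not from this commit):

// Illustrative only: keep a deliberately "unused" by-value capture (e.g. to
// control the closure's size or an object's lifetime) and silence
// -Wunused-lambda-capture by referencing it with a (void) cast.
#include <array>

int main() {
  std::array<int, 1024> padding{}; // makes the closure big on purpose
  auto cb = [padding] {
    (void)padding; // the capture itself is the point; suppress the warning
    return 42;
  };
  return cb() == 42 ? 0 : 1;
}
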
......
@@ -1809,7 +1809,7 @@ TEST(EventBaseTest, LoopKeepAliveWithLoopForever) {
     /* sleep override */
     std::this_thread::sleep_for(std::chrono::milliseconds(30));
     ASSERT_FALSE(done) << "Loop terminated early";
-    ev->runInEventBaseThread([&ev, keepAlive = std::move(keepAlive) ]{});
+    ev->runInEventBaseThread([keepAlive = std::move(keepAlive)]{});
   }
   evThread.join();
......
......
@@ -307,7 +307,7 @@ TEST_F(HHWheelTimerTest, Stress) {
       runtimeouts++;
       /* sleep override */ usleep(1000);
       LOG(INFO) << "Ran " << runtimeouts << " timeouts of " << timeoutcount;
-      timeouts[i].fn = [&, i]() {
+      timeouts[i].fn = [&]() {
         runtimeouts++;
         LOG(INFO) << "Ran " << runtimeouts << " timeouts of " << timeoutcount;
       };
......
......
@@ -91,15 +91,14 @@ RandomDataHolder::RandomDataHolder(size_t sizeLog2)
   std::vector<std::thread> threads;
   threads.reserve(numThreads);
   for (size_t t = 0; t < numThreads; ++t) {
-    threads.emplace_back(
-        [this, seed, t, numThreadsLog2, sizeLog2] () {
-          std::mt19937 rng(seed + t);
-          size_t countLog2 = sizeLog2 - numThreadsLog2;
-          size_t start = size_t(t) << countLog2;
-          for (size_t i = 0; i < countLog2; ++i) {
-            this->data_[start + i] = rng();
-          }
-        });
+    threads.emplace_back([this, seed, t, sizeLog2] {
+      std::mt19937 rng(seed + t);
+      size_t countLog2 = sizeLog2 - numThreadsLog2;
+      size_t start = size_t(t) << countLog2;
+      for (size_t i = 0; i < countLog2; ++i) {
+        this->data_[start + i] = rng();
+      }
+    });
   }
   for (auto& t : threads) {
......
......
@@ -108,17 +108,15 @@ TEST(AHMIntStressTest, Test) {
   std::vector<std::thread> threads;
   for (int threadId = 0; threadId < 64; ++threadId) {
-    threads.emplace_back(
-      [objs,threadId] {
-        for (int recycles = 0; recycles < 500; ++recycles) {
-          for (int i = 0; i < 10; i++) {
-            auto val = objs->get(i);
-          }
-          objs->archive();
-        }
-      }
-    );
+    threads.emplace_back([objs] {
+      for (int recycles = 0; recycles < 500; ++recycles) {
+        for (int i = 0; i < 10; i++) {
+          auto val = objs->get(i);
+        }
+        objs->archive();
+      }
+    });
   }
   for (auto& t : threads) t.join();
......
......
@@ -133,12 +133,11 @@ TEST(AtomicIntrusiveLinkedList, Stress) {
   std::vector<std::thread> threads;
   for (size_t threadId = 0; threadId < kNumThreads; ++threadId) {
-    threads.emplace_back(
-        [threadId, kNumThreads, kNumElements, &list, &elements]() {
-          for (size_t id = 0; id < kNumElements; ++id) {
-            list.insertHead(&elements[threadId + kNumThreads * id]);
-          }
-        });
+    threads.emplace_back([threadId, &list, &elements] {
+      for (size_t id = 0; id < kNumElements; ++id) {
+        list.insertHead(&elements[threadId + kNumThreads * id]);
+      }
+    });
   }
   std::vector<size_t> ids;
......
......
@@ -322,7 +322,10 @@ TEST(Function, NonCopyableLambda) {
   (void)fooData; // suppress gcc warning about fooData not being used
   auto functor = std::bind(
-      [fooData](std::unique_ptr<int>& up) mutable { return ++*up; },
+      [fooData](std::unique_ptr<int>& up) mutable {
+        (void)fooData;
+        return ++*up;
+      },
       std::move(unique_ptr_int));
   EXPECT_EQ(901, functor());
......
......
@@ -81,7 +81,7 @@ TEST(MPMCPipeline, MultiThreaded) {
   std::vector<std::thread> threads;
   threads.reserve(numThreadsPerStage * 2 + 1);
   for (size_t i = 0; i < numThreadsPerStage; ++i) {
-    threads.emplace_back([&a, i] () {
+    threads.emplace_back([&a] {
       for (;;) {
         int val;
         auto ticket = a.blockingReadStage<0>(val);
......
@@ -97,7 +97,7 @@ TEST(MPMCPipeline, MultiThreaded) {
   }
   for (size_t i = 0; i < numThreadsPerStage; ++i) {
-    threads.emplace_back([&a, i] () {
+    threads.emplace_back([&a] {
       for (;;) {
         std::string val;
         auto ticket = a.blockingReadStage<1>(val);
......
......
@@ -44,14 +44,13 @@ TEST(SingletonThreadLocalTest, OneSingletonPerThread) {
   std::atomic<std::size_t> completedThreadCount{0};
   Synchronized<std::unordered_set<Foo*>> fooAddresses{};
   std::vector<std::thread> threads{};
-  auto threadFunction =
-      [&fooAddresses, targetThreadCount, &completedThreadCount] {
-        fooAddresses.wlock()->emplace(&FooSingletonTL::get());
-        ++completedThreadCount;
-        while (completedThreadCount < targetThreadCount) {
-          std::this_thread::yield();
-        }
-      };
+  auto threadFunction = [&fooAddresses, &completedThreadCount] {
+    fooAddresses.wlock()->emplace(&FooSingletonTL::get());
+    ++completedThreadCount;
+    while (completedThreadCount < targetThreadCount) {
+      std::this_thread::yield();
+    }
+  };
   {
     for (std::size_t threadCount{0}; threadCount < targetThreadCount;
          ++threadCount) {
......
......
@@ -106,7 +106,7 @@ static void runContended(size_t numOps, size_t numThreads) {
   SimpleBarrier runbarrier(totalthreads + 1);
   for (size_t t = 0; t < totalthreads; ++t) {
-    threads[t] = std::thread([&, t, totalthreads] {
+    threads[t] = std::thread([&, t] {
       lockstruct* lock = &locks[t % threadgroups];
       runbarrier.wait();
       for (size_t op = 0; op < numOps; op += 1) {
......
@@ -159,7 +159,7 @@ static void runFairness() {
   SimpleBarrier runbarrier(totalthreads + 1);
   for (size_t t = 0; t < totalthreads; ++t) {
-    threads[t] = std::thread([&, t, totalthreads] {
+    threads[t] = std::thread([&, t] {
       lockstruct* lock = &locks[t % threadgroups];
       long value = 0;
       std::chrono::microseconds max(0);
......
......
@@ -313,10 +313,12 @@ TEST(ThreadLocalPtr, AccessAllThreadsCounter) {
   std::atomic<int> totalAtomic(0);
   std::vector<std::thread> threads;
   for (int i = 0; i < kNumThreads; ++i) {
-    threads.push_back(std::thread([&,i]() {
+    threads.push_back(std::thread([&]() {
       stci.add(1);
       totalAtomic.fetch_add(1);
-      while (run.load()) { usleep(100); }
+      while (run.load()) {
+        usleep(100);
+      }
     }));
   }
   while (totalAtomic.load() != kNumThreads) { usleep(100); }
......