Commit ba0cf877 authored by Yedidya Feldblum's avatar Yedidya Feldblum Committed by Facebook Github Bot

Formatting of FOR_EACH etc uses

Summary: [Folly] Formatting of `FOR_EACH` etc uses.

Reviewed By: Orvid

Differential Revision: D9595505

fbshipit-source-id: bdb506f8904de2577c42ada6755a2af4f3efb095
parent c07f4bb4
......@@ -338,7 +338,7 @@ create(size_t maxSize, const Config& c) {
* have an expensive default constructor for the value type this can
* noticeably speed construction time for an AHA.
*/
FOR_EACH_RANGE(i, 0, map->capacity_) {
FOR_EACH_RANGE (i, 0, map->capacity_) {
cellKeyPtr(map->cells_[i])->store(map->kEmptyKey_,
std::memory_order_relaxed);
}
......@@ -360,7 +360,7 @@ destroy(AtomicHashArray* p) {
size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * p->capacity_;
FOR_EACH_RANGE(i, 0, p->capacity_) {
FOR_EACH_RANGE (i, 0, p->capacity_) {
if (p->cells_[i].first != p->kEmptyKey_) {
p->cells_[i].~value_type();
}
......@@ -382,7 +382,7 @@ template <
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
clear() {
FOR_EACH_RANGE(i, 0, capacity_) {
FOR_EACH_RANGE (i, 0, capacity_) {
if (cells_[i].first != kEmptyKey_) {
cells_[i].~value_type();
*const_cast<KeyT*>(&cells_[i].first) = kEmptyKey_;
......
......@@ -47,7 +47,7 @@ AtomicHashMap<
subMaps_[0].store(SubMap::create(finalSizeEst, config).release(),
std::memory_order_relaxed);
auto subMapCount = kNumSubMaps_;
FOR_EACH_RANGE(i, 1, subMapCount) {
FOR_EACH_RANGE (i, 1, subMapCount) {
subMaps_[i].store(nullptr, std::memory_order_relaxed);
}
numMapsAllocated_.store(1, std::memory_order_relaxed);
......@@ -108,7 +108,7 @@ insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
auto nextMapIdx = // this maintains our state
numMapsAllocated_.load(std::memory_order_acquire);
typename SubMap::SimpleRetT ret;
FOR_EACH_RANGE(i, 0, nextMapIdx) {
FOR_EACH_RANGE (i, 0, nextMapIdx) {
// insert in each map successively. If one succeeds, we're done!
SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
ret = subMap->template insertInternal<LookupKeyT,
......@@ -248,7 +248,7 @@ AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
}
const unsigned int numMaps =
numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 1, numMaps) {
FOR_EACH_RANGE (i, 1, numMaps) {
// Check each map successively. If one succeeds, we're done!
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
ret = thisMap->template findInternal<LookupKeyT,
......@@ -308,7 +308,7 @@ AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
erase(const KeyT k) {
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
FOR_EACH_RANGE (i, 0, numMaps) {
// Check each map successively. If one succeeds, we're done!
if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
return 1;
......@@ -332,7 +332,7 @@ size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
capacity() const {
size_t totalCap(0);
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
FOR_EACH_RANGE (i, 0, numMaps) {
totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
}
return totalCap;
......@@ -353,7 +353,7 @@ size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
spaceRemaining() const {
size_t spaceRem(0);
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
FOR_EACH_RANGE (i, 0, numMaps) {
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
spaceRem += std::max(
0,
......@@ -379,7 +379,7 @@ clear() {
subMaps_[0].load(std::memory_order_relaxed)->clear();
int const numMaps = numMapsAllocated_
.load(std::memory_order_relaxed);
FOR_EACH_RANGE(i, 1, numMaps) {
FOR_EACH_RANGE (i, 1, numMaps) {
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
DCHECK(thisMap);
SubMap::destroy(thisMap);
......@@ -402,7 +402,7 @@ size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
size() const {
size_t totalSize(0);
int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 0, numMaps) {
FOR_EACH_RANGE (i, 0, numMaps) {
totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
}
return totalSize;
......
......@@ -308,7 +308,7 @@ BENCHMARK(ForEachKVMacro, iters) {
BENCHMARK_SUSPEND { setupBenchmark(iters); }
FOR_EACH_KV(k, v, bmMap) {
FOR_EACH_KV (k, v, bmMap) {
sumKeys += k;
sumValues += v;
}
......@@ -324,7 +324,7 @@ BENCHMARK(ForEachManual, iters) {
BENCHMARK(ForEachRange, iters) {
int sum = 1;
FOR_EACH_RANGE(i, 1, iters) { sum *= i; }
FOR_EACH_RANGE (i, 1, iters) { sum *= i; }
doNotOptimizeAway(sum);
}
......@@ -338,7 +338,7 @@ BENCHMARK(ForEachDescendingManual, iters) {
BENCHMARK(ForEachRangeR, iters) {
int sum = 1;
FOR_EACH_RANGE_R(i, 1U, iters) { sum *= i; }
FOR_EACH_RANGE_R (i, 1U, iters) { sum *= i; }
doNotOptimizeAway(sum);
}
......
......@@ -327,7 +327,7 @@ TEST(Foreach, ForEachRvalue) {
++n;
}
EXPECT_EQ(strlen(hello), n);
FOR_EACH_R(it, std::string(hello)) {
FOR_EACH_R (it, std::string(hello)) {
--n;
EXPECT_EQ(hello[n], *it);
}
......@@ -404,7 +404,7 @@ TEST(Foreach, ForEachEnumerate) {
int sumAA = 0;
int sumIter = 0;
int numIterations = 0;
FOR_EACH_ENUMERATE(aa, iter, vv) {
FOR_EACH_ENUMERATE (aa, iter, vv) {
sumAA += aa;
sumIter += *iter;
++numIterations;
......@@ -416,7 +416,7 @@ TEST(Foreach, ForEachEnumerate) {
vv.push_back(1);
vv.push_back(3);
vv.push_back(5);
FOR_EACH_ENUMERATE(aa, iter, vv) {
FOR_EACH_ENUMERATE (aa, iter, vv) {
sumAA += aa;
sumIter += *iter;
++numIterations;
......@@ -435,7 +435,7 @@ TEST(Foreach, ForEachEnumerateBreak) {
vv.push_back(2);
vv.push_back(4);
vv.push_back(8);
FOR_EACH_ENUMERATE(aa, iter, vv) {
FOR_EACH_ENUMERATE (aa, iter, vv) {
sumAA += aa;
sumIter += *iter;
++numIterations;
......
......@@ -196,7 +196,7 @@ BENCHMARK_DRAW_LINE();
void StringUnsplit_Gen(size_t iters, size_t joinSize) {
std::vector<fbstring> v;
BENCHMARK_SUSPEND {
FOR_EACH_RANGE(i, 0, joinSize) {
FOR_EACH_RANGE (i, 0, joinSize) {
v.push_back(to<fbstring>(rand()));
}
}
......
......@@ -308,12 +308,12 @@ TEST(Ahm, counter) {
const int mult = 10;
Counters c(numKeys);
vector<int64_t> keys;
FOR_EACH_RANGE(i, 1, numKeys) {
FOR_EACH_RANGE (i, 1, numKeys) {
keys.push_back(i);
}
vector<std::thread> threads;
for (auto key : keys) {
FOR_EACH_RANGE(i, 0, key * mult) {
FOR_EACH_RANGE (i, 0, key * mult) {
threads.push_back(std::thread([&, key] { c.increment(key); }));
}
}
......
......@@ -305,14 +305,14 @@ static StringPiece pc1 = "1234567890123456789";
void handwrittenAtoiMeasure(unsigned int n, unsigned int digits) {
auto p = pc1.subpiece(pc1.size() - digits, digits);
FOR_EACH_RANGE(i, 0, n) {
FOR_EACH_RANGE (i, 0, n) {
doNotOptimizeAway(handwrittenAtoi(p.begin(), p.end()));
}
}
void follyAtoiMeasure(unsigned int n, unsigned int digits) {
auto p = pc1.subpiece(pc1.size() - digits, digits);
FOR_EACH_RANGE(i, 0, n) {
FOR_EACH_RANGE (i, 0, n) {
doNotOptimizeAway(folly::to<int64_t>(p.begin(), p.end()));
}
}
......@@ -320,13 +320,13 @@ void follyAtoiMeasure(unsigned int n, unsigned int digits) {
void clibAtoiMeasure(unsigned int n, unsigned int digits) {
auto p = pc1.subpiece(pc1.size() - digits, digits);
assert(*p.end() == 0);
FOR_EACH_RANGE(i, 0, n) { doNotOptimizeAway(atoll(p.begin())); }
FOR_EACH_RANGE (i, 0, n) { doNotOptimizeAway(atoll(p.begin())); }
}
void lexicalCastMeasure(unsigned int n, unsigned int digits) {
auto p = pc1.subpiece(pc1.size() - digits, digits);
assert(*p.end() == 0);
FOR_EACH_RANGE(i, 0, n) {
FOR_EACH_RANGE (i, 0, n) {
doNotOptimizeAway(boost::lexical_cast<uint64_t>(p.begin()));
}
}
......@@ -369,7 +369,7 @@ unsigned u64ToAsciiTable(uint64_t value, char* dst) {
void u64ToAsciiTableBM(unsigned int n, size_t index) {
checkArrayIndex(uint64Num, index);
char buf[20];
FOR_EACH_RANGE(i, 0, n) {
FOR_EACH_RANGE (i, 0, n) {
doNotOptimizeAway(u64ToAsciiTable(uint64Num[index] + (i % 8), buf));
}
}
......@@ -399,7 +399,7 @@ unsigned u64ToAsciiClassic(uint64_t value, char* dst) {
void u64ToAsciiClassicBM(unsigned int n, size_t index) {
checkArrayIndex(uint64Num, index);
char buf[20];
FOR_EACH_RANGE(i, 0, n) {
FOR_EACH_RANGE (i, 0, n) {
doNotOptimizeAway(u64ToAsciiClassic(uint64Num[index] + (i % 8), buf));
}
}
......@@ -407,7 +407,7 @@ void u64ToAsciiClassicBM(unsigned int n, size_t index) {
void u64ToAsciiFollyBM(unsigned int n, size_t index) {
checkArrayIndex(uint64Num, index);
char buf[20];
FOR_EACH_RANGE(i, 0, n) {
FOR_EACH_RANGE (i, 0, n) {
doNotOptimizeAway(uint64ToBufferUnsafe(uint64Num[index] + (i % 8), buf));
}
}
......@@ -452,7 +452,7 @@ void i64ToStringFollyMeasureNeg(unsigned int n, size_t index) {
void u2aAppendClassicBM(unsigned int n, size_t index) {
checkArrayIndex(uint64Num, index);
string s;
FOR_EACH_RANGE(i, 0, n) {
FOR_EACH_RANGE (i, 0, n) {
// auto buf = &s.back() + 1;
char buffer[20];
s.append(buffer, u64ToAsciiClassic(uint64Num[index] + (i % 8), buffer));
......@@ -463,7 +463,7 @@ void u2aAppendClassicBM(unsigned int n, size_t index) {
void u2aAppendFollyBM(unsigned int n, size_t index) {
checkArrayIndex(uint64Num, index);
string s;
FOR_EACH_RANGE(i, 0, n) {
FOR_EACH_RANGE (i, 0, n) {
// auto buf = &s.back() + 1;
char buffer[20];
s.append(buffer, uint64ToBufferUnsafe(uint64Num[index] + (i % 8), buffer));
......@@ -477,7 +477,7 @@ struct StringIdenticalToBM {
void operator()(unsigned int n, size_t len) const {
String s;
BENCHMARK_SUSPEND { s.append(len, '0'); }
FOR_EACH_RANGE(i, 0, n) {
FOR_EACH_RANGE (i, 0, n) {
String result = to<String>(s);
doNotOptimizeAway(result.size());
}
......@@ -490,7 +490,7 @@ struct StringVariadicToBM {
void operator()(unsigned int n, size_t len) const {
String s;
BENCHMARK_SUSPEND { s.append(len, '0'); }
FOR_EACH_RANGE(i, 0, n) {
FOR_EACH_RANGE (i, 0, n) {
String result = to<String>(s, nullptr);
doNotOptimizeAway(result.size());
}
......
......@@ -258,7 +258,7 @@ void BENCHFUN(getline)(size_t iters, size_t arg) {
BENCHMARK_SUSPEND {
string line;
FOR_EACH_RANGE(i, 0, 512) {
FOR_EACH_RANGE (i, 0, 512) {
randomString(&line, arg);
lines += line;
lines += '\n';
......
......@@ -37,7 +37,7 @@ BENCHMARK(minstdrand, n) {
braces.dismiss();
FOR_EACH_RANGE(i, 0, n) { doNotOptimizeAway(rng()); }
FOR_EACH_RANGE (i, 0, n) { doNotOptimizeAway(rng()); }
}
BENCHMARK(mt19937, n) {
......@@ -47,7 +47,7 @@ BENCHMARK(mt19937, n) {
braces.dismiss();
FOR_EACH_RANGE(i, 0, n) { doNotOptimizeAway(rng()); }
FOR_EACH_RANGE (i, 0, n) { doNotOptimizeAway(rng()); }
}
#if FOLLY_HAVE_EXTRANDOM_SFMT19937
......@@ -58,7 +58,7 @@ BENCHMARK(sfmt19937, n) {
braces.dismiss();
FOR_EACH_RANGE(i, 0, n) { doNotOptimizeAway(rng()); }
FOR_EACH_RANGE (i, 0, n) { doNotOptimizeAway(rng()); }
}
#endif
......@@ -69,7 +69,7 @@ BENCHMARK(threadprng, n) {
braces.dismiss();
FOR_EACH_RANGE(i, 0, n) { doNotOptimizeAway(tprng()); }
FOR_EACH_RANGE (i, 0, n) { doNotOptimizeAway(tprng()); }
}
BENCHMARK(RandomDouble) { doNotOptimizeAway(Random::randDouble01()); }
......
......@@ -224,7 +224,7 @@ TEST(ThreadCachedInt, MultiThreadedCached) {
std::vector<std::thread> threads;
for (int i = 0; i < FLAGS_numThreads; ++i) {
threads.push_back(std::thread([&] {
FOR_EACH_RANGE(k, 0, numPerThread) {
FOR_EACH_RANGE (k, 0, numPerThread) {
++TCInt64;
}
std::atomic_fetch_add(&threadsDone, 1);
......@@ -310,7 +310,7 @@ struct ShardedAtomicInt {
int64_t readFast() {
int64_t ret = 0;
static const int numToRead = 8;
FOR_EACH_RANGE(i, 0, numToRead) {
FOR_EACH_RANGE (i, 0, numToRead) {
ret += ints_[i].load(std::memory_order_relaxed);
}
return ret * (kBuckets_ / numToRead);
......
......@@ -289,7 +289,7 @@ TEST(ThreadLocal, InterleavedDestructors) {
++thIter;
}
});
FOR_EACH_RANGE(i, 0, wVersionMax) {
FOR_EACH_RANGE (i, 0, wVersionMax) {
int thIterPrev = 0;
{
std::lock_guard<std::mutex> g(lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment