Commit 3e4e50d5 authored by Nathan Bronson, committed by facebook-github-bot-0

clang-format some code in preparation for real changes

Reviewed By: djwatson

Differential Revision: D2945770

fb-gh-sync-id: 9e4ee3b052b0bbb33ef796b8070edd24c6942589
shipit-source-id: 9e4ee3b052b0bbb33ef796b8070edd24c6942589
parent 13de7740
@@ -28,7 +28,8 @@
#include <folly/Format.h>
#include <folly/ScopeGuard.h>
-namespace folly { namespace detail {
+namespace folly {
+namespace detail {
///////////// CacheLocality
@@ -84,11 +85,11 @@ const CacheLocality& CacheLocality::system<std::atomic>() {
/// '\n', or eos.
static size_t parseLeadingNumber(const std::string& line) {
auto raw = line.c_str();
-char *end;
+char* end;
unsigned long val = strtoul(raw, &end, 10);
if (end == raw || (*end != ',' && *end != '-' && *end != '\n' && *end != 0)) {
-throw std::runtime_error(to<std::string>(
-"error parsing list '", line, "'").c_str());
+throw std::runtime_error(
+to<std::string>("error parsing list '", line, "'").c_str());
}
return val;
}
@@ -107,9 +108,10 @@ CacheLocality CacheLocality::readFromSysfsTree(
while (true) {
auto cpu = cpus.size();
std::vector<size_t> levels;
-for (size_t index = 0; ; ++index) {
-auto dir = format("/sys/devices/system/cpu/cpu{}/cache/index{}/",
-cpu, index).str();
+for (size_t index = 0;; ++index) {
+auto dir =
+format("/sys/devices/system/cpu/cpu{}/cache/index{}/", cpu, index)
+.str();
auto cacheType = mapping(dir + "type");
auto equivStr = mapping(dir + "shared_cpu_list");
if (cacheType.size() == 0 || equivStr.size() == 0) {
@@ -146,22 +148,26 @@ CacheLocality CacheLocality::readFromSysfsTree(
throw std::runtime_error("unable to load cache sharing info");
}
-std::sort(cpus.begin(), cpus.end(), [&](size_t lhs, size_t rhs) -> bool {
-// sort first by equiv class of cache with highest index, direction
-// doesn't matter. If different cpus have different numbers of
-// caches then this code might produce a sub-optimal ordering, but
-// it won't crash
-auto& lhsEquiv = equivClassesByCpu[lhs];
-auto& rhsEquiv = equivClassesByCpu[rhs];
-for (int i = std::min(lhsEquiv.size(), rhsEquiv.size()) - 1; i >= 0; --i) {
-if (lhsEquiv[i] != rhsEquiv[i]) {
-return lhsEquiv[i] < rhsEquiv[i];
-}
-}
-// break ties deterministically by cpu
-return lhs < rhs;
-});
+std::sort(cpus.begin(),
+cpus.end(),
+[&](size_t lhs, size_t rhs) -> bool {
+// sort first by equiv class of cache with highest index,
+// direction doesn't matter. If different cpus have
+// different numbers of caches then this code might produce
+// a sub-optimal ordering, but it won't crash
+auto& lhsEquiv = equivClassesByCpu[lhs];
+auto& rhsEquiv = equivClassesByCpu[rhs];
+for (int i = std::min(lhsEquiv.size(), rhsEquiv.size()) - 1;
+i >= 0;
+--i) {
+if (lhsEquiv[i] != rhsEquiv[i]) {
+return lhsEquiv[i] < rhsEquiv[i];
+}
+}
+// break ties deterministically by cpu
+return lhs < rhs;
+});
// the cpus are now sorted by locality, with neighboring entries closer
// to each other than entries that are far away. For striping we want
@@ -172,7 +178,7 @@ CacheLocality CacheLocality::readFromSysfsTree(
}
return CacheLocality{
-cpus.size(), std::move(numCachesByLevel), std::move(indexes) };
+cpus.size(), std::move(numCachesByLevel), std::move(indexes)};
}
CacheLocality CacheLocality::readFromSysfs() {
@@ -184,7 +190,6 @@ CacheLocality CacheLocality::readFromSysfs() {
});
}
CacheLocality CacheLocality::uniform(size_t numCpus) {
CacheLocality rv;
@@ -235,28 +240,26 @@ Getcpu::Func Getcpu::vdsoFunc() {
#ifdef FOLLY_TLS
/////////////// SequentialThreadId
-template<>
+template <>
std::atomic<size_t> SequentialThreadId<std::atomic>::prevId(0);
-template<>
+template <>
FOLLY_TLS size_t SequentialThreadId<std::atomic>::currentId(0);
#endif
/////////////// AccessSpreader
-template<>
-const AccessSpreader<std::atomic>
-AccessSpreader<std::atomic>::stripeByCore(
+template <>
+const AccessSpreader<std::atomic> AccessSpreader<std::atomic>::stripeByCore(
CacheLocality::system<>().numCachesByLevel.front());
-template<>
-const AccessSpreader<std::atomic>
-AccessSpreader<std::atomic>::stripeByChip(
+template <>
+const AccessSpreader<std::atomic> AccessSpreader<std::atomic>::stripeByChip(
CacheLocality::system<>().numCachesByLevel.back());
-template<>
-AccessSpreaderArray<std::atomic,128>
-AccessSpreaderArray<std::atomic,128>::sharedInstance = {};
+template <>
+AccessSpreaderArray<std::atomic, 128>
+AccessSpreaderArray<std::atomic, 128>::sharedInstance = {};
/// Always claims to be on CPU zero, node zero
static int degenerateGetcpu(unsigned* cpu, unsigned* node, void* /* unused */) {
@@ -269,7 +272,7 @@ static int degenerateGetcpu(unsigned* cpu, unsigned* node, void* /* unused */) {
return 0;
}
-template<>
+template <>
Getcpu::Func AccessSpreader<std::atomic>::pickGetcpuFunc(size_t numStripes) {
if (numStripes == 1) {
// there's no need to call getcpu if there is only one stripe.
@@ -282,5 +285,5 @@ Getcpu::Func AccessSpreader<std::atomic>::pickGetcpuFunc(size_t numStripes) {
return best ? best : &FallbackGetcpuType::getcpu;
}
}
-} } // namespace folly::detail
+}
+} // namespace folly::detail
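The readFromSysfsTree function reformatted above takes the sysfs lookup as a callable, so cache topology can be injected without reading /sys. A minimal sketch of that seam, modeled on folly's CacheLocality tests; the two-cpu fake tree and the main() harness are illustrative assumptions, not part of this commit:

    #include <map>
    #include <string>
    #include <folly/detail/CacheLocality.h>

    using folly::detail::CacheLocality;

    int main() {
      // Fake a two-cpu box: each cpu has a private L1 data cache and the
      // two cpus share a unified L2 (keys mirror the paths built in
      // readFromSysfsTree above).
      std::map<std::string, std::string> tree = {
          {"/sys/devices/system/cpu/cpu0/cache/index0/type", "Data"},
          {"/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list", "0"},
          {"/sys/devices/system/cpu/cpu0/cache/index1/type", "Unified"},
          {"/sys/devices/system/cpu/cpu0/cache/index1/shared_cpu_list", "0-1"},
          {"/sys/devices/system/cpu/cpu1/cache/index0/type", "Data"},
          {"/sys/devices/system/cpu/cpu1/cache/index0/shared_cpu_list", "1"},
          {"/sys/devices/system/cpu/cpu1/cache/index1/type", "Unified"},
          {"/sys/devices/system/cpu/cpu1/cache/index1/shared_cpu_list", "0-1"},
      };
      // An empty result means "no such file": it ends the index{} scan for a
      // cpu, and a missing index0 for cpu2 ends the cpu scan itself.
      auto locality = CacheLocality::readFromSysfsTree([&](std::string name) {
        auto iter = tree.find(name);
        return iter == tree.end() ? std::string() : iter->second;
      });
      return locality.numCpus == 2 ? 0 : 1;
    }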
@@ -30,7 +30,8 @@
#include <folly/Likely.h>
#include <folly/Portability.h>
-namespace folly { namespace detail {
+namespace folly {
+namespace detail {
// This file contains several classes that might be useful if you are
// trying to dynamically optimize cache locality: CacheLocality reads
@@ -73,7 +74,6 @@ struct CacheLocality {
/// cache and cpus with a locality index >= 16 will share the other.
std::vector<size_t> localityIndexByCpu;
/// Returns the best CacheLocality information available for the current
/// system, cached for fast access. This will be loaded from sysfs if
/// possible, otherwise it will be correct in the number of CPUs but
@@ -88,10 +88,9 @@ struct CacheLocality {
/// that transitively uses it, all components select between the default
/// sysfs implementation and a deterministic implementation by keying
/// off the type of the underlying atomic. See DeterministicScheduler.
-template <template<typename> class Atom = std::atomic>
+template <template <typename> class Atom = std::atomic>
static const CacheLocality& system();
/// Reads CacheLocality information from a tree structured like
/// the sysfs filesystem. The provided function will be evaluated
/// for each sysfs file that needs to be queried. The function
@@ -121,7 +120,8 @@
kFalseSharingRange = 128
};
-static_assert(kFalseSharingRange == 128,
+static_assert(
+kFalseSharingRange == 128,
"FOLLY_ALIGN_TO_AVOID_FALSE_SHARING should track kFalseSharingRange");
};
@@ -143,7 +143,7 @@ struct Getcpu {
};
#ifdef FOLLY_TLS
-template <template<typename> class Atom>
+template <template <typename> class Atom>
struct SequentialThreadId {
/// Returns the thread id assigned to the current thread
@@ -197,7 +197,7 @@ typedef FallbackGetcpu<SequentialThreadId<std::atomic>> FallbackGetcpuType;
typedef FallbackGetcpu<HashingThreadId> FallbackGetcpuType;
#endif
-template <template<typename> class Atom, size_t kMaxCpus>
+template <template <typename> class Atom, size_t kMaxCpus>
struct AccessSpreaderArray;
/// AccessSpreader arranges access to a striped data structure in such a
@@ -239,7 +239,7 @@ struct AccessSpreaderArray;
/// testing. See DeterministicScheduler for more. If you aren't using
/// DeterministicScheduler, you can just use the default template parameter
/// all of the time.
-template <template<typename> class Atom = std::atomic>
+template <template <typename> class Atom = std::atomic>
struct AccessSpreader {
/// Returns a never-destructed shared AccessSpreader instance.
@@ -249,8 +249,8 @@ struct AccessSpreader {
assert(numStripes > 0);
// the last shared element handles all large sizes
-return AccessSpreaderArray<Atom,kMaxCpus>::sharedInstance[
-std::min(size_t(kMaxCpus), numStripes)];
+return AccessSpreaderArray<Atom, kMaxCpus>::sharedInstance[std::min(
+size_t(kMaxCpus), numStripes)];
}
/// Returns the stripe associated with the current CPU, assuming
@@ -271,19 +271,18 @@
/// to see its width, or stripeByChip.current() to get the current stripe
static const AccessSpreader stripeByChip;
/// Constructs an AccessSpreader that will return values from
/// 0 to numStripes-1 (inclusive), precomputing the mapping
/// from CPU to stripe. There is no use in having more than
/// CacheLocality::system<Atom>().localityIndexByCpu.size() stripes or
/// kMaxCpus stripes
-explicit AccessSpreader(size_t spreaderNumStripes,
-const CacheLocality& cacheLocality =
-CacheLocality::system<Atom>(),
-Getcpu::Func getcpuFunc = nullptr)
-: getcpuFunc_(getcpuFunc ? getcpuFunc : pickGetcpuFunc(spreaderNumStripes))
-, numStripes_(spreaderNumStripes)
-{
+explicit AccessSpreader(
+size_t spreaderNumStripes,
+const CacheLocality& cacheLocality = CacheLocality::system<Atom>(),
+Getcpu::Func getcpuFunc = nullptr)
+: getcpuFunc_(getcpuFunc ? getcpuFunc
+: pickGetcpuFunc(spreaderNumStripes)),
+numStripes_(spreaderNumStripes) {
auto n = cacheLocality.numCpus;
for (size_t cpu = 0; cpu < kMaxCpus && cpu < n; ++cpu) {
auto index = cacheLocality.localityIndexByCpu[cpu];
@@ -300,9 +299,7 @@ struct AccessSpreader {
/// Returns 1 more than the maximum value that can be returned from
/// current()
-size_t numStripes() const {
-return numStripes_;
-}
+size_t numStripes() const { return numStripes_; }
/// Returns the stripe associated with the current CPU
size_t current() const {
@@ -312,7 +309,6 @@
}
private:
/// If there are more cpus than this nothing will crash, but there
/// might be unnecessary sharing
enum { kMaxCpus = 128 };
@@ -320,10 +316,9 @@ struct AccessSpreader {
typedef uint8_t CompactStripe;
static_assert((kMaxCpus & (kMaxCpus - 1)) == 0,
"kMaxCpus should be a power of two so modulo is fast");
"kMaxCpus should be a power of two so modulo is fast");
static_assert(kMaxCpus - 1 <= std::numeric_limits<CompactStripe>::max(),
"stripeByCpu element type isn't wide enough");
"stripeByCpu element type isn't wide enough");
/// Points to the getcpu-like function we are using to obtain the
/// current cpu. It should not be assumed that the returned cpu value
@@ -343,13 +338,12 @@ struct AccessSpreader {
static Getcpu::Func pickGetcpuFunc(size_t numStripes);
};
-template<>
+template <>
Getcpu::Func AccessSpreader<std::atomic>::pickGetcpuFunc(size_t);
/// An array of kMaxCpus+1 AccessSpreader<Atom> instances constructed
/// with default params, with the zero-th element having 1 stripe
-template <template<typename> class Atom, size_t kMaxStripe>
+template <template <typename> class Atom, size_t kMaxStripe>
struct AccessSpreaderArray {
AccessSpreaderArray() {
@@ -365,18 +359,16 @@ struct AccessSpreaderArray {
}
}
-AccessSpreader<Atom> const& operator[] (size_t index) const {
+AccessSpreader<Atom> const& operator[](size_t index) const {
return *static_cast<AccessSpreader<Atom> const*>(
-static_cast<void const*>(raw + index));
+static_cast<void const*>(raw + index));
}
private:
// AccessSpreader uses sharedInstance
friend AccessSpreader<Atom>;
-static AccessSpreaderArray<Atom,kMaxStripe> sharedInstance;
+static AccessSpreaderArray<Atom, kMaxStripe> sharedInstance;
/// aligned_storage is uninitialized, we use placement new since there
/// is no AccessSpreader default constructor
@@ -384,7 +376,7 @@ struct AccessSpreaderArray {
CacheLocality::kFalseSharingRange>::type
raw[kMaxStripe + 1];
};
-} }
+}
+}
#endif /* FOLLY_DETAIL_CacheLocality_H_ */
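The AccessSpreader API touched above (shared(), current(), numStripes()) is built for striping access to shared data. A hedged sketch of one plausible use; StripedCounter, bump(), and the stripe count are invented for illustration, and a production version would pad each slot to CacheLocality::kFalseSharingRange:

    #include <array>
    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <folly/detail/CacheLocality.h>

    using folly::detail::AccessSpreader;

    // Hypothetical striped counter: nearby cpus land on the same slot, so
    // most increments stay within one cache's coherence domain.
    struct StripedCounter {
      static constexpr size_t kStripes = 4;
      std::array<std::atomic<uint64_t>, kStripes> slots_{};

      void bump() {
        // shared(n) is the never-destructed spreader with min(n, kMaxCpus)
        // stripes; current() maps the calling cpu to one of its stripes.
        auto& spreader = AccessSpreader<>::shared(kStripes);
        slots_[spreader.current()].fetch_add(1, std::memory_order_relaxed);
      }

      uint64_t read() const {
        uint64_t sum = 0;
        for (auto& slot : slots_) {
          sum += slot.load(std::memory_order_relaxed);
        }
        return sum;
      }
    };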
@@ -91,9 +91,10 @@ struct UniformSubset {
void adjustPermSize(size_t numActive) {
if (perm_.size() > numActive) {
-perm_.erase(std::remove_if(perm_.begin(), perm_.end(), [=](size_t x) {
-return x >= numActive;
-}), perm_.end());
+perm_.erase(std::remove_if(perm_.begin(),
+perm_.end(),
+[=](size_t x) { return x >= numActive; }),
+perm_.end());
} else {
while (perm_.size() < numActive) {
perm_.push_back(perm_.size());
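The adjustPermSize call rewrapped above is the standard erase-remove idiom. A standalone sketch of the same operation, with dropAtOrAbove as an illustrative name rather than anything from this commit:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Keep only entries below numActive: remove_if compacts the survivors
    // to the front and returns the new logical end; erase then trims the
    // stale tail in a single pass.
    void dropAtOrAbove(std::vector<size_t>& perm, size_t numActive) {
      perm.erase(std::remove_if(perm.begin(),
                                perm.end(),
                                [=](size_t x) { return x >= numActive; }),
                 perm.end());
    }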
@@ -297,18 +298,18 @@ FutexResult Futex<DeterministicAtomic>::futexWaitImpl(
char const* resultStr = "?";
switch (result) {
-case FutexResult::AWOKEN:
-resultStr = "AWOKEN";
-break;
-case FutexResult::TIMEDOUT:
-resultStr = "TIMEDOUT";
-break;
-case FutexResult::INTERRUPTED:
-resultStr = "INTERRUPTED";
-break;
-case FutexResult::VALUE_CHANGED:
-resultStr = "VALUE_CHANGED";
-break;
+case FutexResult::AWOKEN:
+resultStr = "AWOKEN";
+break;
+case FutexResult::TIMEDOUT:
+resultStr = "TIMEDOUT";
+break;
+case FutexResult::INTERRUPTED:
+resultStr = "INTERRUPTED";
+break;
+case FutexResult::VALUE_CHANGED:
+resultStr = "VALUE_CHANGED";
+break;
}
FOLLY_TEST_DSCHED_VLOG(this << ".futexWait(" << std::hex << expected
<< ", .., " << std::hex << waitMask << ") -> "