Commit 65105e81 authored by Yedidya Feldblum, committed by Facebook Github Bot

Switch away from using the legacy Synchronized macros

Summary: [Folly] Switch away from using the legacy `Synchronized` macros within Folly.

Reviewed By: simpkins

Differential Revision: D7118324

fbshipit-source-id: 328ecdd572d84bb99a28ddb8689bdc4ae08421a6
parent 2d40a641
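For readers unfamiliar with the `folly::Synchronized` API involved, the migration replaces the statement-like `SYNCHRONIZED`/`SYNCHRONIZED_CONST` macros with the explicit lock methods `wlock()`/`rlock()` and the lambda-based `withWLock()`/`withRLock()`. The sketch below illustrates the pattern only; `CallbackRegistry` and its members are hypothetical stand-ins, not code from this diff, and the legacy macros appear only in comments.

#include <vector>

#include <folly/Synchronized.h>

// Hypothetical example type; not part of this commit.
struct CallbackRegistry {
  folly::Synchronized<std::vector<int>> values_;

  void add(int v) {
    // Legacy: SYNCHRONIZED(values_) { values_.push_back(v); }
    // Single-statement critical sections read naturally with wlock():
    values_.wlock()->push_back(v);
  }

  void addAll(const std::vector<int>& vs) {
    // Multi-statement critical sections take a lambda via withWLock():
    values_.withWLock([&](auto& values) {
      for (int v : vs) {
        values.push_back(v);
      }
    });
  }

  int sum() const {
    // Legacy: SYNCHRONIZED_CONST(values_) { ... }
    // Read-only access takes the shared lock via withRLock()/rlock():
    return values_.withRLock([](auto const& values) {
      int total = 0;
      for (int v : values) {
        total += v;
      }
      return total;
    });
  }
};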
@@ -119,7 +119,7 @@ void throwHandler(void*, std::type_info* exType, void (*)(void*)) noexcept {
   auto exceptionId =
       folly::hash::SpookyHashV2::Hash64(frames, (n + 1) * sizeof(frames[0]), 0);
-  SYNCHRONIZED(holder, gExceptionStats->statsHolder) {
+  gExceptionStats->statsHolder.withWLock([&](auto& holder) {
     auto it = holder.find(exceptionId);
     if (it != holder.end()) {
       ++it->second.count;
@@ -129,7 +129,7 @@ void throwHandler(void*, std::type_info* exType, void (*)(void*)) noexcept {
       info.frames.assign(frames + 1, frames + 1 + n);
       holder.emplace(exceptionId, ExceptionStats{1, std::move(info)});
     }
-  }
+  });
 }
 
 struct Initializer {
......
@@ -47,18 +47,15 @@ template <typename Function>
 class CallbackHolder {
  public:
   void registerCallback(Function f) {
-    SYNCHRONIZED(callbacks_) {
-      callbacks_.push_back(std::move(f));
-    }
+    callbacks_.wlock()->push_back(std::move(f));
   }
 
   // always inline to enforce kInternalFramesNumber
   template <typename... Args>
   FOLLY_ALWAYS_INLINE void invoke(Args... args) {
-    SYNCHRONIZED_CONST(callbacks_) {
-      for (auto& cb : callbacks_) {
-        cb(args...);
-      }
+    auto callbacksLock = callbacks_.rlock();
+    for (auto& cb : *callbacksLock) {
+      cb(args...);
     }
   }
......
@@ -99,14 +99,14 @@ class ThreadLocalCache {
   static void erase(EventBaseT& evb) {
     for (auto& localInstance : instance().accessAllThreads()) {
-      SYNCHRONIZED(info, localInstance.eraseInfo_) {
+      localInstance.eraseInfo_.withWLock([&](auto& info) {
         if (info.eraseList.size() >= kEraseListMaxSize) {
          info.eraseAll = true;
        } else {
          info.eraseList.push_back(&evb);
        }
        localInstance.eraseRequested_ = true;
-      }
+      });
    }
  }
@@ -143,7 +143,7 @@ class ThreadLocalCache {
       return;
     }
-    SYNCHRONIZED(info, eraseInfo_) {
+    eraseInfo_.withWLock([&](auto& info) {
       if (info.eraseAll) {
         map_.clear();
       } else {
@@ -155,7 +155,7 @@
       info.eraseList.clear();
       info.eraseAll = false;
       eraseRequested_ = false;
-    }
+    });
   }
 
   std::unordered_map<EventBaseT*, FiberManager*> map_;
......
@@ -90,9 +90,7 @@ class StackCache {
 
     auto p = freeList_.back().first;
     if (!freeList_.back().second) {
       PCHECK(0 == ::mprotect(p, pagesize(), PROT_NONE));
-      SYNCHRONIZED(pages, protectedPages()) {
-        pages.insert(reinterpret_cast<intptr_t>(p));
-      }
+      protectedPages().wlock()->insert(reinterpret_cast<intptr_t>(p));
     }
     freeList_.pop_back();
@@ -132,25 +130,25 @@
 
   ~StackCache() {
     assert(storage_);
-    SYNCHRONIZED(pages, protectedPages()) {
+    protectedPages().withWLock([&](auto& pages) {
       for (const auto& item : freeList_) {
         pages.erase(reinterpret_cast<intptr_t>(item.first));
       }
-    }
+    });
     PCHECK(0 == ::munmap(storage_, allocSize_ * kNumGuarded));
   }
 
   static bool isProtected(intptr_t addr) {
     // Use a read lock for reading.
-    SYNCHRONIZED_CONST(pages, protectedPages()) {
+    return protectedPages().withRLock([&](auto const& pages) {
       for (const auto& page : pages) {
         intptr_t pageEnd = intptr_t(page + pagesize());
         if (page <= addr && addr < pageEnd) {
           return true;
         }
       }
-    }
-    return false;
+      return false;
+    });
   }
 
  private:
......
@@ -20,23 +20,24 @@ namespace fibers {
 
 bool Semaphore::signalSlow() {
   // If we signalled a release, notify the waitlist
-  SYNCHRONIZED(waitList_) {
-    auto testVal = tokens_.load(std::memory_order_acquire);
-    if (testVal != 0) {
-      return false;
-    }
+  auto waitListLock = waitList_.wlock();
+  auto& waitList = *waitListLock;
 
-    if (waitList_.empty()) {
-      // If the waitlist is now empty, ensure the token count increments
-      // No need for CAS here as we will always be under the mutex
-      CHECK(tokens_.compare_exchange_strong(
-          testVal, testVal + 1, std::memory_order_relaxed));
-    } else {
-      // trigger waiter if there is one
-      waitList_.front()->post();
-      waitList_.pop();
-    }
-  } // SYNCHRONIZED(waitList_)
+  auto testVal = tokens_.load(std::memory_order_acquire);
+  if (testVal != 0) {
+    return false;
+  }
+
+  if (waitList.empty()) {
+    // If the waitlist is now empty, ensure the token count increments
+    // No need for CAS here as we will always be under the mutex
+    CHECK(tokens_.compare_exchange_strong(
+        testVal, testVal + 1, std::memory_order_relaxed));
+  } else {
+    // trigger waiter if there is one
+    waitList.front()->post();
+    waitList.pop();
+  }
 
   return true;
 }
@@ -59,13 +60,16 @@ bool Semaphore::waitSlow() {
   // Slow path, create a baton and acquire a mutex to update the wait list
   folly::fibers::Baton waitBaton;
 
-  SYNCHRONIZED(waitList_) {
+  {
+    auto waitListLock = waitList_.wlock();
+    auto& waitList = *waitListLock;
+
     auto testVal = tokens_.load(std::memory_order_acquire);
     if (testVal != 0) {
       return false;
     }
     // prepare baton and add to queue
-    waitList_.push(&waitBaton);
+    waitList.push(&waitBaton);
   }
   // If we managed to create a baton, wait on it
   // This has to be done here so the mutex has been released
......
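A note on the Semaphore hunks above: `withWLock()` runs the critical section inside a lambda, so a `return` there cannot exit the enclosing function, and the lock is only released when the lambda finishes. That is why `signalSlow()` and `waitSlow()` switch to an explicit `wlock()` guard instead, with `waitSlow()` putting it in its own scope so the mutex is released before the baton wait. A minimal sketch of that pattern, using hypothetical names (`WaitQueue`, `kMaxWaiters`) rather than the actual Semaphore members:

#include <cstddef>
#include <queue>

#include <folly/Synchronized.h>
#include <folly/fibers/Baton.h>

// Hypothetical illustration of the "explicit wlock() in a scope" pattern.
class WaitQueue {
 public:
  bool enqueueAndWait() {
    folly::fibers::Baton waitBaton;
    {
      auto waitListLock = waitList_.wlock();
      if (waitListLock->size() >= kMaxWaiters) {
        // An early return from inside a withWLock() lambda would only leave
        // the lambda; with an explicit guard it leaves the whole function.
        return false;
      }
      waitListLock->push(&waitBaton);
    } // LockedPtr destroyed here: the mutex is released before we block
    waitBaton.wait(); // wait without holding the lock
    return true;
  }

 private:
  static constexpr size_t kMaxWaiters = 1024;
  folly::Synchronized<std::queue<folly::fibers::Baton*>> waitList_;
};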
@@ -43,17 +43,13 @@ void EventBaseLocalBase::erase(EventBase& evb) {
   evb.localStorage_.erase(key_);
   evb.localStorageToDtor_.erase(this);
 
-  SYNCHRONIZED(eventBases_) {
-    eventBases_.erase(&evb);
-  }
+  eventBases_.wlock()->erase(&evb);
 }
 
 void EventBaseLocalBase::onEventBaseDestruction(EventBase& evb) {
   evb.dcheckIsInEventBaseThread();
 
-  SYNCHRONIZED(eventBases_) {
-    eventBases_.erase(&evb);
-  }
+  eventBases_.wlock()->erase(&evb);
 }
 
 void EventBaseLocalBase::setVoid(EventBase& evb, std::shared_ptr<void>&& ptr) {
......