Commit 09e27c74 authored by Dylan Yudaken, committed by Facebook GitHub Bot

remove unneeded spinlock

Summary: This spin lock is not required, and it guards quite a complex region that could easily fall back to the slow path

Reviewed By: andriigrynenko

Differential Revision: D22839268

fbshipit-source-id: 045a3562a2d4358b4c95e155a4b9958b2b7f67d4
parent 737ccb70
@@ -276,25 +276,25 @@ class CacheManager {
   std::unique_ptr<StackCacheEntry> getStackCache(
       size_t stackSize,
       size_t guardPagesPerStack) {
-    std::lock_guard<folly::SpinLock> lg(lock_);
-    if (inUse_ < kMaxInUse) {
-      ++inUse_;
-      return std::make_unique<StackCacheEntry>(stackSize, guardPagesPerStack);
-    }
-    return nullptr;
+    auto used = inUse_.load(std::memory_order_relaxed);
+    do {
+      if (used >= kMaxInUse) {
+        return nullptr;
+      }
+    } while (!inUse_.compare_exchange_weak(
+        used, used + 1, std::memory_order_acquire, std::memory_order_relaxed));
+    return std::make_unique<StackCacheEntry>(stackSize, guardPagesPerStack);
   }

  private:
-  folly::SpinLock lock_;
-  size_t inUse_{0};
+  std::atomic<size_t> inUse_{0};

   friend class StackCacheEntry;

   void giveBack(std::unique_ptr<StackCache> /* stackCache_ */) {
-    std::lock_guard<folly::SpinLock> lg(lock_);
-    assert(inUse_ > 0);
-    --inUse_;
+    FOLLY_MAYBE_UNUSED auto wasUsed =
+        inUse_.fetch_sub(1, std::memory_order_release);
+    assert(wasUsed > 0);
     /* Note: we can add a free list for each size bucket
        if stack re-use is important.
        In this case this needs to be a folly::Singleton
        ...
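For readers unfamiliar with the pattern, here is a minimal standalone sketch of what the new code does: a capped counter that is claimed with a compare_exchange_weak loop and returned with fetch_sub. The class and constant names (BoundedBudget, kMax) are illustrative only and do not appear in folly.

#include <atomic>
#include <cassert>
#include <cstddef>

// Illustrative sketch (not folly code): a bounded counter with the same
// acquire/release structure as CacheManager::getStackCache / giveBack.
class BoundedBudget {
 public:
  // Claim one slot; returns false once kMax slots are in use, letting the
  // caller fall back to a slow path instead of contending on a lock.
  bool tryAcquire() {
    auto used = inUse_.load(std::memory_order_relaxed);
    do {
      if (used >= kMax) {
        return false;
      }
      // On failure, compare_exchange_weak reloads `used`, so the bound is
      // re-checked against the freshly observed value before retrying.
    } while (!inUse_.compare_exchange_weak(
        used, used + 1, std::memory_order_acquire, std::memory_order_relaxed));
    return true;
  }

  // Return a previously claimed slot.
  void release() {
    auto wasUsed = inUse_.fetch_sub(1, std::memory_order_release);
    assert(wasUsed > 0);
    (void)wasUsed; // silence unused-variable warnings in NDEBUG builds
  }

 private:
  static constexpr size_t kMax = 100; // arbitrary cap for the sketch
  std::atomic<size_t> inUse_{0};
};

The acquire ordering on a successful increment pairs with the release ordering in release(), so writes made while a slot was held happen-before the slot being observed as free again.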