Commit 1805d922 authored by Nathan Bronson, committed by Jordan DeLong

add sched_yield in RWTicketSpinLock

Summary:
This diff makes RWTicketSpinLock eventually start calling
sched_yield() during shared and aggressive exclusive lock access, to
avoid pathologies that can arise when the number of threads far
exceeds the number of actual cores.
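For context, the spin-then-yield pattern looks roughly like this (a minimal standalone sketch against a plain atomic flag, not folly's actual ticket fields; spin_then_yield_lock and the 1000-iteration threshold are illustrative only):

  #include <sched.h>
  #include <atomic>

  // Illustrative sketch: spin briefly with a CPU pause, then start
  // yielding once the spin count suggests the lock holder may not
  // even be running on a core (threads >> cores).
  inline void spin_then_yield_lock(std::atomic<bool>& locked) {
    int count = 0;
    while (locked.exchange(true, std::memory_order_acquire)) {
      asm volatile("pause");              // ease off the sibling hyperthread
      if (++count > 1000) sched_yield();  // oversubscribed: give the core away
    }
  }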

Test Plan:
1. unit tests
2. benchmark w/o + w/ diff

Reviewed By: nathan@fb.com

FB internal diff: D524897
parent 9aa33f6a
@@ -564,9 +564,16 @@ class RWTicketSpinLockT : boost::noncopyable {
   * turns.
   */
  void writeLockAggressive() {
    // sched_yield() is needed here to avoid a pathology if the number
    // of threads attempting concurrent writes is >= the number of real
    // cores allocated to this process. This is less likely than the
    // corresponding situation in lock_shared(), but we still want to
    // avoid it
    int count = 0;
    QuarterInt val = __sync_fetch_and_add(&ticket.users, 1);
    while (val != load_acquire(&ticket.write)) {
      asm volatile("pause");
      if (UNLIKELY(++count > 1000)) sched_yield();
    }
  }
@@ -578,6 +585,9 @@ class RWTicketSpinLockT : boost::noncopyable {
    // writers, so the writer has less chance to get the lock when
    // there are a lot of competing readers. The aggressive spinning
    // can help to avoid starving writers.
    //
    // We don't worry about sched_yield() here because the caller
    // has already explicitly abandoned fairness.
    while (!try_lock()) {}
  }
@@ -606,8 +616,13 @@ class RWTicketSpinLockT : boost::noncopyable {
  }
  void lock_shared() {
    // sched_yield() is important here because we can't grab the
    // shared lock if there is a pending writeLockAggressive, so we
    // need to let threads that already have a shared lock complete
    int count = 0;
    while (!LIKELY(try_lock_shared())) {
      asm volatile("pause");
      if (UNLIKELY((++count & 1023) == 0)) sched_yield();
    }
  }