Commit c3ca39e2 authored by Keith Adams, committed by Alecs King

std::atomicize MicroSpinLock.

Summary:
A colleague at another company started making fun of MicroSpinLock for its
hand-rolled x86 assembly and ad hoc compiler memory barriers. Switch it over
to C++11 atomics, which weren't really a thing at the time I wrote this.
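
For readers unfamiliar with the idiom, the C++11 replacement boils down to a
compare-exchange on a std::atomic view of the lock byte instead of a hand-rolled
lock; cmpxchgb plus empty asm compiler barriers. A minimal, self-contained sketch
of that idiom follows (illustrative only; the TinyLock name is made up here and is
not folly code):

#include <atomic>
#include <cstdint>

// Illustrative byte-sized spinlock built purely from C++11 atomics, using the
// same acquire-on-lock / release-on-unlock ordering as the diff below.
struct TinyLock {
  std::atomic<uint8_t> lock_{0};  // 0 == free, 1 == locked

  bool try_lock() {
    uint8_t expected = 0;
    // Acquire on success so the critical section cannot be reordered above the
    // lock; relaxed on failure because we learned nothing worth ordering on.
    return lock_.compare_exchange_strong(expected, 1,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed);
  }

  void lock() {
    while (!try_lock()) {
      // Spin on a plain atomic load; no empty asm("" ::: "memory") is needed,
      // since the atomic load already stops the compiler from caching lock_.
      while (lock_.load(std::memory_order_relaxed) != 0) {
      }
    }
  }

  void unlock() {
    // Release so writes made while holding the lock become visible to the
    // next acquirer.
    lock_.store(0, std::memory_order_release);
  }
};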

Test Plan: folly's runtests. What else would we like?

Reviewed By: andrei.alexandrescu@fb.com

Subscribers: folly-diffs@, yfeldblum

FB internal diff: D1841563

Signature: t1:1841563:1423780458:a447c081fbd72e3420b23e95dcf26575c9a06798
parent c2c47da2
@@ -42,6 +42,7 @@
 #include <cstdlib>
 #include <pthread.h>
 #include <mutex>
+#include <atomic>
 #include <glog/logging.h>
 #include <folly/Portability.h>
@@ -102,29 +103,13 @@ namespace detail {
  */
 struct MicroSpinLock {
   enum { FREE = 0, LOCKED = 1 };
 
+  // lock_ can't be std::atomic<> to preserve POD-ness.
   uint8_t lock_;
 
-  /*
-   * Atomically move lock_ from "compare" to "newval". Return boolean
-   * success. Do not play on or around.
-   */
-  bool cas(uint8_t compare, uint8_t newVal) {
-    bool out;
-    bool memVal; // only set if the cmpxchg fails
-    asm volatile("lock; cmpxchgb %[newVal], (%[lockPtr]);"
-                 "setz %[output];"
-                 : [output] "=r" (out), "=a" (memVal)
-                 : "a" (compare),         // cmpxchgb constrains this to be in %al
-                   [newVal] "q" (newVal), // Needs to be byte-accessible
-                   [lockPtr] "r" (&lock_)
-                 : "memory", "flags");
-    return out;
-  }
-
   // Initialize this MSL.  It is unnecessary to call this if you
   // zero-initialize the MicroSpinLock.
   void init() {
-    lock_ = FREE;
+    payload()->store(FREE);
   }
 
   bool try_lock() {
@@ -134,18 +119,27 @@ struct MicroSpinLock {
   void lock() {
     detail::Sleeper sleeper;
     do {
-      while (lock_ != FREE) {
-        asm volatile("" : : : "memory");
+      while (payload()->load() != FREE) {
         sleeper.wait();
       }
     } while (!try_lock());
-    DCHECK(lock_ == LOCKED);
+    DCHECK(payload()->load() == LOCKED);
   }
 
   void unlock() {
-    CHECK(lock_ == LOCKED);
-    asm volatile("" : : : "memory");
-    lock_ = FREE; // release barrier on x86
+    CHECK(payload()->load() == LOCKED);
+    payload()->store(FREE, std::memory_order_release);
+  }
+
+ private:
+  std::atomic<uint8_t>* payload() {
+    return reinterpret_cast<std::atomic<uint8_t>*>(&this->lock_);
+  }
+
+  bool cas(uint8_t compare, uint8_t newVal) {
+    return std::atomic_compare_exchange_strong_explicit(payload(), &compare, newVal,
+                                                        std::memory_order_acquire,
+                                                        std::memory_order_relaxed);
   }
 };
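
A note on the payload() reinterpret_cast above: viewing the plain uint8_t as a
std::atomic<uint8_t> keeps the struct a 1-byte POD, but it is only sound if the
atomic wrapper adds no size or alignment and byte-sized atomics are lock-free on
the target. A sketch of compile-time checks that express those assumptions (not
part of this diff, purely illustrative):

#include <atomic>
#include <cstdint>

// Illustrative compile-time checks for the "plain byte viewed as atomic" trick.
static_assert(sizeof(std::atomic<uint8_t>) == sizeof(uint8_t),
              "atomic<uint8_t> must not add padding");
static_assert(alignof(std::atomic<uint8_t>) == alignof(uint8_t),
              "atomic<uint8_t> must not add alignment");
// ATOMIC_CHAR_LOCK_FREE == 2 means char-sized atomics are always lock-free, so
// the store/load/CAS above compile down to ordinary atomic instructions.
static_assert(ATOMIC_CHAR_LOCK_FREE == 2,
              "byte-sized atomics must be lock-free");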
...
@@ -48,10 +48,10 @@ struct LockedVal {
 // these classes are POD).
 FOLLY_PACK_PUSH
 struct ignore1 { MicroSpinLock msl; int16_t foo; } FOLLY_PACK_ATTR;
-struct ignore2 { PicoSpinLock<uint32_t> psl; int16_t foo; }
-FOLLY_PACK_ATTR;
+struct ignore2 { PicoSpinLock<uint32_t> psl; int16_t foo; } FOLLY_PACK_ATTR;
 static_assert(sizeof(ignore1) == 3, "Size check failed");
 static_assert(sizeof(ignore2) == 6, "Size check failed");
+static_assert(sizeof(MicroSpinLock) == 1, "Size check failed");
 FOLLY_PACK_POP
 
 LockedVal v;
...
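
As a usage note, the point of keeping MicroSpinLock a 1-byte POD (checked by the
new static_assert above) is that it can be zero-initialized and packed into other
PODs without running a constructor. A sketch of a typical call site (the
folly/SmallLocks.h header path and the PackedCounter struct are assumptions for
illustration, not part of this diff):

#include <cstdint>
#include <mutex>
#include <folly/SmallLocks.h>  // assumed location of MicroSpinLock

struct PackedCounter {
  folly::MicroSpinLock lock;  // 1 byte, POD: zero-init is a valid unlocked state
  uint8_t value;
};

PackedCounter c = {};  // aggregate zero-init; no explicit c.lock.init() required

void bump(PackedCounter& counter) {
  // std::lock_guard works because MicroSpinLock provides lock()/unlock().
  std::lock_guard<folly::MicroSpinLock> guard(counter.lock);
  ++counter.value;
}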