Commit 88afc741 authored by Giuseppe Ottaviano's avatar Giuseppe Ottaviano Committed by Facebook GitHub Bot

Use granular locking in SmartExceptionTracer

Summary:
`throwCallback()` holds a read lock on the global map while unwinding the stack trace, blocking all other threads from deleting (and thus throwing) exceptions, since the deleter needs a wlock.

Switch to per-`ExceptionMeta` locking (on top of the map's locks), so we can hold the global lock only for the time to update the map, and move all unwinding, allocation, deallocation, and callbacks outside of it.

Reviewed By: luciang

Differential Revision: D31905278

fbshipit-source-id: f46228dd815bfde844165dd7aee92d6c023ff049
parent b28c0ba2
...@@ -16,9 +16,11 @@ ...@@ -16,9 +16,11 @@
#include <folly/experimental/exception_tracer/SmartExceptionTracer.h> #include <folly/experimental/exception_tracer/SmartExceptionTracer.h>
#include <glog/logging.h>
#include <folly/MapUtil.h> #include <folly/MapUtil.h>
#include <folly/ScopeGuard.h> #include <folly/ScopeGuard.h>
#include <folly/Synchronized.h> #include <folly/Synchronized.h>
#include <folly/container/F14Map.h>
#include <folly/experimental/exception_tracer/ExceptionTracerLib.h> #include <folly/experimental/exception_tracer/ExceptionTracerLib.h>
#include <folly/experimental/exception_tracer/StackTrace.h> #include <folly/experimental/exception_tracer/StackTrace.h>
#include <folly/experimental/symbolizer/Symbolizer.h> #include <folly/experimental/symbolizer/Symbolizer.h>
...@@ -39,24 +41,28 @@ struct ExceptionMeta { ...@@ -39,24 +41,28 @@ struct ExceptionMeta {
StackTrace traceAsync; StackTrace traceAsync;
}; };
Synchronized<std::unordered_map<void*, ExceptionMeta>>& getMeta() { using SynchronizedExceptionMeta = folly::Synchronized<ExceptionMeta>;
auto& getMetaMap() {
// Leaky Meyers Singleton // Leaky Meyers Singleton
static Indestructible<Synchronized<std::unordered_map<void*, ExceptionMeta>>> static Indestructible<Synchronized<
folly::F14FastMap<void*, std::unique_ptr<SynchronizedExceptionMeta>>>>
meta; meta;
return *meta; return *meta;
} }
void metaDeleter(void* ex) noexcept { void metaDeleter(void* ex) noexcept {
auto deleter = getMeta().withWLock([&](auto& locked) noexcept { auto syncMeta = getMetaMap().withWLock([ex](auto& locked) noexcept {
auto iter = locked.find(ex); auto iter = locked.find(ex);
auto* innerDeleter = iter->second.deleter; auto ret = std::move(iter->second);
locked.erase(iter); locked.erase(iter);
return innerDeleter; return ret;
}); });
// If the thrown object was allocated statically it may not have a deleter. // If the thrown object was allocated statically it may not have a deleter.
if (deleter) { auto meta = syncMeta->wlock();
deleter(ex); if (meta->deleter) {
meta->deleter(ex);
} }
} }
...@@ -74,27 +80,33 @@ void throwCallback( ...@@ -74,27 +80,33 @@ void throwCallback(
SCOPE_EXIT { handlingThrow = false; }; SCOPE_EXIT { handlingThrow = false; };
handlingThrow = true; handlingThrow = true;
getMeta().withWLockPtr([&](auto wlock) noexcept { // This can allocate memory potentially causing problems in an OOM
// This can allocate memory potentially causing problems in an OOM // situation so we catch and short circuit.
// situation so we catch and short circuit. try {
try { auto newMeta = std::make_unique<SynchronizedExceptionMeta>();
auto& meta = (*wlock)[ex]; newMeta->withWLock([&deleter](auto& lockedMeta) {
auto rlock = wlock.moveFromWriteToRead();
// Override the deleter with our custom one and capture the old one. // Override the deleter with our custom one and capture the old one.
meta.deleter = std::exchange(*deleter, metaDeleter); lockedMeta.deleter = std::exchange(*deleter, metaDeleter);
ssize_t n = symbolizer::getStackTrace(meta.trace.addresses, kMaxFrames); ssize_t n =
symbolizer::getStackTrace(lockedMeta.trace.addresses, kMaxFrames);
if (n != -1) { if (n != -1) {
meta.trace.frameCount = n; lockedMeta.trace.frameCount = n;
} }
ssize_t nAsync = symbolizer::getAsyncStackTraceSafe( ssize_t nAsync = symbolizer::getAsyncStackTraceSafe(
meta.traceAsync.addresses, kMaxFrames); lockedMeta.traceAsync.addresses, kMaxFrames);
if (nAsync != -1) { if (nAsync != -1) {
meta.traceAsync.frameCount = nAsync; lockedMeta.traceAsync.frameCount = nAsync;
} }
} catch (const std::bad_alloc&) { });
}
}); auto oldMeta = getMetaMap().withWLock([ex, &newMeta](auto& wlock) {
return std::exchange(wlock[ex], std::move(newMeta));
});
DCHECK(oldMeta == nullptr);
} catch (const std::bad_alloc&) {
}
} }
struct Initialize { struct Initialize {
...@@ -111,16 +123,26 @@ ExceptionInfo getTraceWithFunc( ...@@ -111,16 +123,26 @@ ExceptionInfo getTraceWithFunc(
const std::exception& ex, ExceptionMetaFunc func) { const std::exception& ex, ExceptionMetaFunc func) {
ExceptionInfo info; ExceptionInfo info;
info.type = &typeid(ex); info.type = &typeid(ex);
getMeta().withRLock([&](auto& locked) noexcept { auto rlockedMeta = getMetaMap().withRLock(
auto* meta = get_ptr(locked, (void*)&ex); [&](const auto& locked) noexcept
// If we can't find the exception, return an empty stack trace. -> SynchronizedExceptionMeta::RLockedPtr {
if (!meta) { auto* meta = get_ptr(locked, (void*)&ex);
return; // If we can't find the exception, return an empty stack trace.
} if (!meta) {
return {};
auto [traceBeginIt, traceEndIt] = func(*meta); }
info.frames.assign(traceBeginIt, traceEndIt); CHECK(*meta);
}); // Acquire the meta rlock while holding the map's rlock, to block meta's
// destruction.
return (*meta)->rlock();
});
if (!rlockedMeta) {
return info;
}
auto [traceBeginIt, traceEndIt] = func(*rlockedMeta);
info.frames.assign(traceBeginIt, traceEndIt);
return info; return info;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment