Commit 91a2c0c0 authored by Maged Michael, committed by Facebook GitHub Bot

RequestContext: Remove read-lock-based implementation

Summary: Remove implementation based on read locking (replaced by implementation based on hazard pointers).

Reviewed By: yfeldblum

Differential Revision: D20881677

fbshipit-source-id: da89eee6a92fa34fa1c111a421bf5368873ccd57
parent 1be2277a
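For context on the change summarized above: in the retained implementation, readers access request data without taking a read lock by protecting a pointer to an immutable snapshot, while a single writer publishes replacement snapshots. The sketch below is illustrative only and is not folly's code; std::atomic_load on a shared_ptr stands in for the hazard-pointer protection provided by folly/synchronization/Hazptr.h, and the Map alias and function names are invented for the example.

#include <map>
#include <memory>
#include <shared_mutex>
#include <string>

using Map = std::map<std::string, int>;  // stand-in for the request-data map

// Removed style: every reader briefly holds a shared (read) lock.
int getWithReadLock(std::shared_mutex& mutex, const Map& data,
                    const std::string& key) {
  std::shared_lock<std::shared_mutex> guard(mutex);
  auto it = data.find(key);
  return it == data.end() ? 0 : it->second;
}

// Remaining style: readers take no lock. They protect the current snapshot
// (here by taking shared ownership; in folly by acquiring a hazard pointer)
// and read an immutable structure. A single writer swaps in a new snapshot
// under a mutex and retires the old one once no reader can still hold it.
int getLockFree(const std::shared_ptr<const Map>& current,
                const std::string& key) {
  std::shared_ptr<const Map> snapshot = std::atomic_load(&current);
  if (!snapshot) {
    return 0;
  }
  auto it = snapshot->find(key);
  return it == snapshot->end() ? 0 : it->second;
}

In both schemes the hard part is reclamation: an old snapshot may be freed only after every reader that could still dereference it has finished. Hazard pointers provide that guarantee without the per-read reference-count traffic that the shared_ptr stand-in above incurs.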
@@ -18,7 +18,6 @@
#include <folly/Synchronized.h>
#include <folly/container/F14Map.h>
#include <folly/portability/GFlags.h>
#include <folly/sorted_vector_types.h>
#include <folly/synchronization/Hazptr.h>
@@ -27,8 +26,6 @@
#include <mutex>
#include <string>
DECLARE_bool(folly_reqctx_use_hazptr);
namespace folly {
/*
@@ -72,18 +69,6 @@ struct hash<folly::RequestToken> {
namespace folly {
// - A runtime flag GFLAGS_reqctx_use_hazptr determines the
// implementation of RequestContext.
// - The flag false implementation uses sequential data structures
// protected by a read-write lock.
// - The flag true implementation uses single-writer multi-readers
// data structures protected by hazard pointers for readers and a
// lock for writers.
// - Each RequestContext instance contains a bool member useHazptr_
// (readable by a public member function useHazptr()) that indicates
// the implementation of the instance, depending on the value of the
// GFLAG at instance construction time.
// Some request context that follows an async request through a process
// Everything in the context must be thread safe
@@ -120,7 +105,6 @@ class RequestData {
}
private:
// Start shallow copy implementation details:
// For efficiency, RequestContext provides a raw ptr interface.
// To support shallow copy, we need a shared ptr.
// To keep it as safe as possible (even if a raw ptr is passed back),
@@ -131,8 +115,8 @@ class RequestData {
static constexpr int kDeleteCount = 0x1;
static constexpr int kClearCount = 0x1000;
// Reference-counting functions used by the hazptr-based implementation.
// Increment the reference count
// Reference-counting functions.
// Increment the reference count.
void acquireRef();
// Decrement the reference count. Clear only if last.
void releaseRefClearOnly();
@@ -142,27 +126,7 @@ class RequestData {
void releaseRefClearDelete();
void releaseRefClearDeleteSlow();
// Unique ptr with custom destructor, decrement the counter
// and only free if 0
struct DestructPtr {
void operator()(RequestData* ptr);
};
struct SharedPtr : public std::unique_ptr<RequestData, DestructPtr> {
SharedPtr() = default;
using std::unique_ptr<RequestData, DestructPtr>::unique_ptr;
SharedPtr(const SharedPtr& other) : SharedPtr(constructPtr(other.get())) {}
SharedPtr& operator=(const SharedPtr& other) {
return operator=(constructPtr(other.get()));
}
SharedPtr(SharedPtr&&) = default;
SharedPtr& operator=(SharedPtr&&) = default;
};
// Initialize the pseudo-shared ptr, increment the counter
static SharedPtr constructPtr(RequestData* ptr);
std::atomic<int> keepAliveCounter_{0};
// End shallow copy
};
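The hunk above removes the DestructPtr/SharedPtr helpers, whose pattern is worth spelling out: a unique_ptr whose custom deleter decrements an intrusive keep-alive counter and frees the object only when the count reaches zero, so the class can keep handing out cheap raw pointers while shallow copies share ownership. The self-contained sketch below reconstructs that pattern; the class name Counted is invented and the details are illustrative rather than folly's exact code.

#include <atomic>
#include <memory>

class Counted {
 public:
  virtual ~Counted() = default;

  // Deleter that decrements the intrusive count and frees only at zero.
  struct DestructPtr {
    void operator()(Counted* ptr) {
      if (ptr != nullptr &&
          ptr->keepAliveCounter_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
        delete ptr;  // last reference released
      }
    }
  };

  // A unique_ptr that behaves like a shared_ptr: copying goes through the
  // intrusive counter instead of a separate control block.
  struct SharedPtr : std::unique_ptr<Counted, DestructPtr> {
    SharedPtr() = default;
    using std::unique_ptr<Counted, DestructPtr>::unique_ptr;
    SharedPtr(const SharedPtr& other) : SharedPtr(constructPtr(other.get())) {}
    SharedPtr& operator=(const SharedPtr& other) {
      return operator=(constructPtr(other.get()));
    }
    SharedPtr(SharedPtr&&) = default;
    SharedPtr& operator=(SharedPtr&&) = default;
  };

  // Wrap a raw pointer in a SharedPtr, incrementing the intrusive count.
  static SharedPtr constructPtr(Counted* ptr) {
    if (ptr != nullptr) {
      ptr->keepAliveCounter_.fetch_add(1, std::memory_order_relaxed);
    }
    return SharedPtr(ptr);
  }

 private:
  std::atomic<int> keepAliveCounter_{0};
};

With the hazptr-based implementation now standing alone, the same keep-alive role is played by the explicit acquireRef/releaseRef* functions kept above, which additionally split the count so that clearing and deleting can be triggered independently.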
// If you do not call create() to create a unique request context,
@@ -260,11 +224,6 @@ class RequestContext {
void onSet();
void onUnset();
// useHazptr
FOLLY_ALWAYS_INLINE bool useHazptr() const {
return useHazptr_;
}
// The following API is used to pass the context through queues / threads.
// saveContext is called to get a shared_ptr to the context, and
// setContext is used to reset it on the other side of the queue.
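A usage sketch of the saveContext/setContext handoff described in the comment above; the worker thread and function name are invented, while RequestContextScopeGuard, saveContext, and setContext are the public folly API.

#include <folly/io/async/Request.h>
#include <memory>
#include <thread>
#include <utility>

void passContextToWorker() {
  folly::RequestContextScopeGuard guard;  // context for the calling scope

  // Capture the current context before handing work to another thread.
  std::shared_ptr<folly::RequestContext> ctx =
      folly::RequestContext::saveContext();

  std::thread worker([ctx] {
    // Install the saved context on the worker, remembering the previous one,
    // and restore it when the work is done.
    auto prev = folly::RequestContext::setContext(ctx);
    // ... code here sees the same request context as the caller ...
    folly::RequestContext::setContext(std::move(prev));
  });
  worker.join();
}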
@@ -297,11 +256,7 @@ class RequestContext {
private:
static StaticContext& getStaticContext();
static std::shared_ptr<RequestContext> setContextLock(
std::shared_ptr<RequestContext>& newCtx,
StaticContext& staticCtx);
static std::shared_ptr<RequestContext> setContextHazptr(
static std::shared_ptr<RequestContext> setContextHelper(
std::shared_ptr<RequestContext>& newCtx,
StaticContext& staticCtx);
@@ -313,30 +268,18 @@ class RequestContext {
// then return the previous context (so it can be reset later).
static std::shared_ptr<RequestContext> setShallowCopyContext();
// Similar to setContextData, except it overwrites the data
// if already set (instead of warn + reset ptr).
void overwriteContextDataLock(
const RequestToken& token,
std::unique_ptr<RequestData> data);
void overwriteContextDataLock(
const std::string& val,
std::unique_ptr<RequestData> data) {
overwriteContextDataLock(RequestToken(val), std::move(data));
}
// End shallow copy guard
// For functions with a parameter safe, if safe is true then the
// caller guarantees that there are no concurrent readers or writers
// accessing the structure.
void overwriteContextDataHazptr(
void overwriteContextData(
const RequestToken& token,
std::unique_ptr<RequestData> data,
bool safe = false);
void overwriteContextDataHazptr(
void overwriteContextData(
const std::string& val,
std::unique_ptr<RequestData> data,
bool safe = false) {
overwriteContextDataHazptr(RequestToken(val), std::move(data), safe);
overwriteContextData(RequestToken(val), std::move(data), safe);
}
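A sketch of what the safe parameter described above buys; this is illustrative code, not folly's. When the caller guarantees that no other thread can read or write the structure (for example a freshly shallow-copied context that has not been published yet), the writer can update the current map in place; otherwise it must copy, modify, and atomically publish a new version so that lock-free readers never observe a partial update. ContextSketch and its members are invented names, with shared_ptr atomics again standing in for hazard-pointer protection.

#include <map>
#include <memory>
#include <mutex>
#include <string>

class ContextSketch {
 public:
  using Map = std::map<std::string, int>;

  ContextSketch() : map_(std::make_shared<Map>()) {}

  void overwrite(const std::string& key, int value, bool safe = false) {
    std::lock_guard<std::mutex> g(writeMutex_);  // writers are serialized
    std::shared_ptr<Map> cur = std::atomic_load(&map_);
    if (safe) {
      // Caller guarantees no concurrent readers or writers: mutate in place.
      (*cur)[key] = value;
      return;
    }
    // Lock-free readers may still be traversing `cur`: never mutate it.
    auto next = std::make_shared<Map>(*cur);
    (*next)[key] = value;
    std::atomic_store(&map_, next);  // publish the new version
  }

  int read(const std::string& key) const {
    std::shared_ptr<Map> cur = std::atomic_load(&map_);
    auto it = cur->find(key);
    return it == cur->end() ? 0 : it->second;
  }

 private:
  std::mutex writeMutex_;
  std::shared_ptr<Map> map_;
};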
enum class DoSetBehaviour {
@@ -345,47 +288,23 @@
OVERWRITE,
};
bool doSetContextDataLock(
const RequestToken& token,
std::unique_ptr<RequestData>& data,
DoSetBehaviour behaviour);
bool doSetContextDataLock(
const std::string& val,
std::unique_ptr<RequestData>& data,
DoSetBehaviour behaviour) {
return doSetContextDataLock(RequestToken(val), data, behaviour);
}
bool doSetContextDataHazptr(
bool doSetContextDataHelper(
const RequestToken& token,
std::unique_ptr<RequestData>& data,
DoSetBehaviour behaviour,
bool safe = false);
bool doSetContextDataHazptr(
bool doSetContextDataHelper(
const std::string& val,
std::unique_ptr<RequestData>& data,
DoSetBehaviour behaviour,
bool safe = false) {
return doSetContextDataHazptr(RequestToken(val), data, behaviour, safe);
return doSetContextDataHelper(RequestToken(val), data, behaviour, safe);
}
// State implementation with sequential data structures protected by a
// read-write lock.
struct State {
// This must be optimized for lookup, its hot path is getContextData
// Efficiency of copying the container also matters in setShallowCopyContext
F14FastMap<RequestToken, RequestData::SharedPtr> requestData_;
// This must be optimized for iteration, its hot path is setContext
// We also use the fact that it's ordered to efficiently compute
// the difference with previous context
sorted_vector_set<RequestData*> callbackData_;
};
folly::Synchronized<State> state_;
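The removed comment above notes that callbackData_ is kept ordered so that setContext can efficiently compute the difference with the previous context. Concretely, with two sorted sets of callback pointers the onUnset/onSet work reduces to a linear-time set difference, as in the illustrative sketch below (names invented, not folly's code).

#include <algorithm>
#include <iterator>
#include <vector>

struct Callback {
  virtual ~Callback() = default;
  virtual void onSet() = 0;
  virtual void onUnset() = 0;
};

// Both vectors are sorted by pointer value, as a sorted_vector_set would be.
void switchCallbacks(const std::vector<Callback*>& prev,
                     const std::vector<Callback*>& next) {
  std::vector<Callback*> onlyPrev;
  std::vector<Callback*> onlyNext;
  std::set_difference(prev.begin(), prev.end(), next.begin(), next.end(),
                      std::back_inserter(onlyPrev));
  std::set_difference(next.begin(), next.end(), prev.begin(), prev.end(),
                      std::back_inserter(onlyNext));
  for (Callback* cb : onlyPrev) {
    cb->onUnset();  // leaving the old context
  }
  for (Callback* cb : onlyNext) {
    cb->onSet();  // entering the new context
  }
}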
// State implementation with single-writer multi-reader data
// structures protected by hazard pointers for readers and a lock
// for writers.
struct StateHazptr {
struct State {
// Hazard pointer-protected combined structure for request data
// and callbacks.
struct Combined;
@@ -393,12 +312,12 @@ class RequestContext {
std::atomic<Combined*> combined_{nullptr};
std::mutex mutex_;
StateHazptr();
StateHazptr(const StateHazptr& o);
StateHazptr(StateHazptr&&) = delete;
StateHazptr& operator=(const StateHazptr&) = delete;
StateHazptr& operator=(StateHazptr&&) = delete;
~StateHazptr();
State();
State(const State& o);
State(State&&) = delete;
State& operator=(const State&) = delete;
State& operator=(State&&) = delete;
~State();
private:
friend class RequestContext;
@@ -439,9 +358,8 @@ class RequestContext {
const RequestToken& token,
std::unique_ptr<RequestData>& data,
bool found);
}; // StateHazptr
StateHazptr stateHazptr_;
bool useHazptr_;
}; // State
State state_;
// Shallow copies keep a note of the root context
intptr_t rootId_;
};
@@ -499,23 +417,13 @@ struct ShallowCopyRequestContextScopeGuard {
const RequestToken& token,
std::unique_ptr<RequestData> data)
: ShallowCopyRequestContextScopeGuard() {
auto ctx = RequestContext::get();
if (ctx->useHazptr()) {
ctx->overwriteContextDataHazptr(token, std::move(data), true);
} else {
ctx->overwriteContextDataLock(token, std::move(data));
}
RequestContext::get()->overwriteContextData(token, std::move(data), true);
}
ShallowCopyRequestContextScopeGuard(
const std::string& val,
std::unique_ptr<RequestData> data)
: ShallowCopyRequestContextScopeGuard() {
auto ctx = RequestContext::get();
if (ctx->useHazptr()) {
ctx->overwriteContextDataHazptr(val, std::move(data), true);
} else {
ctx->overwriteContextDataLock(val, std::move(data));
}
RequestContext::get()->overwriteContextData(val, std::move(data), true);
}
~ShallowCopyRequestContextScopeGuard() {
......
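A usage sketch for the guard constructors shown above; MyData and the key string are invented, while the guard types, setContextData, and getContextData are the public folly API.

#include <folly/io/async/Request.h>
#include <memory>
#include <string>

class MyData : public folly::RequestData {
 public:
  explicit MyData(int v) : value(v) {}
  bool hasCallback() override { return false; }  // no onSet/onUnset callbacks
  int value;
};

void example() {
  folly::RequestContextScopeGuard ctxGuard;  // fresh context for this scope
  folly::RequestContext::get()->setContextData(
      "my_key", std::make_unique<MyData>(1));
  {
    // Shallow-copy the current context and overwrite a single entry; the
    // original context and its data come back when the guard is destroyed.
    folly::ShallowCopyRequestContextScopeGuard shallowGuard(
        "my_key", std::make_unique<MyData>(2));
    auto* d = dynamic_cast<MyData*>(
        folly::RequestContext::get()->getContextData("my_key"));
    (void)d;  // d->value == 2 here
  }
  // Outside the inner scope, "my_key" maps to the value 1 again.
}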
@@ -300,8 +300,8 @@ onSet 12 ns 12 ns 0 ns 12 ns
onUnset 12 ns 12 ns 0 ns 12 ns
setContext 46 ns 44 ns 1 ns 42 ns
RequestContextScopeGuard 113 ns 103 ns 3 ns 101 ns
ShallowCopyRequestC...-replace 230 ns 221 ns 4 ns 217 ns
ShallowCopyReq...-keep&replace 904 ns 893 ns 5 ns 886 ns
ShallowCopyRequestC...-replace 213 ns 201 ns 5 ns 196 ns
ShallowCopyReq...-keep&replace 883 ns 835 ns 20 ns 814 ns
============================== 10 threads ==============================
hasContextData 1 ns 1 ns 0 ns 1 ns
getContextData 2 ns 1 ns 0 ns 1 ns
@@ -309,7 +309,7 @@ onSet 2 ns 2 ns 0 ns 1 ns
onUnset 2 ns 2 ns 0 ns 1 ns
setContext 11 ns 7 ns 2 ns 5 ns
RequestContextScopeGuard 22 ns 15 ns 5 ns 11 ns
ShallowCopyRequestC...-replace 51 ns 32 ns 11 ns 24 ns
ShallowCopyReq...-keep&replace 102 ns 98 ns 2 ns 96 ns
ShallowCopyRequestC...-replace 48 ns 30 ns 11 ns 21 ns
ShallowCopyReq...-keep&replace 98 ns 93 ns 2 ns 91 ns
========================================================================
*/