Commit 2aa61de1 authored by Anton Likhtarov, committed by Sara Golemon

Add a guard page to a limited number of stacks

Summary:
GuardPageAllocator now maintains a process-wide cache of preallocated
stacks with guard pages.  We can't create too many of these, since each
guarded stack costs two memory mappings.  Once we run out of preallocated
stacks, we simply fall back to the default allocator.
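
For context, the guard-page trick itself is the standard one; a minimal sketch of the core idea (ours, not part of this commit), assuming POSIX mmap/mprotect:

// Minimal illustration of a guarded stack (not the committed code).
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstddef>

unsigned char* allocateGuardedStack(size_t pages) {
  const size_t pagesize = sysconf(_SC_PAGESIZE);
  // Reserve the stack plus one extra page below it (stacks grow downwards).
  void* p = ::mmap(nullptr, (pages + 1) * pagesize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(p != MAP_FAILED);
  // Make the lowest page inaccessible: running off the end of the stack now
  // faults (SIGSEGV) instead of silently corrupting adjacent memory.
  ::mprotect(p, pagesize, PROT_NONE);
  return static_cast<unsigned char*>(p) + pagesize;  // lowest usable address
}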

Test Plan:
unit tests

perflab
TAO unit tests

Reviewed By: pavlo@fb.com

Subscribers: zhuohuang, trunkagent, sameen, folly-diffs@, yfeldblum, chalfant

FB internal diff: D2129510

Tasks: 7319041

Signature: t1:2129510:1433542031:324942af3c9813344c1b331ee2c9b66c4bfa3f03
parent db57efd0
--- a/folly/Makefile.am
+++ b/folly/Makefile.am
@@ -91,7 +91,6 @@ nobase_follyinclude_HEADERS = \
 	experimental/fibers/ForEach-inl.h \
 	experimental/fibers/GenericBaton.h \
 	experimental/fibers/GuardPageAllocator.h \
-	experimental/fibers/GuardPageAllocator-inl.h \
 	experimental/fibers/LoopController.h \
 	experimental/fibers/Promise.h \
 	experimental/fibers/Promise-inl.h \
@@ -400,6 +399,7 @@ libfolly_la_SOURCES = \
 	experimental/fibers/Fiber.cpp \
 	experimental/fibers/FiberManager.cpp \
 	experimental/fibers/FiberManagerMap.cpp \
+	experimental/fibers/GuardPageAllocator.cpp \
 	experimental/fibers/TimeoutController.cpp \
 	experimental/FunctionScheduler.cpp \
 	experimental/io/FsUtil.cpp \
--- a/folly/experimental/fibers/FiberManager.h
+++ b/folly/experimental/fibers/FiberManager.h
@@ -32,11 +32,8 @@
 #include <folly/experimental/ExecutionObserver.h>
 #include <folly/experimental/fibers/BoostContextCompatibility.h>
 #include <folly/experimental/fibers/Fiber.h>
-#include <folly/experimental/fibers/traits.h>
-#ifdef USE_GUARD_ALLOCATOR
 #include <folly/experimental/fibers/GuardPageAllocator.h>
-#endif
+#include <folly/experimental/fibers/traits.h>

 namespace folly { namespace fibers {
@@ -317,13 +314,7 @@ class FiberManager : public ::folly::Executor {
   * Allocator used to allocate stack for Fibers in the pool.
   * Allocates stack on the stack of the main context.
   */
-#ifdef USE_GUARD_ALLOCATOR
-  /* This is too slow for production use; can be fixed
-     if we allocated all stack storage once upfront */
   GuardPageAllocator stackAllocator_;
-#else
-  std::allocator<unsigned char> stackAllocator_;
-#endif

   const Options options_; /**< FiberManager options */
--- a/folly/experimental/fibers/GuardPageAllocator-inl.h
+++ /dev/null
/*
* Copyright 2015 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include <sys/mman.h>
#include <unistd.h>

#include <cassert>

#include <glog/logging.h>

namespace folly { namespace fibers {

namespace {

size_t pagesize() {
  static const size_t pagesize = sysconf(_SC_PAGESIZE);
  return pagesize;
}

/* Returns a multiple of pagesize() large enough to store size + one guard page */
size_t allocSize(size_t size) {
  return pagesize() * ((size + pagesize() - 1) / pagesize() + 1);
}

}
unsigned char* GuardPageAllocator::allocate(size_t size) {
  /* We allocate the minimum number of pages required, plus a guard page.
     Since we use this for stack storage, the requested allocation is aligned
     to the top of the allocated pages, while the guard page is at the bottom.

            -- increasing addresses -->
          Guard page | Normal pages
         |xxxxxxxxxx|..........|..........|
                        <------ size ----->
          return value -^
  */
  void* p = nullptr;
  PCHECK(!::posix_memalign(&p, pagesize(), allocSize(size)));

  /* Try to protect the first page
     (the stack grows downwards from the last allocated address); ignore errors */
  ::mprotect(p, pagesize(), PROT_NONE);

  /* Return a pointer to the top `size' bytes of the allocated storage */
  auto up = reinterpret_cast<unsigned char*>(p) + allocSize(size) - size;
  assert(up >= reinterpret_cast<unsigned char*>(p) + pagesize());
  return up;
}
void GuardPageAllocator::deallocate(unsigned char* up, size_t size) {
  /* Get the allocation base */
  auto p = up + size - allocSize(size);
  /* Try to unprotect the page so the memory allocator can re-use it;
     ignore errors (in case we failed to protect it in the first place) */
  ::mprotect(p, pagesize(), PROT_READ|PROT_WRITE);
  ::free(p);
}
}} // folly::fibers
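
To make the arithmetic in the deleted allocator concrete, a worked example (ours), assuming 4096-byte pages and a requested size of 10000 bytes:

    allocSize(10000) = 4096 * ((10000 + 4095) / 4096 + 1) = 4096 * (3 + 1) = 16384
    up               = p + 16384 - 10000 = p + 6384   // clears the guard page [p, p + 4096)
    deallocate:        up + 10000 - 16384 = p         // recovers the base passed to free()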
--- /dev/null
+++ b/folly/experimental/fibers/GuardPageAllocator.cpp

/*
* Copyright 2015 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "GuardPageAllocator.h"
#include <sys/mman.h>
#include <unistd.h>
#include <mutex>
#include <folly/Singleton.h>
#include <folly/SpinLock.h>
#include <glog/logging.h>
namespace folly { namespace fibers {
/**
* Each stack with a guard page creates two memory mappings.
* Since this is a limited resource, we don't want to create too many of these.
*
* The upper bound on total number of mappings created
* is kNumGuarded * kMaxInUse.
*/
/**
* Number of guarded stacks per allocator instance
*/
constexpr size_t kNumGuarded = 100;
/**
* Maximum number of allocator instances with guarded stacks enabled
*/
constexpr size_t kMaxInUse = 100;
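
/* Illustrative arithmetic (our note, not part of the original diff): with
 * both constants at 100, at most 100 * 100 = 10,000 guarded stacks exist at
 * a time. Each PROT_NONE guard page splits its cache's single anonymous
 * mmap region, so the per-stack cost is roughly the "two memory mappings"
 * cited in the summary -- on the order of 20,000 mappings in the worst
 * case, still well below Linux's default vm.max_map_count of 65530.
 */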
/**
* A cache for kNumGuarded stacks of a given size
*/
class StackCache {
 public:
  explicit StackCache(size_t stackSize) : allocSize_(allocSize(stackSize)) {
    auto p = ::mmap(nullptr, allocSize_ * kNumGuarded,
                    PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS,
                    -1, 0);
    PCHECK(p != (void*)(-1));
    storage_ = reinterpret_cast<unsigned char*>(p);

    /* Protect the bottommost page of every stack allocation */
    for (size_t i = 0; i < kNumGuarded; ++i) {
      auto allocBegin = storage_ + allocSize_ * i;
      freeList_.push_back(allocBegin);
      PCHECK(0 == ::mprotect(allocBegin, pagesize(), PROT_NONE));
    }
  }

  unsigned char* borrow(size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    if (as != allocSize_ || freeList_.empty()) {
      return nullptr;
    }

    auto p = freeList_.back();
    freeList_.pop_back();

    /* We allocate the minimum number of pages required, plus a guard page.
       Since we use this for stack storage, the requested allocation is aligned
       to the top of the allocated pages, while the guard page is at the bottom.

              -- increasing addresses -->
            Guard page | Normal pages
           |xxxxxxxxxx|..........|..........|
           <------------ allocSize_ ------->
       p -^                 <----- size ---->
                    limit -^
    */
    auto limit = p + allocSize_ - size;
    assert(limit >= p + pagesize());
    return limit;
  }

  bool giveBack(unsigned char* limit, size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    auto p = limit + size - as;
    if (p < storage_ || p >= storage_ + allocSize_ * kNumGuarded) {
      /* not mine */
      return false;
    }

    assert(as == allocSize_);
    assert((p - storage_) % allocSize_ == 0);
    freeList_.push_back(p);
    return true;
  }

  ~StackCache() {
    assert(storage_);
    PCHECK(0 == ::munmap(storage_, allocSize_ * kNumGuarded));
  }

 private:
  folly::SpinLock lock_;
  unsigned char* storage_{nullptr};
  size_t allocSize_{0};

  /**
   * LIFO free list
   */
  std::vector<unsigned char*> freeList_;

  static size_t pagesize() {
    static const size_t pagesize = sysconf(_SC_PAGESIZE);
    return pagesize;
  }

  /* Returns a multiple of pagesize() large enough to store size + one guard page */
  static size_t allocSize(size_t size) {
    return pagesize() * ((size + pagesize() - 1) / pagesize() + 1);
  }
};
class CacheManager {
 public:
  static CacheManager& instance() {
    static auto inst = new CacheManager();
    return *inst;
  }

  std::unique_ptr<StackCacheEntry> getStackCache(size_t stackSize) {
    std::lock_guard<folly::SpinLock> lg(lock_);
    if (inUse_ < kMaxInUse) {
      ++inUse_;
      return folly::make_unique<StackCacheEntry>(stackSize);
    }

    return nullptr;
  }

 private:
  folly::SpinLock lock_;
  size_t inUse_{0};

  friend class StackCacheEntry;

  void giveBack(std::unique_ptr<StackCache> stackCache_) {
    assert(inUse_ > 0);
    --inUse_;
    /* Note: we can add a free list for each size bucket
       if stack re-use is important.
       In this case this needs to be a folly::Singleton
       to make sure the free list is cleaned up on fork.

       TODO(t7351705): fix Singleton destruction order
    */
  }
};
/*
 * RAII wrapper around a StackCache that returns it to the
 * CacheManager (via giveBack()) on destruction.
 */
class StackCacheEntry {
 public:
  explicit StackCacheEntry(size_t stackSize)
      : stackCache_(folly::make_unique<StackCache>(stackSize)) {}

  StackCache& cache() const noexcept {
    return *stackCache_;
  }

  ~StackCacheEntry() {
    CacheManager::instance().giveBack(std::move(stackCache_));
  }

 private:
  std::unique_ptr<StackCache> stackCache_;
};
GuardPageAllocator::GuardPageAllocator() = default;
GuardPageAllocator::~GuardPageAllocator() = default;

unsigned char* GuardPageAllocator::allocate(size_t size) {
  /* Lazily grab a guarded-stack cache the first time we're asked for memory */
  if (!stackCache_) {
    stackCache_ = CacheManager::instance().getStackCache(size);
  }

  if (stackCache_) {
    auto p = stackCache_->cache().borrow(size);
    if (p != nullptr) {
      return p;
    }
  }
  /* Cache exhausted or unavailable: fall back to a plain heap allocation */
  return fallbackAllocator_.allocate(size);
}

void GuardPageAllocator::deallocate(unsigned char* limit, size_t size) {
  if (!(stackCache_ && stackCache_->cache().giveBack(limit, size))) {
    fallbackAllocator_.deallocate(limit, size);
  }
}

}} // folly::fibers
--- a/folly/experimental/fibers/GuardPageAllocator.h
+++ b/folly/experimental/fibers/GuardPageAllocator.h
@@ -15,18 +15,36 @@
  */
 #pragma once

+#include <memory>
+
 namespace folly { namespace fibers {

+class StackCacheEntry;
+
+/**
+ * Stack allocator that protects an extra memory page after
+ * the end of the stack.
+ * Will only add extra memory pages up to a certain number of allocations
+ * to avoid creating too many memory maps for the process.
+ */
 class GuardPageAllocator {
  public:
-  inline unsigned char* allocate(size_t size);
-  inline void deallocate(unsigned char* up, size_t size);
+  GuardPageAllocator();
+  ~GuardPageAllocator();
+
+  /**
+   * @return pointer to the bottom of the allocated stack of `size' bytes.
+   */
+  unsigned char* allocate(size_t size);
+
+  /**
+   * Deallocates the previous result of an `allocate(size)' call.
+   */
+  void deallocate(unsigned char* limit, size_t size);
+
+ private:
+  std::unique_ptr<StackCacheEntry> stackCache_;
+  std::allocator<unsigned char> fallbackAllocator_;
 };

 }} // folly::fibers
-
-#include "GuardPageAllocator-inl.h"