Commit 85d4e767 authored by Dan Melnic's avatar Dan Melnic Committed by Facebook GitHub Bot

Add callbacks for sized free and corresponding allocations

Summary:
Add callbacks for sized free and corresponding allocations

(Note: this ignores all push blocking failures!)

Reviewed By: simpkins

Differential Revision: D28469569

fbshipit-source-id: 7d0d4c649c3494354ae8ec7cbda3e950eb3c80ed
parent 93b54d70
......@@ -36,6 +36,30 @@
#include <folly/memory/Malloc.h>
#include <folly/memory/SanitizeAddress.h>
/*
* Callbacks that will be invoked when IOBuf allocates or frees memory.
* Note that io_buf_alloc_cb() will also be invoked when IOBuf takes ownership
* of a malloc-allocated buffer, even if it was allocated earlier by another
* part of the code.
*
* By default these are unimplemented, but programs can define these functions
* to perform their own custom logic on memory allocation. This is intended
* primarily to help programs track memory usage and possibly take action
* when thresholds are hit. Callers should generally avoid performing any
* expensive work in these callbacks, since they may be called from arbitrary
* locations in the code that use IOBuf, possibly while holding locks.
*/
#if FOLLY_HAVE_WEAK_SYMBOLS
FOLLY_ATTR_WEAK void io_buf_alloc_cb(void* /*ptr*/, size_t /*size*/) noexcept;
FOLLY_ATTR_WEAK void io_buf_free_cb(void* /*ptr*/, size_t /*size*/) noexcept;
#else
static void (*io_buf_alloc_cb)(void* /*ptr*/, size_t /*size*/) noexcept =
nullptr;
static void (*io_buf_free_cb)(void* /*ptr*/, size_t /*size*/) noexcept =
nullptr;
#endif
using std::unique_ptr;
namespace {
......@@ -167,6 +191,11 @@ void* IOBuf::operator new(size_t size) {
auto storage = static_cast<HeapStorage*>(checkedMalloc(fullSize));
new (&storage->prefix) HeapPrefix(kIOBufInUse, fullSize);
if (io_buf_alloc_cb) {
io_buf_alloc_cb(storage, fullSize);
}
return &(storage->buf);
}
......@@ -203,6 +232,9 @@ void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) noexcept {
// The storage space is now unused. Free it.
storage->prefix.HeapPrefix::~HeapPrefix();
if (FOLLY_LIKELY(size)) {
if (io_buf_free_cb) {
io_buf_free_cb(storage, size);
}
sizedFree(storage, size);
} else {
free(storage);
......@@ -305,6 +337,10 @@ unique_ptr<IOBuf> IOBuf::createCombined(std::size_t capacity) {
new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kDataInUse, mallocSize);
new (&storage->shared) SharedInfo(freeInternalBuf, storage);
if (io_buf_alloc_cb) {
io_buf_alloc_cb(storage, mallocSize);
}
auto bufAddr = reinterpret_cast<uint8_t*>(&storage->align);
uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
auto actualCapacity = size_t(storageEnd - bufAddr);
......@@ -387,6 +423,10 @@ IOBuf::IOBuf(
});
setSharedInfo(new SharedInfo(freeFn, userData));
rollback.dismiss();
if (io_buf_alloc_cb && !freeFn && capacity) {
io_buf_alloc_cb(buf, capacity);
}
}
unique_ptr<IOBuf> IOBuf::takeOwnership(
......@@ -433,6 +473,16 @@ unique_ptr<IOBuf> IOBuf::takeOwnership(
rollback.dismiss();
if (io_buf_alloc_cb) {
io_buf_alloc_cb(storage, mallocSize);
if (userData && !freeFn) {
// Even though we did not allocate the buffer, call io_buf_alloc_cb()
// since we will call io_buf_free_cb() on destruction, and we want these
// calls to be 1:1.
io_buf_alloc_cb(buf, capacity);
}
}
return result;
}
......@@ -984,10 +1034,16 @@ void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) {
void* p = buf_;
if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) {
if (io_buf_free_cb) {
io_buf_free_cb(p, reinterpret_cast<size_t>(info->userData));
}
newBuffer = static_cast<uint8_t*>(p);
newHeadroom = oldHeadroom;
// update the userData
info->userData = reinterpret_cast<void*>(newAllocatedCapacity);
if (io_buf_alloc_cb) {
io_buf_alloc_cb(newBuffer, newAllocatedCapacity);
}
}
// if xallocx failed, do nothing, fall back to malloc/memcpy/free
}
......@@ -1056,6 +1112,9 @@ void IOBuf::freeExtBuffer() noexcept {
// this will invoke free if info->userData is 0
size_t size = reinterpret_cast<size_t>(info->userData);
if (size) {
if (io_buf_free_cb) {
io_buf_free_cb(buf_, size);
}
folly::sizedFree(buf_, size);
} else {
free(buf_);
......@@ -1081,6 +1140,9 @@ void IOBuf::allocExtBuffer(
// the userData and the freeFn are nullptr here
// just store the mallocSize in userData
(*infoReturn)->userData = reinterpret_cast<void*>(mallocSize);
if (io_buf_alloc_cb) {
io_buf_alloc_cb(buf, mallocSize);
}
*bufReturn = buf;
}
......@@ -1155,6 +1217,11 @@ fbstring IOBuf::moveToFbString() {
capacity(),
AcquireMallocatedString());
if (io_buf_free_cb && sharedInfo() && sharedInfo()->userData) {
io_buf_free_cb(
writableData(), reinterpret_cast<size_t>(sharedInfo()->userData));
}
SharedInfo::invokeAndDeleteEachObserver(
observerListHead, [](auto& entry) { entry.afterReleaseExtBuffer(); });
......
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <atomic>
#include <cstdlib>
#include <memory>
#include <vector>

#include <folly/io/IOBuf.h>
#include <folly/portability/GTest.h>
// Net number of bytes currently allocated by IOBuf, as observed through the
// weak-symbol callback hooks defined below.
std::atomic<size_t> gIOBufAlloc{};

// Strong definition overriding IOBuf's weak io_buf_alloc_cb hook; invoked
// whenever IOBuf allocates (or takes ownership of) `size` bytes.
void io_buf_alloc_cb(void* /*ptr*/, size_t size) noexcept {
  gIOBufAlloc.fetch_add(size);
}

// Strong definition overriding IOBuf's weak io_buf_free_cb hook; invoked
// whenever IOBuf releases `size` bytes.
void io_buf_free_cb(void* /*ptr*/, size_t size) noexcept {
  gIOBufAlloc.fetch_sub(size);
}
namespace {
struct IOBufTestInfo {
std::unique_ptr<folly::IOBuf> ioBuf;
size_t delta{0};
void reset() { ioBuf.reset(); }
};
} // namespace
// The callbacks must account for every byte: after creating many IOBufs of
// assorted sizes and then releasing them all, the tracker has to return to
// exactly its starting value.
TEST(IOBufCB, AllocSizes) {
  std::vector<IOBufTestInfo> testInfoVec;
  const size_t initialSize = gIOBufAlloc.load();
  size_t prevSize = initialSize;

  auto allocAndAppend = [&](size_t size) {
    auto buf = folly::IOBuf::create(size);
    const size_t newSize = gIOBufAlloc.load();
    CHECK_GT(newSize, prevSize);

    const size_t delta = newSize - prevSize;
    if (delta) {
      // The callback must report at least as many bytes as were requested.
      CHECK_GE(delta, size);
    }
    prevSize = newSize;

    IOBufTestInfo info;
    info.ioBuf = std::move(buf);
    info.delta = delta;
    testInfoVec.emplace_back(std::move(info));
  };

  // Small allocations, served from IOBuf's internal/combined storage.
  for (size_t i = 0; i < 1234; i++) {
    allocAndAppend(i);
  }

  // Large allocations that require an external buffer.
  for (size_t i = 1; i < 100; i++) {
    allocAndAppend(i * 1024);
  }

  // Release everything and confirm the tracker is back where it started.
  for (auto& info : testInfoVec) {
    info.reset();
  }

  CHECK_EQ(initialSize, gIOBufAlloc.load());
}
// takeOwnership() must report the adopted buffer through io_buf_alloc_cb,
// and destroying the IOBuf must report the matching free — for both the
// factory function and the TAKE_OWNERSHIP constructor.
TEST(IOBufCB, TakeOwnership) {
  static constexpr size_t kAllocSize = 1024;
  const size_t initialSize = gIOBufAlloc.load();

  {
    auto buf = folly::IOBuf::takeOwnership(malloc(kAllocSize), kAllocSize);
    CHECK_GE(gIOBufAlloc.load(), initialSize + kAllocSize);
  }
  CHECK_EQ(initialSize, gIOBufAlloc.load());

  {
    folly::IOBuf buf(
        folly::IOBuf::TAKE_OWNERSHIP, malloc(kAllocSize), kAllocSize);
    CHECK_GE(gIOBufAlloc.load(), initialSize + kAllocSize);
  }
  CHECK_EQ(initialSize, gIOBufAlloc.load());
}
// moveToFbString() hands the buffer's memory off to the string, so the free
// callback should fire for the tracked bytes even though the memory itself
// lives on inside the fbstring.
TEST(IOBufCB, MoveToFbString) {
  static constexpr size_t kAllocSize = 1024;
  const size_t initialSize = gIOBufAlloc.load();
  LOG(INFO) << gIOBufAlloc.load();

  {
    auto buf = folly::IOBuf::takeOwnership(malloc(kAllocSize), kAllocSize);
    LOG(INFO) << gIOBufAlloc.load();
    const size_t afterAlloc = gIOBufAlloc.load();
    CHECK_GE(afterAlloc, initialSize + kAllocSize);

    auto str = buf->moveToFbString();
    LOG(INFO) << gIOBufAlloc.load();
    // Ownership moved to the string, so tracked usage must have dropped.
    CHECK_LT(gIOBufAlloc.load(), afterAlloc);
  }

  LOG(INFO) << gIOBufAlloc.load();
  CHECK_EQ(initialSize, gIOBufAlloc.load());
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment