Commit fae10d94 authored by Nathan Bronson's avatar Nathan Bronson Committed by Facebook Github Bot

uint64_t -> size_t for 32-bit support

Summary:
On 64-bit platforms uint64_t and std::size_t are the same
size, but that is not the case on 32-bit platforms such as ARM or 32-bit x86.
This diff makes folly/io 32-bit clean, at least to the point that it
compiles and passes unit tests.  There is no change to behavior on
64-bit platforms.

Reviewed By: shixiao

Differential Revision: D8559702

fbshipit-source-id: c61a5d3d0e327f36cdf9a606a48a256943efc098
parent 8fbd6d60
...@@ -572,7 +572,7 @@ class CursorBase { ...@@ -572,7 +572,7 @@ class CursorBase {
DCHECK(crtBuf_ == nullptr || crtBegin_ == crtBuf_->data()); DCHECK(crtBuf_ == nullptr || crtBegin_ == crtBuf_->data());
DCHECK( DCHECK(
crtBuf_ == nullptr || crtBuf_ == nullptr ||
(uint64_t)(crtEnd_ - crtBegin_) == crtBuf_->length()); (std::size_t)(crtEnd_ - crtBegin_) == crtBuf_->length());
} }
~CursorBase() {} ~CursorBase() {}
...@@ -968,7 +968,7 @@ typedef RWCursor<CursorAccess::UNSHARE> RWUnshareCursor; ...@@ -968,7 +968,7 @@ typedef RWCursor<CursorAccess::UNSHARE> RWUnshareCursor;
*/ */
class Appender : public detail::Writable<Appender> { class Appender : public detail::Writable<Appender> {
public: public:
Appender(IOBuf* buf, uint64_t growth) Appender(IOBuf* buf, std::size_t growth)
: buffer_(buf), crtBuf_(buf->prev()), growth_(growth) {} : buffer_(buf), crtBuf_(buf->prev()), growth_(growth) {}
uint8_t* writableData() { uint8_t* writableData() {
...@@ -991,7 +991,7 @@ class Appender : public detail::Writable<Appender> { ...@@ -991,7 +991,7 @@ class Appender : public detail::Writable<Appender> {
* Ensure at least n contiguous bytes available to write. * Ensure at least n contiguous bytes available to write.
* Postcondition: length() >= n. * Postcondition: length() >= n.
*/ */
void ensure(uint64_t n) { void ensure(std::size_t n) {
if (LIKELY(length() >= n)) { if (LIKELY(length() >= n)) {
return; return;
} }
...@@ -1095,7 +1095,7 @@ class Appender : public detail::Writable<Appender> { ...@@ -1095,7 +1095,7 @@ class Appender : public detail::Writable<Appender> {
IOBuf* buffer_; IOBuf* buffer_;
IOBuf* crtBuf_; IOBuf* crtBuf_;
uint64_t growth_; std::size_t growth_;
}; };
class QueueAppender : public detail::Writable<QueueAppender> { class QueueAppender : public detail::Writable<QueueAppender> {
...@@ -1105,10 +1105,10 @@ class QueueAppender : public detail::Writable<QueueAppender> { ...@@ -1105,10 +1105,10 @@ class QueueAppender : public detail::Writable<QueueAppender> {
* space in the queue, we grow no more than growth bytes at once * space in the queue, we grow no more than growth bytes at once
* (unless you call ensure() with a bigger value yourself). * (unless you call ensure() with a bigger value yourself).
*/ */
QueueAppender(IOBufQueue* queue, uint64_t growth) QueueAppender(IOBufQueue* queue, std::size_t growth)
: queueCache_(queue), growth_(growth) {} : queueCache_(queue), growth_(growth) {}
void reset(IOBufQueue* queue, uint64_t growth) { void reset(IOBufQueue* queue, std::size_t growth) {
queueCache_.reset(queue); queueCache_.reset(queue);
growth_ = growth; growth_ = growth;
} }
......
...@@ -46,7 +46,7 @@ enum : uint16_t { ...@@ -46,7 +46,7 @@ enum : uint16_t {
kDataInUse = 0x02, kDataInUse = 0x02,
}; };
enum : uint64_t { enum : std::size_t {
// When create() is called for buffers less than kDefaultCombinedBufSize, // When create() is called for buffers less than kDefaultCombinedBufSize,
// we allocate a single combined memory segment for the IOBuf and the data // we allocate a single combined memory segment for the IOBuf and the data
// together. See the comments for createCombined()/createSeparate() for more // together. See the comments for createCombined()/createSeparate() for more
...@@ -190,7 +190,7 @@ void IOBuf::freeInternalBuf(void* /* buf */, void* userData) { ...@@ -190,7 +190,7 @@ void IOBuf::freeInternalBuf(void* /* buf */, void* userData) {
releaseStorage(storage, kDataInUse); releaseStorage(storage, kDataInUse);
} }
IOBuf::IOBuf(CreateOp, uint64_t capacity) IOBuf::IOBuf(CreateOp, std::size_t capacity)
: next_(this), : next_(this),
prev_(this), prev_(this),
data_(nullptr), data_(nullptr),
...@@ -205,9 +205,9 @@ IOBuf::IOBuf(CreateOp, uint64_t capacity) ...@@ -205,9 +205,9 @@ IOBuf::IOBuf(CreateOp, uint64_t capacity)
IOBuf::IOBuf( IOBuf::IOBuf(
CopyBufferOp /* op */, CopyBufferOp /* op */,
const void* buf, const void* buf,
uint64_t size, std::size_t size,
uint64_t headroom, std::size_t headroom,
uint64_t minTailroom) std::size_t minTailroom)
: IOBuf(CREATE, headroom + size + minTailroom) { : IOBuf(CREATE, headroom + size + minTailroom) {
advance(headroom); advance(headroom);
if (size > 0) { if (size > 0) {
...@@ -220,11 +220,11 @@ IOBuf::IOBuf( ...@@ -220,11 +220,11 @@ IOBuf::IOBuf(
IOBuf::IOBuf( IOBuf::IOBuf(
CopyBufferOp op, CopyBufferOp op,
ByteRange br, ByteRange br,
uint64_t headroom, std::size_t headroom,
uint64_t minTailroom) std::size_t minTailroom)
: IOBuf(op, br.data(), br.size(), headroom, minTailroom) {} : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {}
unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) { unique_ptr<IOBuf> IOBuf::create(std::size_t capacity) {
// For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
// all with a single allocation. // all with a single allocation.
// //
...@@ -240,7 +240,7 @@ unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) { ...@@ -240,7 +240,7 @@ unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) {
return createSeparate(capacity); return createSeparate(capacity);
} }
unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) { unique_ptr<IOBuf> IOBuf::createCombined(std::size_t capacity) {
// To save a memory allocation, allocate space for the IOBuf object, the // To save a memory allocation, allocate space for the IOBuf object, the
// SharedInfo struct, and the data itself all with a single call to malloc(). // SharedInfo struct, and the data itself all with a single call to malloc().
size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity; size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity;
...@@ -263,13 +263,13 @@ unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) { ...@@ -263,13 +263,13 @@ unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) {
return ret; return ret;
} }
unique_ptr<IOBuf> IOBuf::createSeparate(uint64_t capacity) { unique_ptr<IOBuf> IOBuf::createSeparate(std::size_t capacity) {
return std::make_unique<IOBuf>(CREATE, capacity); return std::make_unique<IOBuf>(CREATE, capacity);
} }
unique_ptr<IOBuf> IOBuf::createChain( unique_ptr<IOBuf> IOBuf::createChain(
size_t totalCapacity, size_t totalCapacity,
uint64_t maxBufCapacity) { std::size_t maxBufCapacity) {
unique_ptr<IOBuf> out = unique_ptr<IOBuf> out =
create(std::min(totalCapacity, size_t(maxBufCapacity))); create(std::min(totalCapacity, size_t(maxBufCapacity)));
size_t allocatedCapacity = out->capacity(); size_t allocatedCapacity = out->capacity();
...@@ -287,8 +287,8 @@ unique_ptr<IOBuf> IOBuf::createChain( ...@@ -287,8 +287,8 @@ unique_ptr<IOBuf> IOBuf::createChain(
IOBuf::IOBuf( IOBuf::IOBuf(
TakeOwnershipOp, TakeOwnershipOp,
void* buf, void* buf,
uint64_t capacity, std::size_t capacity,
uint64_t length, std::size_t length,
FreeFunction freeFn, FreeFunction freeFn,
void* userData, void* userData,
bool freeOnError) bool freeOnError)
...@@ -310,8 +310,8 @@ IOBuf::IOBuf( ...@@ -310,8 +310,8 @@ IOBuf::IOBuf(
unique_ptr<IOBuf> IOBuf::takeOwnership( unique_ptr<IOBuf> IOBuf::takeOwnership(
void* buf, void* buf,
uint64_t capacity, std::size_t capacity,
uint64_t length, std::size_t length,
FreeFunction freeFn, FreeFunction freeFn,
void* userData, void* userData,
bool freeOnError) { bool freeOnError) {
...@@ -333,7 +333,7 @@ unique_ptr<IOBuf> IOBuf::takeOwnership( ...@@ -333,7 +333,7 @@ unique_ptr<IOBuf> IOBuf::takeOwnership(
} }
} }
IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity) IOBuf::IOBuf(WrapBufferOp, const void* buf, std::size_t capacity)
: IOBuf( : IOBuf(
InternalConstructor(), InternalConstructor(),
0, 0,
...@@ -347,11 +347,11 @@ IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity) ...@@ -347,11 +347,11 @@ IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity)
IOBuf::IOBuf(WrapBufferOp op, ByteRange br) : IOBuf(op, br.data(), br.size()) {} IOBuf::IOBuf(WrapBufferOp op, ByteRange br) : IOBuf(op, br.data(), br.size()) {}
unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint64_t capacity) { unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, std::size_t capacity) {
return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity); return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
} }
IOBuf IOBuf::wrapBufferAsValue(const void* buf, uint64_t capacity) { IOBuf IOBuf::wrapBufferAsValue(const void* buf, std::size_t capacity) {
return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity); return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity);
} }
...@@ -395,9 +395,9 @@ IOBuf::IOBuf( ...@@ -395,9 +395,9 @@ IOBuf::IOBuf(
InternalConstructor, InternalConstructor,
uintptr_t flagsAndSharedInfo, uintptr_t flagsAndSharedInfo,
uint8_t* buf, uint8_t* buf,
uint64_t capacity, std::size_t capacity,
uint8_t* data, uint8_t* data,
uint64_t length) std::size_t length)
: next_(this), : next_(this),
prev_(this), prev_(this),
data_(data), data_(data),
...@@ -495,8 +495,8 @@ size_t IOBuf::countChainElements() const { ...@@ -495,8 +495,8 @@ size_t IOBuf::countChainElements() const {
return numElements; return numElements;
} }
uint64_t IOBuf::computeChainDataLength() const { std::size_t IOBuf::computeChainDataLength() const {
uint64_t fullLength = length_; std::size_t fullLength = length_;
for (IOBuf* current = next_; current != this; current = current->next_) { for (IOBuf* current = next_; current != this; current = current->next_) {
fullLength += current->length_; fullLength += current->length_;
} }
...@@ -562,10 +562,10 @@ IOBuf IOBuf::cloneCoalescedAsValue() const { ...@@ -562,10 +562,10 @@ IOBuf IOBuf::cloneCoalescedAsValue() const {
return cloneOneAsValue(); return cloneOneAsValue();
} }
// Coalesce into newBuf // Coalesce into newBuf
const uint64_t newLength = computeChainDataLength(); const std::size_t newLength = computeChainDataLength();
const uint64_t newHeadroom = headroom(); const std::size_t newHeadroom = headroom();
const uint64_t newTailroom = prev()->tailroom(); const std::size_t newTailroom = prev()->tailroom();
const uint64_t newCapacity = newLength + newHeadroom + newTailroom; const std::size_t newCapacity = newLength + newHeadroom + newTailroom;
IOBuf newBuf{CREATE, newCapacity}; IOBuf newBuf{CREATE, newCapacity};
newBuf.advance(newHeadroom); newBuf.advance(newHeadroom);
...@@ -591,13 +591,13 @@ void IOBuf::unshareOneSlow() { ...@@ -591,13 +591,13 @@ void IOBuf::unshareOneSlow() {
// Allocate a new buffer for the data // Allocate a new buffer for the data
uint8_t* buf; uint8_t* buf;
SharedInfo* sharedInfo; SharedInfo* sharedInfo;
uint64_t actualCapacity; std::size_t actualCapacity;
allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity); allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity);
// Copy the data // Copy the data
// Maintain the same amount of headroom. Since we maintained the same // Maintain the same amount of headroom. Since we maintained the same
// minimum capacity we also maintain at least the same amount of tailroom. // minimum capacity we also maintain at least the same amount of tailroom.
uint64_t headlen = headroom(); std::size_t headlen = headroom();
if (length_ > 0) { if (length_ > 0) {
assert(data_ != nullptr); assert(data_ != nullptr);
memcpy(buf + headlen, data_, length_); memcpy(buf + headlen, data_, length_);
...@@ -664,7 +664,7 @@ void IOBuf::coalesceSlow() { ...@@ -664,7 +664,7 @@ void IOBuf::coalesceSlow() {
DCHECK(isChained()); DCHECK(isChained());
// Compute the length of the entire chain // Compute the length of the entire chain
uint64_t newLength = 0; std::size_t newLength = 0;
IOBuf* end = this; IOBuf* end = this;
do { do {
newLength += end->length_; newLength += end->length_;
...@@ -683,7 +683,7 @@ void IOBuf::coalesceSlow(size_t maxLength) { ...@@ -683,7 +683,7 @@ void IOBuf::coalesceSlow(size_t maxLength) {
DCHECK_LT(length_, maxLength); DCHECK_LT(length_, maxLength);
// Compute the length of the entire chain // Compute the length of the entire chain
uint64_t newLength = 0; std::size_t newLength = 0;
IOBuf* end = this; IOBuf* end = this;
while (true) { while (true) {
newLength += end->length_; newLength += end->length_;
...@@ -708,14 +708,14 @@ void IOBuf::coalesceAndReallocate( ...@@ -708,14 +708,14 @@ void IOBuf::coalesceAndReallocate(
size_t newLength, size_t newLength,
IOBuf* end, IOBuf* end,
size_t newTailroom) { size_t newTailroom) {
uint64_t newCapacity = newLength + newHeadroom + newTailroom; std::size_t newCapacity = newLength + newHeadroom + newTailroom;
// Allocate space for the coalesced buffer. // Allocate space for the coalesced buffer.
// We always convert to an external buffer, even if we happened to be an // We always convert to an external buffer, even if we happened to be an
// internal buffer before. // internal buffer before.
uint8_t* newBuf; uint8_t* newBuf;
SharedInfo* newInfo; SharedInfo* newInfo;
uint64_t actualCapacity; std::size_t actualCapacity;
allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity); allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);
// Copy the data into the new buffer // Copy the data into the new buffer
...@@ -789,7 +789,7 @@ void IOBuf::decrementRefcount() { ...@@ -789,7 +789,7 @@ void IOBuf::decrementRefcount() {
} }
} }
void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) { void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) {
size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom; size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
DCHECK_LT(newCapacity, UINT32_MAX); DCHECK_LT(newCapacity, UINT32_MAX);
...@@ -821,8 +821,8 @@ void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) { ...@@ -821,8 +821,8 @@ void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
size_t newAllocatedCapacity = 0; size_t newAllocatedCapacity = 0;
uint8_t* newBuffer = nullptr; uint8_t* newBuffer = nullptr;
uint64_t newHeadroom = 0; std::size_t newHeadroom = 0;
uint64_t oldHeadroom = headroom(); std::size_t oldHeadroom = headroom();
// If we have a buffer allocated with malloc and we just need more tailroom, // If we have a buffer allocated with malloc and we just need more tailroom,
// try to use realloc()/xallocx() to grow the buffer in place. // try to use realloc()/xallocx() to grow the buffer in place.
...@@ -880,7 +880,7 @@ void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) { ...@@ -880,7 +880,7 @@ void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
newHeadroom = minHeadroom; newHeadroom = minHeadroom;
} }
uint64_t cap; std::size_t cap;
initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap); initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);
if (flags() & kFlagFreeSharedInfo) { if (flags() & kFlagFreeSharedInfo) {
...@@ -913,10 +913,10 @@ void IOBuf::freeExtBuffer() { ...@@ -913,10 +913,10 @@ void IOBuf::freeExtBuffer() {
} }
void IOBuf::allocExtBuffer( void IOBuf::allocExtBuffer(
uint64_t minCapacity, std::size_t minCapacity,
uint8_t** bufReturn, uint8_t** bufReturn,
SharedInfo** infoReturn, SharedInfo** infoReturn,
uint64_t* capacityReturn) { std::size_t* capacityReturn) {
size_t mallocSize = goodExtBufferSize(minCapacity); size_t mallocSize = goodExtBufferSize(minCapacity);
uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize)); uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize));
if (UNLIKELY(buf == nullptr)) { if (UNLIKELY(buf == nullptr)) {
...@@ -926,7 +926,7 @@ void IOBuf::allocExtBuffer( ...@@ -926,7 +926,7 @@ void IOBuf::allocExtBuffer(
*bufReturn = buf; *bufReturn = buf;
} }
size_t IOBuf::goodExtBufferSize(uint64_t minCapacity) { size_t IOBuf::goodExtBufferSize(std::size_t minCapacity) {
// Determine how much space we should allocate. We'll store the SharedInfo // Determine how much space we should allocate. We'll store the SharedInfo
// for the external buffer just after the buffer itself. (We store it just // for the external buffer just after the buffer itself. (We store it just
// after the buffer rather than just before so that the code can still just // after the buffer rather than just before so that the code can still just
...@@ -946,13 +946,13 @@ void IOBuf::initExtBuffer( ...@@ -946,13 +946,13 @@ void IOBuf::initExtBuffer(
uint8_t* buf, uint8_t* buf,
size_t mallocSize, size_t mallocSize,
SharedInfo** infoReturn, SharedInfo** infoReturn,
uint64_t* capacityReturn) { std::size_t* capacityReturn) {
// Find the SharedInfo storage at the end of the buffer // Find the SharedInfo storage at the end of the buffer
// and construct the SharedInfo. // and construct the SharedInfo.
uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo); uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
SharedInfo* sharedInfo = new (infoStart) SharedInfo; SharedInfo* sharedInfo = new (infoStart) SharedInfo;
*capacityReturn = uint64_t(infoStart - buf); *capacityReturn = std::size_t(infoStart - buf);
*infoReturn = sharedInfo; *infoReturn = sharedInfo;
} }
...@@ -1048,7 +1048,7 @@ size_t IOBufHash::operator()(const IOBuf& buf) const { ...@@ -1048,7 +1048,7 @@ size_t IOBufHash::operator()(const IOBuf& buf) const {
uint64_t h1; uint64_t h1;
uint64_t h2; uint64_t h2;
hasher.Final(&h1, &h2); hasher.Final(&h1, &h2);
return h1; return static_cast<std::size_t>(h1);
} }
ordering IOBufCompare::operator()(const IOBuf& a, const IOBuf& b) const { ordering IOBufCompare::operator()(const IOBuf& a, const IOBuf& b) const {
......
This diff is collapsed.
...@@ -45,7 +45,7 @@ void appendToChain(unique_ptr<IOBuf>& dst, unique_ptr<IOBuf>&& src, bool pack) { ...@@ -45,7 +45,7 @@ void appendToChain(unique_ptr<IOBuf>& dst, unique_ptr<IOBuf>&& src, bool pack) {
// reduce wastage (the tail's tailroom and the head's headroom) when // reduce wastage (the tail's tailroom and the head's headroom) when
// joining two IOBufQueues together. // joining two IOBufQueues together.
size_t copyRemaining = MAX_PACK_COPY; size_t copyRemaining = MAX_PACK_COPY;
uint64_t n; std::size_t n;
while (src && (n = src->length()) < copyRemaining && while (src && (n = src->length()) < copyRemaining &&
n < tail->tailroom() && n > 0) { n < tail->tailroom() && n > 0) {
memcpy(tail->writableTail(), src->data(), n); memcpy(tail->writableTail(), src->data(), n);
...@@ -108,7 +108,7 @@ IOBufQueue& IOBufQueue::operator=(IOBufQueue&& other) { ...@@ -108,7 +108,7 @@ IOBufQueue& IOBufQueue::operator=(IOBufQueue&& other) {
return *this; return *this;
} }
std::pair<void*, uint64_t> IOBufQueue::headroom() { std::pair<void*, std::size_t> IOBufQueue::headroom() {
// Note, headroom is independent from the tail, so we don't need to flush the // Note, headroom is independent from the tail, so we don't need to flush the
// cache. // cache.
if (head_) { if (head_) {
...@@ -118,7 +118,7 @@ std::pair<void*, uint64_t> IOBufQueue::headroom() { ...@@ -118,7 +118,7 @@ std::pair<void*, uint64_t> IOBufQueue::headroom() {
} }
} }
void IOBufQueue::markPrepended(uint64_t n) { void IOBufQueue::markPrepended(std::size_t n) {
if (n == 0) { if (n == 0) {
return; return;
} }
...@@ -129,7 +129,7 @@ void IOBufQueue::markPrepended(uint64_t n) { ...@@ -129,7 +129,7 @@ void IOBufQueue::markPrepended(uint64_t n) {
chainLength_ += n; chainLength_ += n;
} }
void IOBufQueue::prepend(const void* buf, uint64_t n) { void IOBufQueue::prepend(const void* buf, std::size_t n) {
// We're not touching the tail, so we don't need to flush the cache. // We're not touching the tail, so we don't need to flush the cache.
auto hroom = head_->headroom(); auto hroom = head_->headroom();
if (!head_ || hroom < n) { if (!head_ || hroom < n) {
...@@ -182,7 +182,7 @@ void IOBufQueue::append(const void* buf, size_t len) { ...@@ -182,7 +182,7 @@ void IOBufQueue::append(const void* buf, size_t len) {
false); false);
} }
IOBuf* last = head_->prev(); IOBuf* last = head_->prev();
uint64_t copyLen = std::min(len, (size_t)last->tailroom()); std::size_t copyLen = std::min(len, (size_t)last->tailroom());
memcpy(last->writableTail(), src, copyLen); memcpy(last->writableTail(), src, copyLen);
src += copyLen; src += copyLen;
last->append(copyLen); last->append(copyLen);
...@@ -191,7 +191,10 @@ void IOBufQueue::append(const void* buf, size_t len) { ...@@ -191,7 +191,10 @@ void IOBufQueue::append(const void* buf, size_t len) {
} }
} }
void IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) { void IOBufQueue::wrapBuffer(
const void* buf,
size_t len,
std::size_t blockSize) {
auto src = static_cast<const uint8_t*>(buf); auto src = static_cast<const uint8_t*>(buf);
while (len != 0) { while (len != 0) {
size_t n = std::min(len, size_t(blockSize)); size_t n = std::min(len, size_t(blockSize));
...@@ -201,10 +204,10 @@ void IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) { ...@@ -201,10 +204,10 @@ void IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
} }
} }
pair<void*, uint64_t> IOBufQueue::preallocateSlow( pair<void*, std::size_t> IOBufQueue::preallocateSlow(
uint64_t min, std::size_t min,
uint64_t newAllocationSize, std::size_t newAllocationSize,
uint64_t max) { std::size_t max) {
// Avoid grabbing update guard, since we're manually setting the cache ptrs. // Avoid grabbing update guard, since we're manually setting the cache ptrs.
flushCache(); flushCache();
// Allocate a new buffer of the requested max size. // Allocate a new buffer of the requested max size.
...@@ -214,7 +217,7 @@ pair<void*, uint64_t> IOBufQueue::preallocateSlow( ...@@ -214,7 +217,7 @@ pair<void*, uint64_t> IOBufQueue::preallocateSlow(
cachePtr_->cachedRange = std::pair<uint8_t*, uint8_t*>( cachePtr_->cachedRange = std::pair<uint8_t*, uint8_t*>(
tailStart_, tailStart_ + newBuf->tailroom()); tailStart_, tailStart_ + newBuf->tailroom());
appendToChain(head_, std::move(newBuf), false); appendToChain(head_, std::move(newBuf), false);
return make_pair(writableTail(), std::min<uint64_t>(max, tailroom())); return make_pair(writableTail(), std::min<std::size_t>(max, tailroom()));
} }
unique_ptr<IOBuf> IOBufQueue::split(size_t n, bool throwOnUnderflow) { unique_ptr<IOBuf> IOBufQueue::split(size_t n, bool throwOnUnderflow) {
...@@ -355,7 +358,7 @@ void IOBufQueue::appendToString(std::string& out) const { ...@@ -355,7 +358,7 @@ void IOBufQueue::appendToString(std::string& out) const {
} }
} }
void IOBufQueue::gather(uint64_t maxLength) { void IOBufQueue::gather(std::size_t maxLength) {
auto guard = updateGuard(); auto guard = updateGuard();
if (head_ != nullptr) { if (head_ != nullptr) {
head_->gather(maxLength); head_->gather(maxLength);
......
...@@ -269,18 +269,18 @@ class IOBufQueue { ...@@ -269,18 +269,18 @@ class IOBufQueue {
/** /**
* Return a space to prepend bytes and the amount of headroom available. * Return a space to prepend bytes and the amount of headroom available.
*/ */
std::pair<void*, uint64_t> headroom(); std::pair<void*, std::size_t> headroom();
/** /**
* Indicate that n bytes from the headroom have been used. * Indicate that n bytes from the headroom have been used.
*/ */
void markPrepended(uint64_t n); void markPrepended(std::size_t n);
/** /**
* Prepend an existing range; throws std::overflow_error if not enough * Prepend an existing range; throws std::overflow_error if not enough
* room. * room.
*/ */
void prepend(const void* buf, uint64_t n); void prepend(const void* buf, std::size_t n);
/** /**
* Add a buffer or buffer chain to the end of this queue. The * Add a buffer or buffer chain to the end of this queue. The
...@@ -331,7 +331,7 @@ class IOBufQueue { ...@@ -331,7 +331,7 @@ class IOBufQueue {
void wrapBuffer( void wrapBuffer(
const void* buf, const void* buf,
size_t len, size_t len,
uint64_t blockSize = (1U << 31)); // default block size: 2GB std::size_t blockSize = (1U << 31)); // default block size: 2GB
/** /**
* Obtain a writable block of contiguous bytes at the end of this * Obtain a writable block of contiguous bytes at the end of this
...@@ -353,15 +353,15 @@ class IOBufQueue { ...@@ -353,15 +353,15 @@ class IOBufQueue {
* callback, tell the application how much of the buffer they've * callback, tell the application how much of the buffer they've
* filled with data. * filled with data.
*/ */
std::pair<void*, uint64_t> preallocate( std::pair<void*, std::size_t> preallocate(
uint64_t min, std::size_t min,
uint64_t newAllocationSize, std::size_t newAllocationSize,
uint64_t max = std::numeric_limits<uint64_t>::max()) { std::size_t max = std::numeric_limits<std::size_t>::max()) {
dcheckCacheIntegrity(); dcheckCacheIntegrity();
if (LIKELY(writableTail() != nullptr && tailroom() >= min)) { if (LIKELY(writableTail() != nullptr && tailroom() >= min)) {
return std::make_pair( return std::make_pair(
writableTail(), std::min<uint64_t>(max, tailroom())); writableTail(), std::min<std::size_t>(max, tailroom()));
} }
return preallocateSlow(min, newAllocationSize, max); return preallocateSlow(min, newAllocationSize, max);
...@@ -377,7 +377,7 @@ class IOBufQueue { ...@@ -377,7 +377,7 @@ class IOBufQueue {
* invoke any other non-const methods on this IOBufQueue between * invoke any other non-const methods on this IOBufQueue between
* the call to preallocate and the call to postallocate(). * the call to preallocate and the call to postallocate().
*/ */
void postallocate(uint64_t n) { void postallocate(std::size_t n) {
dcheckCacheIntegrity(); dcheckCacheIntegrity();
DCHECK_LE( DCHECK_LE(
(void*)(cachePtr_->cachedRange.first + n), (void*)(cachePtr_->cachedRange.first + n),
...@@ -389,7 +389,7 @@ class IOBufQueue { ...@@ -389,7 +389,7 @@ class IOBufQueue {
* Obtain a writable block of n contiguous bytes, allocating more space * Obtain a writable block of n contiguous bytes, allocating more space
* if necessary, and mark it as used. The caller can fill it later. * if necessary, and mark it as used. The caller can fill it later.
*/ */
void* allocate(uint64_t n) { void* allocate(std::size_t n) {
void* p = preallocate(n, n).first; void* p = preallocate(n, n).first;
postallocate(n); postallocate(n);
return p; return p;
...@@ -525,7 +525,7 @@ class IOBufQueue { ...@@ -525,7 +525,7 @@ class IOBufQueue {
/** /**
* Calls IOBuf::gather() on the head of the queue, if it exists. * Calls IOBuf::gather() on the head of the queue, if it exists.
*/ */
void gather(uint64_t maxLength); void gather(std::size_t maxLength);
/** Movable */ /** Movable */
IOBufQueue(IOBufQueue&&) noexcept; IOBufQueue(IOBufQueue&&) noexcept;
...@@ -644,8 +644,10 @@ class IOBufQueue { ...@@ -644,8 +644,10 @@ class IOBufQueue {
cachePtr_->cachedRange = std::pair<uint8_t*, uint8_t*>(); cachePtr_->cachedRange = std::pair<uint8_t*, uint8_t*>();
} }
std::pair<void*, uint64_t> std::pair<void*, std::size_t> preallocateSlow(
preallocateSlow(uint64_t min, uint64_t newAllocationSize, uint64_t max); std::size_t min,
std::size_t newAllocationSize,
std::size_t max);
}; };
} // namespace folly } // namespace folly
...@@ -87,7 +87,7 @@ struct Header { ...@@ -87,7 +87,7 @@ struct Header {
uint16_t flags; // reserved (must be 0) uint16_t flags; // reserved (must be 0)
uint32_t fileId; // unique file ID uint32_t fileId; // unique file ID
uint32_t dataLength; uint32_t dataLength;
uint64_t dataHash; std::size_t dataHash;
uint32_t headerHash; // must be last uint32_t headerHash; // must be last
} FOLLY_PACK_ATTR; } FOLLY_PACK_ATTR;
FOLLY_PACK_POP FOLLY_PACK_POP
......
...@@ -113,7 +113,7 @@ uint32_t headerHash(const Header& header) { ...@@ -113,7 +113,7 @@ uint32_t headerHash(const Header& header) {
&header, offsetof(Header, headerHash), kHashSeed); &header, offsetof(Header, headerHash), kHashSeed);
} }
std::pair<size_t, uint64_t> dataLengthAndHash(const IOBuf* buf) { std::pair<size_t, std::size_t> dataLengthAndHash(const IOBuf* buf) {
size_t len = 0; size_t len = 0;
hash::SpookyHashV2 hasher; hash::SpookyHashV2 hasher;
hasher.Init(kHashSeed, kHashSeed); hasher.Init(kHashSeed, kHashSeed);
...@@ -127,10 +127,10 @@ std::pair<size_t, uint64_t> dataLengthAndHash(const IOBuf* buf) { ...@@ -127,10 +127,10 @@ std::pair<size_t, uint64_t> dataLengthAndHash(const IOBuf* buf) {
if (len + headerSize() >= std::numeric_limits<uint32_t>::max()) { if (len + headerSize() >= std::numeric_limits<uint32_t>::max()) {
throw std::invalid_argument("Record length must fit in 32 bits"); throw std::invalid_argument("Record length must fit in 32 bits");
} }
return std::make_pair(len, hash1); return std::make_pair(len, static_cast<std::size_t>(hash1));
} }
uint64_t dataHash(ByteRange range) { std::size_t dataHash(ByteRange range) {
return hash::SpookyHashV2::Hash64(range.data(), range.size(), kHashSeed); return hash::SpookyHashV2::Hash64(range.data(), range.size(), kHashSeed);
} }
......
...@@ -108,7 +108,7 @@ void AsyncPipeReader::handlerReady(uint16_t events) noexcept { ...@@ -108,7 +108,7 @@ void AsyncPipeReader::handlerReady(uint16_t events) noexcept {
if (bytesRead > 0) { if (bytesRead > 0) {
if (movable) { if (movable) {
ioBuf->append(uint64_t(bytesRead)); ioBuf->append(std::size_t(bytesRead));
readCallback_->readBufferAvailable(std::move(ioBuf)); readCallback_->readBufferAvailable(std::move(ioBuf));
} else { } else {
readCallback_->readDataAvailable(size_t(bytesRead)); readCallback_->readDataAvailable(size_t(bytesRead));
......
...@@ -579,7 +579,7 @@ class AsyncServerSocket : public DelayedDestruction, public AsyncSocketBase { ...@@ -579,7 +579,7 @@ class AsyncServerSocket : public DelayedDestruction, public AsyncSocketBase {
/** /**
* Get the number of connections dropped by the AsyncServerSocket * Get the number of connections dropped by the AsyncServerSocket
*/ */
uint64_t getNumDroppedConnections() const { std::size_t getNumDroppedConnections() const {
return numDroppedConnections_; return numDroppedConnections_;
} }
...@@ -863,7 +863,7 @@ class AsyncServerSocket : public DelayedDestruction, public AsyncSocketBase { ...@@ -863,7 +863,7 @@ class AsyncServerSocket : public DelayedDestruction, public AsyncSocketBase {
double acceptRateAdjustSpeed_; // 0 to disable auto adjust double acceptRateAdjustSpeed_; // 0 to disable auto adjust
double acceptRate_; double acceptRate_;
std::chrono::time_point<std::chrono::steady_clock> lastAccepTimestamp_; std::chrono::time_point<std::chrono::steady_clock> lastAccepTimestamp_;
uint64_t numDroppedConnections_; std::size_t numDroppedConnections_;
uint32_t callbackIndex_; uint32_t callbackIndex_;
BackoffTimeout* backoffTimeout_; BackoffTimeout* backoffTimeout_;
std::vector<CallbackInfo> callbacks_; std::vector<CallbackInfo> callbacks_;
......
...@@ -83,7 +83,7 @@ EventBase::EventBase(bool enableTimeMeasurement) ...@@ -83,7 +83,7 @@ EventBase::EventBase(bool enableTimeMeasurement)
maxLatencyLoopTime_(avgLoopTime_), maxLatencyLoopTime_(avgLoopTime_),
enableTimeMeasurement_(enableTimeMeasurement), enableTimeMeasurement_(enableTimeMeasurement),
nextLoopCnt_( nextLoopCnt_(
uint64_t(-40)) // Early wrap-around so bugs will manifest soon std::size_t(-40)) // Early wrap-around so bugs will manifest soon
, ,
latestLoopCnt_(nextLoopCnt_), latestLoopCnt_(nextLoopCnt_),
startWork_(), startWork_(),
...@@ -130,7 +130,7 @@ EventBase::EventBase(event_base* evb, bool enableTimeMeasurement) ...@@ -130,7 +130,7 @@ EventBase::EventBase(event_base* evb, bool enableTimeMeasurement)
maxLatencyLoopTime_(avgLoopTime_), maxLatencyLoopTime_(avgLoopTime_),
enableTimeMeasurement_(enableTimeMeasurement), enableTimeMeasurement_(enableTimeMeasurement),
nextLoopCnt_( nextLoopCnt_(
uint64_t(-40)) // Early wrap-around so bugs will manifest soon std::size_t(-40)) // Early wrap-around so bugs will manifest soon
, ,
latestLoopCnt_(nextLoopCnt_), latestLoopCnt_(nextLoopCnt_),
startWork_(), startWork_(),
......
...@@ -580,7 +580,7 @@ class EventBase : private boost::noncopyable, ...@@ -580,7 +580,7 @@ class EventBase : private boost::noncopyable,
double value_; double value_;
std::chrono::microseconds buffer_time_{0}; std::chrono::microseconds buffer_time_{0};
std::chrono::microseconds busy_buffer_{0}; std::chrono::microseconds busy_buffer_{0};
uint64_t buffer_cnt_{0}; std::size_t buffer_cnt_{0};
static constexpr std::chrono::milliseconds buffer_interval_{10}; static constexpr std::chrono::milliseconds buffer_interval_{10};
}; };
...@@ -757,8 +757,8 @@ class EventBase : private boost::noncopyable, ...@@ -757,8 +757,8 @@ class EventBase : private boost::noncopyable,
const bool enableTimeMeasurement_; const bool enableTimeMeasurement_;
// Wrap-around loop counter to detect beginning of each loop // Wrap-around loop counter to detect beginning of each loop
uint64_t nextLoopCnt_; std::size_t nextLoopCnt_;
uint64_t latestLoopCnt_; std::size_t latestLoopCnt_;
std::chrono::steady_clock::time_point startWork_; std::chrono::steady_clock::time_point startWork_;
// Prevent undefined behavior from invoking event_base_loop() reentrantly. // Prevent undefined behavior from invoking event_base_loop() reentrantly.
// This is needed since many projects use libevent-1.4, which lacks commit // This is needed since many projects use libevent-1.4, which lacks commit
...@@ -783,7 +783,7 @@ class EventBase : private boost::noncopyable, ...@@ -783,7 +783,7 @@ class EventBase : private boost::noncopyable,
friend class detail::EventBaseLocalBase; friend class detail::EventBaseLocalBase;
template <typename T> template <typename T>
friend class EventBaseLocal; friend class EventBaseLocal;
std::unordered_map<uint64_t, std::shared_ptr<void>> localStorage_; std::unordered_map<std::size_t, std::shared_ptr<void>> localStorage_;
std::unordered_set<detail::EventBaseLocalBaseBase*> localStorageToDtor_; std::unordered_set<detail::EventBaseLocalBaseBase*> localStorageToDtor_;
folly::once_flag virtualEventBaseInitFlag_; folly::once_flag virtualEventBaseInitFlag_;
......
...@@ -66,6 +66,6 @@ void EventBaseLocalBase::setVoid(EventBase& evb, std::shared_ptr<void>&& ptr) { ...@@ -66,6 +66,6 @@ void EventBaseLocalBase::setVoid(EventBase& evb, std::shared_ptr<void>&& ptr) {
} }
} }
std::atomic<uint64_t> EventBaseLocalBase::keyCounter_{0}; std::atomic<std::size_t> EventBaseLocalBase::keyCounter_{0};
} // namespace detail } // namespace detail
} // namespace folly } // namespace folly
...@@ -40,8 +40,8 @@ class EventBaseLocalBase : public EventBaseLocalBaseBase, boost::noncopyable { ...@@ -40,8 +40,8 @@ class EventBaseLocalBase : public EventBaseLocalBaseBase, boost::noncopyable {
void* getVoid(EventBase& evb); void* getVoid(EventBase& evb);
folly::Synchronized<std::unordered_set<EventBase*>> eventBases_; folly::Synchronized<std::unordered_set<EventBase*>> eventBases_;
static std::atomic<uint64_t> keyCounter_; static std::atomic<std::size_t> keyCounter_;
uint64_t key_{keyCounter_++}; std::size_t key_{keyCounter_++};
}; };
} // namespace detail } // namespace detail
......
...@@ -89,7 +89,7 @@ HHWheelTimer::HHWheelTimer( ...@@ -89,7 +89,7 @@ HHWheelTimer::HHWheelTimer(
count_(0), count_(0),
startTime_(getCurTime()), startTime_(getCurTime()),
processingCallbacksGuard_(nullptr) { processingCallbacksGuard_(nullptr) {
bitmap_.resize((WHEEL_SIZE / sizeof(uint64_t)) / 8, 0); bitmap_.resize((WHEEL_SIZE / sizeof(std::size_t)) / 8, 0);
} }
HHWheelTimer::~HHWheelTimer() { HHWheelTimer::~HHWheelTimer() {
...@@ -249,7 +249,7 @@ size_t HHWheelTimer::cancelAll() { ...@@ -249,7 +249,7 @@ size_t HHWheelTimer::cancelAll() {
size_t count = 0; size_t count = 0;
if (count_ != 0) { if (count_ != 0) {
const uint64_t numElements = WHEEL_BUCKETS * WHEEL_SIZE; const std::size_t numElements = WHEEL_BUCKETS * WHEEL_SIZE;
auto maxBuckets = std::min(numElements, count_); auto maxBuckets = std::min(numElements, count_);
auto buckets = std::make_unique<CallbackList[]>(maxBuckets); auto buckets = std::make_unique<CallbackList[]>(maxBuckets);
size_t countBuckets = 0; size_t countBuckets = 0;
......
...@@ -249,7 +249,7 @@ class HHWheelTimer : private folly::AsyncTimeout, ...@@ -249,7 +249,7 @@ class HHWheelTimer : private folly::AsyncTimeout,
/** /**
* Return the number of currently pending timeouts * Return the number of currently pending timeouts
*/ */
uint64_t count() const { std::size_t count() const {
return count_; return count_;
} }
...@@ -289,7 +289,7 @@ class HHWheelTimer : private folly::AsyncTimeout, ...@@ -289,7 +289,7 @@ class HHWheelTimer : private folly::AsyncTimeout,
typedef Callback::List CallbackList; typedef Callback::List CallbackList;
CallbackList buckets_[WHEEL_BUCKETS][WHEEL_SIZE]; CallbackList buckets_[WHEEL_BUCKETS][WHEEL_SIZE];
std::vector<uint64_t> bitmap_; std::vector<std::size_t> bitmap_;
int64_t timeToWheelTicks(std::chrono::milliseconds t) { int64_t timeToWheelTicks(std::chrono::milliseconds t) {
return t.count() / interval_.count(); return t.count() / interval_.count();
...@@ -298,7 +298,7 @@ class HHWheelTimer : private folly::AsyncTimeout, ...@@ -298,7 +298,7 @@ class HHWheelTimer : private folly::AsyncTimeout,
bool cascadeTimers(int bucket, int tick); bool cascadeTimers(int bucket, int tick);
int64_t lastTick_; int64_t lastTick_;
int64_t expireTick_; int64_t expireTick_;
uint64_t count_; std::size_t count_;
std::chrono::steady_clock::time_point startTime_; std::chrono::steady_clock::time_point startTime_;
int64_t calcNextTick(); int64_t calcNextTick();
......
...@@ -1308,9 +1308,9 @@ TEST(AsyncSSLSocketTest, SSLParseClientHelloMultiplePackets) { ...@@ -1308,9 +1308,9 @@ TEST(AsyncSSLSocketTest, SSLParseClientHelloMultiplePackets) {
sock->enableClientHelloParsing(); sock->enableClientHelloParsing();
// Test parsing with multiple small packets // Test parsing with multiple small packets
for (uint64_t i = 0; i < buf->length(); i += 3) { for (std::size_t i = 0; i < buf->length(); i += 3) {
auto bufCopy = folly::IOBuf::copyBuffer( auto bufCopy = folly::IOBuf::copyBuffer(
buf->data() + i, std::min((uint64_t)3, buf->length() - i)); buf->data() + i, std::min((std::size_t)3, buf->length() - i));
AsyncSSLSocket::clientHelloParsingCallback( AsyncSSLSocket::clientHelloParsingCallback(
0, 0,
0, 0,
......
...@@ -1567,7 +1567,7 @@ class IdleTimeTimeoutSeries : public AsyncTimeout { ...@@ -1567,7 +1567,7 @@ class IdleTimeTimeoutSeries : public AsyncTimeout {
public: public:
explicit IdleTimeTimeoutSeries( explicit IdleTimeTimeoutSeries(
EventBase* base, EventBase* base,
std::deque<std::uint64_t>& timeout) std::deque<std::size_t>& timeout)
: AsyncTimeout(base), timeouts_(0), timeout_(timeout) { : AsyncTimeout(base), timeouts_(0), timeout_(timeout) {
scheduleTimeout(1); scheduleTimeout(1);
} }
...@@ -1580,7 +1580,7 @@ class IdleTimeTimeoutSeries : public AsyncTimeout { ...@@ -1580,7 +1580,7 @@ class IdleTimeTimeoutSeries : public AsyncTimeout {
if (timeout_.empty()) { if (timeout_.empty()) {
cancelTimeout(); cancelTimeout();
} else { } else {
uint64_t sleepTime = timeout_.front(); std::size_t sleepTime = timeout_.front();
timeout_.pop_front(); timeout_.pop_front();
if (sleepTime) { if (sleepTime) {
usleep(sleepTime); usleep(sleepTime);
...@@ -1595,7 +1595,7 @@ class IdleTimeTimeoutSeries : public AsyncTimeout { ...@@ -1595,7 +1595,7 @@ class IdleTimeTimeoutSeries : public AsyncTimeout {
private: private:
int timeouts_; int timeouts_;
std::deque<uint64_t>& timeout_; std::deque<std::size_t>& timeout_;
}; };
/** /**
...@@ -1609,11 +1609,11 @@ class IdleTimeTimeoutSeries : public AsyncTimeout { ...@@ -1609,11 +1609,11 @@ class IdleTimeTimeoutSeries : public AsyncTimeout {
*/ */
TEST(EventBaseTest, IdleTime) { TEST(EventBaseTest, IdleTime) {
EventBase eventBase; EventBase eventBase;
std::deque<uint64_t> timeouts0(4, 8080); std::deque<std::size_t> timeouts0(4, 8080);
timeouts0.push_front(8000); timeouts0.push_front(8000);
timeouts0.push_back(14000); timeouts0.push_back(14000);
IdleTimeTimeoutSeries tos0(&eventBase, timeouts0); IdleTimeTimeoutSeries tos0(&eventBase, timeouts0);
std::deque<uint64_t> timeouts(20, 20); std::deque<std::size_t> timeouts(20, 20);
std::unique_ptr<IdleTimeTimeoutSeries> tos; std::unique_ptr<IdleTimeTimeoutSeries> tos;
bool hostOverloaded = false; bool hostOverloaded = false;
......
...@@ -177,7 +177,7 @@ TEST(IOBufQueue, SplitZero) { ...@@ -177,7 +177,7 @@ TEST(IOBufQueue, SplitZero) {
TEST(IOBufQueue, Preallocate) { TEST(IOBufQueue, Preallocate) {
IOBufQueue queue(clOptions); IOBufQueue queue(clOptions);
queue.append(string("Hello")); queue.append(string("Hello"));
pair<void*, uint64_t> writable = queue.preallocate(2, 64, 64); pair<void*, std::size_t> writable = queue.preallocate(2, 64, 64);
checkConsistency(queue); checkConsistency(queue);
EXPECT_NE((void*)nullptr, writable.first); EXPECT_NE((void*)nullptr, writable.first);
EXPECT_LE(2, writable.second); EXPECT_LE(2, writable.second);
......
...@@ -391,7 +391,7 @@ TEST(IOBuf, Chaining) { ...@@ -391,7 +391,7 @@ TEST(IOBuf, Chaining) {
EXPECT_TRUE(iob4ptr->isChained()); EXPECT_TRUE(iob4ptr->isChained());
EXPECT_TRUE(iob5ptr->isChained()); EXPECT_TRUE(iob5ptr->isChained());
uint64_t fullLength = std::size_t fullLength =
(iob1->length() + iob2ptr->length() + iob3ptr->length() + (iob1->length() + iob2ptr->length() + iob3ptr->length() +
iob4ptr->length() + iob5ptr->length()); iob4ptr->length() + iob5ptr->length());
EXPECT_EQ(5, iob1->countChainElements()); EXPECT_EQ(5, iob1->countChainElements());
...@@ -828,16 +828,16 @@ TEST(IOBuf, Alignment) { ...@@ -828,16 +828,16 @@ TEST(IOBuf, Alignment) {
TEST(TypedIOBuf, Simple) { TEST(TypedIOBuf, Simple) {
auto buf = IOBuf::create(0); auto buf = IOBuf::create(0);
TypedIOBuf<uint64_t> typed(buf.get()); TypedIOBuf<std::size_t> typed(buf.get());
const uint64_t n = 10000; const std::size_t n = 10000;
typed.reserve(0, n); typed.reserve(0, n);
EXPECT_LE(n, typed.capacity()); EXPECT_LE(n, typed.capacity());
for (uint64_t i = 0; i < n; i++) { for (std::size_t i = 0; i < n; i++) {
*typed.writableTail() = i; *typed.writableTail() = i;
typed.append(1); typed.append(1);
} }
EXPECT_EQ(n, typed.length()); EXPECT_EQ(n, typed.length());
for (uint64_t i = 0; i < n; i++) { for (std::size_t i = 0; i < n; i++) {
EXPECT_EQ(i, typed.data()[i]); EXPECT_EQ(i, typed.data()[i]);
} }
} }
...@@ -1397,7 +1397,7 @@ TEST(IOBuf, CloneCoalescedChain) { ...@@ -1397,7 +1397,7 @@ TEST(IOBuf, CloneCoalescedChain) {
boost::mt19937 gen(fillSeed); boost::mt19937 gen(fillSeed);
{ {
auto c = b.get(); auto c = b.get();
uint64_t length = c->tailroom(); std::size_t length = c->tailroom();
do { do {
length = std::min(length, c->tailroom()); length = std::min(length, c->tailroom());
c->append(length--); c->append(length--);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment