Commit fae10d94 authored by Nathan Bronson, committed by Facebook Github Bot

uint64_t -> size_t for 32-bit support

Summary:
On 64-bit platforms uint64_t and std::size_t are the same
size, but that is not the case on 32-bit platforms such as ARM or x86.
This diff makes folly/io 32-bit clean, at least to the point that it
compiles and passes unit tests.  There is no change to behavior on
64-bit platforms.
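
As a quick illustration of the summary (an editor's standalone sketch, not part of the diff; the variable names and main() wrapper are mine): on typical LP64 builds std::size_t and uint64_t are both 8 bytes, while on 32-bit ARM or x86 std::size_t drops to 4 bytes, so a length that round-trips through uint64_t has to narrow on its way back into a size_t-typed API.

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  // Typical 64-bit (LP64) build: both print 8.
  // Typical 32-bit build (ARM, x86): size_t prints 4, uint64_t prints 8.
  std::printf("sizeof(std::size_t)   = %zu\n", sizeof(std::size_t));
  std::printf("sizeof(std::uint64_t) = %zu\n", sizeof(std::uint64_t));

  // Keeping buffer lengths as std::size_t end to end (as this diff does for
  // folly/io) avoids widening a length to uint64_t only to narrow it again
  // on 32-bit targets.
  std::uint64_t wide = 1024;                         // always 64 bits
  std::size_t len = static_cast<std::size_t>(wide);  // 32 bits on 32-bit builds
  return len == 1024 ? 0 : 1;
}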

Reviewed By: shixiao

Differential Revision: D8559702

fbshipit-source-id: c61a5d3d0e327f36cdf9a606a48a256943efc098
parent 8fbd6d60
......@@ -572,7 +572,7 @@ class CursorBase {
DCHECK(crtBuf_ == nullptr || crtBegin_ == crtBuf_->data());
DCHECK(
crtBuf_ == nullptr ||
(uint64_t)(crtEnd_ - crtBegin_) == crtBuf_->length());
(std::size_t)(crtEnd_ - crtBegin_) == crtBuf_->length());
}
~CursorBase() {}
......@@ -968,7 +968,7 @@ typedef RWCursor<CursorAccess::UNSHARE> RWUnshareCursor;
*/
class Appender : public detail::Writable<Appender> {
public:
Appender(IOBuf* buf, uint64_t growth)
Appender(IOBuf* buf, std::size_t growth)
: buffer_(buf), crtBuf_(buf->prev()), growth_(growth) {}
uint8_t* writableData() {
......@@ -991,7 +991,7 @@ class Appender : public detail::Writable<Appender> {
* Ensure at least n contiguous bytes available to write.
* Postcondition: length() >= n.
*/
void ensure(uint64_t n) {
void ensure(std::size_t n) {
if (LIKELY(length() >= n)) {
return;
}
......@@ -1095,7 +1095,7 @@ class Appender : public detail::Writable<Appender> {
IOBuf* buffer_;
IOBuf* crtBuf_;
uint64_t growth_;
std::size_t growth_;
};
class QueueAppender : public detail::Writable<QueueAppender> {
......@@ -1105,10 +1105,10 @@ class QueueAppender : public detail::Writable<QueueAppender> {
* space in the queue, we grow no more than growth bytes at once
* (unless you call ensure() with a bigger value yourself).
*/
QueueAppender(IOBufQueue* queue, uint64_t growth)
QueueAppender(IOBufQueue* queue, std::size_t growth)
: queueCache_(queue), growth_(growth) {}
void reset(IOBufQueue* queue, uint64_t growth) {
void reset(IOBufQueue* queue, std::size_t growth) {
queueCache_.reset(queue);
growth_ = growth;
}
......
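
For context on the Appender hunks above, a minimal usage sketch (my own example, based on the constructor, ensure(), and writableData() signatures shown in this file, plus Appender::append(), which I believe commits the written bytes): the growth argument and ensure()'s n are exactly the values the diff retypes from uint64_t to std::size_t.

#include <folly/io/Cursor.h>
#include <folly/io/IOBuf.h>
#include <cstring>

int main() {
  auto buf = folly::IOBuf::create(16);

  // The second argument (growth) is now std::size_t.
  folly::io::Appender appender(buf.get(), /*growth=*/64);

  const char msg[] = "hello, 32-bit world";
  appender.ensure(sizeof(msg));  // guarantees contiguous tailroom; n is std::size_t
  std::memcpy(appender.writableData(), msg, sizeof(msg));
  appender.append(sizeof(msg));  // mark the bytes just written as used

  return buf->computeChainDataLength() == sizeof(msg) ? 0 : 1;
}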
......@@ -46,7 +46,7 @@ enum : uint16_t {
kDataInUse = 0x02,
};
enum : uint64_t {
enum : std::size_t {
// When create() is called for buffers less than kDefaultCombinedBufSize,
// we allocate a single combined memory segment for the IOBuf and the data
// together. See the comments for createCombined()/createSeparate() for more
......@@ -190,7 +190,7 @@ void IOBuf::freeInternalBuf(void* /* buf */, void* userData) {
releaseStorage(storage, kDataInUse);
}
IOBuf::IOBuf(CreateOp, uint64_t capacity)
IOBuf::IOBuf(CreateOp, std::size_t capacity)
: next_(this),
prev_(this),
data_(nullptr),
......@@ -205,9 +205,9 @@ IOBuf::IOBuf(CreateOp, uint64_t capacity)
IOBuf::IOBuf(
CopyBufferOp /* op */,
const void* buf,
uint64_t size,
uint64_t headroom,
uint64_t minTailroom)
std::size_t size,
std::size_t headroom,
std::size_t minTailroom)
: IOBuf(CREATE, headroom + size + minTailroom) {
advance(headroom);
if (size > 0) {
......@@ -220,11 +220,11 @@ IOBuf::IOBuf(
IOBuf::IOBuf(
CopyBufferOp op,
ByteRange br,
uint64_t headroom,
uint64_t minTailroom)
std::size_t headroom,
std::size_t minTailroom)
: IOBuf(op, br.data(), br.size(), headroom, minTailroom) {}
unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) {
unique_ptr<IOBuf> IOBuf::create(std::size_t capacity) {
// For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
// all with a single allocation.
//
......@@ -240,7 +240,7 @@ unique_ptr<IOBuf> IOBuf::create(uint64_t capacity) {
return createSeparate(capacity);
}
unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) {
unique_ptr<IOBuf> IOBuf::createCombined(std::size_t capacity) {
// To save a memory allocation, allocate space for the IOBuf object, the
// SharedInfo struct, and the data itself all with a single call to malloc().
size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity;
......@@ -263,13 +263,13 @@ unique_ptr<IOBuf> IOBuf::createCombined(uint64_t capacity) {
return ret;
}
unique_ptr<IOBuf> IOBuf::createSeparate(uint64_t capacity) {
unique_ptr<IOBuf> IOBuf::createSeparate(std::size_t capacity) {
return std::make_unique<IOBuf>(CREATE, capacity);
}
unique_ptr<IOBuf> IOBuf::createChain(
size_t totalCapacity,
uint64_t maxBufCapacity) {
std::size_t maxBufCapacity) {
unique_ptr<IOBuf> out =
create(std::min(totalCapacity, size_t(maxBufCapacity)));
size_t allocatedCapacity = out->capacity();
......@@ -287,8 +287,8 @@ unique_ptr<IOBuf> IOBuf::createChain(
IOBuf::IOBuf(
TakeOwnershipOp,
void* buf,
uint64_t capacity,
uint64_t length,
std::size_t capacity,
std::size_t length,
FreeFunction freeFn,
void* userData,
bool freeOnError)
......@@ -310,8 +310,8 @@ IOBuf::IOBuf(
unique_ptr<IOBuf> IOBuf::takeOwnership(
void* buf,
uint64_t capacity,
uint64_t length,
std::size_t capacity,
std::size_t length,
FreeFunction freeFn,
void* userData,
bool freeOnError) {
......@@ -333,7 +333,7 @@ unique_ptr<IOBuf> IOBuf::takeOwnership(
}
}
IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity)
IOBuf::IOBuf(WrapBufferOp, const void* buf, std::size_t capacity)
: IOBuf(
InternalConstructor(),
0,
......@@ -347,11 +347,11 @@ IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity)
IOBuf::IOBuf(WrapBufferOp op, ByteRange br) : IOBuf(op, br.data(), br.size()) {}
unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint64_t capacity) {
unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, std::size_t capacity) {
return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
}
IOBuf IOBuf::wrapBufferAsValue(const void* buf, uint64_t capacity) {
IOBuf IOBuf::wrapBufferAsValue(const void* buf, std::size_t capacity) {
return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity);
}
......@@ -395,9 +395,9 @@ IOBuf::IOBuf(
InternalConstructor,
uintptr_t flagsAndSharedInfo,
uint8_t* buf,
uint64_t capacity,
std::size_t capacity,
uint8_t* data,
uint64_t length)
std::size_t length)
: next_(this),
prev_(this),
data_(data),
......@@ -495,8 +495,8 @@ size_t IOBuf::countChainElements() const {
return numElements;
}
uint64_t IOBuf::computeChainDataLength() const {
uint64_t fullLength = length_;
std::size_t IOBuf::computeChainDataLength() const {
std::size_t fullLength = length_;
for (IOBuf* current = next_; current != this; current = current->next_) {
fullLength += current->length_;
}
......@@ -562,10 +562,10 @@ IOBuf IOBuf::cloneCoalescedAsValue() const {
return cloneOneAsValue();
}
// Coalesce into newBuf
const uint64_t newLength = computeChainDataLength();
const uint64_t newHeadroom = headroom();
const uint64_t newTailroom = prev()->tailroom();
const uint64_t newCapacity = newLength + newHeadroom + newTailroom;
const std::size_t newLength = computeChainDataLength();
const std::size_t newHeadroom = headroom();
const std::size_t newTailroom = prev()->tailroom();
const std::size_t newCapacity = newLength + newHeadroom + newTailroom;
IOBuf newBuf{CREATE, newCapacity};
newBuf.advance(newHeadroom);
......@@ -591,13 +591,13 @@ void IOBuf::unshareOneSlow() {
// Allocate a new buffer for the data
uint8_t* buf;
SharedInfo* sharedInfo;
uint64_t actualCapacity;
std::size_t actualCapacity;
allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity);
// Copy the data
// Maintain the same amount of headroom. Since we maintained the same
// minimum capacity we also maintain at least the same amount of tailroom.
uint64_t headlen = headroom();
std::size_t headlen = headroom();
if (length_ > 0) {
assert(data_ != nullptr);
memcpy(buf + headlen, data_, length_);
......@@ -664,7 +664,7 @@ void IOBuf::coalesceSlow() {
DCHECK(isChained());
// Compute the length of the entire chain
uint64_t newLength = 0;
std::size_t newLength = 0;
IOBuf* end = this;
do {
newLength += end->length_;
......@@ -683,7 +683,7 @@ void IOBuf::coalesceSlow(size_t maxLength) {
DCHECK_LT(length_, maxLength);
// Compute the length of the entire chain
uint64_t newLength = 0;
std::size_t newLength = 0;
IOBuf* end = this;
while (true) {
newLength += end->length_;
......@@ -708,14 +708,14 @@ void IOBuf::coalesceAndReallocate(
size_t newLength,
IOBuf* end,
size_t newTailroom) {
uint64_t newCapacity = newLength + newHeadroom + newTailroom;
std::size_t newCapacity = newLength + newHeadroom + newTailroom;
// Allocate space for the coalesced buffer.
// We always convert to an external buffer, even if we happened to be an
// internal buffer before.
uint8_t* newBuf;
SharedInfo* newInfo;
uint64_t actualCapacity;
std::size_t actualCapacity;
allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);
// Copy the data into the new buffer
......@@ -789,7 +789,7 @@ void IOBuf::decrementRefcount() {
}
}
void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) {
size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
DCHECK_LT(newCapacity, UINT32_MAX);
......@@ -821,8 +821,8 @@ void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
size_t newAllocatedCapacity = 0;
uint8_t* newBuffer = nullptr;
uint64_t newHeadroom = 0;
uint64_t oldHeadroom = headroom();
std::size_t newHeadroom = 0;
std::size_t oldHeadroom = headroom();
// If we have a buffer allocated with malloc and we just need more tailroom,
// try to use realloc()/xallocx() to grow the buffer in place.
......@@ -880,7 +880,7 @@ void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
newHeadroom = minHeadroom;
}
uint64_t cap;
std::size_t cap;
initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);
if (flags() & kFlagFreeSharedInfo) {
......@@ -913,10 +913,10 @@ void IOBuf::freeExtBuffer() {
}
void IOBuf::allocExtBuffer(
uint64_t minCapacity,
std::size_t minCapacity,
uint8_t** bufReturn,
SharedInfo** infoReturn,
uint64_t* capacityReturn) {
std::size_t* capacityReturn) {
size_t mallocSize = goodExtBufferSize(minCapacity);
uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize));
if (UNLIKELY(buf == nullptr)) {
......@@ -926,7 +926,7 @@ void IOBuf::allocExtBuffer(
*bufReturn = buf;
}
size_t IOBuf::goodExtBufferSize(uint64_t minCapacity) {
size_t IOBuf::goodExtBufferSize(std::size_t minCapacity) {
// Determine how much space we should allocate. We'll store the SharedInfo
// for the external buffer just after the buffer itself. (We store it just
// after the buffer rather than just before so that the code can still just
......@@ -946,13 +946,13 @@ void IOBuf::initExtBuffer(
uint8_t* buf,
size_t mallocSize,
SharedInfo** infoReturn,
uint64_t* capacityReturn) {
std::size_t* capacityReturn) {
// Find the SharedInfo storage at the end of the buffer
// and construct the SharedInfo.
uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
SharedInfo* sharedInfo = new (infoStart) SharedInfo;
*capacityReturn = uint64_t(infoStart - buf);
*capacityReturn = std::size_t(infoStart - buf);
*infoReturn = sharedInfo;
}
......@@ -1048,7 +1048,7 @@ size_t IOBufHash::operator()(const IOBuf& buf) const {
uint64_t h1;
uint64_t h2;
hasher.Final(&h1, &h2);
return h1;
return static_cast<std::size_t>(h1);
}
ordering IOBufCompare::operator()(const IOBuf& a, const IOBuf& b) const {
......
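
A note on the IOBufHash hunk just above (illustrative sketch, not from the commit; narrowHash is a hypothetical helper of mine): SpookyHashV2 still produces a 64-bit value, so the new explicit cast only makes the narrowing visible; on LP64 it is the identity, and on 32-bit builds the low 32 bits survive.

#include <cstddef>
#include <cstdint>

// Hypothetical helper mirroring the cast added in IOBufHash::operator().
std::size_t narrowHash(std::uint64_t h64) {
  return static_cast<std::size_t>(h64);
}

int main() {
  std::uint64_t h64 = 0x123456789abcdef0ULL;
  std::size_t h = narrowHash(h64);
  // 64-bit build: h == 0x123456789abcdef0
  // 32-bit build: h == 0x9abcdef0
  return h == static_cast<std::size_t>(h64) ? 0 : 1;
}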
(diff for one file is collapsed and not shown)
......@@ -45,7 +45,7 @@ void appendToChain(unique_ptr<IOBuf>& dst, unique_ptr<IOBuf>&& src, bool pack) {
// reduce wastage (the tail's tailroom and the head's headroom) when
// joining two IOBufQueues together.
size_t copyRemaining = MAX_PACK_COPY;
uint64_t n;
std::size_t n;
while (src && (n = src->length()) < copyRemaining &&
n < tail->tailroom() && n > 0) {
memcpy(tail->writableTail(), src->data(), n);
......@@ -108,7 +108,7 @@ IOBufQueue& IOBufQueue::operator=(IOBufQueue&& other) {
return *this;
}
std::pair<void*, uint64_t> IOBufQueue::headroom() {
std::pair<void*, std::size_t> IOBufQueue::headroom() {
// Note, headroom is independent from the tail, so we don't need to flush the
// cache.
if (head_) {
......@@ -118,7 +118,7 @@ std::pair<void*, uint64_t> IOBufQueue::headroom() {
}
}
void IOBufQueue::markPrepended(uint64_t n) {
void IOBufQueue::markPrepended(std::size_t n) {
if (n == 0) {
return;
}
......@@ -129,7 +129,7 @@ void IOBufQueue::markPrepended(uint64_t n) {
chainLength_ += n;
}
void IOBufQueue::prepend(const void* buf, uint64_t n) {
void IOBufQueue::prepend(const void* buf, std::size_t n) {
// We're not touching the tail, so we don't need to flush the cache.
auto hroom = head_->headroom();
if (!head_ || hroom < n) {
......@@ -182,7 +182,7 @@ void IOBufQueue::append(const void* buf, size_t len) {
false);
}
IOBuf* last = head_->prev();
uint64_t copyLen = std::min(len, (size_t)last->tailroom());
std::size_t copyLen = std::min(len, (size_t)last->tailroom());
memcpy(last->writableTail(), src, copyLen);
src += copyLen;
last->append(copyLen);
......@@ -191,7 +191,10 @@ void IOBufQueue::append(const void* buf, size_t len) {
}
}
void IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
void IOBufQueue::wrapBuffer(
const void* buf,
size_t len,
std::size_t blockSize) {
auto src = static_cast<const uint8_t*>(buf);
while (len != 0) {
size_t n = std::min(len, size_t(blockSize));
......@@ -201,10 +204,10 @@ void IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
}
}
pair<void*, uint64_t> IOBufQueue::preallocateSlow(
uint64_t min,
uint64_t newAllocationSize,
uint64_t max) {
pair<void*, std::size_t> IOBufQueue::preallocateSlow(
std::size_t min,
std::size_t newAllocationSize,
std::size_t max) {
// Avoid grabbing update guard, since we're manually setting the cache ptrs.
flushCache();
// Allocate a new buffer of the requested max size.
......@@ -214,7 +217,7 @@ pair<void*, uint64_t> IOBufQueue::preallocateSlow(
cachePtr_->cachedRange = std::pair<uint8_t*, uint8_t*>(
tailStart_, tailStart_ + newBuf->tailroom());
appendToChain(head_, std::move(newBuf), false);
return make_pair(writableTail(), std::min<uint64_t>(max, tailroom()));
return make_pair(writableTail(), std::min<std::size_t>(max, tailroom()));
}
unique_ptr<IOBuf> IOBufQueue::split(size_t n, bool throwOnUnderflow) {
......@@ -355,7 +358,7 @@ void IOBufQueue::appendToString(std::string& out) const {
}
}
void IOBufQueue::gather(uint64_t maxLength) {
void IOBufQueue::gather(std::size_t maxLength) {
auto guard = updateGuard();
if (head_ != nullptr) {
head_->gather(maxLength);
......
......@@ -269,18 +269,18 @@ class IOBufQueue {
/**
* Return a space to prepend bytes and the amount of headroom available.
*/
std::pair<void*, uint64_t> headroom();
std::pair<void*, std::size_t> headroom();
/**
* Indicate that n bytes from the headroom have been used.
*/
void markPrepended(uint64_t n);
void markPrepended(std::size_t n);
/**
* Prepend an existing range; throws std::overflow_error if not enough
* room.
*/
void prepend(const void* buf, uint64_t n);
void prepend(const void* buf, std::size_t n);
/**
* Add a buffer or buffer chain to the end of this queue. The
......@@ -331,7 +331,7 @@ class IOBufQueue {
void wrapBuffer(
const void* buf,
size_t len,
uint64_t blockSize = (1U << 31)); // default block size: 2GB
std::size_t blockSize = (1U << 31)); // default block size: 2GB
/**
* Obtain a writable block of contiguous bytes at the end of this
......@@ -353,15 +353,15 @@ class IOBufQueue {
* callback, tell the application how much of the buffer they've
* filled with data.
*/
std::pair<void*, uint64_t> preallocate(
uint64_t min,
uint64_t newAllocationSize,
uint64_t max = std::numeric_limits<uint64_t>::max()) {
std::pair<void*, std::size_t> preallocate(
std::size_t min,
std::size_t newAllocationSize,
std::size_t max = std::numeric_limits<std::size_t>::max()) {
dcheckCacheIntegrity();
if (LIKELY(writableTail() != nullptr && tailroom() >= min)) {
return std::make_pair(
writableTail(), std::min<uint64_t>(max, tailroom()));
writableTail(), std::min<std::size_t>(max, tailroom()));
}
return preallocateSlow(min, newAllocationSize, max);
......@@ -377,7 +377,7 @@ class IOBufQueue {
* invoke any other non-const methods on this IOBufQueue between
* the call to preallocate and the call to postallocate().
*/
void postallocate(uint64_t n) {
void postallocate(std::size_t n) {
dcheckCacheIntegrity();
DCHECK_LE(
(void*)(cachePtr_->cachedRange.first + n),
......@@ -389,7 +389,7 @@ class IOBufQueue {
* Obtain a writable block of n contiguous bytes, allocating more space
* if necessary, and mark it as used. The caller can fill it later.
*/
void* allocate(uint64_t n) {
void* allocate(std::size_t n) {
void* p = preallocate(n, n).first;
postallocate(n);
return p;
......@@ -525,7 +525,7 @@ class IOBufQueue {
/**
* Calls IOBuf::gather() on the head of the queue, if it exists.
*/
void gather(uint64_t maxLength);
void gather(std::size_t maxLength);
/** Movable */
IOBufQueue(IOBufQueue&&) noexcept;
......@@ -644,8 +644,10 @@ class IOBufQueue {
cachePtr_->cachedRange = std::pair<uint8_t*, uint8_t*>();
}
std::pair<void*, uint64_t>
preallocateSlow(uint64_t min, uint64_t newAllocationSize, uint64_t max);
std::pair<void*, std::size_t> preallocateSlow(
std::size_t min,
std::size_t newAllocationSize,
std::size_t max);
};
} // namespace folly
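
To make the preallocate()/postallocate() contract described in this header concrete, a short usage sketch (my own, based on the signatures shown above; the message contents and sizes are arbitrary): every size crossing the API boundary is now std::size_t.

#include <folly/io/IOBufQueue.h>
#include <cstring>

int main() {
  folly::IOBufQueue queue(folly::IOBufQueue::cacheChainLength());

  const char msg[] = "write-through-preallocate";

  // Ask for at least sizeof(msg) contiguous writable bytes; the queue may
  // return more. The pair is now std::pair<void*, std::size_t>.
  std::pair<void*, std::size_t> block = queue.preallocate(sizeof(msg), 4096);
  std::memcpy(block.first, msg, sizeof(msg));

  // Report how much of the preallocated block was actually filled.
  queue.postallocate(sizeof(msg));

  return queue.chainLength() == sizeof(msg) ? 0 : 1;
}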
......@@ -87,7 +87,7 @@ struct Header {
uint16_t flags; // reserved (must be 0)
uint32_t fileId; // unique file ID
uint32_t dataLength;
uint64_t dataHash;
std::size_t dataHash;
uint32_t headerHash; // must be last
} FOLLY_PACK_ATTR;
FOLLY_PACK_POP
......
......@@ -113,7 +113,7 @@ uint32_t headerHash(const Header& header) {
&header, offsetof(Header, headerHash), kHashSeed);
}
std::pair<size_t, uint64_t> dataLengthAndHash(const IOBuf* buf) {
std::pair<size_t, std::size_t> dataLengthAndHash(const IOBuf* buf) {
size_t len = 0;
hash::SpookyHashV2 hasher;
hasher.Init(kHashSeed, kHashSeed);
......@@ -127,10 +127,10 @@ std::pair<size_t, uint64_t> dataLengthAndHash(const IOBuf* buf) {
if (len + headerSize() >= std::numeric_limits<uint32_t>::max()) {
throw std::invalid_argument("Record length must fit in 32 bits");
}
return std::make_pair(len, hash1);
return std::make_pair(len, static_cast<std::size_t>(hash1));
}
uint64_t dataHash(ByteRange range) {
std::size_t dataHash(ByteRange range) {
return hash::SpookyHashV2::Hash64(range.data(), range.size(), kHashSeed);
}
......
......@@ -108,7 +108,7 @@ void AsyncPipeReader::handlerReady(uint16_t events) noexcept {
if (bytesRead > 0) {
if (movable) {
ioBuf->append(uint64_t(bytesRead));
ioBuf->append(std::size_t(bytesRead));
readCallback_->readBufferAvailable(std::move(ioBuf));
} else {
readCallback_->readDataAvailable(size_t(bytesRead));
......
......@@ -579,7 +579,7 @@ class AsyncServerSocket : public DelayedDestruction, public AsyncSocketBase {
/**
* Get the number of connections dropped by the AsyncServerSocket
*/
uint64_t getNumDroppedConnections() const {
std::size_t getNumDroppedConnections() const {
return numDroppedConnections_;
}
......@@ -863,7 +863,7 @@ class AsyncServerSocket : public DelayedDestruction, public AsyncSocketBase {
double acceptRateAdjustSpeed_; // 0 to disable auto adjust
double acceptRate_;
std::chrono::time_point<std::chrono::steady_clock> lastAccepTimestamp_;
uint64_t numDroppedConnections_;
std::size_t numDroppedConnections_;
uint32_t callbackIndex_;
BackoffTimeout* backoffTimeout_;
std::vector<CallbackInfo> callbacks_;
......
......@@ -83,7 +83,7 @@ EventBase::EventBase(bool enableTimeMeasurement)
maxLatencyLoopTime_(avgLoopTime_),
enableTimeMeasurement_(enableTimeMeasurement),
nextLoopCnt_(
uint64_t(-40)) // Early wrap-around so bugs will manifest soon
std::size_t(-40)) // Early wrap-around so bugs will manifest soon
,
latestLoopCnt_(nextLoopCnt_),
startWork_(),
......@@ -130,7 +130,7 @@ EventBase::EventBase(event_base* evb, bool enableTimeMeasurement)
maxLatencyLoopTime_(avgLoopTime_),
enableTimeMeasurement_(enableTimeMeasurement),
nextLoopCnt_(
uint64_t(-40)) // Early wrap-around so bugs will manifest soon
std::size_t(-40)) // Early wrap-around so bugs will manifest soon
,
latestLoopCnt_(nextLoopCnt_),
startWork_(),
......
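
The std::size_t(-40) initializer above relies on well-defined unsigned wrap-around; a tiny standalone sketch of the idea (illustrative, not folly code): the counter starts 40 increments below the wrap point, so any bug in comparing loop counts across the wrap shows up within the first few dozen loops instead of after 2^64 (or 2^32) of them.

#include <cstddef>
#include <cstdio>

int main() {
  // Unsigned conversion is modular, so std::size_t(-40) == SIZE_MAX - 39.
  std::size_t counter = std::size_t(-40);
  for (int i = 0; i < 45; ++i) {
    ++counter;  // passes back through 0 on the 40th increment
  }
  std::printf("counter after 45 increments: %zu\n", counter);  // prints 5
  return counter == 5 ? 0 : 1;
}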
......@@ -580,7 +580,7 @@ class EventBase : private boost::noncopyable,
double value_;
std::chrono::microseconds buffer_time_{0};
std::chrono::microseconds busy_buffer_{0};
uint64_t buffer_cnt_{0};
std::size_t buffer_cnt_{0};
static constexpr std::chrono::milliseconds buffer_interval_{10};
};
......@@ -757,8 +757,8 @@ class EventBase : private boost::noncopyable,
const bool enableTimeMeasurement_;
// Wrap-around loop counter to detect beginning of each loop
uint64_t nextLoopCnt_;
uint64_t latestLoopCnt_;
std::size_t nextLoopCnt_;
std::size_t latestLoopCnt_;
std::chrono::steady_clock::time_point startWork_;
// Prevent undefined behavior from invoking event_base_loop() reentrantly.
// This is needed since many projects use libevent-1.4, which lacks commit
......@@ -783,7 +783,7 @@ class EventBase : private boost::noncopyable,
friend class detail::EventBaseLocalBase;
template <typename T>
friend class EventBaseLocal;
std::unordered_map<uint64_t, std::shared_ptr<void>> localStorage_;
std::unordered_map<std::size_t, std::shared_ptr<void>> localStorage_;
std::unordered_set<detail::EventBaseLocalBaseBase*> localStorageToDtor_;
folly::once_flag virtualEventBaseInitFlag_;
......
......@@ -66,6 +66,6 @@ void EventBaseLocalBase::setVoid(EventBase& evb, std::shared_ptr<void>&& ptr) {
}
}
std::atomic<uint64_t> EventBaseLocalBase::keyCounter_{0};
std::atomic<std::size_t> EventBaseLocalBase::keyCounter_{0};
} // namespace detail
} // namespace folly
......@@ -40,8 +40,8 @@ class EventBaseLocalBase : public EventBaseLocalBaseBase, boost::noncopyable {
void* getVoid(EventBase& evb);
folly::Synchronized<std::unordered_set<EventBase*>> eventBases_;
static std::atomic<uint64_t> keyCounter_;
uint64_t key_{keyCounter_++};
static std::atomic<std::size_t> keyCounter_;
std::size_t key_{keyCounter_++};
};
} // namespace detail
......
......@@ -89,7 +89,7 @@ HHWheelTimer::HHWheelTimer(
count_(0),
startTime_(getCurTime()),
processingCallbacksGuard_(nullptr) {
bitmap_.resize((WHEEL_SIZE / sizeof(uint64_t)) / 8, 0);
bitmap_.resize((WHEEL_SIZE / sizeof(std::size_t)) / 8, 0);
}
HHWheelTimer::~HHWheelTimer() {
......@@ -249,7 +249,7 @@ size_t HHWheelTimer::cancelAll() {
size_t count = 0;
if (count_ != 0) {
const uint64_t numElements = WHEEL_BUCKETS * WHEEL_SIZE;
const std::size_t numElements = WHEEL_BUCKETS * WHEEL_SIZE;
auto maxBuckets = std::min(numElements, count_);
auto buckets = std::make_unique<CallbackList[]>(maxBuckets);
size_t countBuckets = 0;
......
......@@ -249,7 +249,7 @@ class HHWheelTimer : private folly::AsyncTimeout,
/**
* Return the number of currently pending timeouts
*/
uint64_t count() const {
std::size_t count() const {
return count_;
}
......@@ -289,7 +289,7 @@ class HHWheelTimer : private folly::AsyncTimeout,
typedef Callback::List CallbackList;
CallbackList buckets_[WHEEL_BUCKETS][WHEEL_SIZE];
std::vector<uint64_t> bitmap_;
std::vector<std::size_t> bitmap_;
int64_t timeToWheelTicks(std::chrono::milliseconds t) {
return t.count() / interval_.count();
......@@ -298,7 +298,7 @@ class HHWheelTimer : private folly::AsyncTimeout,
bool cascadeTimers(int bucket, int tick);
int64_t lastTick_;
int64_t expireTick_;
uint64_t count_;
std::size_t count_;
std::chrono::steady_clock::time_point startTime_;
int64_t calcNextTick();
......
......@@ -1308,9 +1308,9 @@ TEST(AsyncSSLSocketTest, SSLParseClientHelloMultiplePackets) {
sock->enableClientHelloParsing();
// Test parsing with multiple small packets
for (uint64_t i = 0; i < buf->length(); i += 3) {
for (std::size_t i = 0; i < buf->length(); i += 3) {
auto bufCopy = folly::IOBuf::copyBuffer(
buf->data() + i, std::min((uint64_t)3, buf->length() - i));
buf->data() + i, std::min((std::size_t)3, buf->length() - i));
AsyncSSLSocket::clientHelloParsingCallback(
0,
0,
......
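
The cast inside std::min in the test above is needed because std::min deduces a single type for both arguments; a hedged standalone illustration (not from the test) of why the call sites in this diff spell the type out:

#include <algorithm>
#include <cstddef>
#include <cstdint>

int main() {
  std::size_t remaining = 7;  // e.g. buf->length() - i
  // std::min(std::uint64_t(3), remaining) only compiles where uint64_t and
  // size_t happen to be the same underlying type; casting the literal (as the
  // test now does) or naming the template argument works on every platform.
  std::size_t chunk = std::min(static_cast<std::size_t>(3), remaining);
  std::size_t chunk2 = std::min<std::size_t>(3, remaining);
  return (chunk == 3 && chunk2 == 3) ? 0 : 1;
}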
......@@ -1567,7 +1567,7 @@ class IdleTimeTimeoutSeries : public AsyncTimeout {
public:
explicit IdleTimeTimeoutSeries(
EventBase* base,
std::deque<std::uint64_t>& timeout)
std::deque<std::size_t>& timeout)
: AsyncTimeout(base), timeouts_(0), timeout_(timeout) {
scheduleTimeout(1);
}
......@@ -1580,7 +1580,7 @@ class IdleTimeTimeoutSeries : public AsyncTimeout {
if (timeout_.empty()) {
cancelTimeout();
} else {
uint64_t sleepTime = timeout_.front();
std::size_t sleepTime = timeout_.front();
timeout_.pop_front();
if (sleepTime) {
usleep(sleepTime);
......@@ -1595,7 +1595,7 @@ class IdleTimeTimeoutSeries : public AsyncTimeout {
private:
int timeouts_;
std::deque<uint64_t>& timeout_;
std::deque<std::size_t>& timeout_;
};
/**
......@@ -1609,11 +1609,11 @@ class IdleTimeTimeoutSeries : public AsyncTimeout {
*/
TEST(EventBaseTest, IdleTime) {
EventBase eventBase;
std::deque<uint64_t> timeouts0(4, 8080);
std::deque<std::size_t> timeouts0(4, 8080);
timeouts0.push_front(8000);
timeouts0.push_back(14000);
IdleTimeTimeoutSeries tos0(&eventBase, timeouts0);
std::deque<uint64_t> timeouts(20, 20);
std::deque<std::size_t> timeouts(20, 20);
std::unique_ptr<IdleTimeTimeoutSeries> tos;
bool hostOverloaded = false;
......
......@@ -177,7 +177,7 @@ TEST(IOBufQueue, SplitZero) {
TEST(IOBufQueue, Preallocate) {
IOBufQueue queue(clOptions);
queue.append(string("Hello"));
pair<void*, uint64_t> writable = queue.preallocate(2, 64, 64);
pair<void*, std::size_t> writable = queue.preallocate(2, 64, 64);
checkConsistency(queue);
EXPECT_NE((void*)nullptr, writable.first);
EXPECT_LE(2, writable.second);
......
......@@ -391,7 +391,7 @@ TEST(IOBuf, Chaining) {
EXPECT_TRUE(iob4ptr->isChained());
EXPECT_TRUE(iob5ptr->isChained());
uint64_t fullLength =
std::size_t fullLength =
(iob1->length() + iob2ptr->length() + iob3ptr->length() +
iob4ptr->length() + iob5ptr->length());
EXPECT_EQ(5, iob1->countChainElements());
......@@ -828,16 +828,16 @@ TEST(IOBuf, Alignment) {
TEST(TypedIOBuf, Simple) {
auto buf = IOBuf::create(0);
TypedIOBuf<uint64_t> typed(buf.get());
const uint64_t n = 10000;
TypedIOBuf<std::size_t> typed(buf.get());
const std::size_t n = 10000;
typed.reserve(0, n);
EXPECT_LE(n, typed.capacity());
for (uint64_t i = 0; i < n; i++) {
for (std::size_t i = 0; i < n; i++) {
*typed.writableTail() = i;
typed.append(1);
}
EXPECT_EQ(n, typed.length());
for (uint64_t i = 0; i < n; i++) {
for (std::size_t i = 0; i < n; i++) {
EXPECT_EQ(i, typed.data()[i]);
}
}
......@@ -1397,7 +1397,7 @@ TEST(IOBuf, CloneCoalescedChain) {
boost::mt19937 gen(fillSeed);
{
auto c = b.get();
uint64_t length = c->tailroom();
std::size_t length = c->tailroom();
do {
length = std::min(length, c->tailroom());
c->append(length--);
......