Commit 52fceaf4 authored by Nicholas Ormrod's avatar Nicholas Ormrod Committed by Dave Watson

NULL -> nullptr

Summary:
Change NULLs to nullptrs.

Facebook:
I was tired of seeing lint errors for NULL -> nullptr, so I
fixed it.

How: modified flint's tokenizer to output lines of the form

sed -i '__LINE__s|\<NULL\>|nullptr|g' __FILE__

for each NULL token encountered.

Ran the sed lines, restricted to filepaths with extensions
h,hpp,cc,cpp,tcc. Did not apply to FacebookUpdate, due to weird
formatting; hphp, to be done in a separate diff; payment, due to a large
amount of generated text that didn't play well with my flow; and
memcache, which has lots of .h's that cannot contain nullptr's because
they are included from .c's.

@bypass-lint

Test Plan:
fbconfig -r common folly && fbmake opt && fbmake runtests_opt
^ of the 4k+ test cases, about two dozen were failing. Slightly more
failed in master than with my diff applied.

arc unit

Reviewed By: andrei.alexandrescu@fb.com

FB internal diff: D1234261
parent 7154531b
...@@ -267,7 +267,7 @@ static const ScaleInfo kTimeSuffixes[] { ...@@ -267,7 +267,7 @@ static const ScaleInfo kTimeSuffixes[] {
{ 1E-9, "ns" }, { 1E-9, "ns" },
{ 1E-12, "ps" }, { 1E-12, "ps" },
{ 1E-15, "fs" }, { 1E-15, "fs" },
{ 0, NULL }, { 0, nullptr },
}; };
static const ScaleInfo kMetricSuffixes[] { static const ScaleInfo kMetricSuffixes[] {
...@@ -289,7 +289,7 @@ static const ScaleInfo kMetricSuffixes[] { ...@@ -289,7 +289,7 @@ static const ScaleInfo kMetricSuffixes[] {
{ 1E-18, "a" }, // atto { 1E-18, "a" }, // atto
{ 1E-21, "z" }, // zepto { 1E-21, "z" }, // zepto
{ 1E-24, "y" }, // yocto { 1E-24, "y" }, // yocto
{ 0, NULL }, { 0, nullptr },
}; };
static string humanReadable(double n, unsigned int decimals, static string humanReadable(double n, unsigned int decimals,
...@@ -300,7 +300,7 @@ static string humanReadable(double n, unsigned int decimals, ...@@ -300,7 +300,7 @@ static string humanReadable(double n, unsigned int decimals,
const double absValue = fabs(n); const double absValue = fabs(n);
const ScaleInfo* scale = scales; const ScaleInfo* scale = scales;
while (absValue < scale[0].boundary && scale[1].suffix != NULL) { while (absValue < scale[0].boundary && scale[1].suffix != nullptr) {
++scale; ++scale;
} }
......
...@@ -804,17 +804,17 @@ class ConcurrentSkipList<T, Comp, MAX_HEIGHT>::Skipper { ...@@ -804,17 +804,17 @@ class ConcurrentSkipList<T, Comp, MAX_HEIGHT>::Skipper {
} }
const value_type &data() const { const value_type &data() const {
DCHECK(succs_[0] != NULL); DCHECK(succs_[0] != nullptr);
return succs_[0]->data(); return succs_[0]->data();
} }
value_type &operator *() const { value_type &operator *() const {
DCHECK(succs_[0] != NULL); DCHECK(succs_[0] != nullptr);
return succs_[0]->data(); return succs_[0]->data();
} }
value_type *operator->() { value_type *operator->() {
DCHECK(succs_[0] != NULL); DCHECK(succs_[0] != nullptr);
return &succs_[0]->data(); return &succs_[0]->data();
} }
...@@ -839,7 +839,7 @@ class ConcurrentSkipList<T, Comp, MAX_HEIGHT>::Skipper { ...@@ -839,7 +839,7 @@ class ConcurrentSkipList<T, Comp, MAX_HEIGHT>::Skipper {
findInsertionPoint(preds_[lyr], lyr, data, preds_, succs_); findInsertionPoint(preds_[lyr], lyr, data, preds_, succs_);
if (foundLayer < 0) return false; if (foundLayer < 0) return false;
DCHECK(succs_[0] != NULL) << "lyr=" << lyr << "; max_layer=" << max_layer; DCHECK(succs_[0] != nullptr) << "lyr=" << lyr << "; max_layer=" << max_layer;
return !succs_[0]->markedForRemoval(); return !succs_[0]->markedForRemoval();
} }
......
...@@ -2358,7 +2358,7 @@ getline( ...@@ -2358,7 +2358,7 @@ getline(
basic_fbstring<E, T, A, S>& str, basic_fbstring<E, T, A, S>& str,
typename basic_fbstring<E, T, A, S>::value_type delim) { typename basic_fbstring<E, T, A, S>::value_type delim) {
// Use the nonstandard getdelim() // Use the nonstandard getdelim()
char * buf = NULL; char * buf = nullptr;
size_t size = 0; size_t size = 0;
for (;;) { for (;;) {
// This looks quadratic but it really depends on realloc // This looks quadratic but it really depends on realloc
......
...@@ -1109,7 +1109,7 @@ public: ...@@ -1109,7 +1109,7 @@ public:
void* p = impl_.b_; void* p = impl_.b_;
if ((rallocm && usingStdAllocator::value) && if ((rallocm && usingStdAllocator::value) &&
newCapacityBytes >= folly::jemallocMinInPlaceExpandable && newCapacityBytes >= folly::jemallocMinInPlaceExpandable &&
rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE) rallocm(&p, nullptr, newCapacityBytes, 0, ALLOCM_NO_MOVE)
== ALLOCM_SUCCESS) { == ALLOCM_SUCCESS) {
impl_.z_ += newCap - oldCap; impl_.z_ += newCap - oldCap;
} else { } else {
...@@ -1144,7 +1144,7 @@ private: ...@@ -1144,7 +1144,7 @@ private:
auto const newCapacityBytes = folly::goodMallocSize(n * sizeof(T)); auto const newCapacityBytes = folly::goodMallocSize(n * sizeof(T));
void* p = impl_.b_; void* p = impl_.b_;
if (rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE) if (rallocm(&p, nullptr, newCapacityBytes, 0, ALLOCM_NO_MOVE)
== ALLOCM_SUCCESS) { == ALLOCM_SUCCESS) {
impl_.z_ = impl_.b_ + newCapacityBytes / sizeof(T); impl_.z_ = impl_.b_ + newCapacityBytes / sizeof(T);
return true; return true;
......
...@@ -27,7 +27,7 @@ bool usingJEMallocSlow() { ...@@ -27,7 +27,7 @@ bool usingJEMallocSlow() {
// Some platforms (*cough* OSX *cough*) require weak symbol checks to be // Some platforms (*cough* OSX *cough*) require weak symbol checks to be
// in the form if (mallctl != NULL). Not if (mallctl) or if (!mallctl) (!!). // in the form if (mallctl != NULL). Not if (mallctl) or if (!mallctl) (!!).
// http://goo.gl/xpmctm // http://goo.gl/xpmctm
if (allocm == NULL || rallocm == NULL || mallctl == NULL) { if (allocm == nullptr || rallocm == nullptr || mallctl == nullptr) {
return false; return false;
} }
......
...@@ -208,7 +208,7 @@ inline void* smartRealloc(void* p, ...@@ -208,7 +208,7 @@ inline void* smartRealloc(void* p,
// using jemalloc's API. Don't forget that jemalloc can never grow // using jemalloc's API. Don't forget that jemalloc can never grow
// in place blocks smaller than 4096 bytes. // in place blocks smaller than 4096 bytes.
if (currentCapacity >= jemallocMinInPlaceExpandable && if (currentCapacity >= jemallocMinInPlaceExpandable &&
rallocm(&p, NULL, newCapacity, 0, ALLOCM_NO_MOVE) == ALLOCM_SUCCESS) { rallocm(&p, nullptr, newCapacity, 0, ALLOCM_NO_MOVE) == ALLOCM_SUCCESS) {
// Managed to expand in place // Managed to expand in place
return p; return p;
} }
......
...@@ -35,7 +35,7 @@ std::atomic<uint32_t> seedInput(0); ...@@ -35,7 +35,7 @@ std::atomic<uint32_t> seedInput(0);
uint32_t randomNumberSeed() { uint32_t randomNumberSeed() {
struct timeval tv; struct timeval tv;
gettimeofday(&tv, NULL); gettimeofday(&tv, nullptr);
const uint32_t kPrime0 = 51551; const uint32_t kPrime0 = 51551;
const uint32_t kPrime1 = 61631; const uint32_t kPrime1 = 61631;
const uint32_t kPrime2 = 64997; const uint32_t kPrime2 = 64997;
......
...@@ -80,7 +80,7 @@ namespace detail { ...@@ -80,7 +80,7 @@ namespace detail {
* linux this varies by kernel version from 1ms to 10ms). * linux this varies by kernel version from 1ms to 10ms).
*/ */
struct timespec ts = { 0, 500000 }; struct timespec ts = { 0, 500000 };
nanosleep(&ts, NULL); nanosleep(&ts, nullptr);
} }
} }
}; };
......
...@@ -315,7 +315,7 @@ struct Synchronized { ...@@ -315,7 +315,7 @@ struct Synchronized {
return; return;
} }
// Could not acquire the resource, pointer is null // Could not acquire the resource, pointer is null
parent_ = NULL; parent_ = nullptr;
} }
/** /**
...@@ -363,7 +363,7 @@ struct Synchronized { ...@@ -363,7 +363,7 @@ struct Synchronized {
* SYNCHRONIZED below. * SYNCHRONIZED below.
*/ */
T* operator->() { T* operator->() {
return parent_ ? &parent_->datum_ : NULL; return parent_ ? &parent_->datum_ : nullptr;
} }
/** /**
...@@ -433,7 +433,7 @@ struct Synchronized { ...@@ -433,7 +433,7 @@ struct Synchronized {
return; return;
} }
// Could not acquire the resource, pointer is null // Could not acquire the resource, pointer is null
parent_ = NULL; parent_ = nullptr;
} }
ConstLockedPtr& operator=(const ConstLockedPtr& rhs) { ConstLockedPtr& operator=(const ConstLockedPtr& rhs) {
...@@ -449,7 +449,7 @@ struct Synchronized { ...@@ -449,7 +449,7 @@ struct Synchronized {
} }
const T* operator->() const { const T* operator->() const {
return parent_ ? &parent_->datum_ : NULL; return parent_ ? &parent_->datum_ : nullptr;
} }
struct Unsynchronizer { struct Unsynchronizer {
......
...@@ -48,7 +48,7 @@ class ThreadCachedInt : boost::noncopyable { ...@@ -48,7 +48,7 @@ class ThreadCachedInt : boost::noncopyable {
void increment(IntT inc) { void increment(IntT inc) {
auto cache = cache_.get(); auto cache = cache_.get();
if (UNLIKELY(cache == NULL || cache->parent_ == NULL)) { if (UNLIKELY(cache == nullptr || cache->parent_ == nullptr)) {
cache = new IntCache(*this); cache = new IntCache(*this);
cache_.reset(cache); cache_.reset(cache);
} }
...@@ -122,7 +122,7 @@ class ThreadCachedInt : boost::noncopyable { ...@@ -122,7 +122,7 @@ class ThreadCachedInt : boost::noncopyable {
// need to make sure we signal that this parent is dead. // need to make sure we signal that this parent is dead.
~ThreadCachedInt() { ~ThreadCachedInt() {
for (auto& cache : cache_.accessAllThreads()) { for (auto& cache : cache_.accessAllThreads()) {
cache.parent_ = NULL; cache.parent_ = nullptr;
} }
} }
......
...@@ -74,22 +74,22 @@ class CustomDeleter : public DeleterBase { ...@@ -74,22 +74,22 @@ class CustomDeleter : public DeleterBase {
*/ */
struct ElementWrapper { struct ElementWrapper {
void dispose(TLPDestructionMode mode) { void dispose(TLPDestructionMode mode) {
if (ptr != NULL) { if (ptr != nullptr) {
DCHECK(deleter != NULL); DCHECK(deleter != nullptr);
deleter->dispose(ptr, mode); deleter->dispose(ptr, mode);
if (ownsDeleter) { if (ownsDeleter) {
delete deleter; delete deleter;
} }
ptr = NULL; ptr = nullptr;
deleter = NULL; deleter = nullptr;
ownsDeleter = false; ownsDeleter = false;
} }
} }
template <class Ptr> template <class Ptr>
void set(Ptr p) { void set(Ptr p) {
DCHECK(ptr == NULL); DCHECK(ptr == nullptr);
DCHECK(deleter == NULL); DCHECK(deleter == nullptr);
if (p) { if (p) {
// We leak a single object here but that is ok. If we used an // We leak a single object here but that is ok. If we used an
...@@ -105,8 +105,8 @@ struct ElementWrapper { ...@@ -105,8 +105,8 @@ struct ElementWrapper {
template <class Ptr, class Deleter> template <class Ptr, class Deleter>
void set(Ptr p, Deleter d) { void set(Ptr p, Deleter d) {
DCHECK(ptr == NULL); DCHECK(ptr == nullptr);
DCHECK(deleter == NULL); DCHECK(deleter == nullptr);
if (p) { if (p) {
ptr = p; ptr = p;
deleter = new CustomDeleter<Ptr,Deleter>(d); deleter = new CustomDeleter<Ptr,Deleter>(d);
...@@ -241,8 +241,8 @@ struct StaticMeta { ...@@ -241,8 +241,8 @@ struct StaticMeta {
threadEntry->elements[i].dispose(TLPDestructionMode::THIS_THREAD); threadEntry->elements[i].dispose(TLPDestructionMode::THIS_THREAD);
} }
free(threadEntry->elements); free(threadEntry->elements);
threadEntry->elements = NULL; threadEntry->elements = nullptr;
pthread_setspecific(meta.pthreadKey_, NULL); pthread_setspecific(meta.pthreadKey_, nullptr);
#if __APPLE__ #if __APPLE__
// Allocated in getThreadEntry(); free it // Allocated in getThreadEntry(); free it
......
...@@ -114,8 +114,8 @@ struct IOBuf::HeapFullStorage { ...@@ -114,8 +114,8 @@ struct IOBuf::HeapFullStorage {
}; };
IOBuf::SharedInfo::SharedInfo() IOBuf::SharedInfo::SharedInfo()
: freeFn(NULL), : freeFn(nullptr),
userData(NULL) { userData(nullptr) {
// Use relaxed memory ordering here. Since we are creating a new SharedInfo, // Use relaxed memory ordering here. Since we are creating a new SharedInfo,
// no other threads should be referring to it yet. // no other threads should be referring to it yet.
refcount.store(1, std::memory_order_relaxed); refcount.store(1, std::memory_order_relaxed);
...@@ -801,7 +801,7 @@ void IOBuf::allocExtBuffer(uint64_t minCapacity, ...@@ -801,7 +801,7 @@ void IOBuf::allocExtBuffer(uint64_t minCapacity,
uint64_t* capacityReturn) { uint64_t* capacityReturn) {
size_t mallocSize = goodExtBufferSize(minCapacity); size_t mallocSize = goodExtBufferSize(minCapacity);
uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize)); uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize));
if (UNLIKELY(buf == NULL)) { if (UNLIKELY(buf == nullptr)) {
throw std::bad_alloc(); throw std::bad_alloc();
} }
initExtBuffer(buf, mallocSize, infoReturn, capacityReturn); initExtBuffer(buf, mallocSize, infoReturn, capacityReturn);
......
...@@ -63,14 +63,14 @@ void checkConsistency(const IOBufQueue& queue) { ...@@ -63,14 +63,14 @@ void checkConsistency(const IOBufQueue& queue) {
TEST(IOBufQueue, Simple) { TEST(IOBufQueue, Simple) {
IOBufQueue queue(clOptions); IOBufQueue queue(clOptions);
EXPECT_EQ(NULL, queue.front()); EXPECT_EQ(nullptr, queue.front());
queue.append(SCL("")); queue.append(SCL(""));
EXPECT_EQ(NULL, queue.front()); EXPECT_EQ(nullptr, queue.front());
queue.append(unique_ptr<IOBuf>()); queue.append(unique_ptr<IOBuf>());
EXPECT_EQ(NULL, queue.front()); EXPECT_EQ(nullptr, queue.front());
string emptyString; string emptyString;
queue.append(emptyString); queue.append(emptyString);
EXPECT_EQ(NULL, queue.front()); EXPECT_EQ(nullptr, queue.front());
} }
TEST(IOBufQueue, Append) { TEST(IOBufQueue, Append) {
...@@ -85,9 +85,9 @@ TEST(IOBufQueue, Append) { ...@@ -85,9 +85,9 @@ TEST(IOBufQueue, Append) {
checkConsistency(queue); checkConsistency(queue);
checkConsistency(queue2); checkConsistency(queue2);
const IOBuf* chain = queue.front(); const IOBuf* chain = queue.front();
EXPECT_NE((IOBuf*)NULL, chain); EXPECT_NE((IOBuf*)nullptr, chain);
EXPECT_EQ(12, chain->computeChainDataLength()); EXPECT_EQ(12, chain->computeChainDataLength());
EXPECT_EQ(NULL, queue2.front()); EXPECT_EQ(nullptr, queue2.front());
} }
TEST(IOBufQueue, Append2) { TEST(IOBufQueue, Append2) {
...@@ -102,9 +102,9 @@ TEST(IOBufQueue, Append2) { ...@@ -102,9 +102,9 @@ TEST(IOBufQueue, Append2) {
checkConsistency(queue); checkConsistency(queue);
checkConsistency(queue2); checkConsistency(queue2);
const IOBuf* chain = queue.front(); const IOBuf* chain = queue.front();
EXPECT_NE((IOBuf*)NULL, chain); EXPECT_NE((IOBuf*)nullptr, chain);
EXPECT_EQ(12, chain->computeChainDataLength()); EXPECT_EQ(12, chain->computeChainDataLength());
EXPECT_EQ(NULL, queue2.front()); EXPECT_EQ(nullptr, queue2.front());
} }
TEST(IOBufQueue, Split) { TEST(IOBufQueue, Split) {
...@@ -136,7 +136,7 @@ TEST(IOBufQueue, Split) { ...@@ -136,7 +136,7 @@ TEST(IOBufQueue, Split) {
prefix = queue.split(5); prefix = queue.split(5);
checkConsistency(queue); checkConsistency(queue);
EXPECT_EQ(5, prefix->computeChainDataLength()); EXPECT_EQ(5, prefix->computeChainDataLength());
EXPECT_EQ((IOBuf*)NULL, queue.front()); EXPECT_EQ((IOBuf*)nullptr, queue.front());
queue.append(stringToIOBuf(SCL("Hello,"))); queue.append(stringToIOBuf(SCL("Hello,")));
queue.append(stringToIOBuf(SCL(" World"))); queue.append(stringToIOBuf(SCL(" World")));
...@@ -151,7 +151,7 @@ TEST(IOBufQueue, Preallocate) { ...@@ -151,7 +151,7 @@ TEST(IOBufQueue, Preallocate) {
queue.append(string("Hello")); queue.append(string("Hello"));
pair<void*,uint32_t> writable = queue.preallocate(2, 64, 64); pair<void*,uint32_t> writable = queue.preallocate(2, 64, 64);
checkConsistency(queue); checkConsistency(queue);
EXPECT_NE((void*)NULL, writable.first); EXPECT_NE((void*)nullptr, writable.first);
EXPECT_LE(2, writable.second); EXPECT_LE(2, writable.second);
EXPECT_GE(64, writable.second); EXPECT_GE(64, writable.second);
memcpy(writable.first, SCL(", ")); memcpy(writable.first, SCL(", "));
...@@ -238,7 +238,7 @@ TEST(IOBufQueue, Trim) { ...@@ -238,7 +238,7 @@ TEST(IOBufQueue, Trim) {
queue.trimEnd(1); queue.trimEnd(1);
checkConsistency(queue); checkConsistency(queue);
EXPECT_EQ(NULL, queue.front()); EXPECT_EQ(nullptr, queue.front());
EXPECT_THROW(queue.trimStart(2), std::underflow_error); EXPECT_THROW(queue.trimStart(2), std::underflow_error);
checkConsistency(queue); checkConsistency(queue);
...@@ -296,7 +296,7 @@ TEST(IOBufQueue, TrimPack) { ...@@ -296,7 +296,7 @@ TEST(IOBufQueue, TrimPack) {
queue.trimEnd(1); queue.trimEnd(1);
checkConsistency(queue); checkConsistency(queue);
EXPECT_EQ(NULL, queue.front()); EXPECT_EQ(nullptr, queue.front());
EXPECT_THROW(queue.trimStart(2), std::underflow_error); EXPECT_THROW(queue.trimStart(2), std::underflow_error);
checkConsistency(queue); checkConsistency(queue);
...@@ -354,12 +354,12 @@ TEST(IOBufQueue, PopFirst) { ...@@ -354,12 +354,12 @@ TEST(IOBufQueue, PopFirst) {
checkConsistency(queue); checkConsistency(queue);
EXPECT_EQ(chainLength, queue.chainLength()); EXPECT_EQ(chainLength, queue.chainLength());
EXPECT_EQ((IOBuf*)NULL, queue.front()); EXPECT_EQ((IOBuf*)nullptr, queue.front());
first = queue.pop_front(); first = queue.pop_front();
EXPECT_EQ((IOBuf*)NULL, first.get()); EXPECT_EQ((IOBuf*)nullptr, first.get());
checkConsistency(queue); checkConsistency(queue);
EXPECT_EQ((IOBuf*)NULL, queue.front()); EXPECT_EQ((IOBuf*)nullptr, queue.front());
EXPECT_EQ(0, queue.chainLength()); EXPECT_EQ(0, queue.chainLength());
} }
......
...@@ -966,7 +966,7 @@ private: ...@@ -966,7 +966,7 @@ private:
doConstruct(n, val); doConstruct(n, val);
} }
void makeSize(size_type size, value_type* v = NULL) { void makeSize(size_type size, value_type* v = nullptr) {
makeSize(size, v, size - 1); makeSize(size, v, size - 1);
} }
...@@ -1009,7 +1009,7 @@ private: ...@@ -1009,7 +1009,7 @@ private:
detail::shiftPointer(newh, kHeapifyCapacitySize) : detail::shiftPointer(newh, kHeapifyCapacitySize) :
newh); newh);
if (v != NULL) { if (v != nullptr) {
// move new element // move new element
try { try {
new (&newp[pos]) value_type(std::move(*v)); new (&newp[pos]) value_type(std::move(*v));
......
...@@ -161,8 +161,8 @@ class HistogramBuckets { ...@@ -161,8 +161,8 @@ class HistogramBuckets {
template <typename CountFn> template <typename CountFn>
unsigned int getPercentileBucketIdx(double pct, unsigned int getPercentileBucketIdx(double pct,
CountFn countFromBucket, CountFn countFromBucket,
double* lowPct = NULL, double* lowPct = nullptr,
double* highPct = NULL) const; double* highPct = nullptr) const;
/** /**
* Estimate the value at the specified percentile. * Estimate the value at the specified percentile.
...@@ -357,8 +357,8 @@ class Histogram { ...@@ -357,8 +357,8 @@ class Histogram {
* returned in the lowPct and highPct arguments, if they are non-NULL. * returned in the lowPct and highPct arguments, if they are non-NULL.
*/ */
unsigned int getPercentileBucketIdx(double pct, unsigned int getPercentileBucketIdx(double pct,
double* lowPct = NULL, double* lowPct = nullptr,
double* highPct = NULL) const { double* highPct = nullptr) const {
// We unfortunately can't use lambdas here yet; // We unfortunately can't use lambdas here yet;
// Some users of this code are still built with gcc-4.4. // Some users of this code are still built with gcc-4.4.
CountFromBucket countFn; CountFromBucket countFn;
......
...@@ -346,7 +346,7 @@ void* insertThread(void* jj) { ...@@ -346,7 +346,7 @@ void* insertThread(void* jj) {
KeyT key = randomizeKey(i + j * numOpsPerThread); KeyT key = randomizeKey(i + j * numOpsPerThread);
globalAHM->insert(key, genVal(key)); globalAHM->insert(key, genVal(key));
} }
return NULL; return nullptr;
} }
void* insertThreadArr(void* jj) { void* insertThreadArr(void* jj) {
...@@ -355,7 +355,7 @@ void* insertThreadArr(void* jj) { ...@@ -355,7 +355,7 @@ void* insertThreadArr(void* jj) {
KeyT key = randomizeKey(i + j * numOpsPerThread); KeyT key = randomizeKey(i + j * numOpsPerThread);
globalAHA->insert(std::make_pair(key, genVal(key))); globalAHA->insert(std::make_pair(key, genVal(key)));
} }
return NULL; return nullptr;
} }
std::atomic<bool> runThreadsCreatedAllThreads; std::atomic<bool> runThreadsCreatedAllThreads;
...@@ -365,7 +365,7 @@ void runThreads(void *(*thread)(void*), int numThreads, void **statuses) { ...@@ -365,7 +365,7 @@ void runThreads(void *(*thread)(void*), int numThreads, void **statuses) {
vector<pthread_t> threadIds; vector<pthread_t> threadIds;
for (int64_t j = 0; j < numThreads; j++) { for (int64_t j = 0; j < numThreads; j++) {
pthread_t tid; pthread_t tid;
if (pthread_create(&tid, NULL, thread, (void*) j) != 0) { if (pthread_create(&tid, nullptr, thread, (void*) j) != 0) {
LOG(ERROR) << "Could not start thread"; LOG(ERROR) << "Could not start thread";
} else { } else {
threadIds.push_back(tid); threadIds.push_back(tid);
...@@ -375,12 +375,12 @@ void runThreads(void *(*thread)(void*), int numThreads, void **statuses) { ...@@ -375,12 +375,12 @@ void runThreads(void *(*thread)(void*), int numThreads, void **statuses) {
runThreadsCreatedAllThreads.store(true); runThreadsCreatedAllThreads.store(true);
for (int i = 0; i < threadIds.size(); ++i) { for (int i = 0; i < threadIds.size(); ++i) {
pthread_join(threadIds[i], statuses == NULL ? NULL : &statuses[i]); pthread_join(threadIds[i], statuses == nullptr ? nullptr : &statuses[i]);
} }
} }
void runThreads(void *(*thread)(void*)) { void runThreads(void *(*thread)(void*)) {
runThreads(thread, FLAGS_numThreads, NULL); runThreads(thread, FLAGS_numThreads, nullptr);
} }
} }
...@@ -462,10 +462,10 @@ void* raceIterateThread(void* jj) { ...@@ -462,10 +462,10 @@ void* raceIterateThread(void* jj) {
++count; ++count;
if (count > raceFinalSizeEstimate) { if (count > raceFinalSizeEstimate) {
EXPECT_FALSE("Infinite loop in iterator."); EXPECT_FALSE("Infinite loop in iterator.");
return NULL; return nullptr;
} }
} }
return NULL; return nullptr;
} }
void* raceInsertRandomThread(void* jj) { void* raceInsertRandomThread(void* jj) {
...@@ -474,7 +474,7 @@ void* raceInsertRandomThread(void* jj) { ...@@ -474,7 +474,7 @@ void* raceInsertRandomThread(void* jj) {
KeyT key = rand(); KeyT key = rand();
globalAHM->insert(key, genVal(key)); globalAHM->insert(key, genVal(key));
} }
return NULL; return nullptr;
} }
} }
...@@ -496,14 +496,14 @@ TEST(Ahm, race_insert_iterate_thread_test) { ...@@ -496,14 +496,14 @@ TEST(Ahm, race_insert_iterate_thread_test) {
pthread_t tid; pthread_t tid;
void *(*thread)(void*) = void *(*thread)(void*) =
(j < kInsertThreads ? raceInsertRandomThread : raceIterateThread); (j < kInsertThreads ? raceInsertRandomThread : raceIterateThread);
if (pthread_create(&tid, NULL, thread, (void*) j) != 0) { if (pthread_create(&tid, nullptr, thread, (void*) j) != 0) {
LOG(ERROR) << "Could not start thread"; LOG(ERROR) << "Could not start thread";
} else { } else {
threadIds.push_back(tid); threadIds.push_back(tid);
} }
} }
for (int i = 0; i < threadIds.size(); ++i) { for (int i = 0; i < threadIds.size(); ++i) {
pthread_join(threadIds[i], NULL); pthread_join(threadIds[i], nullptr);
} }
VLOG(1) << "Ended up with " << globalAHM->numSubMaps() << " submaps"; VLOG(1) << "Ended up with " << globalAHM->numSubMaps() << " submaps";
VLOG(1) << "Final size of map " << globalAHM->size(); VLOG(1) << "Final size of map " << globalAHM->size();
...@@ -521,7 +521,7 @@ void* testEraseInsertThread(void*) { ...@@ -521,7 +521,7 @@ void* testEraseInsertThread(void*) {
insertedLevel.store(i, std::memory_order_release); insertedLevel.store(i, std::memory_order_release);
} }
insertedLevel.store(kTestEraseInsertions, std::memory_order_release); insertedLevel.store(kTestEraseInsertions, std::memory_order_release);
return NULL; return nullptr;
} }
void* testEraseEraseThread(void*) { void* testEraseEraseThread(void*) {
...@@ -551,7 +551,7 @@ void* testEraseEraseThread(void*) { ...@@ -551,7 +551,7 @@ void* testEraseEraseThread(void*) {
} }
} }
} }
return NULL; return nullptr;
} }
} }
...@@ -572,14 +572,14 @@ TEST(Ahm, thread_erase_insert_race) { ...@@ -572,14 +572,14 @@ TEST(Ahm, thread_erase_insert_race) {
pthread_t tid; pthread_t tid;
void *(*thread)(void*) = void *(*thread)(void*) =
(j < kInsertThreads ? testEraseInsertThread : testEraseEraseThread); (j < kInsertThreads ? testEraseInsertThread : testEraseEraseThread);
if (pthread_create(&tid, NULL, thread, (void*) j) != 0) { if (pthread_create(&tid, nullptr, thread, (void*) j) != 0) {
LOG(ERROR) << "Could not start thread"; LOG(ERROR) << "Could not start thread";
} else { } else {
threadIds.push_back(tid); threadIds.push_back(tid);
} }
} }
for (int i = 0; i < threadIds.size(); i++) { for (int i = 0; i < threadIds.size(); i++) {
pthread_join(threadIds[i], NULL); pthread_join(threadIds[i], nullptr);
} }
EXPECT_TRUE(globalAHM->empty()); EXPECT_TRUE(globalAHM->empty());
......
...@@ -104,8 +104,8 @@ TEST(ConcurrentSkipList, SequentialAccess) { ...@@ -104,8 +104,8 @@ TEST(ConcurrentSkipList, SequentialAccess) {
LOG(INFO) << "nodetype size=" << sizeof(SkipListNodeType); LOG(INFO) << "nodetype size=" << sizeof(SkipListNodeType);
auto skipList(SkipListType::create(kHeadHeight)); auto skipList(SkipListType::create(kHeadHeight));
EXPECT_TRUE(skipList.first() == NULL); EXPECT_TRUE(skipList.first() == nullptr);
EXPECT_TRUE(skipList.last() == NULL); EXPECT_TRUE(skipList.last() == nullptr);
skipList.add(3); skipList.add(3);
EXPECT_TRUE(skipList.contains(3)); EXPECT_TRUE(skipList.contains(3));
......
...@@ -420,9 +420,9 @@ void testVariadicToDelim() { ...@@ -420,9 +420,9 @@ void testVariadicToDelim() {
} }
TEST(Conv, NullString) { TEST(Conv, NullString) {
string s1 = to<string>((char *) NULL); string s1 = to<string>((char *) nullptr);
EXPECT_TRUE(s1.empty()); EXPECT_TRUE(s1.empty());
fbstring s2 = to<fbstring>((char *) NULL); fbstring s2 = to<fbstring>((char *) nullptr);
EXPECT_TRUE(s2.empty()); EXPECT_TRUE(s2.empty());
} }
......
...@@ -44,7 +44,7 @@ TEST(DiscriminatedPtr, Basic) { ...@@ -44,7 +44,7 @@ TEST(DiscriminatedPtr, Basic) {
EXPECT_EQ(&a, static_cast<const Ptr&>(p).get_nothrow<int>()); EXPECT_EQ(&a, static_cast<const Ptr&>(p).get_nothrow<int>());
EXPECT_EQ(&a, p.get<int>()); EXPECT_EQ(&a, p.get<int>());
EXPECT_EQ(&a, static_cast<const Ptr&>(p).get<int>()); EXPECT_EQ(&a, static_cast<const Ptr&>(p).get<int>());
EXPECT_EQ(static_cast<void*>(NULL), p.get_nothrow<void>()); EXPECT_EQ(static_cast<void*>(nullptr), p.get_nothrow<void>());
EXPECT_THROW({p.get<void>();}, std::invalid_argument); EXPECT_THROW({p.get<void>();}, std::invalid_argument);
Foo foo; Foo foo;
...@@ -55,7 +55,7 @@ TEST(DiscriminatedPtr, Basic) { ...@@ -55,7 +55,7 @@ TEST(DiscriminatedPtr, Basic) {
EXPECT_TRUE(p.hasType<Foo>()); EXPECT_TRUE(p.hasType<Foo>());
EXPECT_FALSE(p.hasType<Bar>()); EXPECT_FALSE(p.hasType<Bar>());
EXPECT_EQ(static_cast<int*>(NULL), p.get_nothrow<int>()); EXPECT_EQ(static_cast<int*>(nullptr), p.get_nothrow<int>());
p.clear(); p.clear();
EXPECT_TRUE(p.empty()); EXPECT_TRUE(p.empty());
......
...@@ -169,7 +169,7 @@ TEST(ThreadLocal, SimpleRepeatDestructor) { ...@@ -169,7 +169,7 @@ TEST(ThreadLocal, SimpleRepeatDestructor) {
TEST(ThreadLocal, InterleavedDestructors) { TEST(ThreadLocal, InterleavedDestructors) {
Widget::totalVal_ = 0; Widget::totalVal_ = 0;
ThreadLocal<Widget>* w = NULL; ThreadLocal<Widget>* w = nullptr;
int wVersion = 0; int wVersion = 0;
const int wVersionMax = 2; const int wVersionMax = 2;
int thIter = 0; int thIter = 0;
......
...@@ -267,7 +267,7 @@ public: ...@@ -267,7 +267,7 @@ public:
typedef std::reverse_iterator<const_iterator> const_reverse_iterator; typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
// 23.3.6.1 construct/copy/destroy: // 23.3.6.1 construct/copy/destroy:
fbvector() : b_(NULL), e_(NULL), z_(NULL) {} fbvector() : b_(nullptr), e_(nullptr), z_(nullptr) {}
explicit fbvector(const Allocator&) { explicit fbvector(const Allocator&) {
new(this) fbvector; new(this) fbvector;
...@@ -566,7 +566,7 @@ private: ...@@ -566,7 +566,7 @@ private:
auto const newCapacityBytes = goodMallocSize(n * sizeof(T)); auto const newCapacityBytes = goodMallocSize(n * sizeof(T));
void* p = b_; void* p = b_;
if (rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE) if (rallocm(&p, nullptr, newCapacityBytes, 0, ALLOCM_NO_MOVE)
!= ALLOCM_SUCCESS) { != ALLOCM_SUCCESS) {
return false; return false;
} }
...@@ -609,7 +609,7 @@ public: ...@@ -609,7 +609,7 @@ public:
auto const crtCapacityBytes = capacity() * sizeof(T); auto const crtCapacityBytes = capacity() * sizeof(T);
auto const newCapacityBytes = goodMallocSize(size() * sizeof(T)); auto const newCapacityBytes = goodMallocSize(size() * sizeof(T));
if (crtCapacityBytes >= jemallocMinInPlaceExpandable && if (crtCapacityBytes >= jemallocMinInPlaceExpandable &&
rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE) rallocm(&p, nullptr, newCapacityBytes, 0, ALLOCM_NO_MOVE)
== ALLOCM_SUCCESS) { == ALLOCM_SUCCESS) {
// Celebrate // Celebrate
z_ = b_ + newCapacityBytes / sizeof(T); z_ = b_ + newCapacityBytes / sizeof(T);
...@@ -697,7 +697,7 @@ private: ...@@ -697,7 +697,7 @@ private:
if (capBytes < jemallocMinInPlaceExpandable) return false; if (capBytes < jemallocMinInPlaceExpandable) return false;
auto const newCapBytes = goodMallocSize(capBytes + sizeof(T)); auto const newCapBytes = goodMallocSize(capBytes + sizeof(T));
void * bv = b_; void * bv = b_;
if (rallocm(&bv, NULL, newCapBytes, 0, ALLOCM_NO_MOVE) != ALLOCM_SUCCESS) { if (rallocm(&bv, nullptr, newCapBytes, 0, ALLOCM_NO_MOVE) != ALLOCM_SUCCESS) {
return false; return false;
} }
// Managed to expand in place // Managed to expand in place
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment