Commit 52fceaf4 authored by Nicholas Ormrod, committed by Dave Watson

NULL -> nullptr

Summary:
Change NULLs to nullptrs.

Facebook:
I was tired of seeing lint errors for NULL -> nullptr, so I
fixed it.

How: modified flint's tokenizer to output lines of the form

sed -i '__LINE__s|\<NULL\>|nullptr|g' __FILE__

for each NULL token encountered.
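
To illustrate, the emit step looks roughly like this (a minimal sketch;
Token, emitSedLines, and the field names are hypothetical stand-ins, not
flint's actual tokenizer interface):

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

// Hypothetical token record; flint's real tokenizer types differ.
struct Token { std::string value; std::string file; std::size_t line; };

// Print one self-contained sed command per NULL token encountered.
void emitSedLines(const std::vector<Token>& tokens) {
  for (const auto& tok : tokens) {
    if (tok.value == "NULL") {
      std::printf("sed -i '%zus|\\<NULL\\>|nullptr|g' %s\n",
                  tok.line, tok.file.c_str());
    }
  }
}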

Ran the sed lines, restricted to filepaths with extensions
h, hpp, cc, cpp, and tcc. Did not apply the change to FacebookUpdate, due to
weird formatting; hphp, which will be handled in a separate diff; payment, due
to a large amount of generated code that didn't play well with my flow; and
memcache, which has lots of .h files that cannot contain nullptr because
they are included from .c files.
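
The memcache point is just C vs. C++: nullptr is a C++11 keyword, so a
header that is also compiled as C must keep NULL. A contrived sketch, using
a hypothetical shared.h rather than memcache's actual code:

/* shared.h -- included from both .c and .cpp translation units */
#include <stddef.h>

#ifdef __cplusplus
#define SHARED_NULL nullptr  /* C++ TUs can use the typed null */
#else
#define SHARED_NULL NULL     /* C TUs must keep NULL */
#endif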

@bypass-lint

Test Plan:
fbconfig -r common folly && fbmake opt && fbmake runtests_opt
^ Of the 4k+ test cases, about two dozen are failing. Slightly more
failed on master than with my diff applied.

arc unit

Reviewed By: andrei.alexandrescu@fb.com

FB internal diff: D1234261
parent 7154531b
@@ -267,7 +267,7 @@ static const ScaleInfo kTimeSuffixes[] {
   { 1E-9, "ns" },
   { 1E-12, "ps" },
   { 1E-15, "fs" },
-  { 0, NULL },
+  { 0, nullptr },
 };
 static const ScaleInfo kMetricSuffixes[] {
@@ -289,7 +289,7 @@ static const ScaleInfo kMetricSuffixes[] {
   { 1E-18, "a" }, // atto
   { 1E-21, "z" }, // zepto
   { 1E-24, "y" }, // yocto
-  { 0, NULL },
+  { 0, nullptr },
 };
 static string humanReadable(double n, unsigned int decimals,
@@ -300,7 +300,7 @@ static string humanReadable(double n, unsigned int decimals,
   const double absValue = fabs(n);
   const ScaleInfo* scale = scales;
-  while (absValue < scale[0].boundary && scale[1].suffix != NULL) {
+  while (absValue < scale[0].boundary && scale[1].suffix != nullptr) {
     ++scale;
   }
@@ -804,17 +804,17 @@ class ConcurrentSkipList<T, Comp, MAX_HEIGHT>::Skipper {
   }
   const value_type &data() const {
-    DCHECK(succs_[0] != NULL);
+    DCHECK(succs_[0] != nullptr);
     return succs_[0]->data();
   }
   value_type &operator *() const {
-    DCHECK(succs_[0] != NULL);
+    DCHECK(succs_[0] != nullptr);
     return succs_[0]->data();
   }
   value_type *operator->() {
-    DCHECK(succs_[0] != NULL);
+    DCHECK(succs_[0] != nullptr);
     return &succs_[0]->data();
   }
@@ -839,7 +839,7 @@ class ConcurrentSkipList<T, Comp, MAX_HEIGHT>::Skipper {
       findInsertionPoint(preds_[lyr], lyr, data, preds_, succs_);
   if (foundLayer < 0) return false;
-  DCHECK(succs_[0] != NULL) << "lyr=" << lyr << "; max_layer=" << max_layer;
+  DCHECK(succs_[0] != nullptr) << "lyr=" << lyr << "; max_layer=" << max_layer;
   return !succs_[0]->markedForRemoval();
 }
@@ -2358,7 +2358,7 @@ getline(
     basic_fbstring<E, T, A, S>& str,
     typename basic_fbstring<E, T, A, S>::value_type delim) {
   // Use the nonstandard getdelim()
-  char * buf = NULL;
+  char * buf = nullptr;
   size_t size = 0;
   for (;;) {
     // This looks quadratic but it really depends on realloc
@@ -1109,7 +1109,7 @@ public:
     void* p = impl_.b_;
     if ((rallocm && usingStdAllocator::value) &&
         newCapacityBytes >= folly::jemallocMinInPlaceExpandable &&
-        rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE)
+        rallocm(&p, nullptr, newCapacityBytes, 0, ALLOCM_NO_MOVE)
         == ALLOCM_SUCCESS) {
       impl_.z_ += newCap - oldCap;
     } else {
@@ -1144,7 +1144,7 @@ private:
   auto const newCapacityBytes = folly::goodMallocSize(n * sizeof(T));
   void* p = impl_.b_;
-  if (rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE)
+  if (rallocm(&p, nullptr, newCapacityBytes, 0, ALLOCM_NO_MOVE)
       == ALLOCM_SUCCESS) {
     impl_.z_ = impl_.b_ + newCapacityBytes / sizeof(T);
     return true;
@@ -27,7 +27,7 @@ bool usingJEMallocSlow() {
   // Some platforms (*cough* OSX *cough*) require weak symbol checks to be
   // in the form if (mallctl != NULL). Not if (mallctl) or if (!mallctl) (!!).
   // http://goo.gl/xpmctm
-  if (allocm == NULL || rallocm == NULL || mallctl == NULL) {
+  if (allocm == nullptr || rallocm == nullptr || mallctl == nullptr) {
     return false;
   }
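For context, the check above works because folly declares the jemalloc entry
points as weak symbols, which resolve to null when jemalloc is not linked in.
A minimal sketch of the pattern (assumed mallctl signature; not folly's exact
declarations):

#include <cstddef>

// Weak declaration: the symbol is null at runtime if jemalloc is absent.
extern "C" int mallctl(const char* name, void* oldp, std::size_t* oldlenp,
                       void* newp, std::size_t newlen) __attribute__((weak));

bool maybeUsingJEMalloc() {
  // Spelled as an explicit != nullptr comparison, per the comment in the
  // hunk above; some platforms mishandle the implicit-bool forms.
  return mallctl != nullptr;
}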
@@ -208,7 +208,7 @@ inline void* smartRealloc(void* p,
     // using jemalloc's API. Don't forget that jemalloc can never grow
     // in place blocks smaller than 4096 bytes.
     if (currentCapacity >= jemallocMinInPlaceExpandable &&
-        rallocm(&p, NULL, newCapacity, 0, ALLOCM_NO_MOVE) == ALLOCM_SUCCESS) {
+        rallocm(&p, nullptr, newCapacity, 0, ALLOCM_NO_MOVE) == ALLOCM_SUCCESS) {
       // Managed to expand in place
       return p;
     }
@@ -35,7 +35,7 @@ std::atomic<uint32_t> seedInput(0);
 uint32_t randomNumberSeed() {
   struct timeval tv;
-  gettimeofday(&tv, NULL);
+  gettimeofday(&tv, nullptr);
   const uint32_t kPrime0 = 51551;
   const uint32_t kPrime1 = 61631;
   const uint32_t kPrime2 = 64997;
@@ -80,7 +80,7 @@ namespace detail {
        * linux this varies by kernel version from 1ms to 10ms).
        */
       struct timespec ts = { 0, 500000 };
-      nanosleep(&ts, NULL);
+      nanosleep(&ts, nullptr);
     }
   }
 };
@@ -315,7 +315,7 @@ struct Synchronized {
         return;
       }
       // Could not acquire the resource, pointer is null
-      parent_ = NULL;
+      parent_ = nullptr;
     }
     /**
@@ -363,7 +363,7 @@ struct Synchronized {
      * SYNCHRONIZED below.
      */
     T* operator->() {
-      return parent_ ? &parent_->datum_ : NULL;
+      return parent_ ? &parent_->datum_ : nullptr;
     }
     /**
@@ -433,7 +433,7 @@ struct Synchronized {
         return;
       }
       // Could not acquire the resource, pointer is null
-      parent_ = NULL;
+      parent_ = nullptr;
     }
     ConstLockedPtr& operator=(const ConstLockedPtr& rhs) {
@@ -449,7 +449,7 @@ struct Synchronized {
     }
     const T* operator->() const {
-      return parent_ ? &parent_->datum_ : NULL;
+      return parent_ ? &parent_->datum_ : nullptr;
     }
     struct Unsynchronizer {
@@ -48,7 +48,7 @@ class ThreadCachedInt : boost::noncopyable {
   void increment(IntT inc) {
     auto cache = cache_.get();
-    if (UNLIKELY(cache == NULL || cache->parent_ == NULL)) {
+    if (UNLIKELY(cache == nullptr || cache->parent_ == nullptr)) {
       cache = new IntCache(*this);
       cache_.reset(cache);
     }
@@ -122,7 +122,7 @@ class ThreadCachedInt : boost::noncopyable {
   // need to make sure we signal that this parent is dead.
   ~ThreadCachedInt() {
     for (auto& cache : cache_.accessAllThreads()) {
-      cache.parent_ = NULL;
+      cache.parent_ = nullptr;
     }
   }
@@ -74,22 +74,22 @@ class CustomDeleter : public DeleterBase {
  */
 struct ElementWrapper {
   void dispose(TLPDestructionMode mode) {
-    if (ptr != NULL) {
-      DCHECK(deleter != NULL);
+    if (ptr != nullptr) {
+      DCHECK(deleter != nullptr);
       deleter->dispose(ptr, mode);
       if (ownsDeleter) {
         delete deleter;
       }
-      ptr = NULL;
-      deleter = NULL;
+      ptr = nullptr;
+      deleter = nullptr;
       ownsDeleter = false;
     }
   }
   template <class Ptr>
   void set(Ptr p) {
-    DCHECK(ptr == NULL);
-    DCHECK(deleter == NULL);
+    DCHECK(ptr == nullptr);
+    DCHECK(deleter == nullptr);
     if (p) {
       // We leak a single object here but that is ok. If we used an
@@ -105,8 +105,8 @@ struct ElementWrapper {
   template <class Ptr, class Deleter>
   void set(Ptr p, Deleter d) {
-    DCHECK(ptr == NULL);
-    DCHECK(deleter == NULL);
+    DCHECK(ptr == nullptr);
+    DCHECK(deleter == nullptr);
     if (p) {
       ptr = p;
       deleter = new CustomDeleter<Ptr,Deleter>(d);
@@ -241,8 +241,8 @@ struct StaticMeta {
       threadEntry->elements[i].dispose(TLPDestructionMode::THIS_THREAD);
     }
     free(threadEntry->elements);
-    threadEntry->elements = NULL;
-    pthread_setspecific(meta.pthreadKey_, NULL);
+    threadEntry->elements = nullptr;
+    pthread_setspecific(meta.pthreadKey_, nullptr);
 #if __APPLE__
     // Allocated in getThreadEntry(); free it
@@ -114,8 +114,8 @@ struct IOBuf::HeapFullStorage {
 };
 IOBuf::SharedInfo::SharedInfo()
-  : freeFn(NULL),
-    userData(NULL) {
+  : freeFn(nullptr),
+    userData(nullptr) {
   // Use relaxed memory ordering here. Since we are creating a new SharedInfo,
   // no other threads should be referring to it yet.
   refcount.store(1, std::memory_order_relaxed);
@@ -801,7 +801,7 @@ void IOBuf::allocExtBuffer(uint64_t minCapacity,
                            uint64_t* capacityReturn) {
   size_t mallocSize = goodExtBufferSize(minCapacity);
   uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize));
-  if (UNLIKELY(buf == NULL)) {
+  if (UNLIKELY(buf == nullptr)) {
     throw std::bad_alloc();
   }
   initExtBuffer(buf, mallocSize, infoReturn, capacityReturn);
@@ -63,14 +63,14 @@ void checkConsistency(const IOBufQueue& queue) {
 TEST(IOBufQueue, Simple) {
   IOBufQueue queue(clOptions);
-  EXPECT_EQ(NULL, queue.front());
+  EXPECT_EQ(nullptr, queue.front());
   queue.append(SCL(""));
-  EXPECT_EQ(NULL, queue.front());
+  EXPECT_EQ(nullptr, queue.front());
   queue.append(unique_ptr<IOBuf>());
-  EXPECT_EQ(NULL, queue.front());
+  EXPECT_EQ(nullptr, queue.front());
   string emptyString;
   queue.append(emptyString);
-  EXPECT_EQ(NULL, queue.front());
+  EXPECT_EQ(nullptr, queue.front());
 }
 TEST(IOBufQueue, Append) {
@@ -85,9 +85,9 @@ TEST(IOBufQueue, Append) {
   checkConsistency(queue);
   checkConsistency(queue2);
   const IOBuf* chain = queue.front();
-  EXPECT_NE((IOBuf*)NULL, chain);
+  EXPECT_NE((IOBuf*)nullptr, chain);
   EXPECT_EQ(12, chain->computeChainDataLength());
-  EXPECT_EQ(NULL, queue2.front());
+  EXPECT_EQ(nullptr, queue2.front());
 }
 TEST(IOBufQueue, Append2) {
@@ -102,9 +102,9 @@ TEST(IOBufQueue, Append2) {
   checkConsistency(queue);
   checkConsistency(queue2);
   const IOBuf* chain = queue.front();
-  EXPECT_NE((IOBuf*)NULL, chain);
+  EXPECT_NE((IOBuf*)nullptr, chain);
   EXPECT_EQ(12, chain->computeChainDataLength());
-  EXPECT_EQ(NULL, queue2.front());
+  EXPECT_EQ(nullptr, queue2.front());
 }
 TEST(IOBufQueue, Split) {
@@ -136,7 +136,7 @@ TEST(IOBufQueue, Split) {
   prefix = queue.split(5);
   checkConsistency(queue);
   EXPECT_EQ(5, prefix->computeChainDataLength());
-  EXPECT_EQ((IOBuf*)NULL, queue.front());
+  EXPECT_EQ((IOBuf*)nullptr, queue.front());
   queue.append(stringToIOBuf(SCL("Hello,")));
   queue.append(stringToIOBuf(SCL(" World")));
@@ -151,7 +151,7 @@ TEST(IOBufQueue, Preallocate) {
   queue.append(string("Hello"));
   pair<void*,uint32_t> writable = queue.preallocate(2, 64, 64);
   checkConsistency(queue);
-  EXPECT_NE((void*)NULL, writable.first);
+  EXPECT_NE((void*)nullptr, writable.first);
   EXPECT_LE(2, writable.second);
   EXPECT_GE(64, writable.second);
   memcpy(writable.first, SCL(", "));
@@ -238,7 +238,7 @@ TEST(IOBufQueue, Trim) {
   queue.trimEnd(1);
   checkConsistency(queue);
-  EXPECT_EQ(NULL, queue.front());
+  EXPECT_EQ(nullptr, queue.front());
   EXPECT_THROW(queue.trimStart(2), std::underflow_error);
   checkConsistency(queue);
@@ -296,7 +296,7 @@ TEST(IOBufQueue, TrimPack) {
   queue.trimEnd(1);
   checkConsistency(queue);
-  EXPECT_EQ(NULL, queue.front());
+  EXPECT_EQ(nullptr, queue.front());
   EXPECT_THROW(queue.trimStart(2), std::underflow_error);
   checkConsistency(queue);
@@ -354,12 +354,12 @@ TEST(IOBufQueue, PopFirst) {
   checkConsistency(queue);
   EXPECT_EQ(chainLength, queue.chainLength());
-  EXPECT_EQ((IOBuf*)NULL, queue.front());
+  EXPECT_EQ((IOBuf*)nullptr, queue.front());
   first = queue.pop_front();
-  EXPECT_EQ((IOBuf*)NULL, first.get());
+  EXPECT_EQ((IOBuf*)nullptr, first.get());
   checkConsistency(queue);
-  EXPECT_EQ((IOBuf*)NULL, queue.front());
+  EXPECT_EQ((IOBuf*)nullptr, queue.front());
   EXPECT_EQ(0, queue.chainLength());
 }
@@ -966,7 +966,7 @@ private:
     doConstruct(n, val);
   }
-  void makeSize(size_type size, value_type* v = NULL) {
+  void makeSize(size_type size, value_type* v = nullptr) {
     makeSize(size, v, size - 1);
   }
@@ -1009,7 +1009,7 @@ private:
       detail::shiftPointer(newh, kHeapifyCapacitySize) :
       newh);
-    if (v != NULL) {
+    if (v != nullptr) {
       // move new element
       try {
         new (&newp[pos]) value_type(std::move(*v));
@@ -161,8 +161,8 @@ class HistogramBuckets {
   template <typename CountFn>
   unsigned int getPercentileBucketIdx(double pct,
                                       CountFn countFromBucket,
-                                      double* lowPct = NULL,
-                                      double* highPct = NULL) const;
+                                      double* lowPct = nullptr,
+                                      double* highPct = nullptr) const;
   /**
    * Estimate the value at the specified percentile.
@@ -357,8 +357,8 @@ class Histogram {
    * returned in the lowPct and highPct arguments, if they are non-NULL.
    */
   unsigned int getPercentileBucketIdx(double pct,
-                                      double* lowPct = NULL,
-                                      double* highPct = NULL) const {
+                                      double* lowPct = nullptr,
+                                      double* highPct = nullptr) const {
     // We unfortunately can't use lambdas here yet;
     // Some users of this code are still built with gcc-4.4.
     CountFromBucket countFn;
@@ -346,7 +346,7 @@ void* insertThread(void* jj) {
     KeyT key = randomizeKey(i + j * numOpsPerThread);
     globalAHM->insert(key, genVal(key));
   }
-  return NULL;
+  return nullptr;
 }
 void* insertThreadArr(void* jj) {
@@ -355,7 +355,7 @@ void* insertThreadArr(void* jj) {
     KeyT key = randomizeKey(i + j * numOpsPerThread);
     globalAHA->insert(std::make_pair(key, genVal(key)));
   }
-  return NULL;
+  return nullptr;
 }
 std::atomic<bool> runThreadsCreatedAllThreads;
@@ -365,7 +365,7 @@ void runThreads(void *(*thread)(void*), int numThreads, void **statuses) {
   vector<pthread_t> threadIds;
   for (int64_t j = 0; j < numThreads; j++) {
     pthread_t tid;
-    if (pthread_create(&tid, NULL, thread, (void*) j) != 0) {
+    if (pthread_create(&tid, nullptr, thread, (void*) j) != 0) {
       LOG(ERROR) << "Could not start thread";
     } else {
       threadIds.push_back(tid);
@@ -375,12 +375,12 @@ void runThreads(void *(*thread)(void*), int numThreads, void **statuses) {
   runThreadsCreatedAllThreads.store(true);
   for (int i = 0; i < threadIds.size(); ++i) {
-    pthread_join(threadIds[i], statuses == NULL ? NULL : &statuses[i]);
+    pthread_join(threadIds[i], statuses == nullptr ? nullptr : &statuses[i]);
   }
 }
 void runThreads(void *(*thread)(void*)) {
-  runThreads(thread, FLAGS_numThreads, NULL);
+  runThreads(thread, FLAGS_numThreads, nullptr);
 }
 }
@@ -462,10 +462,10 @@ void* raceIterateThread(void* jj) {
     ++count;
     if (count > raceFinalSizeEstimate) {
       EXPECT_FALSE("Infinite loop in iterator.");
-      return NULL;
+      return nullptr;
     }
   }
-  return NULL;
+  return nullptr;
 }
 void* raceInsertRandomThread(void* jj) {
@@ -474,7 +474,7 @@ void* raceInsertRandomThread(void* jj) {
     KeyT key = rand();
     globalAHM->insert(key, genVal(key));
   }
-  return NULL;
+  return nullptr;
 }
 }
@@ -496,14 +496,14 @@ TEST(Ahm, race_insert_iterate_thread_test) {
     pthread_t tid;
     void *(*thread)(void*) =
       (j < kInsertThreads ? raceInsertRandomThread : raceIterateThread);
-    if (pthread_create(&tid, NULL, thread, (void*) j) != 0) {
+    if (pthread_create(&tid, nullptr, thread, (void*) j) != 0) {
       LOG(ERROR) << "Could not start thread";
     } else {
      threadIds.push_back(tid);
    }
  }
   for (int i = 0; i < threadIds.size(); ++i) {
-    pthread_join(threadIds[i], NULL);
+    pthread_join(threadIds[i], nullptr);
   }
   VLOG(1) << "Ended up with " << globalAHM->numSubMaps() << " submaps";
   VLOG(1) << "Final size of map " << globalAHM->size();
@@ -521,7 +521,7 @@ void* testEraseInsertThread(void*) {
     insertedLevel.store(i, std::memory_order_release);
   }
   insertedLevel.store(kTestEraseInsertions, std::memory_order_release);
-  return NULL;
+  return nullptr;
 }
 void* testEraseEraseThread(void*) {
@@ -551,7 +551,7 @@ void* testEraseEraseThread(void*) {
       }
     }
   }
-  return NULL;
+  return nullptr;
 }
 }
@@ -572,14 +572,14 @@ TEST(Ahm, thread_erase_insert_race) {
     pthread_t tid;
     void *(*thread)(void*) =
       (j < kInsertThreads ? testEraseInsertThread : testEraseEraseThread);
-    if (pthread_create(&tid, NULL, thread, (void*) j) != 0) {
+    if (pthread_create(&tid, nullptr, thread, (void*) j) != 0) {
       LOG(ERROR) << "Could not start thread";
     } else {
      threadIds.push_back(tid);
    }
  }
   for (int i = 0; i < threadIds.size(); i++) {
-    pthread_join(threadIds[i], NULL);
+    pthread_join(threadIds[i], nullptr);
   }
   EXPECT_TRUE(globalAHM->empty());
@@ -104,8 +104,8 @@ TEST(ConcurrentSkipList, SequentialAccess) {
   LOG(INFO) << "nodetype size=" << sizeof(SkipListNodeType);
   auto skipList(SkipListType::create(kHeadHeight));
-  EXPECT_TRUE(skipList.first() == NULL);
-  EXPECT_TRUE(skipList.last() == NULL);
+  EXPECT_TRUE(skipList.first() == nullptr);
+  EXPECT_TRUE(skipList.last() == nullptr);
   skipList.add(3);
   EXPECT_TRUE(skipList.contains(3));
@@ -420,9 +420,9 @@ void testVariadicToDelim() {
 }
 TEST(Conv, NullString) {
-  string s1 = to<string>((char *) NULL);
+  string s1 = to<string>((char *) nullptr);
   EXPECT_TRUE(s1.empty());
-  fbstring s2 = to<fbstring>((char *) NULL);
+  fbstring s2 = to<fbstring>((char *) nullptr);
   EXPECT_TRUE(s2.empty());
 }
@@ -44,7 +44,7 @@ TEST(DiscriminatedPtr, Basic) {
   EXPECT_EQ(&a, static_cast<const Ptr&>(p).get_nothrow<int>());
   EXPECT_EQ(&a, p.get<int>());
   EXPECT_EQ(&a, static_cast<const Ptr&>(p).get<int>());
-  EXPECT_EQ(static_cast<void*>(NULL), p.get_nothrow<void>());
+  EXPECT_EQ(static_cast<void*>(nullptr), p.get_nothrow<void>());
   EXPECT_THROW({p.get<void>();}, std::invalid_argument);
   Foo foo;
@@ -55,7 +55,7 @@ TEST(DiscriminatedPtr, Basic) {
   EXPECT_TRUE(p.hasType<Foo>());
   EXPECT_FALSE(p.hasType<Bar>());
-  EXPECT_EQ(static_cast<int*>(NULL), p.get_nothrow<int>());
+  EXPECT_EQ(static_cast<int*>(nullptr), p.get_nothrow<int>());
   p.clear();
   EXPECT_TRUE(p.empty());
@@ -169,7 +169,7 @@ TEST(ThreadLocal, SimpleRepeatDestructor) {
 TEST(ThreadLocal, InterleavedDestructors) {
   Widget::totalVal_ = 0;
-  ThreadLocal<Widget>* w = NULL;
+  ThreadLocal<Widget>* w = nullptr;
   int wVersion = 0;
   const int wVersionMax = 2;
   int thIter = 0;
@@ -267,7 +267,7 @@ public:
   typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
   // 23.3.6.1 construct/copy/destroy:
-  fbvector() : b_(NULL), e_(NULL), z_(NULL) {}
+  fbvector() : b_(nullptr), e_(nullptr), z_(nullptr) {}
   explicit fbvector(const Allocator&) {
     new(this) fbvector;
@@ -566,7 +566,7 @@ private:
     auto const newCapacityBytes = goodMallocSize(n * sizeof(T));
     void* p = b_;
-    if (rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE)
+    if (rallocm(&p, nullptr, newCapacityBytes, 0, ALLOCM_NO_MOVE)
         != ALLOCM_SUCCESS) {
       return false;
     }
@@ -609,7 +609,7 @@ public:
     auto const crtCapacityBytes = capacity() * sizeof(T);
     auto const newCapacityBytes = goodMallocSize(size() * sizeof(T));
     if (crtCapacityBytes >= jemallocMinInPlaceExpandable &&
-        rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE)
+        rallocm(&p, nullptr, newCapacityBytes, 0, ALLOCM_NO_MOVE)
         == ALLOCM_SUCCESS) {
       // Celebrate
       z_ = b_ + newCapacityBytes / sizeof(T);
@@ -697,7 +697,7 @@ private:
     if (capBytes < jemallocMinInPlaceExpandable) return false;
     auto const newCapBytes = goodMallocSize(capBytes + sizeof(T));
     void * bv = b_;
-    if (rallocm(&bv, NULL, newCapBytes, 0, ALLOCM_NO_MOVE) != ALLOCM_SUCCESS) {
+    if (rallocm(&bv, nullptr, newCapBytes, 0, ALLOCM_NO_MOVE) != ALLOCM_SUCCESS) {
      return false;
    }
    // Managed to expand in place