Commit 57de05c8 authored by Emanuele Altieri's avatar Emanuele Altieri Committed by Facebook GitHub Bot

Expose total CPU time used by a ThreadPoolExecutor

Summary: New method to return the cumulative CPU time consumed by the pool. Requires support for per-thread CPU clocks.

Reviewed By: yfeldblum, iahs

Differential Revision: D30685002

fbshipit-source-id: ac103e581585832bcda245e8f034bbc71489c831
parent 118a3923
......@@ -16,7 +16,10 @@
#include <folly/executors/ThreadPoolExecutor.h>
#include <ctime>
#include <folly/executors/GlobalThreadPoolList.h>
#include <folly/portability/PThread.h>
#include <folly/synchronization/AsymmetricMemoryBarrier.h>
#include <folly/tracing/StaticTracepoint.h>
......@@ -347,6 +350,20 @@ std::string ThreadPoolExecutor::getNameHelper() const {
std::atomic<uint64_t> ThreadPoolExecutor::Thread::nextId(0);
/// Returns the CPU time consumed so far by this worker thread.
/// Requires per-thread CPU clock support (a Linux extension); on other
/// platforms this always reports zero.
std::chrono::nanoseconds ThreadPoolExecutor::Thread::usedCpuTime() const {
  namespace chr = std::chrono;
  timespec ts{}; // stays zeroed if the clock is unavailable
#ifdef __linux__
  clockid_t cpuClock;
  // native_handle() is non-const, but querying the clock does not mutate
  // the thread, so the const_cast is benign.
  auto nativeHandle = const_cast<std::thread&>(handle).native_handle();
  if (pthread_getcpuclockid(nativeHandle, &cpuClock) == 0) {
    clock_gettime(cpuClock, &ts);
  }
#endif
  return chr::seconds(ts.tv_sec) + chr::nanoseconds(ts.tv_nsec);
}
void ThreadPoolExecutor::subscribeToTaskStats(TaskStatsCallback cb) {
if (*taskStatsCallbacks_->inCallback) {
throw std::runtime_error("cannot subscribe in task stats callback");
......
......@@ -125,6 +125,17 @@ class ThreadPoolExecutor : public DefaultKeepAliveExecutor {
size_t getPendingTaskCount() const;
const std::string& getName() const;
/**
 * Cumulative CPU time consumed by every thread this pool has ever run,
 * including threads that have since exited. Depends on platform support
 * for per-thread CPU clocks; where unavailable, 0 is returned. Note that
 * this walks the live thread list under a read lock and queries a clock
 * per thread, so it is not cheap.
 */
std::chrono::nanoseconds getUsedCpuTime() const {
  SharedMutex::ReadHolder guard{&threadListLock_};
  return threadList_.getUsedCpuTime();
}
struct TaskStats {
TaskStats() : expired(false), waitTime(0), runTime(0), requestId(0) {}
bool expired;
......@@ -189,6 +200,8 @@ class ThreadPoolExecutor : public DefaultKeepAliveExecutor {
~Thread() override = default;
std::chrono::nanoseconds usedCpuTime() const;
static std::atomic<uint64_t> nextId;
uint64_t id;
std::thread handle;
......@@ -251,6 +264,7 @@ class ThreadPoolExecutor : public DefaultKeepAliveExecutor {
CHECK(itPair.first != vec_.end());
CHECK(std::next(itPair.first) == itPair.second);
vec_.erase(itPair.first);
pastCpuUsed_ += state->usedCpuTime();
}
bool contains(const ThreadPtr& ts) const {
......@@ -259,6 +273,14 @@ class ThreadPoolExecutor : public DefaultKeepAliveExecutor {
const std::vector<ThreadPtr>& get() const { return vec_; }
// Total CPU time for this list: the time already banked from threads
// that exited (pastCpuUsed_) plus the live usage of each remaining thread.
std::chrono::nanoseconds getUsedCpuTime() const {
  auto total = pastCpuUsed_;
  for (const auto& t : vec_) {
    total += t->usedCpuTime();
  }
  return total;
}
private:
struct Compare {
bool operator()(const ThreadPtr& ts1, const ThreadPtr& ts2) const {
......@@ -266,6 +288,8 @@ class ThreadPoolExecutor : public DefaultKeepAliveExecutor {
}
};
std::vector<ThreadPtr> vec_;
// cpu time used by threads that are no longer alive
std::chrono::nanoseconds pastCpuUsed_{0};
};
class StoppedThreadQueue : public BlockingQueue<ThreadPtr> {
......
......@@ -19,6 +19,7 @@
#include <folly/executors/CPUThreadPoolExecutor.h>
#include <folly/executors/ThreadPoolExecutor.h>
#include <folly/lang/Keep.h>
#include <folly/synchronization/Latch.h>
#include <atomic>
#include <memory>
......@@ -42,10 +43,39 @@
using namespace folly;
using namespace std::chrono;
// Assert that two std::chrono durations agree to within a tolerance,
// after converting all three arguments to nanoseconds (the chrono
// counterpart of gtest's ASSERT_NEAR, which only takes raw numbers).
#define ASSERT_NEAR_NS(val, ref, tol) \
  do {                                \
    ASSERT_NEAR(                      \
        nanoseconds(val).count(),     \
        nanoseconds(ref).count(),     \
        nanoseconds(tol).count());    \
  } while (0)
// Build a task that simply sleeps for the given number of milliseconds.
static Func burnMs(uint64_t ms) {
  return [ms] { std::this_thread::sleep_for(milliseconds(ms)); };
}
// Spin until the given wall-clock interval elapses, deliberately
// consuming CPU the whole time (used to make getUsedCpuTime() advance).
FOLLY_MAYBE_UNUSED
static void busyLoopFor(milliseconds ms) {
  using clock = high_resolution_clock;
  const auto deadline = clock::now() + ms;
  while (clock::now() < deadline) {
    // spin: the point is to burn cpu time
  }
}
// Wait out the given wall-clock interval while consuming as little CPU
// as possible: the thread sleeps in 100ms chunks between deadline checks.
FOLLY_MAYBE_UNUSED
static void idleLoopFor(milliseconds ms) {
  using clock = high_resolution_clock;
  const auto deadline = clock::now() + ms;
  while (clock::now() < deadline) {
    /* sleep override */ std::this_thread::sleep_for(100ms);
  }
}
static WorkerProvider* kWorkerProviderGlobal = nullptr;
namespace folly {
......@@ -316,6 +346,68 @@ TEST(ThreadPoolExecutorTest, EDFTaskStats) {
taskStats<EDFThreadPoolExecutor>();
}
#ifdef __linux__
// End-to-end check of ThreadPoolExecutor::getUsedCpuTime(): busy tasks
// should add CPU time, idle tasks should not, and the total must survive
// shrinking the pool to zero threads. Linux-only because the
// implementation relies on per-thread CPU clocks.
TEST(ThreadPoolExecutorTest, GetUsedCpuTime) {
  CPUThreadPoolExecutor e(4);
  ASSERT_EQ(e.numActiveThreads(), 0);
  // Nothing has run yet, so no CPU time should be recorded.
  ASSERT_EQ(e.getUsedCpuTime(), nanoseconds(0));
  // get busy
  Latch latch(4);
  auto busy_loop = [&] {
    busyLoopFor(1s);
    latch.count_down();
  };
  auto idle_loop = [&] {
    idleLoopFor(1s);
    latch.count_down();
  };
  e.add(busy_loop); // +1s cpu time
  e.add(busy_loop); // +1s cpu time
  e.add(idle_loop); // +0s cpu time
  e.add(idle_loop); // +0s cpu time
  latch.wait();
  // pool should have used 2s cpu time (in 1s wall clock time)
  auto elapsed0 = e.getUsedCpuTime();
  ASSERT_NEAR_NS(elapsed0, 2s, 100ms);
  // stop all threads
  e.setNumThreads(0);
  ASSERT_EQ(e.numActiveThreads(), 0);
  // total pool CPU time should not have changed: time from the stopped
  // threads is folded into the list's past-usage accumulator
  auto elapsed1 = e.getUsedCpuTime();
  ASSERT_NEAR_NS(elapsed0, elapsed1, 100ms);
  // add a thread, do nothing, cpu time should stay the same
  e.setNumThreads(1);
  Baton<> baton;
  e.add([&] { baton.post(); });
  baton.wait();
  ASSERT_EQ(e.numActiveThreads(), 1);
  auto elapsed2 = e.getUsedCpuTime();
  ASSERT_NEAR_NS(elapsed1, elapsed2, 100ms);
  // now burn some more cycles
  baton.reset();
  e.add([&] {
    busyLoopFor(500ms);
    baton.post();
  });
  baton.wait();
  auto elapsed3 = e.getUsedCpuTime();
  ASSERT_NEAR_NS(elapsed3, elapsed2 + 500ms, 100ms);
}
#else
// On platforms without per-thread CPU clock support, getUsedCpuTime()
// returns 0 even after the pool has done real work.
TEST(ThreadPoolExecutorTest, GetUsedCpuTime) {
  CPUThreadPoolExecutor e(1);
  // Just make sure 0 is returned
  ASSERT_EQ(e.getUsedCpuTime(), nanoseconds(0));
  Baton<> baton;
  e.add([&] {
    busyLoopFor(500ms);
    baton.post();
  });
  baton.wait();
  ASSERT_EQ(e.getUsedCpuTime(), nanoseconds(0));
}
#endif
template <class TPE>
static void expiration() {
TPE tpe(1);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment