Commit 8a5981ec authored by zhaoliwei, committed by Facebook GitHub Bot

refactor: delete unused unit tests (#1354)

Summary:
There are some duplicated unit tests in TokenBucketTest.cpp, so this PR is intended to delete these unused unit tests.
Pull Request resolved: https://github.com/facebook/folly/pull/1354

Reviewed By: yfeldblum

Differential Revision: D21061401

Pulled By: Orvid

fbshipit-source-id: ea486fc244e2be68196ef015e020b8664bb3670a
parent 52a8633f
@@ -74,52 +74,6 @@ INSTANTIATE_TEST_CASE_P(
    TokenBucketTest,
    ::testing::ValuesIn(rateToConsumeSize));

void doTokenBucketTest(double maxQps, double consumeSize) {
  const double tenMillisecondBurst = maxQps * 0.010;
  // Select a burst size of 10 milliseconds at the max rate or the consume size
  // if 10 ms at maxQps is too small.
  const double burstSize = std::max(consumeSize, tenMillisecondBurst);
  TokenBucket tokenBucket(maxQps, burstSize, 0);
  double tokenCounter = 0;
  double currentTime = 0;
  // Simulate time advancing 10 seconds
  for (; currentTime <= 10.0; currentTime += 0.001) {
    EXPECT_FALSE(tokenBucket.consume(burstSize + 1, currentTime));
    while (tokenBucket.consume(consumeSize, currentTime)) {
      tokenCounter += consumeSize;
    }
    // Tokens consumed should exceed some lower bound based on maxQps.
    // Note: The token bucket implementation is not precise, so the lower bound
    // is somewhat fudged. The upper bound is accurate, however.
    EXPECT_LE(maxQps * currentTime * 0.9 - 1, tokenCounter);
    // Tokens consumed should not exceed some upper bound based on maxQps.
    EXPECT_GE(maxQps * currentTime + 1e-6, tokenCounter);
  }
}

TEST(TokenBucket, sanity) {
  doTokenBucketTest(100, 1);
  doTokenBucketTest(1000, 1);
  doTokenBucketTest(10000, 1);
  // Consume more than one at a time.
  doTokenBucketTest(10000, 5);
}

TEST(TokenBucket, ReverseTime2) {
  const double rate = 1000;
  TokenBucket tokenBucket(rate, rate * 0.01 + 1e-6);
  size_t count = 0;
  while (tokenBucket.consume(1, 0.1)) {
    count += 1;
  }
  EXPECT_EQ(10, count);
  // Going backwards in time has no effect on the token count (this protects
  // against different threads providing out-of-order timestamps).
  double tokensBefore = tokenBucket.available();
  EXPECT_FALSE(tokenBucket.consume(1, 0.09999999));
  EXPECT_EQ(tokensBefore, tokenBucket.available());
}

TEST(TokenBucket, drainOnFail) {
  DynamicTokenBucket tokenBucket;
......
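For reference, the following standalone sketch (not part of the commit) mirrors the deleted ReverseTime2 test and shows the TokenBucket interface it exercised: consume() takes the amount to consume plus an explicit timestamp in seconds, and available() reports the remaining tokens. It assumes folly/TokenBucket.h as the header; the main()/assert scaffolding is added here purely for illustration.

#include <folly/TokenBucket.h>
#include <cassert>

int main() {
  const double rate = 1000; // tokens generated per second
  // Burst capacity of roughly 10 tokens, matching the deleted ReverseTime2 test.
  folly::TokenBucket bucket(rate, rate * 0.01 + 1e-6);

  // By t = 0.1 s the bucket has filled to its burst cap, so exactly 10
  // single-token consumes succeed at that timestamp.
  size_t count = 0;
  while (bucket.consume(1, 0.1)) {
    count += 1;
  }
  assert(count == 10);

  // Passing an earlier timestamp never refunds tokens, which protects against
  // threads supplying out-of-order times.
  const double tokensBefore = bucket.available();
  assert(!bucket.consume(1, 0.09999999));
  assert(bucket.available() == tokensBefore);
  return 0;
}

Since the deleted tests duplicated existing coverage, the same behavior remains exercised by the tests this commit keeps.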