author    Tor Brede Vekterli <vekterli@verizonmedia.com>  2019-05-09 12:03:06 +0000
committer Tor Brede Vekterli <vekterli@verizonmedia.com>  2019-06-03 13:08:25 +0000
commit    7e9a122d5865db5a24b135c41b6dbf7dedf6a31c (patch)
tree      e6901c00e5797b7b688e921d9c83961550463c5a /storage/src/tests
parent    34920d57c38c11f8ef8979071992e206fcd0ab03 (diff)
Add new DB merging API to distributor BucketDatabase
Abstracts away how an ordered merge may be performed with the database and an
arbitrary sorted bucket sequence, with any number of buckets skipped, updated
or inserted as part of the merge. Such an API is required to allow efficient
bulk updates of a B-tree backed database, as it is suboptimal to require
constant tree mutations.

Other changes:

- Removed legacy mutable iteration API. Not needed with new merge API.
- Const-iteration of bucket database now uses an explicit const reference
  entry type to avoid needing to construct a temporary entry when we can
  instead just point directly into the backing ArrayStore.
- Micro-optimizations of node remover pass to avoid going via the cluster
  state's node state std::map for each bucket replica entry. Now uses a
  precomputed bit vector. Also avoids BucketId bit-reversing operations as
  much as possible by using raw bucket keys in more places.
- Changed wording and contents of the log message that triggers when buckets
  are removed from the DB due to no remaining nodes containing replicas for
  the bucket. It is now more obvious what the message actually means.
- Added several benchmark tests (disabled by default).
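
To illustrate the shape of the new API, here is a minimal sketch pieced
together from the test processors added in this commit (the timestamp and
bucket info values are illustrative only):

    // A MergingProcessor is invoked once per existing DB entry, in bucket
    // key order, and decides the fate of each entry via its return value.
    struct ExampleProcessor : BucketDatabase::MergingProcessor {
        using Result = BucketDatabase::MergingProcessor::Result;

        Result merge(BucketDatabase::Merger& m) override {
            if (m.bucket_id() == document::BucketId(16, 0x2a)) {
                return Result::Skip; // Entry is removed from the DB
            }
            if (m.bucket_id() == document::BucketId(16, 0x0b)) {
                // Mutate the current entry in place; it is written back.
                m.current_entry()->addNode(
                        BucketCopy(1000, 0, api::BucketInfo(1, 2, 3)),
                        toVector<uint16_t>(0));
                return Result::Update;
            }
            return Result::KeepUnchanged;
        }
    };

    ExampleProcessor proc;
    db().merge(proc); // Single ordered pass; no per-bucket tree mutations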
Diffstat (limited to 'storage/src/tests')
-rw-r--r--  storage/src/tests/distributor/bucketdatabasetest.cpp | 242
-rw-r--r--  storage/src/tests/distributor/bucketdbupdatertest.cpp | 102
2 files changed, 281 insertions(+), 63 deletions(-)
diff --git a/storage/src/tests/distributor/bucketdatabasetest.cpp b/storage/src/tests/distributor/bucketdatabasetest.cpp
index 92e0c534e31..cfb54edbe78 100644
--- a/storage/src/tests/distributor/bucketdatabasetest.cpp
+++ b/storage/src/tests/distributor/bucketdatabasetest.cpp
@@ -1,5 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "bucketdatabasetest.h"
+#include <vespa/vespalib/util/benchmark_timer.h>
+#include <chrono>
#include <iomanip>
#include <algorithm>
@@ -12,15 +14,25 @@ void BucketDatabaseTest::SetUp() {
}
namespace {
- BucketCopy BC(uint32_t nodeIdx) {
- return BucketCopy(0, nodeIdx, api::BucketInfo());
- }
- BucketInfo BI(uint32_t nodeIdx) {
- BucketInfo bi;
- bi.addNode(BC(nodeIdx), toVector<uint16_t>(0));
- return bi;
- }
+BucketCopy BC(uint32_t nodeIdx) {
+ return BucketCopy(0, nodeIdx, api::BucketInfo());
+}
+
+BucketInfo BI(uint32_t nodeIdx) {
+ BucketInfo bi;
+ bi.addNode(BC(nodeIdx), toVector<uint16_t>(0));
+ return bi;
+}
+
+BucketInfo BI3(uint32_t node0, uint32_t node1, uint32_t node2) {
+ BucketInfo bi;
+ bi.addNode(BC(node0), toVector<uint16_t>(node0, node1, node2));
+ bi.addNode(BC(node1), toVector<uint16_t>(node0, node1, node2));
+ bi.addNode(BC(node2), toVector<uint16_t>(node0, node1, node2));
+ return bi;
+}
+
}
TEST_P(BucketDatabaseTest, testClear) {
@@ -63,34 +75,23 @@ TEST_P(BucketDatabaseTest, testUpdateGetAndRemove) {
namespace {
-struct ModifyProcessor : public BucketDatabase::MutableEntryProcessor
-{
- bool process(BucketDatabase::Entry& e) override {
- if (e.getBucketId() == document::BucketId(16, 0x0b)) {
- e.getBucketInfo() = BI(7);
- } else if (e.getBucketId() == document::BucketId(16, 0x2a)) {
- e->clear();
- e->addNode(BC(4), toVector<uint16_t>(0));
- e->addNode(BC(5), toVector<uint16_t>(0));
- }
-
- return true;
- }
-};
-
struct ListAllProcessor : public BucketDatabase::EntryProcessor {
std::ostringstream ost;
- bool process(const BucketDatabase::Entry& e) override {
+ bool process(const BucketDatabase::ConstEntryRef& e) override {
ost << e << "\n";
return true;
}
};
-struct DummyProcessor : public BucketDatabase::EntryProcessor {
- std::ostringstream ost;
+std::string dump_db(const BucketDatabase& db) {
+ ListAllProcessor proc;
+ db.forEach(proc, document::BucketId());
+ return proc.ost.str();
+}
- bool process(const BucketDatabase::Entry&) override {
+struct DummyProcessor : public BucketDatabase::EntryProcessor {
+ bool process(const BucketDatabase::ConstEntryRef&) override {
return true;
}
};
@@ -99,7 +100,7 @@ struct DummyProcessor : public BucketDatabase::EntryProcessor {
struct StoppingProcessor : public BucketDatabase::EntryProcessor {
std::ostringstream ost;
- bool process(const BucketDatabase::Entry& e) override {
+ bool process(const BucketDatabase::ConstEntryRef& e) override {
ost << e << "\n";
if (e.getBucketId() == document::BucketId(16, 0x2a)) {
@@ -156,25 +157,6 @@ TEST_P(BucketDatabaseTest, testIterating) {
"node(idx=3,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n"),
proc.ost.str());
}
-
- {
- ModifyProcessor alterProc;
- db().forEach(alterProc, document::BucketId());
- // Verify content after altering
- ListAllProcessor proc;
- db().forEach(proc);
-
- EXPECT_EQ(
- std::string(
- "BucketId(0x4000000000000010) : "
- "node(idx=1,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n"
- "BucketId(0x400000000000002a) : "
- "node(idx=4,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false), "
- "node(idx=5,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n"
- "BucketId(0x400000000000000b) : "
- "node(idx=7,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n"),
- proc.ost.str());
- }
}
std::string
@@ -552,4 +534,170 @@ TEST_P(BucketDatabaseTest, testChildCount) {
EXPECT_EQ(0u, db().childCount(BucketId(3, 5)));
}
+using Merger = BucketDatabase::Merger;
+using TrailingInserter = BucketDatabase::TrailingInserter;
+using Result = BucketDatabase::MergingProcessor::Result;
+
+namespace {
+
+struct KeepUnchangedMergingProcessor : BucketDatabase::MergingProcessor {
+ Result merge(Merger&) override {
+ return Result::KeepUnchanged;
+ }
+};
+
+struct SkipBucketMergingProcessor : BucketDatabase::MergingProcessor {
+ BucketId _skip_bucket;
+ explicit SkipBucketMergingProcessor(BucketId skip_bucket) : _skip_bucket(skip_bucket) {}
+
+ Result merge(Merger& m) override {
+ return (m.bucket_id() == _skip_bucket) ? Result::Skip : Result::KeepUnchanged;
+ }
+};
+
+struct UpdateBucketMergingProcessor : BucketDatabase::MergingProcessor {
+ BucketId _update_bucket;
+ explicit UpdateBucketMergingProcessor(BucketId update_bucket) : _update_bucket(update_bucket) {}
+
+ Result merge(Merger& m) override {
+ if (m.bucket_id() == _update_bucket) {
+ auto& e = m.current_entry();
+ // Add a replica and alter the current one.
+ e->addNode(BucketCopy(123456, 0, api::BucketInfo(2, 3, 4)), toVector<uint16_t>(0));
+ e->addNode(BucketCopy(234567, 1, api::BucketInfo(3, 4, 5)), toVector<uint16_t>(1));
+ return Result::Update;
+ }
+ return Result::KeepUnchanged;
+ }
+};
+
+struct InsertBeforeBucketMergingProcessor : BucketDatabase::MergingProcessor {
+ BucketId _before_bucket;
+ explicit InsertBeforeBucketMergingProcessor(BucketId before_bucket) : _before_bucket(before_bucket) {}
+
+ Result merge(Merger& m) override {
+ if (m.bucket_id() == _before_bucket) {
+ // Assumes _before_bucket is > the inserted bucket
+ m.insert_before_current(BucketDatabase::Entry(document::BucketId(16, 2), BI(2)));
+ }
+ return Result::KeepUnchanged;
+ }
+};
+
+struct InsertAtEndMergingProcessor : BucketDatabase::MergingProcessor {
+ Result merge(Merger&) override {
+ return Result::KeepUnchanged;
+ }
+
+ void insert_remaining_at_end(TrailingInserter& inserter) override {
+ inserter.insert_at_end(BucketDatabase::Entry(document::BucketId(16, 3), BI(3)));
+ }
+};
+
+}
+
+TEST_P(BucketDatabaseTest, merge_keep_unchanged_result_does_not_alter_db_contents) {
+ db().update(BucketDatabase::Entry(BucketId(16, 1), BI(1)));
+ db().update(BucketDatabase::Entry(BucketId(16, 2), BI(2)));
+
+ KeepUnchangedMergingProcessor proc;
+ db().merge(proc);
+
+ EXPECT_EQ(dump_db(db()),
+ "BucketId(0x4000000000000002) : "
+ "node(idx=2,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n"
+ "BucketId(0x4000000000000001) : "
+ "node(idx=1,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n");
+}
+
+TEST_P(BucketDatabaseTest, merge_entry_skipping_removes_entry_from_db) {
+ db().update(BucketDatabase::Entry(BucketId(16, 1), BI(1)));
+ db().update(BucketDatabase::Entry(BucketId(16, 2), BI(2)));
+ db().update(BucketDatabase::Entry(BucketId(16, 3), BI(3)));
+
+ SkipBucketMergingProcessor proc(BucketId(16, 2));
+ db().merge(proc);
+
+ EXPECT_EQ(dump_db(db()),
+ "BucketId(0x4000000000000001) : "
+ "node(idx=1,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n"
+ "BucketId(0x4000000000000003) : "
+ "node(idx=3,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n");
+}
+
+TEST_P(BucketDatabaseTest, merge_update_result_updates_entry_in_db) {
+ db().update(BucketDatabase::Entry(BucketId(16, 1), BI(1)));
+ db().update(BucketDatabase::Entry(BucketId(16, 2), BI(2)));
+
+ UpdateBucketMergingProcessor proc(BucketId(16, 1));
+ db().merge(proc);
+
+ EXPECT_EQ(dump_db(db()),
+ "BucketId(0x4000000000000002) : "
+ "node(idx=2,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n"
+ "BucketId(0x4000000000000001) : "
+ "node(idx=1,crc=0x3,docs=4/4,bytes=5/5,trusted=false,active=false,ready=false), "
+ "node(idx=0,crc=0x2,docs=3/3,bytes=4/4,trusted=false,active=false,ready=false)\n");
+}
+
+TEST_P(BucketDatabaseTest, merge_can_insert_entry_before_current_bucket) {
+ db().update(BucketDatabase::Entry(BucketId(16, 1), BI(1)));
+ db().update(BucketDatabase::Entry(BucketId(16, 3), BI(3)));
+
+ InsertBeforeBucketMergingProcessor proc(BucketId(16, 1));
+ db().merge(proc);
+
+ // Bucket (...)00002 is inserted by the merge processor
+ EXPECT_EQ(dump_db(db()),
+ "BucketId(0x4000000000000002) : "
+ "node(idx=2,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n"
+ "BucketId(0x4000000000000001) : "
+ "node(idx=1,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n"
+ "BucketId(0x4000000000000003) : "
+ "node(idx=3,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n");
+}
+
+TEST_P(BucketDatabaseTest, merge_can_insert_entry_at_end) {
+ db().update(BucketDatabase::Entry(BucketId(16, 1), BI(1)));
+ db().update(BucketDatabase::Entry(BucketId(16, 2), BI(2)));
+
+ InsertAtEndMergingProcessor proc;
+ db().merge(proc);
+
+ EXPECT_EQ(dump_db(db()),
+ "BucketId(0x4000000000000002) : "
+ "node(idx=2,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n"
+ "BucketId(0x4000000000000001) : "
+ "node(idx=1,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n"
+ "BucketId(0x4000000000000003) : "
+ "node(idx=3,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false,ready=false)\n");
+}
+
+TEST_P(BucketDatabaseTest, DISABLED_benchmark_const_iteration) {
+ constexpr uint32_t superbuckets = 1u << 16u;
+ constexpr uint32_t sub_buckets = 14;
+ constexpr uint32_t n_buckets = superbuckets * sub_buckets;
+
+ std::vector<uint64_t> bucket_keys;
+ bucket_keys.reserve(n_buckets);
+
+ for (uint32_t sb = 0; sb < superbuckets; ++sb) {
+ for (uint64_t i = 0; i < sub_buckets; ++i) {
+ document::BucketId bucket(48, (i << 32ULL) | sb);
+ bucket_keys.emplace_back(bucket.toKey());
+ }
+ }
+ std::sort(bucket_keys.begin(), bucket_keys.end());
+ for (uint64_t k : bucket_keys) {
+ db().update(BucketDatabase::Entry(BucketId(BucketId::keyToBucketId(k)), BI3(0, 1, 2)));
+ }
+
+ auto elapsed = vespalib::BenchmarkTimer::benchmark([&] {
+ DummyProcessor proc;
+ db().forEach(proc, document::BucketId());
+ }, 5);
+ fprintf(stderr, "Full DB iteration of %s takes %g seconds\n",
+ db().toString(false).c_str(), elapsed);
+}
+
}
diff --git a/storage/src/tests/distributor/bucketdbupdatertest.cpp b/storage/src/tests/distributor/bucketdbupdatertest.cpp
index 1cfc1692edb..cdaa6e9aaa3 100644
--- a/storage/src/tests/distributor/bucketdbupdatertest.cpp
+++ b/storage/src/tests/distributor/bucketdbupdatertest.cpp
@@ -514,7 +514,7 @@ public:
OutdatedNodesMap outdatedNodesMap;
state = PendingClusterState::createForClusterStateChange(
clock, clusterInfo, sender,
- owner.getBucketSpaceRepo(), owner.getReadOnlyBucketSpaceRepo(),
+ owner.getBucketSpaceRepo(),
cmd, outdatedNodesMap, api::Timestamp(1));
}
@@ -526,8 +526,7 @@ public:
owner.createClusterInfo(oldClusterState));
state = PendingClusterState::createForDistributionChange(
- clock, clusterInfo, sender, owner.getBucketSpaceRepo(),
- owner.getReadOnlyBucketSpaceRepo(), api::Timestamp(1));
+ clock, clusterInfo, sender, owner.getBucketSpaceRepo(), api::Timestamp(1));
}
};
@@ -543,6 +542,8 @@ public:
{
return std::make_unique<PendingClusterStateFixture>(*this, oldClusterState);
}
+
+ uint32_t populate_bucket_db_via_request_bucket_info_for_benchmarking();
};
BucketDBUpdaterTest::BucketDBUpdaterTest()
@@ -863,14 +864,14 @@ TEST_F(BucketDBUpdaterTest, testBitChange) {
const auto &req = dynamic_cast<const RequestBucketInfoCommand &>(*_sender.commands[bsi]);
auto sreply = std::make_shared<RequestBucketInfoReply>(req);
sreply->setAddress(storageAddress(0));
- api::RequestBucketInfoReply::EntryVector &vec = sreply->getBucketInfo();
+ auto& vec = sreply->getBucketInfo();
if (req.getBucketSpace() == FixedBucketSpaces::default_space()) {
int cnt=0;
for (int i=0; cnt < 2; i++) {
lib::Distribution distribution = defaultDistributorBucketSpace().getDistribution();
std::vector<uint16_t> distributors;
if (distribution.getIdealDistributorNode(
- lib::ClusterState("redundancy:1 bits:14 storage:1 distributor:2"),
+ lib::ClusterState("bits:14 storage:1 distributor:2"),
document::BucketId(16, i))
== 0)
{
@@ -1373,8 +1374,7 @@ BucketDBUpdaterTest::getSentNodesDistributionChanged(
ClusterInformation::CSP clusterInfo(createClusterInfo(oldClusterState));
std::unique_ptr<PendingClusterState> state(
PendingClusterState::createForDistributionChange(
- clock, clusterInfo, sender, getBucketSpaceRepo(),
- getReadOnlyBucketSpaceRepo(), api::Timestamp(1)));
+ clock, clusterInfo, sender, getBucketSpaceRepo(), api::Timestamp(1)));
sortSentMessagesByIndex(sender);
@@ -1508,7 +1508,7 @@ TEST_F(BucketDBUpdaterTest, testPendingClusterStateReceive) {
OutdatedNodesMap outdatedNodesMap;
std::unique_ptr<PendingClusterState> state(
PendingClusterState::createForClusterStateChange(
- clock, clusterInfo, sender, getBucketSpaceRepo(), getReadOnlyBucketSpaceRepo(),
+ clock, clusterInfo, sender, getBucketSpaceRepo(),
cmd, outdatedNodesMap, api::Timestamp(1)));
ASSERT_EQ(messageCount(3), sender.commands.size());
@@ -1617,7 +1617,7 @@ struct BucketDumper : public BucketDatabase::EntryProcessor
{
}
- bool process(const BucketDatabase::Entry& e) override {
+ bool process(const BucketDatabase::ConstEntryRef& e) override {
document::BucketId bucketId(e.getBucketId());
ost << (uint32_t)bucketId.getRawId() << ":";
@@ -1661,7 +1661,7 @@ BucketDBUpdaterTest::mergeBucketLists(
ClusterInformation::CSP clusterInfo(createClusterInfo("cluster:d"));
std::unique_ptr<PendingClusterState> state(
PendingClusterState::createForClusterStateChange(
- clock, clusterInfo, sender, getBucketSpaceRepo(), getReadOnlyBucketSpaceRepo(),
+ clock, clusterInfo, sender, getBucketSpaceRepo(),
cmd, outdatedNodesMap, beforeTime));
parseInputData(existingData, beforeTime, *state, includeBucketInfo);
@@ -1680,7 +1680,7 @@ BucketDBUpdaterTest::mergeBucketLists(
ClusterInformation::CSP clusterInfo(createClusterInfo(oldState.toString()));
std::unique_ptr<PendingClusterState> state(
PendingClusterState::createForClusterStateChange(
- clock, clusterInfo, sender, getBucketSpaceRepo(), getReadOnlyBucketSpaceRepo(),
+ clock, clusterInfo, sender, getBucketSpaceRepo(),
cmd, outdatedNodesMap, afterTime));
parseInputData(newData, afterTime, *state, includeBucketInfo);
@@ -1931,7 +1931,7 @@ struct FunctorProcessor : BucketDatabase::EntryProcessor {
template <typename F>
explicit FunctorProcessor(F&& f) : _f(std::forward<F>(f)) {}
- bool process(const BucketDatabase::Entry& e) override {
+ bool process(const BucketDatabase::ConstEntryRef& e) override {
_f(e);
return true;
}
@@ -2580,19 +2580,17 @@ TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_without_pending_trans
TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
// Need to trigger an initial edge to complete first bucket scan
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("storage:1 distributor:2"),
+ ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:2 storage:1"),
messageCount(1), 0));
_sender.clear();
- lib::ClusterState state("storage:1 distributor:1");
+ lib::ClusterState state("distributor:1 storage:1");
setSystemState(state);
constexpr uint32_t superbuckets = 1u << 16u;
constexpr uint32_t sub_buckets = 14;
constexpr uint32_t n_buckets = superbuckets * sub_buckets;
- vespalib::BenchmarkTimer timer(1.0);
-
ASSERT_EQ(_bucketSpaces.size(), _sender.commands.size());
for (uint32_t bsi = 0; bsi < _bucketSpaces.size(); ++bsi) {
ASSERT_EQ(_sender.commands[bsi]->getType(), MessageType::REQUESTBUCKETINFO);
@@ -2610,6 +2608,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
}
}
+ vespalib::BenchmarkTimer timer(1.0);
// Global space has no buckets but will serve as a trigger for merging
// buckets into the DB. This lets us measure the overhead of just this part.
if (req.getBucketSpace() == FixedBucketSpaces::global_space()) {
@@ -2626,6 +2625,77 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
EXPECT_EQ(size_t(0), mutable_global_db().size());
}
+uint32_t BucketDBUpdaterTest::populate_bucket_db_via_request_bucket_info_for_benchmarking() {
+ // Need to trigger an initial edge to complete first bucket scan
+ setAndEnableClusterState(lib::ClusterState("distributor:2 storage:1"), messageCount(1), 0);
+ _sender.clear();
+
+ lib::ClusterState state("distributor:1 storage:1");
+ setSystemState(state);
+
+ constexpr uint32_t superbuckets = 1u << 16u;
+ constexpr uint32_t sub_buckets = 14;
+ constexpr uint32_t n_buckets = superbuckets * sub_buckets;
+
+ assert(_bucketSpaces.size() == _sender.commands.size());
+ for (uint32_t bsi = 0; bsi < _bucketSpaces.size(); ++bsi) {
+ assert(_sender.commands[bsi]->getType() == MessageType::REQUESTBUCKETINFO);
+ const auto& req = dynamic_cast<const RequestBucketInfoCommand&>(*_sender.commands[bsi]);
+
+ auto sreply = std::make_shared<RequestBucketInfoReply>(req);
+ sreply->setAddress(storageAddress(0));
+ auto& vec = sreply->getBucketInfo();
+ if (req.getBucketSpace() == FixedBucketSpaces::default_space()) {
+ for (uint32_t sb = 0; sb < superbuckets; ++sb) {
+ for (uint64_t i = 0; i < sub_buckets; ++i) {
+ document::BucketId bucket(48, (i << 32ULL) | sb);
+ vec.push_back(api::RequestBucketInfoReply::Entry(bucket, api::BucketInfo(10, 1, 1)));
+ }
+ }
+ }
+ getBucketDBUpdater().onRequestBucketInfoReply(sreply);
+ }
+
+ assert(mutable_default_db().size() == n_buckets);
+ assert(mutable_global_db().size() == 0);
+ return n_buckets;
+}
+
+TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_removing_buckets_for_unavailable_storage_nodes) {
+ const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking();
+
+ lib::ClusterState no_op_state("distributor:1 storage:1 .0.s:m"); // Removing all buckets via ownership
+ vespalib::BenchmarkTimer timer(1.0);
+ timer.before();
+ setSystemState(no_op_state);
+ timer.after();
+ fprintf(stderr, "Took %g seconds to scan and remove %u buckets\n", timer.min_time(), n_buckets);
+}
+
+TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_no_buckets_removed_during_node_remover_db_pass) {
+ const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking();
+
+ // TODO this benchmark is void if we further restrict the pruning elision logic to allow
+ // elision when storage nodes come online.
+ lib::ClusterState no_op_state("distributor:1 storage:2"); // Not removing any buckets
+ vespalib::BenchmarkTimer timer(1.0);
+ timer.before();
+ setSystemState(no_op_state);
+ timer.after();
+ fprintf(stderr, "Took %g seconds to scan %u buckets with no-op action\n", timer.min_time(), n_buckets);
+}
+
+TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_all_buckets_removed_during_node_remover_db_pass) {
+ const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking();
+
+ lib::ClusterState no_op_state("distributor:1 storage:1 .0.s:m"); // Removing all buckets via all replicas gone
+ vespalib::BenchmarkTimer timer(1.0);
+ timer.before();
+ setSystemState(no_op_state);
+ timer.after();
+ fprintf(stderr, "Took %g seconds to scan and remove %u buckets\n", timer.min_time(), n_buckets);
+}
+
TEST_F(BucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_state_is_pending) {
auto initial_baseline = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d");
auto initial_default = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m");