author    Henning Baldersheim <balder@yahoo-inc.com>    2023-08-28 13:58:23 +0000
committer Henning Baldersheim <balder@yahoo-inc.com>    2023-08-28 13:58:23 +0000
commit    81ae96dc2f3164b024ae57719bb1ebdcfbf813d0 (patch)
tree      130e5358bc18923d7e4e55d74cb936a19c582399 /storage
parent    bd5fa1d77691b408a52de516657dc7e7410be81d (diff)
- Remove unused methods.
- Remove unused parameters.
- Make the template parameter a runtime parameter (see the sketch below).
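
The last point is the core of the change: DistributorInfoGatherer's compile-time template<bool log> flag becomes a runtime bool passed to the constructor, so a single class replaces the two template instantiations. A minimal, self-contained sketch of that pattern follows; the names (GathererBefore, GathererAfter, visit) are hypothetical and only the shape of the refactor mirrors the commit.

// Sketch of converting a template parameter into a runtime parameter.
#include <cstdio>

// Before: the logging decision is a template parameter, so <true> and
// <false> are two distinct types that must be instantiated separately.
template <bool log>
struct GathererBefore {
    void visit(int bucket) const {
        if constexpr (log) {
            std::printf("visiting bucket %d\n", bucket);
        }
        // ... collect bucket info ...
    }
};

// After: one class; the decision is an ordinary member set at construction.
class GathererAfter {
public:
    explicit GathererAfter(bool log) : _log(log) {}
    void visit(int bucket) const {
        if (_log) {
            std::printf("visiting bucket %d\n", bucket);
        }
        // ... collect bucket info ...
    }
private:
    bool _log;
};

int main() {
    GathererBefore<true>().visit(1);  // type chosen at compile time
    GathererAfter(true).visit(2);     // flag chosen at runtime, as in the commit
    return 0;
}

The trade-off is one ordinary (and easily predicted) branch per bucket instead of a compile-time if constexpr, in exchange for a single, simpler class whose constructor and call operator can be defined out of line.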
Diffstat (limited to 'storage')
-rw-r--r--  storage/src/vespa/storage/bucketdb/bucketmanager.cpp  127
-rw-r--r--  storage/src/vespa/storage/bucketdb/bucketmanager.h      13
2 files changed, 68 insertions, 72 deletions
diff --git a/storage/src/vespa/storage/bucketdb/bucketmanager.cpp b/storage/src/vespa/storage/bucketdb/bucketmanager.cpp
index 57d3c241cf0..d7a0000f270 100644
--- a/storage/src/vespa/storage/bucketdb/bucketmanager.cpp
+++ b/storage/src/vespa/storage/bucketdb/bucketmanager.cpp
@@ -90,56 +90,60 @@ BucketManager::print(std::ostream& out, bool ,const std::string& ) const
namespace {
-template<bool log>
class DistributorInfoGatherer
{
using ResultArray = api::RequestBucketInfoReply::EntryVector;
DistributorStateCache _state;
std::unordered_map<uint16_t, ResultArray>& _result;
- const document::BucketIdFactory& _factory;
- std::shared_ptr<const lib::Distribution> _storageDistribution;
+ bool _spam;
public:
- DistributorInfoGatherer(
- const lib::ClusterState& systemState,
- std::unordered_map<uint16_t, ResultArray>& result,
- const document::BucketIdFactory& factory,
- std::shared_ptr<const lib::Distribution> distribution)
- : _state(*distribution, systemState),
+ DistributorInfoGatherer(const lib::ClusterState& systemState,
+ std::unordered_map<uint16_t, ResultArray>& result,
+ const lib::Distribution & distribution,
+ bool spam);
+
+ StorBucketDatabase::Decision operator()(uint64_t bucketId, const StorBucketDatabase::Entry& data);
+
+};
+
+DistributorInfoGatherer::DistributorInfoGatherer(const lib::ClusterState& systemState,
+ std::unordered_map<uint16_t, ResultArray>& result,
+ const lib::Distribution & distribution,
+ bool spam)
+ : _state(distribution, systemState),
_result(result),
- _factory(factory),
- _storageDistribution(std::move(distribution))
- {
- }
+ _spam(spam)
+{
+}
- StorBucketDatabase::Decision operator()(uint64_t bucketId,const StorBucketDatabase::Entry& data)
- {
- document::BucketId b(document::BucketId::keyToBucketId(bucketId));
- try{
- uint16_t i = _state.getOwner(b);
- auto it = _result.find(i);
- if constexpr (log) {
- LOG(spam, "Bucket %s (reverse %" PRIu64 "), should be handled by distributor %u which we are %sgenerating state for.",
- b.toString().c_str(), bucketId, i, it == _result.end() ? "not " : "");
- }
- if (it != _result.end()) {
- api::RequestBucketInfoReply::Entry entry;
- entry._bucketId = b;
- entry._info = data.getBucketInfo();
- it->second.push_back(entry);
- }
- } catch (lib::TooFewBucketBitsInUseException& e) {
- LOGBP(warning, "Cannot assign bucket %s to a distributor as bucket only specifies %u bits.",
- b.toString().c_str(), b.getUsedBits());
- } catch (lib::NoDistributorsAvailableException& e) {
- LOGBP(warning, "No distributors available while processing request bucket info. Distribution hash: %s, cluster state: %s",
- _state.getDistribution().getNodeGraph().getDistributionConfigHash().c_str(), _state.getClusterState().toString().c_str());
+StorBucketDatabase::Decision
+DistributorInfoGatherer::operator()(uint64_t bucketId, const StorBucketDatabase::Entry& data)
+{
+ document::BucketId b(document::BucketId::keyToBucketId(bucketId));
+ try {
+ uint16_t i = _state.getOwner(b);
+ auto it = _result.find(i);
+ if (_spam) {
+ LOG(spam, "Bucket %s (reverse %" PRIu64 "), should be handled by distributor %u which we are %sgenerating state for.",
+ b.toString().c_str(), bucketId, i, it == _result.end() ? "not " : "");
}
- return StorBucketDatabase::Decision::CONTINUE;
+ if (it != _result.end()) {
+ api::RequestBucketInfoReply::Entry entry;
+ entry._bucketId = b;
+ entry._info = data.getBucketInfo();
+ it->second.push_back(entry);
+ }
+ } catch (lib::TooFewBucketBitsInUseException& e) {
+ LOGBP(warning, "Cannot assign bucket %s to a distributor as bucket only specifies %u bits.",
+ b.toString().c_str(), b.getUsedBits());
+ } catch (lib::NoDistributorsAvailableException& e) {
+ LOGBP(warning, "No distributors available while processing request bucket info. Distribution hash: %s, cluster state: %s",
+ _state.getDistribution().getNodeGraph().getDistributionConfigHash().c_str(), _state.getClusterState().toString().c_str());
}
-
-};
+ return StorBucketDatabase::Decision::CONTINUE;
+}
struct MetricsUpdater {
struct Count {
@@ -201,38 +205,33 @@ BucketManager::getBucketInfo(const document::Bucket &bucket) const
}
void
-BucketManager::updateMetrics(bool updateDocCount)
+BucketManager::updateMetrics()
{
- LOG(debug, "Iterating bucket database to update metrics%s%s",
- updateDocCount ? "" : ", minusedbits only",
+ LOG(debug, "Iterating bucket database to update metrics%s",
_doneInitialized ? "" : ", server is not done initializing");
- if (!updateDocCount || _doneInitialized) {
+ if (_doneInitialized) {
MetricsUpdater total;
for (const auto& space : _component.getBucketSpaceRepo()) {
MetricsUpdater m;
auto guard = space.second->bucketDatabase().acquire_read_guard();
guard->for_each(std::ref(m));
total.add(m);
- if (updateDocCount) {
- auto bm = _metrics->bucket_spaces.find(space.first);
- assert(bm != _metrics->bucket_spaces.end());
- bm->second->buckets_total.set(m.count.buckets);
- bm->second->docs.set(m.count.docs);
- bm->second->bytes.set(m.count.bytes);
- bm->second->active_buckets.set(m.count.active);
- bm->second->ready_buckets.set(m.count.ready);
- }
- }
- if (updateDocCount) {
- auto & dest = *_metrics->disk;
- const auto & src = total.count;
- dest.buckets.addValue(src.buckets);
- dest.docs.addValue(src.docs);
- dest.bytes.addValue(src.bytes);
- dest.active.addValue(src.active);
- dest.ready.addValue(src.ready);
+ auto bm = _metrics->bucket_spaces.find(space.first);
+ assert(bm != _metrics->bucket_spaces.end());
+ bm->second->buckets_total.set(m.count.buckets);
+ bm->second->docs.set(m.count.docs);
+ bm->second->bytes.set(m.count.bytes);
+ bm->second->active_buckets.set(m.count.active);
+ bm->second->ready_buckets.set(m.count.ready);
}
+ auto & dest = *_metrics->disk;
+ const auto & src = total.count;
+ dest.buckets.addValue(src.buckets);
+ dest.docs.addValue(src.docs);
+ dest.bytes.addValue(src.bytes);
+ dest.active.addValue(src.active);
+ dest.ready.addValue(src.ready);
}
update_bucket_db_memory_usage_metrics();
}
@@ -494,8 +493,7 @@ BucketManager::processRequestBucketInfoCommands(document::BucketSpace bucketSpac
reqs.size(), bucketSpace.toString().c_str(), clusterState->toString().c_str(), our_hash.c_str());
std::lock_guard clusterStateGuard(_clusterStateLock);
- for (auto it = reqs.rbegin(); it != reqs.rend(); it++) {
- const auto & req = *it;
+ for (const auto & req : std::ranges::reverse_view(reqs)) {
// Currently small requests should not be forwarded to worker thread
assert(req->hasSystemState());
const auto their_hash = req->getDistributionHash();
@@ -566,13 +564,12 @@ BucketManager::processRequestBucketInfoCommands(document::BucketSpace bucketSpac
framework::MilliSecTimer runStartTime(_component.getClock());
// Don't allow logging to lower performance of inner loop.
// Call other type of instance if logging
- const document::BucketIdFactory& idFac(_component.getBucketIdFactory());
if (LOG_WOULD_LOG(spam)) {
- DistributorInfoGatherer<true> builder(*clusterState, result, idFac, distribution);
+ DistributorInfoGatherer builder(*clusterState, result, *distribution, true);
_component.getBucketDatabase(bucketSpace).for_each_chunked(std::ref(builder),
"BucketManager::processRequestBucketInfoCommands-1");
} else {
- DistributorInfoGatherer<false> builder(*clusterState, result, idFac, distribution);
+ DistributorInfoGatherer builder(*clusterState, result, *distribution, false);
_component.getBucketDatabase(bucketSpace).for_each_chunked(std::ref(builder),
"BucketManager::processRequestBucketInfoCommands-2");
}
diff --git a/storage/src/vespa/storage/bucketdb/bucketmanager.h b/storage/src/vespa/storage/bucketdb/bucketmanager.h
index cb0bc6a9f95..547ff87dfc0 100644
--- a/storage/src/vespa/storage/bucketdb/bucketmanager.h
+++ b/storage/src/vespa/storage/bucketdb/bucketmanager.h
@@ -72,7 +72,7 @@ private:
class ScopedQueueDispatchGuard {
BucketManager& _mgr;
public:
- ScopedQueueDispatchGuard(BucketManager&);
+ explicit ScopedQueueDispatchGuard(BucketManager&);
~ScopedQueueDispatchGuard();
ScopedQueueDispatchGuard(const ScopedQueueDispatchGuard&) = delete;
@@ -83,7 +83,7 @@ public:
BucketManager(const config::ConfigUri&, ServiceLayerComponentRegister&);
BucketManager(const BucketManager&) = delete;
BucketManager& operator=(const BucketManager&) = delete;
- ~BucketManager();
+ ~BucketManager() override;
void startWorkerThread();
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
@@ -94,7 +94,7 @@ public:
/** Get info for given bucket (Used for whitebox testing) */
StorBucketDatabase::Entry getBucketInfo(const document::Bucket &id) const;
- void force_db_sweep_and_metric_update() { updateMetrics(true); }
+ void force_db_sweep_and_metric_update() { updateMetrics(); }
bool onUp(const std::shared_ptr<api::StorageMessage>&) override;
@@ -112,8 +112,8 @@ private:
void onDoneInit() override { _doneInitialized = true; }
void onClose() override;
- void updateMetrics(bool updateDocCount);
- void updateMetrics(const MetricLockGuard &) override { updateMetrics(true); }
+ void updateMetrics();
+ void updateMetrics(const MetricLockGuard &) override { updateMetrics(); }
void update_bucket_db_memory_usage_metrics();
void updateMinUsedBits();
@@ -127,8 +127,7 @@ private:
* Returns whether request was enqueued (and should thus not be forwarded
* by the caller).
*/
- bool enqueueAsConflictIfProcessingRequest(
- const api::StorageReply::SP& reply);
+ bool enqueueAsConflictIfProcessingRequest(const api::StorageReply::SP& reply);
/**
* Signals that code is entering a section where certain bucket tree