author    Henning Baldersheim <balder@yahoo-inc.com>  2022-01-28 12:14:49 +0100
committer GitHub <noreply@github.com>                 2022-01-28 12:14:49 +0100
commit    30158a9d1d4f275ce087744e50f4049049afdb6d (patch)
tree      331cffed8f712af119a23b827ea8cd2068e70709 /searchlib
parent    3337963bf5e89f27be9cba0b9b92b4d6a6e75737 (diff)
parent    02854fd7ecbb93b3bba1aecea0b9ed83b4ce8545 (diff)
Merge pull request #20968 from vespa-engine/balder/ignore-spread-in-the-active-file
Ignore the current active file when computing spread.
Diffstat (limited to 'searchlib')
-rw-r--r--  searchlib/src/vespa/searchlib/docstore/logdatastore.cpp | 34
1 file changed, 18 insertions, 16 deletions
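
For context, the change moves getMaxBucketSpread() so it can follow the same pattern getDiskBloat() already uses: skip the currently active file chunk, since that file is never frozen or considered for compaction/reordering while it is still being written to. Below is a minimal standalone sketch of that pattern. FileChunk, Store and the field names here are simplified stand-ins for illustration only, not the real Vespa classes (the real code uses FileId, MonitorGuard, a bucketizer and holds _updateLock while iterating).

// Minimal sketch: compute the maximum bucket spread over all completed
// (frozen) file chunks, ignoring the chunk that is currently active.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

struct FileChunk {
    bool   frozen;        // completed chunks are frozen and eligible for compaction
    double bucketSpread;  // how spread out a bucket's documents are in this file
};

struct Store {
    std::vector<std::unique_ptr<FileChunk>> fileChunks;
    size_t activeIdx = 0;       // index of the chunk currently being appended to
    bool   hasBucketizer = true;

    double getMaxBucketSpread() const {
        double maxSpread = 1.0;
        for (size_t i = 0; i < fileChunks.size(); ++i) {
            // Skip the active file: it is never reordered or compacted until it
            // has been completed and frozen, so its spread should not be able
            // to trigger compaction.
            if (i == activeIdx) continue;
            const auto &fc = fileChunks[i];
            if (fc && hasBucketizer && fc->frozen) {
                maxSpread = std::max(maxSpread, fc->bucketSpread);
            }
        }
        return maxSpread;
    }
};

int main() {
    Store s;
    s.fileChunks.push_back(std::make_unique<FileChunk>(FileChunk{true, 1.4}));
    s.fileChunks.push_back(std::make_unique<FileChunk>(FileChunk{true, 2.7}));  // active, ignored
    s.fileChunks.push_back(std::make_unique<FileChunk>(FileChunk{true, 1.9}));
    s.activeIdx = 1;
    std::cout << s.getMaxBucketSpread() << "\n";  // prints 1.9, not 2.7
    return 0;
}

Before this change the active file's spread could be included once it was frozen in place within the loop's old location; the relocated version mirrors getDiskBloat(), which already excluded the active file from its bloat accounting for the same reason.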
diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
index 3af7338d73a..f2c4e12488a 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
@@ -330,21 +330,6 @@ LogDataStore::initFlush(uint64_t syncToken)
return syncToken;
}
-double
-LogDataStore::getMaxBucketSpread() const
-{
- double maxSpread(1.0);
- MonitorGuard guard(_updateLock);
- for (const auto & fc : _fileChunks) {
- if (fc) {
- if (_bucketizer && fc->frozen()) {
- maxSpread = std::max(maxSpread, fc->getBucketSpread());
- }
- }
- }
- return maxSpread;
-}
-
std::pair<bool, LogDataStore::FileId>
LogDataStore::findNextToCompact(bool dueToBloat)
{
@@ -579,6 +564,22 @@ LogDataStore::getDiskHeaderFootprint() const
return sz;
}
+double
+LogDataStore::getMaxBucketSpread() const
+{
+ double maxSpread(1.0);
+ MonitorGuard guard(_updateLock);
+ for (FileId i(0); i < FileId(_fileChunks.size()); i = i.next()) {
+    /// Ignore the active file as it is never considered for reordering until completed and frozen.
+ if (i != _active) {
+ const auto & fc = _fileChunks[i.getId()];
+ if (fc && _bucketizer && fc->frozen()) {
+ maxSpread = std::max(maxSpread, fc->getBucketSpread());
+ }
+ }
+ }
+ return maxSpread;
+}
size_t
LogDataStore::getDiskBloat() const
@@ -586,7 +587,8 @@ LogDataStore::getDiskBloat() const
MonitorGuard guard(_updateLock);
size_t sz(0);
for (FileId i(0); i < FileId(_fileChunks.size()); i = i.next()) {
- /// Do not count the holes in the last file as bloat
+ /// Do not count the holes in the last file as bloat as it is
+ /// never considered for compaction until completed and frozen.
if (i != _active) {
const auto & chunk = _fileChunks[i.getId()];
if (chunk) {