author    Tor Egge <Tor.Egge@online.no>  2022-10-04 23:31:58 +0200
committer Tor Egge <Tor.Egge@online.no>  2022-10-04 23:31:58 +0200
commit    12ef321b18b2f3a0ff94270d47dec1f91fb8b4ed (patch)
tree      6fbbc6db055a0b8d2ae9b098382bbc991822c11a /vespalib
parent    d03281e17fea3d4672533f6410808400481e9730 (diff)
Add vespalib::datastore::CompactingBuffers.
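
The updated call sites in this patch all follow the same pattern: start compaction, build an EntryRefFilter from the returned handle, relocate the refs that live in compacting buffers, then finish. Below is a minimal sketch of that flow, not part of the patch itself; `Store`, `refs`, and `store.move(ref)` are hypothetical stand-ins for a concrete datastore-backed structure and its relocation hook, while the CompactingBuffers calls mirror the updated callers in this diff.

// Sketch only: assumes a datastore-backed Store exposing
// start_compact_worst_buffers() and a move(ref) relocation hook.
#include <vespa/vespalib/datastore/compacting_buffers.h>
#include <vespa/vespalib/datastore/compaction_spec.h>
#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <vespa/vespalib/datastore/entry_ref_filter.h>

template <typename Store, typename RefVector>
void compact_worst(Store& store, RefVector& refs)
{
    vespalib::datastore::CompactionSpec spec(true, false);
    vespalib::datastore::CompactionStrategy strategy;
    // Now returns a CompactingBuffers handle instead of a raw vector of buffer ids.
    auto compacting_buffers = store.start_compact_worst_buffers(spec, strategy);
    // The handle builds the filter itself; callers no longer need the ref
    // type's buffer count and offset bits.
    auto filter = compacting_buffers->make_entry_ref_filter();
    for (auto& ref : refs) {
        if (ref.valid() && filter.has(ref)) {
            ref = store.move(ref); // assumed relocation hook
        }
    }
    // Replaces the old finishCompact(to_hold) call at the call sites.
    compacting_buffers->finish();
}
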
Diffstat (limited to 'vespalib')
-rw-r--r--  vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp | 27
-rw-r--r--  vespalib/src/tests/btree/btree_store/btree_store_test.cpp | 18
-rw-r--r--  vespalib/src/tests/datastore/array_store/array_store_test.cpp | 4
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btree.hpp | 4
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodeallocator.h | 2
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodestore.h | 4
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodestore.hpp | 5
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreestore.h | 12
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreestore.hpp | 44
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/CMakeLists.txt | 1
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/array_store.hpp | 23
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/compacting_buffers.cpp | 38
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/compacting_buffers.h | 32
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastore.hpp | 2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastorebase.cpp | 10
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastorebase.h | 6
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store.hpp | 36
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h | 10
18 files changed, 147 insertions, 131 deletions
diff --git a/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp b/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp
index 4716e91c2c4..c68ff07491e 100644
--- a/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp
+++ b/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp
@@ -64,8 +64,7 @@ public:
uint32_t get(EntryRef ref) const { return _store.getEntry(ref); }
uint32_t get_acquire(const AtomicEntryRef& ref) const { return get(ref.load_acquire()); }
uint32_t get_relaxed(const AtomicEntryRef& ref) const { return get(ref.load_relaxed()); }
- std::vector<uint32_t> start_compact();
- void finish_compact(std::vector<uint32_t> to_hold);
+ std::unique_ptr<vespalib::datastore::CompactingBuffers> start_compact();
static constexpr bool is_indirect = true;
static uint32_t get_offset_bits() { return StoreRefType::offset_bits; }
static uint32_t get_num_buffers() { return StoreRefType::numBuffers(); }
@@ -79,19 +78,13 @@ RealIntStore::RealIntStore()
RealIntStore::~RealIntStore() = default;
-std::vector<uint32_t>
+std::unique_ptr<vespalib::datastore::CompactingBuffers>
RealIntStore::start_compact()
{
// Use a compaction strategy that will compact all active buffers
CompactionStrategy compaction_strategy(0.0, 0.0, get_num_buffers(), 1.0);
CompactionSpec compaction_spec(true, false);
- return _store.startCompactWorstBuffers(compaction_spec, compaction_strategy);
-}
-
-void
-RealIntStore::finish_compact(std::vector<uint32_t> to_hold)
-{
- _store.finishCompact(to_hold);
+ return _store.start_compact_worst_buffers(compaction_spec, compaction_strategy);
}
EntryRef
@@ -347,9 +340,8 @@ void
Fixture<Params>::compact_keys()
{
if constexpr (KeyStore::is_indirect) {
- auto to_hold = _keys.start_compact();
- EntryRefFilter filter(_keys.get_num_buffers(), _keys.get_offset_bits());
- filter.add_buffers(to_hold);
+ auto compacting_buffers = _keys.start_compact();
+ auto filter = compacting_buffers->make_entry_ref_filter();
auto itr = _tree.begin();
while (itr.valid()) {
auto old_ref = itr.getKey().load_relaxed();
@@ -359,7 +351,7 @@ Fixture<Params>::compact_keys()
}
++itr;
}
- _keys.finish_compact(std::move(to_hold));
+ compacting_buffers->finish();
}
_compact_keys.track_compacted();
}
@@ -369,9 +361,8 @@ void
Fixture<Params>::compact_values()
{
if constexpr (ValueStore::is_indirect) {
- auto to_hold = _values.start_compact();
- EntryRefFilter filter(_values.get_num_buffers(), _values.get_offset_bits());
- filter.add_buffers(to_hold);
+ auto compacting_buffers = _values.start_compact();
+ auto filter = compacting_buffers->make_entry_ref_filter();
auto itr = _tree.begin();
while (itr.valid()) {
auto old_ref = itr.getData().load_relaxed();
@@ -381,7 +372,7 @@ Fixture<Params>::compact_values()
}
++itr;
}
- _values.finish_compact(std::move(to_hold));
+ compacting_buffers->finish();
}
_compact_values.track_compacted();
}
diff --git a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
index 5e2aa89b59e..4da34c64ed9 100644
--- a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
+++ b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
@@ -5,7 +5,9 @@
#include <vespa/vespalib/btree/btreeroot.hpp>
#include <vespa/vespalib/btree/btreestore.hpp>
#include <vespa/vespalib/datastore/buffer_type.hpp>
+#include <vespa/vespalib/datastore/compacting_buffers.h>
#include <vespa/vespalib/datastore/compaction_strategy.h>
+#include <vespa/vespalib/datastore/entry_ref_filter.h>
#include <vespa/vespalib/gtest/gtest.h>
using vespalib::GenerationHandler;
@@ -114,7 +116,6 @@ void
BTreeStoreTest::test_compact_sequence(uint32_t sequence_length)
{
auto &store = _store;
- uint32_t entry_ref_offset_bits = TreeStore::RefType::offset_bits;
EntryRef ref1 = add_sequence(4, 4 + sequence_length);
EntryRef ref2 = add_sequence(5, 5 + sequence_length);
std::vector<EntryRef> refs;
@@ -136,13 +137,10 @@ BTreeStoreTest::test_compact_sequence(uint32_t sequence_length)
for (uint32_t pass = 0; pass < 15; ++pass) {
CompactionSpec compaction_spec(true, false);
CompactionStrategy compaction_strategy;
- auto to_hold = store.start_compact_worst_buffers(compaction_spec, compaction_strategy);
- std::vector<bool> filter(TreeStore::RefType::numBuffers());
- for (auto buffer_id : to_hold) {
- filter[buffer_id] = true;
- }
+ auto compacting_buffers = store.start_compact_worst_buffers(compaction_spec, compaction_strategy);
+ auto filter = compacting_buffers->make_entry_ref_filter();
for (auto& ref : refs) {
- if (ref.valid() && filter[ref.buffer_id(entry_ref_offset_bits)]) {
+ if (ref.valid() && filter.has(ref)) {
move_refs.emplace_back(ref);
change_writer.emplace_back(ref);
}
@@ -150,7 +148,7 @@ BTreeStoreTest::test_compact_sequence(uint32_t sequence_length)
store.move(move_refs);
change_writer.write(move_refs);
move_refs.clear();
- store.finishCompact(to_hold);
+ compacting_buffers->finish();
inc_generation();
}
EXPECT_NE(ref1, refs[0]);
@@ -174,9 +172,9 @@ TEST_F(BTreeStoreTest, require_that_nodes_for_multiple_btrees_are_compacted)
auto usage_before = store.getMemoryUsage();
for (uint32_t pass = 0; pass < 15; ++pass) {
CompactionStrategy compaction_strategy;
- auto to_hold = store.start_compact_worst_btree_nodes(compaction_strategy);
+ auto compacting_buffers = store.start_compact_worst_btree_nodes(compaction_strategy);
store.move_btree_nodes(refs);
- store.finish_compact_worst_btree_nodes(to_hold);
+ compacting_buffers->finish();
inc_generation();
}
EXPECT_EQ(make_exp_sequence(4, 40), get_sequence(refs[0]));
diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
index 2ff2897461b..f4bd0028e7c 100644
--- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp
+++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
@@ -208,8 +208,8 @@ TEST_P(NumberStoreTest, control_static_sizes) {
EXPECT_EQ(440u, sizeof(f.store));
EXPECT_EQ(296u, sizeof(NumberStoreTest::ArrayStoreType::DataStoreType));
#else
- EXPECT_EQ(488u, sizeof(store));
- EXPECT_EQ(328u, sizeof(NumberStoreTest::ArrayStoreType::DataStoreType));
+ EXPECT_EQ(496u, sizeof(store));
+ EXPECT_EQ(336u, sizeof(NumberStoreTest::ArrayStoreType::DataStoreType));
#endif
EXPECT_EQ(112u, sizeof(NumberStoreTest::ArrayStoreType::SmallBufferType));
MemoryUsage usage = store.getMemoryUsage();
diff --git a/vespalib/src/vespa/vespalib/btree/btree.hpp b/vespalib/src/vespa/vespalib/btree/btree.hpp
index 473d1f4735e..c6d8886254d 100644
--- a/vespalib/src/vespa/vespalib/btree/btree.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btree.hpp
@@ -28,9 +28,9 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
void
BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::compact_worst(const datastore::CompactionStrategy& compaction_strategy)
{
- auto to_hold = _alloc.start_compact_worst(compaction_strategy);
+ auto compacting_buffers = _alloc.start_compact_worst(compaction_strategy);
_tree.move_nodes(_alloc);
- _alloc.finishCompact(to_hold);
+ compacting_buffers->finish();
}
}
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
index 27e73b3a2b6..86c9621f869 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
@@ -166,7 +166,7 @@ public:
bool getCompacting(EntryRef ref) const { return _nodeStore.getCompacting(ref); }
std::vector<uint32_t> startCompact() { return _nodeStore.startCompact(); }
- std::vector<uint32_t> start_compact_worst(const CompactionStrategy& compaction_strategy) { return _nodeStore.start_compact_worst(compaction_strategy); }
+ std::unique_ptr<vespalib::datastore::CompactingBuffers> start_compact_worst(const CompactionStrategy& compaction_strategy) { return _nodeStore.start_compact_worst(compaction_strategy); }
void finishCompact(const std::vector<uint32_t> &toHold) {
return _nodeStore.finishCompact(toHold);
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.h b/vespalib/src/vespa/vespalib/btree/btreenodestore.h
index d4a5ae42ef8..d05ec840f83 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.h
@@ -6,6 +6,8 @@
#include "btreetraits.h"
#include <vespa/vespalib/datastore/datastore.h>
+namespace vespalib::datastore { class CompactingBuffers; }
+
namespace vespalib::btree {
class BTreeNodeReclaimer
@@ -160,7 +162,7 @@ public:
std::vector<uint32_t> startCompact();
- std::vector<uint32_t> start_compact_worst(const CompactionStrategy& compaction_strategy);
+ std::unique_ptr<vespalib::datastore::CompactingBuffers> start_compact_worst(const CompactionStrategy& compaction_strategy);
void finishCompact(const std::vector<uint32_t> &toHold);
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
index 91953507eb0..0f9eeb9daec 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
@@ -3,6 +3,7 @@
#pragma once
#include "btreenodestore.h"
+#include <vespa/vespalib/datastore/compacting_buffers.h>
#include <vespa/vespalib/datastore/compaction_spec.h>
#include <vespa/vespalib/datastore/datastore.hpp>
@@ -70,11 +71,11 @@ startCompact()
template <typename KeyT, typename DataT, typename AggrT,
size_t INTERNAL_SLOTS, size_t LEAF_SLOTS>
-std::vector<uint32_t>
+std::unique_ptr<vespalib::datastore::CompactingBuffers>
BTreeNodeStore<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>::
start_compact_worst(const CompactionStrategy &compaction_strategy)
{
- return _store.startCompactWorstBuffers(datastore::CompactionSpec(true, false), compaction_strategy);
+ return _store.start_compact_worst_buffers(datastore::CompactionSpec(true, false), compaction_strategy);
}
template <typename KeyT, typename DataT, typename AggrT,
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.h b/vespalib/src/vespa/vespalib/btree/btreestore.h
index a79259c6e57..54bc397175d 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.h
@@ -149,13 +149,6 @@ public:
KeyDataTypeRefPair
allocKeyDataCopy(const KeyDataType *rhs, uint32_t clusterSize);
- std::vector<uint32_t>
- startCompact();
-
- void
- finishCompact(const std::vector<uint32_t> &toHold);
-
-
const KeyDataType *
lower_bound(const KeyDataType *b, const KeyDataType *e,
const KeyType &key, CompareT comp);
@@ -394,11 +387,10 @@ public:
void
foreach_frozen(EntryRef ref, FunctionType func) const;
- std::vector<uint32_t> start_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy);
- void finish_compact_worst_btree_nodes(const std::vector<uint32_t>& to_hold);
+ std::unique_ptr<vespalib::datastore::CompactingBuffers> start_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy);
void move_btree_nodes(const std::vector<EntryRef>& refs);
- std::vector<uint32_t> start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy);
+ std::unique_ptr<vespalib::datastore::CompactingBuffers> start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy);
void move(std::vector<EntryRef>& refs);
private:
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.hpp b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
index c0985ff8f94..ffd337d642b 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
@@ -5,6 +5,7 @@
#include "btreestore.h"
#include "btreebuilder.h"
#include "btreebuilder.hpp"
+#include <vespa/vespalib/datastore/compacting_buffers.h>
#include <vespa/vespalib/datastore/compaction_spec.h>
#include <vespa/vespalib/datastore/datastore.hpp>
#include <vespa/vespalib/util/optimized.h>
@@ -116,34 +117,6 @@ allocKeyDataCopy(const KeyDataType *rhs, uint32_t clusterSize)
allocArray(vespalib::ConstArrayRef<KeyDataType>(rhs, clusterSize));
}
-
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT, typename AggrCalcT>
-std::vector<uint32_t>
-BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::startCompact()
-{
- std::vector<uint32_t> ret = _store.startCompact(clusterLimit);
- for (uint32_t clusterSize = 1; clusterSize <= clusterLimit; ++clusterSize) {
- uint32_t typeId = clusterSize - 1;
- std::vector<uint32_t> toHold = _store.startCompact(typeId);
- for (auto i : toHold) {
- ret.push_back(i);
- }
- }
- return ret;
-}
-
-
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT, typename AggrCalcT>
-void
-BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
-finishCompact(const std::vector<uint32_t> &toHold)
-{
- _store.finishCompact(toHold);
-}
-
-
template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, typename AggrCalcT>
const typename BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
@@ -971,7 +944,7 @@ getAggregated(const EntryRef ref) const
template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, typename AggrCalcT>
-std::vector<uint32_t>
+std::unique_ptr<vespalib::datastore::CompactingBuffers>
BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
start_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy)
{
@@ -983,15 +956,6 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, typename AggrCalcT>
void
BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
-finish_compact_worst_btree_nodes(const std::vector<uint32_t>& to_hold)
-{
- _allocator.finishCompact(to_hold);
-}
-
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT, typename AggrCalcT>
-void
-BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
move_btree_nodes(const std::vector<EntryRef>& refs)
{
for (auto& ref : refs) {
@@ -1006,12 +970,12 @@ move_btree_nodes(const std::vector<EntryRef>& refs)
template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, typename AggrCalcT>
-std::vector<uint32_t>
+std::unique_ptr<vespalib::datastore::CompactingBuffers>
BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
{
freeze();
- return _store.startCompactWorstBuffers(compaction_spec, compaction_strategy);
+ return _store.start_compact_worst_buffers(compaction_spec, compaction_strategy);
}
template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
diff --git a/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt b/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt
index abbdc79c527..f0f548c41fe 100644
--- a/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt
@@ -6,6 +6,7 @@ vespa_add_library(vespalib_vespalib_datastore OBJECT
atomic_entry_ref.cpp
buffer_type.cpp
bufferstate.cpp
+ compacting_buffers.cpp
compaction_strategy.cpp
compact_buffer_candidates.cpp
datastore.cpp
diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.hpp b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
index 4fc13396f6b..e79398271fb 100644
--- a/vespalib/src/vespa/vespalib/datastore/array_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
@@ -3,6 +3,7 @@
#pragma once
#include "array_store.h"
+#include "compacting_buffers.h"
#include "compaction_spec.h"
#include "entry_ref_filter.h"
#include "datastore.hpp"
@@ -150,24 +151,20 @@ template <typename EntryT, typename RefT, typename TypeMapperT>
class CompactionContext : public ICompactionContext {
private:
using ArrayStoreType = ArrayStore<EntryT, RefT, TypeMapperT>;
- DataStoreBase &_dataStore;
ArrayStoreType &_store;
- std::vector<uint32_t> _bufferIdsToCompact;
+ std::unique_ptr<vespalib::datastore::CompactingBuffers> _compacting_buffers;
EntryRefFilter _filter;
public:
- CompactionContext(DataStoreBase &dataStore,
- ArrayStoreType &store,
- std::vector<uint32_t> bufferIdsToCompact)
- : _dataStore(dataStore),
- _store(store),
- _bufferIdsToCompact(std::move(bufferIdsToCompact)),
- _filter(RefT::numBuffers(), RefT::offset_bits)
+ CompactionContext(ArrayStoreType &store,
+ std::unique_ptr<vespalib::datastore::CompactingBuffers> compacting_buffers)
+ : _store(store),
+ _compacting_buffers(std::move(compacting_buffers)),
+ _filter(_compacting_buffers->make_entry_ref_filter())
{
- _filter.add_buffers(_bufferIdsToCompact);
}
~CompactionContext() override {
- _dataStore.finishCompact(_bufferIdsToCompact);
+ _compacting_buffers->finish();
}
void compact(vespalib::ArrayRef<AtomicEntryRef> refs) override {
for (auto &atomic_entry_ref : refs) {
@@ -186,9 +183,9 @@ template <typename EntryT, typename RefT, typename TypeMapperT>
ICompactionContext::UP
ArrayStore<EntryT, RefT, TypeMapperT>::compactWorst(CompactionSpec compaction_spec, const CompactionStrategy &compaction_strategy)
{
- std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compaction_spec, compaction_strategy);
+ auto compacting_buffers = _store.start_compact_worst_buffers(compaction_spec, compaction_strategy);
return std::make_unique<arraystore::CompactionContext<EntryT, RefT, TypeMapperT>>
- (_store, *this, std::move(bufferIdsToCompact));
+ (*this, std::move(compacting_buffers));
}
template <typename EntryT, typename RefT, typename TypeMapperT>
diff --git a/vespalib/src/vespa/vespalib/datastore/compacting_buffers.cpp b/vespalib/src/vespa/vespalib/datastore/compacting_buffers.cpp
new file mode 100644
index 00000000000..e350ef5056e
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/compacting_buffers.cpp
@@ -0,0 +1,38 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "compacting_buffers.h"
+#include "datastorebase.h"
+#include "entry_ref_filter.h"
+#include <cassert>
+
+namespace vespalib::datastore {
+
+CompactingBuffers::CompactingBuffers(DataStoreBase& store, uint32_t num_buffers, uint32_t offset_bits, std::vector<uint32_t> buffer_ids)
+ : _store(store),
+ _num_buffers(num_buffers),
+ _offset_bits(offset_bits),
+ _buffer_ids(std::move(buffer_ids))
+{
+}
+
+CompactingBuffers::~CompactingBuffers()
+{
+ assert(_buffer_ids.empty());
+}
+
+void
+CompactingBuffers::finish()
+{
+ _store.finishCompact(_buffer_ids);
+ _buffer_ids.clear();
+}
+
+EntryRefFilter
+CompactingBuffers::make_entry_ref_filter() const
+{
+ EntryRefFilter filter(_num_buffers, _offset_bits);
+ filter.add_buffers(_buffer_ids);
+ return filter;
+}
+
+}
diff --git a/vespalib/src/vespa/vespalib/datastore/compacting_buffers.h b/vespalib/src/vespa/vespalib/datastore/compacting_buffers.h
new file mode 100644
index 00000000000..87e698c4eca
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/compacting_buffers.h
@@ -0,0 +1,32 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <cstdint>
+#include <vector>
+
+namespace vespalib::datastore {
+
+class DataStoreBase;
+class EntryRefFilter;
+
+/*
+ * Class representing the buffers currently being compacted in a data store.
+ */
+class CompactingBuffers
+{
+ DataStoreBase& _store;
+ uint32_t _num_buffers;
+ uint32_t _offset_bits;
+ std::vector<uint32_t> _buffer_ids;
+public:
+ CompactingBuffers(DataStoreBase& store, uint32_t num_buffers, uint32_t offset_bits, std::vector<uint32_t> buffer_ids);
+ ~CompactingBuffers();
+ DataStoreBase& get_store() const noexcept { return _store; }
+ const std::vector<uint32_t>& get_buffer_ids() const noexcept { return _buffer_ids; }
+ bool empty() const noexcept { return _buffer_ids.empty(); }
+ void finish();
+ EntryRefFilter make_entry_ref_filter() const;
+};
+
+}
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.hpp b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
index 4d09ffe4bc6..72d08460eea 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
@@ -13,7 +13,7 @@ namespace vespalib::datastore {
template <typename RefT>
DataStoreT<RefT>::DataStoreT()
- : DataStoreBase(RefType::numBuffers(), RefType::offsetSize())
+ : DataStoreBase(RefType::numBuffers(), RefType::offset_bits, RefType::offsetSize())
{
}
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
index 60671afb1a0..79113f76941 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
@@ -2,6 +2,7 @@
#include "datastorebase.h"
#include "compact_buffer_candidates.h"
+#include "compacting_buffers.h"
#include "compaction_spec.h"
#include "compaction_strategy.h"
#include <vespa/vespalib/util/array.hpp>
@@ -79,7 +80,7 @@ public:
}
};
-DataStoreBase::DataStoreBase(uint32_t numBuffers, size_t maxArrays)
+DataStoreBase::DataStoreBase(uint32_t numBuffers, uint32_t offset_bits, size_t maxArrays)
: _buffers(numBuffers),
_primary_buffer_ids(),
_states(numBuffers),
@@ -90,6 +91,7 @@ DataStoreBase::DataStoreBase(uint32_t numBuffers, size_t maxArrays)
_elemHold1List(),
_elemHold2List(),
_numBuffers(numBuffers),
+ _offset_bits(offset_bits),
_hold_buffer_count(0u),
_maxArrays(maxArrays),
_compaction_count(0u),
@@ -529,8 +531,8 @@ DataStoreBase::markCompacting(uint32_t bufferId)
inc_compaction_count();
}
-std::vector<uint32_t>
-DataStoreBase::startCompactWorstBuffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
+std::unique_ptr<CompactingBuffers>
+DataStoreBase::start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
{
// compact memory usage
CompactBufferCandidates elem_buffers(_numBuffers, compaction_strategy.get_max_buffers(), compaction_strategy.get_active_buffers_ratio(), compaction_strategy.getMaxDeadBytesRatio() / 2, CompactionStrategy::DEAD_BYTES_SLACK);
@@ -567,7 +569,7 @@ DataStoreBase::startCompactWorstBuffers(CompactionSpec compaction_spec, const Co
for (auto buffer_id : result) {
markCompacting(buffer_id);
}
- return result;
+ return std::make_unique<CompactingBuffers>(*this, _numBuffers, _offset_bits, std::move(result));
}
void
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
index 20104670085..40730252139 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
@@ -12,6 +12,7 @@
namespace vespalib::datastore {
+class CompactingBuffers;
class CompactionSpec;
class CompactionStrategy;
@@ -159,13 +160,14 @@ protected:
ElemHold2List _elemHold2List;
const uint32_t _numBuffers;
+ const uint32_t _offset_bits;
uint32_t _hold_buffer_count;
const size_t _maxArrays;
mutable std::atomic<uint64_t> _compaction_count;
vespalib::GenerationHolder _genHolder;
- DataStoreBase(uint32_t numBuffers, size_t maxArrays);
+ DataStoreBase(uint32_t numBuffers, uint32_t offset_bits, size_t maxArrays);
DataStoreBase(const DataStoreBase &) = delete;
DataStoreBase &operator=(const DataStoreBase &) = delete;
@@ -376,7 +378,7 @@ public:
}
uint32_t startCompactWorstBuffer(uint32_t typeId);
- std::vector<uint32_t> startCompactWorstBuffers(CompactionSpec compaction_spec, const CompactionStrategy &compaction_strategy);
+ std::unique_ptr<CompactingBuffers> start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy &compaction_strategy);
uint64_t get_compaction_count() const { return _compaction_count.load(std::memory_order_relaxed); }
void inc_compaction_count() const { ++_compaction_count; }
bool has_held_buffers() const noexcept { return _hold_buffer_count != 0u; }
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
index cbb8369e1f2..37a56bf2561 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
@@ -94,17 +94,17 @@ private:
btree::NoAggregated,
EntryComparatorWrapper,
DictionaryTraits>;
- using UniqueStoreRemapper<RefT>::_compacting_buffer;
+ using UniqueStoreRemapper<RefT>::_filter;
using UniqueStoreRemapper<RefT>::_mapping;
- DataStoreBase &_dataStore;
IUniqueStoreDictionary &_dict;
ICompactable &_store;
- std::vector<uint32_t> _bufferIdsToCompact;
+ std::unique_ptr<CompactingBuffers> _compacting_buffers;
void allocMapping() {
_mapping.resize(RefT::numBuffers());
- for (const auto bufferId : _bufferIdsToCompact) {
- BufferState &state = _dataStore.getBufferState(bufferId);
+ auto& data_store = _compacting_buffers->get_store();
+ for (const auto bufferId : _compacting_buffers->get_buffer_ids()) {
+ BufferState &state = data_store.getBufferState(bufferId);
_mapping[bufferId].resize(state.get_used_arrays());
}
}
@@ -122,34 +122,30 @@ private:
}
void fillMapping() {
- _dict.move_keys(*this, _compacting_buffer);
+ _dict.move_keys(*this, _filter);
}
public:
- CompactionContext(DataStoreBase &dataStore,
- IUniqueStoreDictionary &dict,
+ CompactionContext(IUniqueStoreDictionary &dict,
ICompactable &store,
- std::vector<uint32_t> bufferIdsToCompact)
- : UniqueStoreRemapper<RefT>(),
+ std::unique_ptr<CompactingBuffers> compacting_buffers)
+ : UniqueStoreRemapper<RefT>(compacting_buffers->make_entry_ref_filter()),
ICompactable(),
- _dataStore(dataStore),
_dict(dict),
_store(store),
- _bufferIdsToCompact(std::move(bufferIdsToCompact))
+ _compacting_buffers(std::move(compacting_buffers))
{
- if (!_bufferIdsToCompact.empty()) {
- _compacting_buffer.add_buffers(_bufferIdsToCompact);
+ if (!_compacting_buffers->empty()) {
allocMapping();
fillMapping();
}
}
void done() override {
- _dataStore.finishCompact(_bufferIdsToCompact);
- _bufferIdsToCompact.clear();
+ _compacting_buffers->finish();
}
~CompactionContext() override {
- assert(_bufferIdsToCompact.empty());
+ assert(_compacting_buffers->empty());
}
};
@@ -159,11 +155,11 @@ template <typename EntryT, typename RefT, typename Compare, typename Allocator>
std::unique_ptr<typename UniqueStore<EntryT, RefT, Compare, Allocator>::Remapper>
UniqueStore<EntryT, RefT, Compare, Allocator>::compact_worst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
{
- std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compaction_spec, compaction_strategy);
- if (bufferIdsToCompact.empty()) {
+ auto compacting_buffers = _store.start_compact_worst_buffers(compaction_spec, compaction_strategy);
+ if (compacting_buffers->empty()) {
return std::unique_ptr<Remapper>();
} else {
- return std::make_unique<uniquestore::CompactionContext<RefT>>(_store, *_dict, _allocator, std::move(bufferIdsToCompact));
+ return std::make_unique<uniquestore::CompactionContext<RefT>>(*_dict, _allocator, std::move(compacting_buffers));
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h b/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h
index 4babd6204c7..174c74a62d2 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h
@@ -19,11 +19,11 @@ public:
using RefType = RefT;
protected:
- EntryRefFilter _compacting_buffer;
+ EntryRefFilter _filter;
std::vector<std::vector<EntryRef, allocator_large<EntryRef>>> _mapping;
public:
- UniqueStoreRemapper()
- : _compacting_buffer(RefT::numBuffers(), RefT::offset_bits),
+ UniqueStoreRemapper(EntryRefFilter&& filter)
+ : _filter(std::move(filter)),
_mapping()
{
}
@@ -41,13 +41,13 @@ public:
void remap(vespalib::ArrayRef<AtomicEntryRef> refs) const {
for (auto &atomic_ref : refs) {
auto ref = atomic_ref.load_relaxed();
- if (ref.valid() && _compacting_buffer.has(ref)) {
+ if (ref.valid() && _filter.has(ref)) {
atomic_ref.store_release(remap(ref));
}
}
}
- const EntryRefFilter& get_entry_ref_filter() const noexcept { return _compacting_buffer; }
+ const EntryRefFilter& get_entry_ref_filter() const noexcept { return _filter; }
virtual void done() = 0;
};