about summary refs log tree commit diff stats
path: root/vespalib
diff options
context:
space:
mode:
author	Tor Egge <Tor.Egge@online.no>	2021-12-04 20:50:07 +0100
committer	Tor Egge <Tor.Egge@online.no>	2021-12-04 20:50:07 +0100
commit	2ad949884ee12126f00b18d6e8890af8cbc61391 (patch)
tree	59276804bfeb740e83b18d7dd1ae252471e5355e /vespalib
parent	1e00538c210421e3d774db73fe9b0eb7dbf89bce (diff)
Change signatures for move() and move_btree_nodes() in BTreeStore to match
the signatures in PostingStore.
Diffstat (limited to 'vespalib')
-rw-r--r--	vespalib/src/tests/btree/btree_store/btree_store_test.cpp	| 91
-rw-r--r--	vespalib/src/vespa/vespalib/btree/btreestore.h	| 7
-rw-r--r--	vespalib/src/vespa/vespalib/btree/btreestore.hpp	| 44
3 files changed, 99 insertions(+), 43 deletions(-)
diff --git a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
index e7d923d0e87..77cb8e519e4 100644
--- a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
+++ b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
@@ -73,61 +73,112 @@ BTreeStoreTest::~BTreeStoreTest()
inc_generation();
}
+namespace {
+
+class ChangeWriter {
+ std::vector<EntryRef*> _old_refs;
+public:
+ ChangeWriter(uint32_t capacity);
+ ~ChangeWriter();
+ void write(const std::vector<EntryRef>& refs);
+ void emplace_back(EntryRef& ref) { _old_refs.emplace_back(&ref); }
+};
+
+ChangeWriter::ChangeWriter(uint32_t capacity)
+ : _old_refs()
+{
+ _old_refs.reserve(capacity);
+}
+
+ChangeWriter::~ChangeWriter() = default;
+
+void
+ChangeWriter::write(const std::vector<EntryRef> &refs)
+{
+ assert(refs.size() == _old_refs.size());
+ auto old_ref_itr = _old_refs.begin();
+ for (auto ref : refs) {
+ **old_ref_itr = ref;
+ ++old_ref_itr;
+ }
+ assert(old_ref_itr == _old_refs.end());
+ _old_refs.clear();
+}
+
+}
+
void
BTreeStoreTest::test_compact_sequence(uint32_t sequence_length)
{
auto &store = _store;
+ uint32_t entry_ref_offset_bits = TreeStore::RefType::offset_bits;
EntryRef ref1 = add_sequence(4, 4 + sequence_length);
EntryRef ref2 = add_sequence(5, 5 + sequence_length);
- EntryRef old_ref1 = ref1;
- EntryRef old_ref2 = ref2;
std::vector<EntryRef> refs;
+ refs.reserve(2);
+ refs.emplace_back(ref1);
+ refs.emplace_back(ref2);
+ std::vector<EntryRef> temp_refs;
for (int i = 0; i < 1000; ++i) {
- refs.emplace_back(add_sequence(i + 6, i + 6 + sequence_length));
+ temp_refs.emplace_back(add_sequence(i + 6, i + 6 + sequence_length));
}
- for (auto& ref : refs) {
+ for (auto& ref : temp_refs) {
store.clear(ref);
}
inc_generation();
+ ChangeWriter change_writer(refs.size());
+ std::vector<EntryRef> move_refs;
+ move_refs.reserve(refs.size());
auto usage_before = store.getMemoryUsage();
for (uint32_t pass = 0; pass < 15; ++pass) {
auto to_hold = store.start_compact_worst_buffers();
- ref1 = store.move(ref1);
- ref2 = store.move(ref2);
+ std::vector<bool> filter(TreeStore::RefType::numBuffers());
+ for (auto buffer_id : to_hold) {
+ filter[buffer_id] = true;
+ }
+ for (auto& ref : refs) {
+ if (ref.valid() && filter[ref.buffer_id(entry_ref_offset_bits)]) {
+ move_refs.emplace_back(ref);
+ change_writer.emplace_back(ref);
+ }
+ }
+ store.move(move_refs);
+ change_writer.write(move_refs);
+ move_refs.clear();
store.finishCompact(to_hold);
inc_generation();
}
- EXPECT_NE(old_ref1, ref1);
- EXPECT_NE(old_ref2, ref2);
- EXPECT_EQ(make_exp_sequence(4, 4 + sequence_length), get_sequence(ref1));
- EXPECT_EQ(make_exp_sequence(5, 5 + sequence_length), get_sequence(ref2));
+ EXPECT_NE(ref1, refs[0]);
+ EXPECT_NE(ref2, refs[1]);
+ EXPECT_EQ(make_exp_sequence(4, 4 + sequence_length), get_sequence(refs[0]));
+ EXPECT_EQ(make_exp_sequence(5, 5 + sequence_length), get_sequence(refs[1]));
auto usage_after = store.getMemoryUsage();
EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes());
- store.clear(ref1);
- store.clear(ref2);
+ store.clear(refs[0]);
+ store.clear(refs[1]);
}
TEST_F(BTreeStoreTest, require_that_nodes_for_multiple_btrees_are_compacted)
{
auto &store = this->_store;
- EntryRef ref1 = add_sequence(4, 40);
- EntryRef ref2 = add_sequence(100, 130);
+ std::vector<EntryRef> refs;
+ refs.emplace_back(add_sequence(4, 40));
+ refs.emplace_back(add_sequence(100, 130));
store.clear(add_sequence(1000, 20000));
inc_generation();
auto usage_before = store.getMemoryUsage();
for (uint32_t pass = 0; pass < 15; ++pass) {
auto to_hold = store.start_compact_worst_btree_nodes();
- store.move_btree_nodes(ref1);
- store.move_btree_nodes(ref2);
+ store.move_btree_nodes(refs);
store.finish_compact_worst_btree_nodes(to_hold);
inc_generation();
}
- EXPECT_EQ(make_exp_sequence(4, 40), get_sequence(ref1));
- EXPECT_EQ(make_exp_sequence(100, 130), get_sequence(ref2));
+ EXPECT_EQ(make_exp_sequence(4, 40), get_sequence(refs[0]));
+ EXPECT_EQ(make_exp_sequence(100, 130), get_sequence(refs[1]));
auto usage_after = store.getMemoryUsage();
EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes());
- store.clear(ref1);
- store.clear(ref2);
+ store.clear(refs[0]);
+ store.clear(refs[1]);
}
TEST_F(BTreeStoreTest, require_that_short_arrays_are_compacted)
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.h b/vespalib/src/vespa/vespalib/btree/btreestore.h
index 82913987e44..b4238757e46 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.h
@@ -298,6 +298,9 @@ public:
bool
isSmallArray(const EntryRef ref) const;
+ static bool isBTree(uint32_t typeId) { return typeId == BUFFERTYPE_BTREE; }
+ bool isBTree(RefType ref) const { return isBTree(getTypeId(ref)); }
+
/**
* Returns the cluster size for the type id.
* Cluster size == 0 means we have a tree for the given reference.
@@ -391,10 +394,10 @@ public:
std::vector<uint32_t> start_compact_worst_btree_nodes();
void finish_compact_worst_btree_nodes(const std::vector<uint32_t>& to_hold);
- void move_btree_nodes(EntryRef ref);
+ void move_btree_nodes(const std::vector<EntryRef>& refs);
std::vector<uint32_t> start_compact_worst_buffers();
- EntryRef move(EntryRef ref);
+ void move(std::vector<EntryRef>& refs);
private:
static constexpr size_t MIN_BUFFER_ARRAYS = 128u;
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.hpp b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
index 15c546a0368..795e526f927 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
@@ -991,15 +991,15 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, typename AggrCalcT>
void
BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
-move_btree_nodes(EntryRef ref)
+move_btree_nodes(const std::vector<EntryRef>& refs)
{
- if (ref.valid()) {
+ for (auto& ref : refs) {
RefType iRef(ref);
- uint32_t clusterSize = getClusterSize(iRef);
- if (clusterSize == 0) {
- BTreeType *tree = getWTreeEntry(iRef);
- tree->move_nodes(_allocator);
- }
+ assert(iRef.valid());
+ uint32_t typeId = getTypeId(iRef);
+ assert(isBTree(typeId));
+ BTreeType *tree = getWTreeEntry(iRef);
+ tree->move_nodes(_allocator);
}
}
@@ -1015,23 +1015,25 @@ start_compact_worst_buffers()
template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, typename AggrCalcT>
-typename BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::EntryRef
+void
BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
-move(EntryRef ref)
+move(std::vector<EntryRef> &refs)
{
- if (!ref.valid() || !_store.getCompacting(ref)) {
- return ref;
- }
- RefType iRef(ref);
- uint32_t clusterSize = getClusterSize(iRef);
- if (clusterSize == 0) {
- BTreeType *tree = getWTreeEntry(iRef);
- auto ref_and_ptr = allocBTreeCopy(*tree);
- tree->prepare_hold();
- return ref_and_ptr.ref;
+ for (auto& ref : refs) {
+ RefType iRef(ref);
+ assert(iRef.valid());
+ assert(_store.getCompacting(iRef));
+ uint32_t clusterSize = getClusterSize(iRef);
+ if (clusterSize == 0) {
+ BTreeType *tree = getWTreeEntry(iRef);
+ auto ref_and_ptr = allocBTreeCopy(*tree);
+ tree->prepare_hold();
+ ref = ref_and_ptr.ref;
+ } else {
+ const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize);
+ ref = allocKeyDataCopy(shortArray, clusterSize).ref;
+ }
}
- const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize);
- return allocKeyDataCopy(shortArray, clusterSize).ref;
}
}