diff options
author | Håvard Pettersen <3535158+havardpe@users.noreply.github.com> | 2022-10-13 10:37:12 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2022-10-13 10:37:12 +0200 |
commit | a4d00a65e72428b5e1f86cc8f8b9ec16d1d37074 (patch) | |
tree | a1f89cd6c841cd506cdbf6d0baa08fe3b013fd52 /vespalib | |
parent | 8568274e54d065ae904907537dcf4ce6a9581bfa (diff) | |
parent | 9259007b603c4f5ec98597a4e1bf63e23cb660a6 (diff) |
Merge branch 'master' into havardpe/more-coroutines
Diffstat (limited to 'vespalib')
44 files changed, 255 insertions, 253 deletions
diff --git a/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp b/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp index 3ba7bf85e42..caed5c3543c 100644 --- a/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp +++ b/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp @@ -59,8 +59,8 @@ public: AtomicEntryRef add_relaxed(uint32_t value) { return AtomicEntryRef(add(value)); } void hold(const AtomicEntryRef& ref) { _store.holdElem(ref.load_relaxed(), 1); } EntryRef move(EntryRef ref); - void transfer_hold_lists(generation_t gen) { _store.transferHoldLists(gen); } - void trim_hold_lists(generation_t gen) { _store.trimHoldLists(gen); } + void assign_generation(generation_t current_gen) { _store.assign_generation(current_gen); } + void reclaim_memory(generation_t gen) { _store.reclaim_memory(gen); } uint32_t get(EntryRef ref) const { return _store.getEntry(ref); } uint32_t get_acquire(const AtomicEntryRef& ref) const { return get(ref.load_acquire()); } uint32_t get_relaxed(const AtomicEntryRef& ref) const { return get(ref.load_relaxed()); } @@ -118,8 +118,8 @@ public: static uint32_t add(uint32_t value) noexcept { return value; } static uint32_t add_relaxed(uint32_t value) noexcept { return value; } static void hold(uint32_t) noexcept { } - static void transfer_hold_lists(generation_t) noexcept { } - static void trim_hold_lists(generation_t) noexcept { } + static void assign_generation(generation_t) noexcept { } + static void reclaim_memory(generation_t) noexcept { } static uint32_t get(uint32_t value) noexcept { return value; } static uint32_t get_acquire(uint32_t value) noexcept { return value; } static uint32_t get_relaxed(uint32_t value) noexcept { return value; } @@ -274,15 +274,15 @@ Fixture<Params>::commit() auto &allocator = _tree.getAllocator(); allocator.freeze(); auto current_gen = _generationHandler.getCurrentGeneration(); - allocator.transferHoldLists(current_gen); - _keys.transfer_hold_lists(current_gen); - 
_values.transfer_hold_lists(current_gen); - allocator.transferHoldLists(_generationHandler.getCurrentGeneration()); + allocator.assign_generation(current_gen); + _keys.assign_generation(current_gen); + _values.assign_generation(current_gen); + allocator.assign_generation(_generationHandler.getCurrentGeneration()); _generationHandler.incGeneration(); - auto first_used_gen = _generationHandler.getFirstUsedGeneration(); - allocator.trimHoldLists(first_used_gen); - _keys.trim_hold_lists(first_used_gen); - _values.trim_hold_lists(first_used_gen); + auto oldest_used_gen = _generationHandler.get_oldest_used_generation(); + allocator.reclaim_memory(oldest_used_gen); + _keys.reclaim_memory(oldest_used_gen); + _values.reclaim_memory(oldest_used_gen); } template <typename Params> diff --git a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp index 4da34c64ed9..0370b1ce2eb 100644 --- a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp +++ b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp @@ -31,9 +31,9 @@ protected: void inc_generation() { _store.freeze(); - _store.transferHoldLists(_gen_handler.getCurrentGeneration()); + _store.assign_generation(_gen_handler.getCurrentGeneration()); _gen_handler.incGeneration(); - _store.trimHoldLists(_gen_handler.getFirstUsedGeneration()); + _store.reclaim_memory(_gen_handler.get_oldest_used_generation()); } EntryRef add_sequence(int start_key, int end_key) diff --git a/vespalib/src/tests/btree/btree_test.cpp b/vespalib/src/tests/btree/btree_test.cpp index 3fd00a26189..f2896cb783c 100644 --- a/vespalib/src/tests/btree/btree_test.cpp +++ b/vespalib/src/tests/btree/btree_test.cpp @@ -163,9 +163,9 @@ void cleanup(GenerationHandler & g, ManagerType & m) { m.freeze(); - m.transferHoldLists(g.getCurrentGeneration()); + m.assign_generation(g.getCurrentGeneration()); g.incGeneration(); - m.trimHoldLists(g.getFirstUsedGeneration()); + 
m.reclaim_memory(g.get_oldest_used_generation()); } template <typename ManagerType, typename NodeType> @@ -874,9 +874,9 @@ TEST_F(BTreeTest, require_that_we_can_insert_and_remove_from_tree) } compacting_buffers->finish(); manager.freeze(); - manager.transferHoldLists(g.getCurrentGeneration()); + manager.assign_generation(g.getCurrentGeneration()); g.incGeneration(); - manager.trimHoldLists(g.getFirstUsedGeneration()); + manager.reclaim_memory(g.get_oldest_used_generation()); } // remove entries for (size_t i = 0; i < numEntries; ++i) { @@ -1106,9 +1106,9 @@ TEST_F(BTreeTest, require_that_memory_usage_is_calculated) EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage())); // trim hold lists - tm.transferHoldLists(gh.getCurrentGeneration()); + tm.assign_generation(gh.getCurrentGeneration()); gh.incGeneration(); - tm.trimHoldLists(gh.getFirstUsedGeneration()); + tm.reclaim_memory(gh.get_oldest_used_generation()); mu = vespalib::MemoryUsage(); mu.incAllocatedBytes(adjustAllocatedBytes(initialInternalNodes, sizeof(INode))); mu.incAllocatedBytes(adjustAllocatedBytes(initialLeafNodes, sizeof(LNode))); @@ -1282,9 +1282,9 @@ TEST_F(BTreeTest, require_that_small_nodes_works) s.clear(root); s.clearBuilder(); s.freeze(); - s.transferHoldLists(g.getCurrentGeneration()); + s.assign_generation(g.getCurrentGeneration()); g.incGeneration(); - s.trimHoldLists(g.getFirstUsedGeneration()); + s.reclaim_memory(g.get_oldest_used_generation()); } namespace { @@ -1416,9 +1416,9 @@ TEST_F(BTreeTest, require_that_apply_works) s.clear(root); s.clearBuilder(); s.freeze(); - s.transferHoldLists(g.getCurrentGeneration()); + s.assign_generation(g.getCurrentGeneration()); g.incGeneration(); - s.trimHoldLists(g.getFirstUsedGeneration()); + s.reclaim_memory(g.get_oldest_used_generation()); } class MyTreeTestIterator : public MyTree::Iterator @@ -1553,9 +1553,9 @@ inc_generation(GenerationHandler &g, Tree &t) { auto &s = t.getAllocator(); s.freeze(); - s.transferHoldLists(g.getCurrentGeneration()); + 
s.assign_generation(g.getCurrentGeneration()); g.incGeneration(); - s.trimHoldLists(g.getFirstUsedGeneration()); + s.reclaim_memory(g.get_oldest_used_generation()); } template <typename Tree> diff --git a/vespalib/src/tests/btree/btreeaggregation_test.cpp b/vespalib/src/tests/btree/btreeaggregation_test.cpp index dff7de6660f..fb394df9861 100644 --- a/vespalib/src/tests/btree/btreeaggregation_test.cpp +++ b/vespalib/src/tests/btree/btreeaggregation_test.cpp @@ -272,9 +272,9 @@ void freezeTree(GenerationHandler &g, ManagerType &m) { m.freeze(); - m.transferHoldLists(g.getCurrentGeneration()); + m.assign_generation(g.getCurrentGeneration()); g.incGeneration(); - m.trimHoldLists(g.getFirstUsedGeneration()); + m.reclaim_memory(g.get_oldest_used_generation()); } template <typename ManagerType> @@ -891,9 +891,9 @@ Test::requireThatWeCanInsertAndRemoveFromTree() } compacting_buffers->finish(); manager.freeze(); - manager.transferHoldLists(g.getCurrentGeneration()); + manager.assign_generation(g.getCurrentGeneration()); g.incGeneration(); - manager.trimHoldLists(g.getFirstUsedGeneration()); + manager.reclaim_memory(g.get_oldest_used_generation()); } // remove entries for (size_t i = 0; i < numEntries; ++i) { @@ -1190,9 +1190,9 @@ Test::requireThatSmallNodesWorks() s.clear(root); s.clearBuilder(); s.freeze(); - s.transferHoldLists(g.getCurrentGeneration()); + s.assign_generation(g.getCurrentGeneration()); g.incGeneration(); - s.trimHoldLists(g.getFirstUsedGeneration()); + s.reclaim_memory(g.get_oldest_used_generation()); } void diff --git a/vespalib/src/tests/btree/frozenbtree_test.cpp b/vespalib/src/tests/btree/frozenbtree_test.cpp index 01748b9edeb..3471d5dc3df 100644 --- a/vespalib/src/tests/btree/frozenbtree_test.cpp +++ b/vespalib/src/tests/btree/frozenbtree_test.cpp @@ -114,7 +114,7 @@ FrozenBTreeTest::freeTree(bool verbose) static_cast<uint64_t>(_intTree->getUsedMemory()), static_cast<uint64_t>(_intTree->getHeldMemory())); _intTree->dropFrozen(); - 
_intTree->removeOldGenerations(_intTree->getGeneration() + 1); + _intTree->reclaim_memory(_intTree->getGeneration() + 1); LOG(info, "freeTree after unhold: %" PRIu64 " (%" PRIu64 " held)", static_cast<uint64_t>(_intTree->getUsedMemory()), @@ -134,9 +134,9 @@ FrozenBTreeTest::freeTree(bool verbose) (void) verbose; _tree->clear(*_allocator); _allocator->freeze(); - _allocator->transferHoldLists(_generationHandler->getCurrentGeneration()); + _allocator->assign_generation(_generationHandler->getCurrentGeneration()); _generationHandler->incGeneration(); - _allocator->trimHoldLists(_generationHandler->getFirstUsedGeneration()); + _allocator->reclaim_memory(_generationHandler->get_oldest_used_generation()); delete _tree; _tree = NULL; delete _allocator; @@ -425,7 +425,7 @@ FrozenBTreeTest::Main() EXPECT_TRUE(_tree->getFrozenView(*_allocator).empty()); _allocator->freeze(); EXPECT_FALSE(_tree->getFrozenView(*_allocator).empty()); - _allocator->transferHoldLists(_generationHandler->getCurrentGeneration()); + _allocator->assign_generation(_generationHandler->getCurrentGeneration()); lookupFrozenRandomValues(*_tree, *_allocator, _randomValues); traverseTreeIterator(*_tree, *_allocator, diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp index 1708b0fd948..afef530b33e 100644 --- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp +++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp @@ -123,7 +123,7 @@ struct ArrayStoreTest : public TestT void assert_ref_reused(const EntryVector& first, const EntryVector& second, bool should_reuse) { EntryRef ref1 = add(first); remove(ref1); - trimHoldLists(); + reclaim_memory(); EntryRef ref2 = add(second); EXPECT_EQ(should_reuse, (ref2 == ref1)); assertGet(ref2, second); @@ -136,9 +136,9 @@ struct ArrayStoreTest : public TestT } return EntryRef(); } - void trimHoldLists() { - store.transferHoldLists(generation++); - 
store.trimHoldLists(generation); + void reclaim_memory() { + store.assign_generation(generation++); + store.reclaim_memory(generation); } void compactWorst(bool compactMemory, bool compactAddressSpace) { CompactionSpec compaction_spec(compactMemory, compactAddressSpace); @@ -283,7 +283,7 @@ TEST_P(NumberStoreTest, track_size_of_large_array_allocations_with_free_lists_en assert_buffer_stats(ref, TestBufferStats().used(2).hold(0).dead(1).extra_used(16)); remove({1,2,3,4}); assert_buffer_stats(ref, TestBufferStats().used(2).hold(1).dead(1).extra_hold(16).extra_used(16)); - trimHoldLists(); + reclaim_memory(); assert_buffer_stats(ref, TestBufferStats().used(2).hold(0).dead(2).extra_used(0)); add({5,6,7,8,9}); assert_buffer_stats(ref, TestBufferStats().used(2).hold(0).dead(1).extra_used(20)); @@ -316,7 +316,7 @@ test_compaction(NumberStoreBasicTest &f) EntryRef size2Ref = f.add({2,2}); EntryRef size3Ref = f.add({3,3,3}); f.remove(f.add({5,5})); - f.trimHoldLists(); + f.reclaim_memory(); f.assertBufferState(size1Ref, MemStats().used(1).dead(0)); f.assertBufferState(size2Ref, MemStats().used(4).dead(2)); f.assertBufferState(size3Ref, MemStats().used(2).dead(1)); // Note: First element is reserved @@ -335,7 +335,7 @@ test_compaction(NumberStoreBasicTest &f) EXPECT_NE(size2BufferId, f.getBufferId(f.getEntryRef({2,2}))); f.assertGet(size2Ref, {2,2}); // Old ref should still point to data. 
EXPECT_TRUE(f.store.bufferState(size2Ref).isOnHold()); - f.trimHoldLists(); + f.reclaim_memory(); EXPECT_TRUE(f.store.bufferState(size2Ref).isFree()); } @@ -360,7 +360,7 @@ void testCompaction(NumberStoreTest &f, bool compactMemory, bool compactAddressS f.remove(f.add({5,5,5})); f.remove(f.add({6})); f.remove(f.add({7})); - f.trimHoldLists(); + f.reclaim_memory(); f.assertBufferState(size1Ref, MemStats().used(3).dead(2)); f.assertBufferState(size2Ref, MemStats().used(2).dead(0)); f.assertBufferState(size3Ref, MemStats().used(6).dead(3)); @@ -397,7 +397,7 @@ void testCompaction(NumberStoreTest &f, bool compactMemory, bool compactAddressS EXPECT_FALSE(f.store.bufferState(size1Ref).isOnHold()); } EXPECT_FALSE(f.store.bufferState(size2Ref).isOnHold()); - f.trimHoldLists(); + f.reclaim_memory(); if (compactMemory) { EXPECT_TRUE(f.store.bufferState(size3Ref).isFree()); } else { @@ -436,7 +436,7 @@ TEST_P(NumberStoreTest, used_onHold_and_dead_memory_usage_is_tracked_for_small_a assertMemoryUsage(exp.used(entrySize() * 3)); remove({1,2,3}); assertMemoryUsage(exp.hold(entrySize() * 3)); - trimHoldLists(); + reclaim_memory(); assertMemoryUsage(exp.holdToDead(entrySize() * 3)); } @@ -447,7 +447,7 @@ TEST_P(NumberStoreTest, used_onHold_and_dead_memory_usage_is_tracked_for_large_a assertMemoryUsage(exp.used(largeArraySize() + entrySize() * 4)); remove({1,2,3,4}); assertMemoryUsage(exp.hold(largeArraySize() + entrySize() * 4)); - trimHoldLists(); + reclaim_memory(); assertMemoryUsage(exp.decUsed(entrySize() * 4).decHold(largeArraySize() + entrySize() * 4). 
dead(largeArraySize())); } diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp index 1cb4f3e2307..10b96a87444 100644 --- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp +++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp @@ -28,8 +28,8 @@ public: void holdElem(EntryRef ref, uint64_t len) { ParentType::holdElem(ref, len); } - void transferHoldLists(generation_t generation) { - ParentType::transferHoldLists(generation); + void assign_generation(generation_t current_gen) { + ParentType::assign_generation(current_gen); } void reclaim_entry_refs(generation_t oldest_used_gen) override { ParentType::reclaim_entry_refs(oldest_used_gen); @@ -261,29 +261,29 @@ TEST(DataStoreTest, require_that_we_can_hold_and_trim_buffers) s.switch_primary_buffer(); EXPECT_EQ(1u, s.primary_buffer_id()); s.holdBuffer(0); // hold last buffer - s.transferHoldLists(10); + s.assign_generation(10); EXPECT_EQ(1u, MyRef(s.addEntry(2)).bufferId()); s.switch_primary_buffer(); EXPECT_EQ(2u, s.primary_buffer_id()); s.holdBuffer(1); // hold last buffer - s.transferHoldLists(20); + s.assign_generation(20); EXPECT_EQ(2u, MyRef(s.addEntry(3)).bufferId()); s.switch_primary_buffer(); EXPECT_EQ(3u, s.primary_buffer_id()); s.holdBuffer(2); // hold last buffer - s.transferHoldLists(30); + s.assign_generation(30); EXPECT_EQ(3u, MyRef(s.addEntry(4)).bufferId()); s.holdBuffer(3); // hold current buffer - s.transferHoldLists(40); + s.assign_generation(40); EXPECT_TRUE(s.getBufferState(0).size() != 0); EXPECT_TRUE(s.getBufferState(1).size() != 0); EXPECT_TRUE(s.getBufferState(2).size() != 0); EXPECT_TRUE(s.getBufferState(3).size() != 0); - s.trimHoldLists(11); + s.reclaim_memory(11); EXPECT_TRUE(s.getBufferState(0).size() == 0); EXPECT_TRUE(s.getBufferState(1).size() != 0); EXPECT_TRUE(s.getBufferState(2).size() != 0); @@ -292,7 +292,7 @@ TEST(DataStoreTest, require_that_we_can_hold_and_trim_buffers) 
s.switch_primary_buffer(); EXPECT_EQ(0u, s.primary_buffer_id()); EXPECT_EQ(0u, MyRef(s.addEntry(5)).bufferId()); - s.trimHoldLists(41); + s.reclaim_memory(41); EXPECT_TRUE(s.getBufferState(0).size() != 0); EXPECT_TRUE(s.getBufferState(1).size() == 0); EXPECT_TRUE(s.getBufferState(2).size() == 0); @@ -304,13 +304,13 @@ TEST(DataStoreTest, require_that_we_can_hold_and_trim_elements) MyStore s; MyRef r1 = s.addEntry(1); s.holdElem(r1, 1); - s.transferHoldLists(10); + s.assign_generation(10); MyRef r2 = s.addEntry(2); s.holdElem(r2, 1); - s.transferHoldLists(20); + s.assign_generation(20); MyRef r3 = s.addEntry(3); s.holdElem(r3, 1); - s.transferHoldLists(30); + s.assign_generation(30); EXPECT_EQ(1, s.getEntry(r1)); EXPECT_EQ(2, s.getEntry(r2)); EXPECT_EQ(3, s.getEntry(r3)); @@ -358,11 +358,11 @@ TEST(DataStoreTest, require_that_we_can_use_free_lists) s.enableFreeLists(); auto r1 = s.addEntry(1); s.holdElem(r1, 1); - s.transferHoldLists(10); + s.assign_generation(10); auto r2 = s.addEntry(2); expect_successive_refs(r1, r2); s.holdElem(r2, 1); - s.transferHoldLists(20); + s.assign_generation(20); s.reclaim_entry_refs(11); auto r3 = s.addEntry(3); // reuse r1 EXPECT_EQ(r1, r3); @@ -393,7 +393,7 @@ TEST(DataStoreTest, require_that_we_can_use_free_lists_with_raw_allocator) expect_successive_handles(h1, h2); s.holdElem(h1.ref, 3); s.holdElem(h2.ref, 3); - s.transferHoldLists(10); + s.assign_generation(10); s.reclaim_entry_refs(11); auto h3 = allocator.alloc(3); // reuse h2.ref from free list @@ -429,7 +429,7 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated) s.addEntry(20); s.addEntry(30); s.holdBuffer(r.bufferId()); - s.transferHoldLists(100); + s.assign_generation(100); m._usedElems += 2; m._holdElems = m._usedElems; m._deadElems = 0; @@ -446,7 +446,7 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated) m._freeBuffers--; // trim hold buffer - s.trimHoldLists(101); + s.reclaim_memory(101); m._allocElems -= MyRef::offsetSize(); m._usedElems = 1; 
m._deadElems = 0; @@ -479,13 +479,13 @@ TEST(DataStoreTest, require_that_memory_usage_is_calculated) s.addEntry(30); s.addEntry(40); s.holdBuffer(r.bufferId()); - s.transferHoldLists(100); + s.assign_generation(100); vespalib::MemoryUsage m = s.getMemoryUsage(); EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes()); EXPECT_EQ(5 * sizeof(int), m.usedBytes()); EXPECT_EQ(0 * sizeof(int), m.deadBytes()); EXPECT_EQ(5 * sizeof(int), m.allocatedBytesOnHold()); - s.trimHoldLists(101); + s.reclaim_memory(101); } TEST(DataStoreTest, require_that_we_can_disable_elemement_hold_list) @@ -513,8 +513,8 @@ TEST(DataStoreTest, require_that_we_can_disable_elemement_hold_list) EXPECT_EQ(4 * sizeof(int), m.usedBytes()); EXPECT_EQ(2 * sizeof(int), m.deadBytes()); EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold()); - s.transferHoldLists(100); - s.trimHoldLists(101); + s.assign_generation(100); + s.reclaim_memory(101); } using IntGrowStore = GrowStore<int, EntryRefT<24>>; @@ -634,9 +634,9 @@ TEST(DataStoreTest, can_set_memory_allocator) s.switch_primary_buffer(); EXPECT_EQ(AllocStats(3, 0), stats); s.holdBuffer(0); - s.transferHoldLists(10); + s.assign_generation(10); EXPECT_EQ(AllocStats(3, 0), stats); - s.trimHoldLists(11); + s.reclaim_memory(11); EXPECT_EQ(AllocStats(3, 2), stats); } EXPECT_EQ(AllocStats(3, 3), stats); @@ -693,8 +693,8 @@ void test_free_element_to_held_buffer(bool direct, bool before_hold_buffer) ASSERT_DEATH({ s.holdElem(ref, 1); }, "isActive\\(\\)"); } } - s.transferHoldLists(100); - s.trimHoldLists(101); + s.assign_generation(100); + s.reclaim_memory(101); } } diff --git a/vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp b/vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp index ad10bc5c7e6..4f4c3ac94eb 100644 --- a/vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp +++ b/vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp @@ -88,13 +88,13 @@ 
DataStoreFixedSizeHashTest::~DataStoreFixedSizeHashTest() void DataStoreFixedSizeHashTest::commit() { - _store.transferHoldLists(_generation_handler.getCurrentGeneration()); - _hash_map->transfer_hold_lists(_generation_handler.getCurrentGeneration()); + _store.assign_generation(_generation_handler.getCurrentGeneration()); + _hash_map->assign_generation(_generation_handler.getCurrentGeneration()); _generation_holder.assign_generation(_generation_handler.getCurrentGeneration()); _generation_handler.incGeneration(); - _store.trimHoldLists(_generation_handler.getFirstUsedGeneration()); - _hash_map->trim_hold_lists(_generation_handler.getFirstUsedGeneration()); - _generation_holder.reclaim(_generation_handler.getFirstUsedGeneration()); + _store.reclaim_memory(_generation_handler.get_oldest_used_generation()); + _hash_map->reclaim_memory(_generation_handler.get_oldest_used_generation()); + _generation_holder.reclaim(_generation_handler.get_oldest_used_generation()); } size_t diff --git a/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp b/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp index 13f9ae251b6..4c3fe1756c5 100644 --- a/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp +++ b/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp @@ -73,8 +73,8 @@ public: } ~MyCompactable() override = default; - EntryRef move(EntryRef ref) override { - auto new_ref = _allocator.move(ref); + EntryRef move_on_compact(EntryRef ref) override { + auto new_ref = _allocator.move_on_compact(ref); _allocator.hold(ref); _new_refs.emplace_back(new_ref); return new_ref; @@ -168,11 +168,11 @@ DataStoreShardedHashTest::~DataStoreShardedHashTest() void DataStoreShardedHashTest::commit() { - _store.transferHoldLists(_generationHandler.getCurrentGeneration()); - _hash_map.transfer_hold_lists(_generationHandler.getCurrentGeneration()); + _store.assign_generation(_generationHandler.getCurrentGeneration()); + 
_hash_map.assign_generation(_generationHandler.getCurrentGeneration()); _generationHandler.incGeneration(); - _store.trimHoldLists(_generationHandler.getFirstUsedGeneration()); - _hash_map.trim_hold_lists(_generationHandler.getFirstUsedGeneration()); + _store.reclaim_memory(_generationHandler.get_oldest_used_generation()); + _hash_map.reclaim_memory(_generationHandler.get_oldest_used_generation()); } void @@ -395,7 +395,7 @@ TEST_F(DataStoreShardedHashTest, foreach_key_works) } } -TEST_F(DataStoreShardedHashTest, move_keys_works) +TEST_F(DataStoreShardedHashTest, move_keys_on_compact_works) { populate_sample_data(small_population); std::vector<EntryRef> refs; @@ -403,7 +403,7 @@ TEST_F(DataStoreShardedHashTest, move_keys_works) std::vector<EntryRef> new_refs; MyCompactable my_compactable(_allocator, new_refs); auto filter = make_entry_ref_filter<RefT>(false); - _hash_map.move_keys(my_compactable, filter); + _hash_map.move_keys_on_compact(my_compactable, filter); std::vector<EntryRef> verify_new_refs; _hash_map.foreach_key([&verify_new_refs](EntryRef ref) { verify_new_refs.emplace_back(ref); }); EXPECT_EQ(small_population, refs.size()); diff --git a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp index 6612ef998c5..48a0ecafbc6 100644 --- a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp +++ b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp @@ -112,10 +112,10 @@ struct TestBase : public ::testing::Test { } return EntryRef(); } - void trimHoldLists() { + void reclaim_memory() { store.freeze(); - store.transferHoldLists(generation++); - store.trimHoldLists(generation); + store.assign_generation(generation++); + store.reclaim_memory(generation); } void compactWorst() { CompactionSpec compaction_spec(true, true); @@ -364,7 +364,7 @@ TYPED_TEST(TestBase, store_can_be_compacted) EntryRef val0Ref = this->add(this->values()[0]); EntryRef val1Ref = 
this->add(this->values()[1]); this->remove(this->add(this->values()[2])); - this->trimHoldLists(); + this->reclaim_memory(); size_t reserved = this->get_reserved(val0Ref); size_t array_size = this->get_array_size(val0Ref); this->assertBufferState(val0Ref, TestBufferStats().used(reserved + 3 * array_size).dead(reserved + array_size)); @@ -381,7 +381,7 @@ TYPED_TEST(TestBase, store_can_be_compacted) this->assertGet(val0Ref, this->values()[0]); this->assertGet(val1Ref, this->values()[1]); EXPECT_TRUE(this->store.bufferState(val0Ref).isOnHold()); - this->trimHoldLists(); + this->reclaim_memory(); EXPECT_TRUE(this->store.bufferState(val0Ref).isFree()); this->assertStoreContent(); } @@ -415,7 +415,7 @@ TYPED_TEST(TestBase, store_can_be_enumerated) EntryRef val0Ref = this->add(this->values()[0]); EntryRef val1Ref = this->add(this->values()[1]); this->remove(this->add(this->values()[2])); - this->trimHoldLists(); + this->reclaim_memory(); auto enumerator = this->getEnumerator(true); std::vector<uint32_t> refs; @@ -460,7 +460,7 @@ TEST_F(DoubleTest, nan_is_handled) for (auto &value : myvalues) { refs.emplace_back(add(value)); } - trimHoldLists(); + reclaim_memory(); EXPECT_TRUE(std::isnan(store.get(refs[1]))); EXPECT_TRUE(std::signbit(store.get(refs[1]))); EXPECT_TRUE(std::isinf(store.get(refs[2]))); diff --git a/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp b/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp index d0fede5c550..496bc814d0d 100644 --- a/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp +++ b/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp @@ -62,9 +62,9 @@ struct UniqueStoreDictionaryTest : public ::testing::Test { } void inc_generation() { dict.freeze(); - dict.transfer_hold_lists(gen_handler.getCurrentGeneration()); + dict.assign_generation(gen_handler.getCurrentGeneration()); gen_handler.incGeneration(); - 
dict.trim_hold_lists(gen_handler.getFirstUsedGeneration()); + dict.reclaim_memory(gen_handler.get_oldest_used_generation()); } void take_snapshot() { dict.freeze(); diff --git a/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp b/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp index f68dd4dde66..e865239787b 100644 --- a/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp +++ b/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp @@ -51,8 +51,8 @@ struct TestBase : public ::testing::Test { void remove(EntryRef ref) { allocator.hold(ref); } - EntryRef move(EntryRef ref) { - return allocator.move(ref); + EntryRef move_on_compact(EntryRef ref) { + return allocator.move_on_compact(ref); } uint32_t get_buffer_id(EntryRef ref) const { return EntryRefType(ref).bufferId(); @@ -67,9 +67,9 @@ struct TestBase : public ::testing::Test { EXPECT_EQ(expStats._extra_used, buffer_state(ref).stats().extra_used_bytes()); EXPECT_EQ(expStats._extra_hold, buffer_state(ref).stats().extra_hold_bytes()); } - void trim_hold_lists() { - allocator.get_data_store().transferHoldLists(generation++); - allocator.get_data_store().trimHoldLists(generation); + void reclaim_memory() { + allocator.get_data_store().assign_generation(generation++); + allocator.get_data_store().reclaim_memory(generation); } }; @@ -89,7 +89,7 @@ TEST_F(StringTest, elements_are_put_on_hold_when_value_is_removed) assert_buffer_state(ref, TestBufferStats().used(16).hold(0).dead(0)); remove(ref); assert_buffer_state(ref, TestBufferStats().used(16).hold(16).dead(0)); - trim_hold_lists(); + reclaim_memory(); assert_buffer_state(ref, TestBufferStats().used(16).hold(0).dead(16)); } @@ -100,17 +100,17 @@ TEST_F(StringTest, extra_bytes_used_is_tracked) assert_buffer_state(ref, TestBufferStats().used(2).hold(0).dead(1).extra_used(1001)); 
remove(ref); assert_buffer_state(ref, TestBufferStats().used(2).hold(1).dead(1).extra_used(1001).extra_hold(1001)); - trim_hold_lists(); + reclaim_memory(); assert_buffer_state(ref, TestBufferStats().used(2).hold(0).dead(2)); ref = add(spaces1000.c_str()); assert_buffer_state(ref, TestBufferStats().used(2).hold(0).dead(1).extra_used(1001)); - EntryRef ref2 = move(ref); + EntryRef ref2 = move_on_compact(ref); assert_get(ref2, spaces1000.c_str()); assert_buffer_state(ref, TestBufferStats().used(3).hold(0).dead(1).extra_used(2002)); remove(ref); remove(ref2); assert_buffer_state(ref, TestBufferStats().used(3).hold(2).dead(1).extra_used(2002).extra_hold(2002)); - trim_hold_lists(); + reclaim_memory(); assert_buffer_state(ref, TestBufferStats().used(3).hold(0).dead(3)); } @@ -134,7 +134,7 @@ TEST_F(StringTest, free_list_is_used_when_enabled) EntryRef ref2 = add(spaces1000.c_str()); remove(ref1); remove(ref2); - trim_hold_lists(); + reclaim_memory(); EntryRef ref3 = add(small.c_str()); EntryRef ref4 = add(spaces1000.c_str()); EXPECT_EQ(ref1, ref3); @@ -150,7 +150,7 @@ TEST_F(StringTest, free_list_is_not_used_when_disabled) EntryRef ref2 = add(spaces1000.c_str()); remove(ref1); remove(ref2); - trim_hold_lists(); + reclaim_memory(); EntryRef ref3 = add(small.c_str()); EntryRef ref4 = add(spaces1000.c_str()); EXPECT_NE(ref1, ref3); @@ -159,7 +159,7 @@ TEST_F(StringTest, free_list_is_not_used_when_disabled) assert_buffer_state(ref2, TestBufferStats().used(3).hold(0).dead(2).extra_used(1001)); } -TEST_F(StringTest, free_list_is_never_used_for_move) +TEST_F(StringTest, free_list_is_never_used_for_move_on_compact) { // Free lists are default enabled for UniqueStoreStringAllocator EntryRef ref1 = add(small.c_str()); @@ -168,9 +168,9 @@ TEST_F(StringTest, free_list_is_never_used_for_move) EntryRef ref4 = add(spaces1000.c_str()); remove(ref3); remove(ref4); - trim_hold_lists(); - EntryRef ref5 = move(ref1); - EntryRef ref6 = move(ref2); + reclaim_memory(); + EntryRef ref5 = 
move_on_compact(ref1); + EntryRef ref6 = move_on_compact(ref2); EXPECT_NE(ref5, ref3); EXPECT_NE(ref6, ref4); assert_buffer_state(ref1, TestBufferStats().used(48).hold(0).dead(16)); diff --git a/vespalib/src/tests/util/generationhandler/generationhandler_test.cpp b/vespalib/src/tests/util/generationhandler/generationhandler_test.cpp index 00da752a749..0bc72f93a9d 100644 --- a/vespalib/src/tests/util/generationhandler/generationhandler_test.cpp +++ b/vespalib/src/tests/util/generationhandler/generationhandler_test.cpp @@ -26,10 +26,10 @@ GenerationHandlerTest::~GenerationHandlerTest() = default; TEST_F(GenerationHandlerTest, require_that_generation_can_be_increased) { EXPECT_EQ(0u, gh.getCurrentGeneration()); - EXPECT_EQ(0u, gh.getFirstUsedGeneration()); + EXPECT_EQ(0u, gh.get_oldest_used_generation()); gh.incGeneration(); EXPECT_EQ(1u, gh.getCurrentGeneration()); - EXPECT_EQ(1u, gh.getFirstUsedGeneration()); + EXPECT_EQ(1u, gh.get_oldest_used_generation()); } TEST_F(GenerationHandlerTest, require_that_readers_can_take_guards) @@ -87,34 +87,34 @@ TEST_F(GenerationHandlerTest, require_that_guards_can_be_copied) TEST_F(GenerationHandlerTest, require_that_the_first_used_generation_is_correct) { - EXPECT_EQ(0u, gh.getFirstUsedGeneration()); + EXPECT_EQ(0u, gh.get_oldest_used_generation()); gh.incGeneration(); - EXPECT_EQ(1u, gh.getFirstUsedGeneration()); + EXPECT_EQ(1u, gh.get_oldest_used_generation()); { GenGuard g1 = gh.takeGuard(); gh.incGeneration(); EXPECT_EQ(1u, gh.getGenerationRefCount()); - EXPECT_EQ(1u, gh.getFirstUsedGeneration()); + EXPECT_EQ(1u, gh.get_oldest_used_generation()); } - EXPECT_EQ(1u, gh.getFirstUsedGeneration()); - gh.updateFirstUsedGeneration(); // Only writer should call this + EXPECT_EQ(1u, gh.get_oldest_used_generation()); + gh.update_oldest_used_generation(); // Only writer should call this EXPECT_EQ(0u, gh.getGenerationRefCount()); - EXPECT_EQ(2u, gh.getFirstUsedGeneration()); + EXPECT_EQ(2u, gh.get_oldest_used_generation()); { GenGuard g1 
= gh.takeGuard(); gh.incGeneration(); gh.incGeneration(); EXPECT_EQ(1u, gh.getGenerationRefCount()); - EXPECT_EQ(2u, gh.getFirstUsedGeneration()); + EXPECT_EQ(2u, gh.get_oldest_used_generation()); { GenGuard g2 = gh.takeGuard(); - EXPECT_EQ(2u, gh.getFirstUsedGeneration()); + EXPECT_EQ(2u, gh.get_oldest_used_generation()); } } - EXPECT_EQ(2u, gh.getFirstUsedGeneration()); - gh.updateFirstUsedGeneration(); // Only writer should call this + EXPECT_EQ(2u, gh.get_oldest_used_generation()); + gh.update_oldest_used_generation(); // Only writer should call this EXPECT_EQ(0u, gh.getGenerationRefCount()); - EXPECT_EQ(4u, gh.getFirstUsedGeneration()); + EXPECT_EQ(4u, gh.get_oldest_used_generation()); } TEST_F(GenerationHandlerTest, require_that_generation_can_grow_large) @@ -124,7 +124,7 @@ TEST_F(GenerationHandlerTest, require_that_generation_can_grow_large) EXPECT_EQ(i, gh.getCurrentGeneration()); guards.push_back(gh.takeGuard()); // take guard on current generation if (i >= 128) { - EXPECT_EQ(i - 128, gh.getFirstUsedGeneration()); + EXPECT_EQ(i - 128, gh.get_oldest_used_generation()); guards.pop_front(); EXPECT_EQ(128u, gh.getGenerationRefCount()); } diff --git a/vespalib/src/tests/util/generationhandler_stress/generation_handler_stress_test.cpp b/vespalib/src/tests/util/generationhandler_stress/generation_handler_stress_test.cpp index 74af25b54a8..fd2769fd8b1 100644 --- a/vespalib/src/tests/util/generationhandler_stress/generation_handler_stress_test.cpp +++ b/vespalib/src/tests/util/generationhandler_stress/generation_handler_stress_test.cpp @@ -238,7 +238,7 @@ Fixture::write_indirect_work(uint64_t cnt, IndirectContext& context) ReadStopper read_stopper(_stopRead); uint32_t sleep_cnt = 0; ASSERT_EQ(0, _generationHandler.getCurrentGeneration()); - auto oldest_gen = _generationHandler.getFirstUsedGeneration(); + auto oldest_gen = _generationHandler.get_oldest_used_generation(); for (uint64_t i = 0; i < cnt; ++i) { auto gen = _generationHandler.getCurrentGeneration(); // 
Hold data for gen, write new data for next_gen @@ -248,7 +248,7 @@ Fixture::write_indirect_work(uint64_t cnt, IndirectContext& context) *v_ptr = next_gen; context._value_ptr.store(v_ptr, std::memory_order_release); _generationHandler.incGeneration(); - auto first_used_gen = _generationHandler.getFirstUsedGeneration(); + auto first_used_gen = _generationHandler.get_oldest_used_generation(); while (oldest_gen < first_used_gen) { // Clear data that readers should no longer have access to. *context.calc_value_ptr(oldest_gen) = 0; @@ -258,8 +258,8 @@ Fixture::write_indirect_work(uint64_t cnt, IndirectContext& context) // Sleep if writer gets too much ahead of readers. std::this_thread::sleep_for(1ms); ++sleep_cnt; - _generationHandler.updateFirstUsedGeneration(); - first_used_gen = _generationHandler.getFirstUsedGeneration(); + _generationHandler.update_oldest_used_generation(); + first_used_gen = _generationHandler.get_oldest_used_generation(); } } _doneWriteWork += cnt; diff --git a/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp b/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp index c23065b7468..5d6ec3050da 100644 --- a/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp +++ b/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp @@ -140,7 +140,7 @@ TEST(RcuVectorTest, generation_handling) v.setGeneration(2); v.push_back(50); - v.removeOldGenerations(3); + v.reclaim_memory(3); EXPECT_EQ(0u, v.getMemoryUsage().allocatedBytesOnHold()); v.push_back(60); // new array EXPECT_EQ(24u, v.getMemoryUsage().allocatedBytesOnHold()); @@ -184,7 +184,7 @@ TEST(RcuVectorTest, memory_usage) EXPECT_TRUE(assertUsage(MemoryUsage(6,6,0,2), v.getMemoryUsage())); v.push_back(4); EXPECT_TRUE(assertUsage(MemoryUsage(12,11,0,6), v.getMemoryUsage())); - v.removeOldGenerations(1); + v.reclaim_memory(1); EXPECT_TRUE(assertUsage(MemoryUsage(6,5,0,0), v.getMemoryUsage())); } @@ -434,7 +434,7 @@ StressFixture::commit() auto current_gen = generation_handler.getCurrentGeneration(); 
g.assign_generation(current_gen); generation_handler.incGeneration(); - auto first_used_gen = generation_handler.getFirstUsedGeneration(); + auto first_used_gen = generation_handler.get_oldest_used_generation(); g.reclaim(first_used_gen); } diff --git a/vespalib/src/vespa/vespalib/btree/btree.hpp b/vespalib/src/vespa/vespalib/btree/btree.hpp index c6d8886254d..81687b6e62d 100644 --- a/vespalib/src/vespa/vespalib/btree/btree.hpp +++ b/vespalib/src/vespa/vespalib/btree/btree.hpp @@ -20,7 +20,7 @@ BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::~BTree() { clear(); _alloc.freeze(); - _alloc.clearHoldLists(); + _alloc.reclaim_all_memory(); } template <typename KeyT, typename DataT, typename AggrT, typename CompareT, diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h index c631ac4041a..77900edf848 100644 --- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h +++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h @@ -101,7 +101,7 @@ public: /** * Try to free held nodes if nobody can be referencing them. */ - void trimHoldLists(generation_t usedGen); + void reclaim_memory(generation_t oldest_used_gen); /** * Transfer nodes from hold1 lists to hold2 lists, they are no @@ -109,9 +109,9 @@ public: * older versions of the frozen structure must leave before elements * can be unheld. 
*/ - void transferHoldLists(generation_t generation); + void assign_generation(generation_t current_gen); - void clearHoldLists(); + void reclaim_all_memory(); static bool isValidRef(BTreeNode::Ref ref) { return NodeStore::isValidRef(ref); } diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp index 8976d73379c..4968bbaf4a7 100644 --- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp +++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp @@ -266,18 +266,18 @@ template <typename KeyT, typename DataT, typename AggrT, size_t INTERNAL_SLOTS, size_t LEAF_SLOTS> void BTreeNodeAllocator<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>:: -trimHoldLists(generation_t usedGen) +reclaim_memory(generation_t oldest_used_gen) { - _nodeStore.trimHoldLists(usedGen); + _nodeStore.reclaim_memory(oldest_used_gen); } template <typename KeyT, typename DataT, typename AggrT, size_t INTERNAL_SLOTS, size_t LEAF_SLOTS> void BTreeNodeAllocator<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>:: -transferHoldLists(generation_t generation) +assign_generation(generation_t current_gen) { - _nodeStore.transferHoldLists(generation); + _nodeStore.assign_generation(current_gen); } @@ -285,9 +285,9 @@ template <typename KeyT, typename DataT, typename AggrT, size_t INTERNAL_SLOTS, size_t LEAF_SLOTS> void BTreeNodeAllocator<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>:: -clearHoldLists() +reclaim_all_memory() { - _nodeStore.clearHoldLists(); + _nodeStore.reclaim_all_memory(); } diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.h b/vespalib/src/vespa/vespalib/btree/btreenodestore.h index 20f80e07a6b..7b89e2d0ddb 100644 --- a/vespalib/src/vespa/vespalib/btree/btreenodestore.h +++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.h @@ -162,8 +162,8 @@ public: std::unique_ptr<vespalib::datastore::CompactingBuffers> start_compact_worst(const CompactionStrategy& compaction_strategy); - void 
transferHoldLists(generation_t generation) { - _store.transferHoldLists(generation); + void assign_generation(generation_t current_gen) { + _store.assign_generation(current_gen); } // Inherit doc from DataStoreBase @@ -172,12 +172,12 @@ public: } // Inherit doc from DataStoreBase - void trimHoldLists(generation_t usedGen) { - _store.trimHoldLists(usedGen); + void reclaim_memory(generation_t oldest_used_gen) { + _store.reclaim_memory(oldest_used_gen); } - void clearHoldLists() { - _store.clearHoldLists(); + void reclaim_all_memory() { + _store.reclaim_all_memory(); } // Inherit doc from DataStoreBase diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.h b/vespalib/src/vespa/vespalib/btree/btreestore.h index 54bc397175d..e5c55d5775d 100644 --- a/vespalib/src/vespa/vespalib/btree/btreestore.h +++ b/vespalib/src/vespa/vespalib/btree/btreestore.h @@ -332,25 +332,25 @@ public: // Inherit doc from DataStoreBase void - trimHoldLists(generation_t usedGen) + reclaim_memory(generation_t oldest_used_gen) { - _allocator.trimHoldLists(usedGen); - _store.trimHoldLists(usedGen); + _allocator.reclaim_memory(oldest_used_gen); + _store.reclaim_memory(oldest_used_gen); } // Inherit doc from DataStoreBase void - transferHoldLists(generation_t generation) + assign_generation(generation_t current_gen) { - _allocator.transferHoldLists(generation); - _store.transferHoldLists(generation); + _allocator.assign_generation(current_gen); + _store.assign_generation(current_gen); } void - clearHoldLists() + reclaim_all_memory() { - _allocator.clearHoldLists(); - _store.clearHoldLists(); + _allocator.reclaim_all_memory(); + _store.reclaim_all_memory(); } diff --git a/vespalib/src/vespa/vespalib/coro/lazy.h b/vespalib/src/vespa/vespalib/coro/lazy.h index ddfaa07528b..cd479850a59 100644 --- a/vespalib/src/vespa/vespalib/coro/lazy.h +++ b/vespalib/src/vespa/vespalib/coro/lazy.h @@ -6,6 +6,7 @@ #include <coroutine> #include <optional> #include <exception> +#include <utility> namespace 
vespalib::coro { diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.h b/vespalib/src/vespa/vespalib/datastore/array_store.h index db037ee12fb..e7662b9eb73 100644 --- a/vespalib/src/vespa/vespalib/datastore/array_store.h +++ b/vespalib/src/vespa/vespalib/datastore/array_store.h @@ -114,8 +114,8 @@ public: vespalib::AddressSpace addressSpaceUsage() const; // Pass on hold list management to underlying store - void transferHoldLists(generation_t generation) { _store.transferHoldLists(generation); } - void trimHoldLists(generation_t firstUsed) { _store.trimHoldLists(firstUsed); } + void assign_generation(generation_t current_gen) { _store.assign_generation(current_gen); } + void reclaim_memory(generation_t oldest_used_gen) { _store.reclaim_memory(oldest_used_gen); } vespalib::GenerationHolder &getGenerationHolder() { return _store.getGenerationHolder(); } void setInitializing(bool initializing) { _store.setInitializing(initializing); } diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.hpp b/vespalib/src/vespa/vespalib/datastore/array_store.hpp index 4df8505e927..b31d47fe4fe 100644 --- a/vespalib/src/vespa/vespalib/datastore/array_store.hpp +++ b/vespalib/src/vespa/vespalib/datastore/array_store.hpp @@ -57,7 +57,7 @@ ArrayStore<EntryT, RefT, TypeMapperT>::ArrayStore(const ArrayStoreConfig &cfg, s template <typename EntryT, typename RefT, typename TypeMapperT> ArrayStore<EntryT, RefT, TypeMapperT>::~ArrayStore() { - _store.clearHoldLists(); + _store.reclaim_all_memory(); _store.dropBuffers(); } diff --git a/vespalib/src/vespa/vespalib/datastore/compaction_context.cpp b/vespalib/src/vespa/vespalib/datastore/compaction_context.cpp index 65e028119a2..1ce6401605e 100644 --- a/vespalib/src/vespa/vespalib/datastore/compaction_context.cpp +++ b/vespalib/src/vespa/vespalib/datastore/compaction_context.cpp @@ -25,7 +25,7 @@ CompactionContext::compact(vespalib::ArrayRef<AtomicEntryRef> refs) for (auto &atomic_entry_ref : refs) { auto ref = 
atomic_entry_ref.load_relaxed(); if (ref.valid() && _filter.has(ref)) { - EntryRef newRef = _store.move(ref); + EntryRef newRef = _store.move_on_compact(ref); atomic_entry_ref.store_release(newRef); } } diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp index 4589fbba6fa..a14082e2d5c 100644 --- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp +++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp @@ -7,6 +7,7 @@ #include "compaction_strategy.h" #include <vespa/vespalib/util/generation_hold_list.hpp> #include <vespa/vespalib/util/stringfmt.h> +#include <algorithm> #include <limits> #include <cassert> @@ -220,10 +221,10 @@ DataStoreBase::addType(BufferTypeBase *typeHandler) } void -DataStoreBase::transferHoldLists(generation_t generation) +DataStoreBase::assign_generation(generation_t current_gen) { - _genHolder.assign_generation(generation); - _entry_ref_hold_list.assign_generation(generation); + _genHolder.assign_generation(current_gen); + _entry_ref_hold_list.assign_generation(current_gen); } void @@ -235,14 +236,14 @@ DataStoreBase::doneHoldBuffer(uint32_t bufferId) } void -DataStoreBase::trimHoldLists(generation_t usedGen) +DataStoreBase::reclaim_memory(generation_t oldest_used_gen) { - reclaim_entry_refs(usedGen); // Trim entries before trimming buffers - _genHolder.reclaim(usedGen); + reclaim_entry_refs(oldest_used_gen); // Trim entries before trimming buffers + _genHolder.reclaim(oldest_used_gen); } void -DataStoreBase::clearHoldLists() +DataStoreBase::reclaim_all_memory() { _entry_ref_hold_list.assign_generation(0); reclaim_all_entry_refs(); diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h index 520c13742b5..598f0872253 100644 --- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h +++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h @@ -189,9 +189,9 @@ public: public: /** - * 
Transfer holds from hold1 to hold2 lists, assigning generation. + * Assign generation on data elements on hold lists added since the last time this function was called. */ - void transferHoldLists(generation_t generation); + void assign_generation(generation_t current_gen); private: /** @@ -201,13 +201,13 @@ private: public: /** - * Trim hold lists, freeing buffers that no longer needs to be held. + * Reclaim memory from hold lists, freeing buffers and entry refs that no longer needs to be held. * - * @param usedGen lowest generation that is still used. + * @param oldest_used_gen oldest generation that is still used. */ - void trimHoldLists(generation_t usedGen); + void reclaim_memory(generation_t oldest_used_gen); - void clearHoldLists(); + void reclaim_all_memory(); template <typename EntryType, typename RefType> EntryType *getEntry(RefType ref) { diff --git a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp index 6f001ce3c94..5338ce0c6b2 100644 --- a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp +++ b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp @@ -97,23 +97,22 @@ FixedSizeHashMap::add(const ShardedHashComparator & comp, std::function<EntryRef } void -FixedSizeHashMap::transfer_hold_lists_slow(generation_t generation) +FixedSizeHashMap::assign_generation_slow(generation_t current_gen) { auto &hold_2_list = _hold_2_list; for (uint32_t node_idx : _hold_1_list) { - hold_2_list.push_back(std::make_pair(generation, node_idx)); + hold_2_list.push_back(std::make_pair(current_gen, node_idx)); } _hold_1_list.clear(); - } void -FixedSizeHashMap::trim_hold_lists_slow(generation_t first_used) +FixedSizeHashMap::reclaim_memory_slow(generation_t oldest_used_gen) { while (!_hold_2_list.empty()) { auto& first = _hold_2_list.front(); - if (static_cast<sgeneration_t>(first.first - first_used) >= 0) { + if (static_cast<sgeneration_t>(first.first - oldest_used_gen) >= 0) { 
break; } uint32_t node_idx = first.second; @@ -183,7 +182,7 @@ FixedSizeHashMap::foreach_key(const std::function<void(EntryRef)>& callback) con } void -FixedSizeHashMap::move_keys(ICompactable& compactable, const EntryRefFilter &compacting_buffers) +FixedSizeHashMap::move_keys_on_compact(ICompactable& compactable, const EntryRefFilter &compacting_buffers) { for (auto& chain_head : _chain_heads) { uint32_t node_idx = chain_head.load_relaxed(); @@ -192,7 +191,7 @@ FixedSizeHashMap::move_keys(ICompactable& compactable, const EntryRefFilter &com EntryRef old_ref = node.get_kv().first.load_relaxed(); assert(old_ref.valid()); if (compacting_buffers.has(old_ref)) { - EntryRef new_ref = compactable.move(old_ref); + EntryRef new_ref = compactable.move_on_compact(old_ref); node.get_kv().first.store_release(new_ref); } node_idx = node.get_next_node_idx().load(std::memory_order_relaxed); diff --git a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h index c522bcc3c33..de05ec1deb0 100644 --- a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h +++ b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h @@ -56,8 +56,8 @@ private: * A reader must own an appropriate GenerationHandler::Guard to ensure * that memory is held while it can be accessed by reader. * - * The writer must update generation and call transfer_hold_lists and - * trim_hold_lists as needed to free up memory no longer needed by any + * The writer must update generation and call assign_generation and + * reclaim_memory as needed to free up memory no longer needed by any * readers. 
*/ class FixedSizeHashMap { @@ -114,8 +114,8 @@ private: std::deque<std::pair<generation_t, uint32_t>> _hold_2_list; uint32_t _num_shards; - void transfer_hold_lists_slow(generation_t generation); - void trim_hold_lists_slow(generation_t first_used); + void assign_generation_slow(generation_t current_gen); + void reclaim_memory_slow(generation_t oldest_used_gen); void force_add(const EntryComparator& comp, const KvType& kv); public: FixedSizeHashMap(uint32_t module, uint32_t capacity, uint32_t num_shards); @@ -143,15 +143,15 @@ public: return nullptr; } - void transfer_hold_lists(generation_t generation) { + void assign_generation(generation_t current_gen) { if (!_hold_1_list.empty()) { - transfer_hold_lists_slow(generation); + assign_generation_slow(current_gen); } } - void trim_hold_lists(generation_t first_used) { - if (!_hold_2_list.empty() && static_cast<sgeneration_t>(_hold_2_list.front().first - first_used) < 0) { - trim_hold_lists_slow(first_used); + void reclaim_memory(generation_t oldest_used_gen) { + if (!_hold_2_list.empty() && static_cast<sgeneration_t>(_hold_2_list.front().first - oldest_used_gen) < 0) { + reclaim_memory_slow(oldest_used_gen); } } @@ -159,7 +159,7 @@ public: size_t size() const noexcept { return _count; } MemoryUsage get_memory_usage() const; void foreach_key(const std::function<void(EntryRef)>& callback) const; - void move_keys(ICompactable& compactable, const EntryRefFilter &compacting_buffers); + void move_keys_on_compact(ICompactable& compactable, const EntryRefFilter &compacting_buffers); /* * Scan dictionary and call normalize function for each value. 
If * returned value is different then write back the modified value to diff --git a/vespalib/src/vespa/vespalib/datastore/i_compactable.h b/vespalib/src/vespa/vespalib/datastore/i_compactable.h index 069d32bb481..31c082e4371 100644 --- a/vespalib/src/vespa/vespalib/datastore/i_compactable.h +++ b/vespalib/src/vespa/vespalib/datastore/i_compactable.h @@ -8,12 +8,13 @@ namespace vespalib::datastore { * Interface for moving an entry as part of compaction of data in old * buffers into new buffers. * - * Old entry is unchanged and not placed on any hold lists since we - * expect the old buffers to be freed soon anyway. + * A copy of the old entry is created and a reference to the new copy is + * returned. The old entry is unchanged and not placed on any hold + * lists since we expect the old buffers to be freed soon anyway. */ struct ICompactable { virtual ~ICompactable() = default; - virtual EntryRef move(EntryRef ref) = 0; + virtual EntryRef move_on_compact(EntryRef ref) = 0; }; } diff --git a/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h b/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h index bb105d41519..5a75a30d182 100644 --- a/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h +++ b/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h @@ -25,12 +25,12 @@ public: using generation_t = vespalib::GenerationHandler::generation_t; virtual ~IUniqueStoreDictionary() = default; virtual void freeze() = 0; - virtual void transfer_hold_lists(generation_t generation) = 0; - virtual void trim_hold_lists(generation_t firstUsed) = 0; + virtual void assign_generation(generation_t current_gen) = 0; + virtual void reclaim_memory(generation_t oldest_used_gen) = 0; virtual UniqueStoreAddResult add(const EntryComparator& comp, std::function<EntryRef(void)> insertEntry) = 0; virtual EntryRef find(const EntryComparator& comp) = 0; virtual void remove(const EntryComparator& comp, EntryRef ref) = 0; - virtual void 
move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) = 0; + virtual void move_keys_on_compact(ICompactable& compactable, const EntryRefFilter& compacting_buffers) = 0; virtual uint32_t get_num_uniques() const = 0; virtual vespalib::MemoryUsage get_memory_usage() const = 0; virtual void build(vespalib::ConstArrayRef<EntryRef>, vespalib::ConstArrayRef<uint32_t> ref_counts, std::function<void(EntryRef)> hold) = 0; diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp index 102aa1cefb3..a28c3071646 100644 --- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp +++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp @@ -107,27 +107,27 @@ ShardedHashMap::find(const EntryComparator& comp, EntryRef key_ref) const } void -ShardedHashMap::transfer_hold_lists(generation_t generation) +ShardedHashMap::assign_generation(generation_t current_gen) { for (size_t i = 0; i < num_shards; ++i) { auto map = _maps[i].load(std::memory_order_relaxed); if (map != nullptr) { - map->transfer_hold_lists(generation); + map->assign_generation(current_gen); } } - _gen_holder.assign_generation(generation); + _gen_holder.assign_generation(current_gen); } void -ShardedHashMap::trim_hold_lists(generation_t first_used) +ShardedHashMap::reclaim_memory(generation_t oldest_used_gen) { for (size_t i = 0; i < num_shards; ++i) { auto map = _maps[i].load(std::memory_order_relaxed); if (map != nullptr) { - map->trim_hold_lists(first_used); + map->reclaim_memory(oldest_used_gen); } } - _gen_holder.reclaim(first_used); + _gen_holder.reclaim(oldest_used_gen); } size_t @@ -171,12 +171,12 @@ ShardedHashMap::foreach_key(std::function<void(EntryRef)> callback) const } void -ShardedHashMap::move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) +ShardedHashMap::move_keys_on_compact(ICompactable& compactable, const EntryRefFilter& compacting_buffers) { for (size_t i = 0; i 
< num_shards; ++i) { auto map = _maps[i].load(std::memory_order_relaxed); if (map != nullptr) { - map->move_keys(compactable, compacting_buffers); + map->move_keys_on_compact(compactable, compacting_buffers); } } } diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h index e0ba9488351..572a8790828 100644 --- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h +++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h @@ -28,8 +28,8 @@ struct ICompactable; * A reader must own an appropriate GenerationHandler::Guard to ensure * that memory is held while it can be accessed by reader. * - * The writer must update generation and call transfer_hold_lists and - * trim_hold_lists as needed to free up memory no longer needed by any + * The writer must update generation and call assign_generation and + * reclaim_memory as needed to free up memory no longer needed by any * readers. */ class ShardedHashMap { @@ -52,13 +52,13 @@ public: KvType* remove(const EntryComparator& comp, EntryRef key_ref); KvType* find(const EntryComparator& comp, EntryRef key_ref); const KvType* find(const EntryComparator& comp, EntryRef key_ref) const; - void transfer_hold_lists(generation_t generation); - void trim_hold_lists(generation_t first_used); + void assign_generation(generation_t current_gen); + void reclaim_memory(generation_t oldest_used_gen); size_t size() const noexcept; const EntryComparator &get_default_comparator() const noexcept { return *_comp; } MemoryUsage get_memory_usage() const; void foreach_key(std::function<void(EntryRef)> callback) const; - void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers); + void move_keys_on_compact(ICompactable& compactable, const EntryRefFilter& compacting_buffers); bool normalize_values(std::function<EntryRef(EntryRef)> normalize); bool normalize_values(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& 
filter); void foreach_value(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter); diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.h b/vespalib/src/vespa/vespalib/datastore/unique_store.h index e7c374985a7..1313d57fbab 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store.h +++ b/vespalib/src/vespa/vespalib/datastore/unique_store.h @@ -70,8 +70,8 @@ public: inline const DataStoreType& get_data_store() const noexcept { return _allocator.get_data_store(); } // Pass on hold list management to underlying store - void transferHoldLists(generation_t generation); - void trimHoldLists(generation_t firstUsed); + void assign_generation(generation_t current_gen); + void reclaim_memory(generation_t oldest_used_gen); vespalib::GenerationHolder &getGenerationHolder() { return _store.getGenerationHolder(); } void setInitializing(bool initializing) { _store.setInitializing(initializing); } void freeze(); diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp index 37a56bf2561..b8493017020 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp +++ b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp @@ -109,20 +109,20 @@ private: } } - EntryRef move(EntryRef oldRef) override { + EntryRef move_on_compact(EntryRef oldRef) override { RefT iRef(oldRef); uint32_t buffer_id = iRef.bufferId(); auto &inner_mapping = _mapping[buffer_id]; assert(iRef.offset() < inner_mapping.size()); EntryRef &mappedRef = inner_mapping[iRef.offset()]; assert(!mappedRef.valid()); - EntryRef newRef = _store.move(oldRef); + EntryRef newRef = _store.move_on_compact(oldRef); mappedRef = newRef; return newRef; } void fillMapping() { - _dict.move_keys(*this, _filter); + _dict.move_keys_on_compact(*this, _filter); } public: @@ -190,18 +190,18 @@ UniqueStore<EntryT, RefT, Compare, Allocator>::bufferState(EntryRef ref) const template <typename EntryT, typename RefT, 
typename Compare, typename Allocator> void -UniqueStore<EntryT, RefT, Compare, Allocator>::transferHoldLists(generation_t generation) +UniqueStore<EntryT, RefT, Compare, Allocator>::assign_generation(generation_t current_gen) { - _dict->transfer_hold_lists(generation); - _store.transferHoldLists(generation); + _dict->assign_generation(current_gen); + _store.assign_generation(current_gen); } template <typename EntryT, typename RefT, typename Compare, typename Allocator> void -UniqueStore<EntryT, RefT, Compare, Allocator>::trimHoldLists(generation_t firstUsed) +UniqueStore<EntryT, RefT, Compare, Allocator>::reclaim_memory(generation_t oldest_used_gen) { - _dict->trim_hold_lists(firstUsed); - _store.trimHoldLists(firstUsed); + _dict->reclaim_memory(oldest_used_gen); + _store.reclaim_memory(oldest_used_gen); } template <typename EntryT, typename RefT, typename Compare, typename Allocator> diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h index 04df88ab4b9..0f6d9ddfc9b 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h +++ b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h @@ -35,7 +35,7 @@ public: ~UniqueStoreAllocator() override; EntryRef allocate(const EntryType& value); void hold(EntryRef ref); - EntryRef move(EntryRef ref) override; + EntryRef move_on_compact(EntryRef ref) override; const WrappedEntryType& get_wrapped(EntryRef ref) const { RefType iRef(ref); return *_store.template getEntry<WrappedEntryType>(iRef); diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp index 04a229d4ffa..8ad11b18218 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp +++ b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp @@ -28,7 +28,7 @@ UniqueStoreAllocator<EntryT, RefT>::UniqueStoreAllocator(std::shared_ptr<alloc:: 
template <typename EntryT, typename RefT> UniqueStoreAllocator<EntryT, RefT>::~UniqueStoreAllocator() { - _store.clearHoldLists(); + _store.reclaim_all_memory(); _store.dropBuffers(); } @@ -48,7 +48,7 @@ UniqueStoreAllocator<EntryT, RefT>::hold(EntryRef ref) template <typename EntryT, typename RefT> EntryRef -UniqueStoreAllocator<EntryT, RefT>::move(EntryRef ref) +UniqueStoreAllocator<EntryT, RefT>::move_on_compact(EntryRef ref) { return _store.template allocator<WrappedEntryType>(0).alloc(get_wrapped(ref)).ref; } diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h index 702bae38e7c..8c5f284bb14 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h +++ b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h @@ -74,12 +74,12 @@ public: UniqueStoreDictionary(std::unique_ptr<EntryComparator> compare); ~UniqueStoreDictionary() override; void freeze() override; - void transfer_hold_lists(generation_t generation) override; - void trim_hold_lists(generation_t firstUsed) override; + void assign_generation(generation_t current_gen) override; + void reclaim_memory(generation_t oldest_used_gen) override; UniqueStoreAddResult add(const EntryComparator& comp, std::function<EntryRef(void)> insertEntry) override; EntryRef find(const EntryComparator& comp) override; void remove(const EntryComparator& comp, EntryRef ref) override; - void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) override; + void move_keys_on_compact(ICompactable& compactable, const EntryRefFilter& compacting_buffers) override; uint32_t get_num_uniques() const override; vespalib::MemoryUsage get_memory_usage() const override; void build(vespalib::ConstArrayRef<EntryRef>, vespalib::ConstArrayRef<uint32_t> ref_counts, std::function<void(EntryRef)> hold) override; diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp 
b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp index 8029b66309d..6708b4c1448 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp +++ b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp @@ -41,25 +41,25 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::freeze() template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT> void -UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::transfer_hold_lists(generation_t generation) +UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::assign_generation(generation_t current_gen) { if constexpr (has_btree_dictionary) { - this->_btree_dict.getAllocator().transferHoldLists(generation); + this->_btree_dict.getAllocator().assign_generation(current_gen); } if constexpr (has_hash_dictionary) { - this->_hash_dict.transfer_hold_lists(generation); + this->_hash_dict.assign_generation(current_gen); } } template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT> void -UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::trim_hold_lists(generation_t firstUsed) +UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::reclaim_memory(generation_t oldest_used_gen) { if constexpr (has_btree_dictionary) { - this->_btree_dict.getAllocator().trimHoldLists(firstUsed); + this->_btree_dict.getAllocator().reclaim_memory(oldest_used_gen); } if constexpr (has_hash_dictionary) { - this->_hash_dict.trim_hold_lists(firstUsed); + this->_hash_dict.reclaim_memory(oldest_used_gen); } } @@ -140,7 +140,7 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::remove(const template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT> void -UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICompactable &compactable, const EntryRefFilter& compacting_buffers) +UniqueStoreDictionary<BTreeDictionaryT, ParentT, 
HashDictionaryT>::move_keys_on_compact(ICompactable &compactable, const EntryRefFilter& compacting_buffers) { if constexpr (has_btree_dictionary) { auto itr = this->_btree_dict.begin(); @@ -148,7 +148,7 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICo EntryRef oldRef(itr.getKey().load_relaxed()); assert(oldRef.valid()); if (compacting_buffers.has(oldRef)) { - EntryRef newRef(compactable.move(oldRef)); + EntryRef newRef(compactable.move_on_compact(oldRef)); this->_btree_dict.thaw(itr); itr.writeKey(AtomicEntryRef(newRef)); if constexpr (has_hash_dictionary) { @@ -160,7 +160,7 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICo ++itr; } } else { - this->_hash_dict.move_keys(compactable, compacting_buffers); + this->_hash_dict.move_keys_on_compact(compactable, compacting_buffers); } } diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h index be5fa8f6c1e..8977fd1cce8 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h +++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h @@ -111,7 +111,7 @@ public: ~UniqueStoreStringAllocator() override; EntryRef allocate(const char *value); void hold(EntryRef ref); - EntryRef move(EntryRef ref) override; + EntryRef move_on_compact(EntryRef ref) override; const UniqueStoreEntryBase& get_wrapped(EntryRef ref) const { RefType iRef(ref); auto &state = _store.getBufferState(iRef.bufferId()); diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp index b5405cd22b5..65cab4850ba 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp +++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp @@ -30,7 +30,7 @@ 
UniqueStoreStringAllocator<RefT>::UniqueStoreStringAllocator(std::shared_ptr<all template <typename RefT> UniqueStoreStringAllocator<RefT>::~UniqueStoreStringAllocator() { - _store.clearHoldLists(); + _store.reclaim_all_memory(); _store.dropBuffers(); } @@ -71,7 +71,7 @@ UniqueStoreStringAllocator<RefT>::hold(EntryRef ref) template <typename RefT> EntryRef -UniqueStoreStringAllocator<RefT>::move(EntryRef ref) +UniqueStoreStringAllocator<RefT>::move_on_compact(EntryRef ref) { RefT iRef(ref); uint32_t type_id = _store.getTypeId(iRef.bufferId()); diff --git a/vespalib/src/vespa/vespalib/util/generationhandler.cpp b/vespalib/src/vespa/vespalib/util/generationhandler.cpp index d1cc0271068..3562926d88d 100644 --- a/vespalib/src/vespa/vespalib/util/generationhandler.cpp +++ b/vespalib/src/vespa/vespalib/util/generationhandler.cpp @@ -111,7 +111,7 @@ GenerationHandler::Guard::operator=(Guard &&rhs) } void -GenerationHandler::updateFirstUsedGeneration() +GenerationHandler::update_oldest_used_generation() { for (;;) { if (_first == _last.load(std::memory_order_relaxed)) @@ -125,12 +125,12 @@ GenerationHandler::updateFirstUsedGeneration() toFree->_next = _free; _free = toFree; } - _firstUsedGeneration.store(_first->_generation, std::memory_order_relaxed); + _oldest_used_generation.store(_first->_generation, std::memory_order_relaxed); } GenerationHandler::GenerationHandler() : _generation(0), - _firstUsedGeneration(0), + _oldest_used_generation(0), _last(nullptr), _first(nullptr), _free(nullptr), @@ -144,7 +144,7 @@ GenerationHandler::GenerationHandler() GenerationHandler::~GenerationHandler(void) { - updateFirstUsedGeneration(); + update_oldest_used_generation(); assert(_first == _last.load(std::memory_order_relaxed)); while (_free != nullptr) { GenerationHold *toFree = _free; @@ -190,7 +190,7 @@ GenerationHandler::incGeneration() // reader set_generation(ngen); last->_generation.store(ngen, std::memory_order_relaxed); - updateFirstUsedGeneration(); + 
update_oldest_used_generation(); return; } GenerationHold *nhold = nullptr; @@ -207,7 +207,7 @@ GenerationHandler::incGeneration() last->_next = nhold; set_generation(ngen); _last.store(nhold, std::memory_order_release); - updateFirstUsedGeneration(); + update_oldest_used_generation(); } uint32_t @@ -215,7 +215,7 @@ GenerationHandler::getGenerationRefCount(generation_t gen) const { if (static_cast<sgeneration_t>(gen - getCurrentGeneration()) > 0) return 0u; - if (static_cast<sgeneration_t>(getFirstUsedGeneration() - gen) > 0) + if (static_cast<sgeneration_t>(get_oldest_used_generation() - gen) > 0) return 0u; for (GenerationHold *hold = _first; hold != nullptr; hold = hold->_next) { if (hold->_generation.load(std::memory_order_relaxed) == gen) diff --git a/vespalib/src/vespa/vespalib/util/generationhandler.h b/vespalib/src/vespa/vespalib/util/generationhandler.h index 9637ad0e414..6ba71b7f5fb 100644 --- a/vespalib/src/vespa/vespalib/util/generationhandler.h +++ b/vespalib/src/vespa/vespalib/util/generationhandler.h @@ -73,7 +73,7 @@ public: private: std::atomic<generation_t> _generation; - std::atomic<generation_t> _firstUsedGeneration; + std::atomic<generation_t> _oldest_used_generation; std::atomic<GenerationHold *> _last; // Points to "current generation" entry GenerationHold *_first; // Points to "firstUsedGeneration" entry GenerationHold *_free; // List of free entries @@ -101,17 +101,17 @@ public: void incGeneration(); /** - * Update first used generation. + * Update the oldest used generation. * Should be called by the writer thread. */ - void updateFirstUsedGeneration(); + void update_oldest_used_generation(); /** - * Returns the first generation guarded by a reader. It might be too low - * if writer hasn't updated first used generation after last reader left. + * Returns the oldest generation guarded by a reader. + * It might be too low if writer hasn't updated oldest used generation after last reader left. 
*/ - generation_t getFirstUsedGeneration() const noexcept { - return _firstUsedGeneration.load(std::memory_order_relaxed); + generation_t get_oldest_used_generation() const noexcept { + return _oldest_used_generation.load(std::memory_order_relaxed); } /** diff --git a/vespalib/src/vespa/vespalib/util/rcuvector.h b/vespalib/src/vespa/vespalib/util/rcuvector.h index 5d084fe3815..b0929303692 100644 --- a/vespalib/src/vespa/vespalib/util/rcuvector.h +++ b/vespalib/src/vespa/vespalib/util/rcuvector.h @@ -182,7 +182,7 @@ public: /** * Remove all old data vectors where generation < firstUsed. **/ - void removeOldGenerations(generation_t firstUsed); + void reclaim_memory(generation_t oldest_used_gen); MemoryUsage getMemoryUsage() const override; }; diff --git a/vespalib/src/vespa/vespalib/util/rcuvector.hpp b/vespalib/src/vespa/vespalib/util/rcuvector.hpp index e551bb17db0..eadda8ac1e9 100644 --- a/vespalib/src/vespa/vespalib/util/rcuvector.hpp +++ b/vespalib/src/vespa/vespalib/util/rcuvector.hpp @@ -187,9 +187,9 @@ RcuVector<T>::~RcuVector() template <typename T> void -RcuVector<T>::removeOldGenerations(generation_t firstUsed) +RcuVector<T>::reclaim_memory(generation_t oldest_used_gen) { - _genHolderStore.reclaim(firstUsed); + _genHolderStore.reclaim(oldest_used_gen); } template <typename T> |