Diffstat (limited to 'vespalib')
-rw-r--r--  vespalib/CMakeLists.txt | 12
-rw-r--r--  vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp | 30
-rw-r--r--  vespalib/src/tests/btree/btree_store/btree_store_test.cpp | 4
-rw-r--r--  vespalib/src/tests/btree/btree_test.cpp | 30
-rw-r--r--  vespalib/src/tests/btree/btreeaggregation_test.cpp | 20
-rw-r--r--  vespalib/src/tests/btree/frozenbtree_test.cpp | 8
-rw-r--r--  vespalib/src/tests/coro/detached/CMakeLists.txt | 9
-rw-r--r--  vespalib/src/tests/coro/detached/detached_test.cpp | 19
-rw-r--r--  vespalib/src/tests/coro/lazy/CMakeLists.txt | 9
-rw-r--r--  vespalib/src/tests/coro/lazy/lazy_test.cpp | 121
-rw-r--r--  vespalib/src/tests/datastore/array_store/array_store_test.cpp | 50
-rw-r--r--  vespalib/src/tests/datastore/buffer_stats/CMakeLists.txt | 9
-rw-r--r--  vespalib/src/tests/datastore/buffer_stats/buffer_stats_test.cpp | 34
-rw-r--r--  vespalib/src/tests/datastore/datastore/datastore_test.cpp | 120
-rw-r--r--  vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp | 12
-rw-r--r--  vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp | 16
-rw-r--r--  vespalib/src/tests/datastore/unique_store/unique_store_test.cpp | 38
-rw-r--r--  vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp | 4
-rw-r--r--  vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp | 74
-rw-r--r--  vespalib/src/tests/util/generation_hold_list/CMakeLists.txt | 9
-rw-r--r--  vespalib/src/tests/util/generation_hold_list/generation_hold_list_test.cpp | 95
-rw-r--r--  vespalib/src/tests/util/generation_holder/CMakeLists.txt | 9
-rw-r--r--  vespalib/src/tests/util/generation_holder/generation_holder_test.cpp | 38
-rw-r--r--  vespalib/src/tests/util/generationhandler/generationhandler_test.cpp | 28
-rw-r--r--  vespalib/src/tests/util/generationhandler_stress/generation_handler_stress_test.cpp | 8
-rw-r--r--  vespalib/src/tests/util/rcuvector/rcuvector_test.cpp | 50
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btree.hpp | 2
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodeallocator.h | 11
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp | 18
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodestore.h | 22
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodestore.hpp | 23
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreestore.h | 18
-rw-r--r--  vespalib/src/vespa/vespalib/coro/CMakeLists.txt | 5
-rw-r--r--  vespalib/src/vespa/vespalib/coro/detached.h | 32
-rw-r--r--  vespalib/src/vespa/vespalib/coro/lazy.h | 111
-rw-r--r--  vespalib/src/vespa/vespalib/coro/sync_wait.h | 59
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/CMakeLists.txt | 2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/allocator.hpp | 6
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/array_store.h | 10
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/array_store.hpp | 49
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp | 57
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/buffer_stats.h | 76
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/bufferstate.cpp | 86
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/bufferstate.h | 66
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/compact_buffer_candidates.cpp | 2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/compaction_context.cpp | 2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp | 7
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/compaction_strategy.h | 1
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastore.cpp | 2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastore.h | 28
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastore.hpp | 59
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastorebase.cpp | 192
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastorebase.h | 164
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp | 63
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h | 33
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/free_list.cpp | 1
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/i_compactable.h | 7
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h | 6
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/memory_stats.cpp | 40
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/memory_stats.h | 32
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp | 2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp | 24
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h | 10
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store.h | 4
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store.hpp | 18
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h | 2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp | 4
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h | 6
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp | 18
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h | 2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp | 8
-rw-r--r--  vespalib/src/vespa/vespalib/util/generation_hold_list.h | 106
-rw-r--r--  vespalib/src/vespa/vespalib/util/generation_hold_list.hpp | 88
-rw-r--r--  vespalib/src/vespa/vespalib/util/generationhandler.cpp | 14
-rw-r--r--  vespalib/src/vespa/vespalib/util/generationhandler.h | 14
-rw-r--r--  vespalib/src/vespa/vespalib/util/generationholder.cpp | 60
-rw-r--r--  vespalib/src/vespa/vespalib/util/generationholder.h | 70
-rw-r--r--  vespalib/src/vespa/vespalib/util/rcuvector.h | 2
-rw-r--r--  vespalib/src/vespa/vespalib/util/rcuvector.hpp | 14
79 files changed, 1541 insertions(+), 1073 deletions(-)
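
The dominant change in this diff is a rename of the generation-based reclamation API: transferHoldLists() becomes assign_generation(), trimHoldLists() becomes reclaim_memory(), and getFirstUsedGeneration() becomes get_oldest_used_generation(). A minimal sketch of the resulting commit cycle, using the new names exactly as the test fixtures below use them (Store is an illustrative placeholder for any component exposing the renamed pair):

    #include <vespa/vespalib/util/generationhandler.h>

    template <typename Store>
    void commit(vespalib::GenerationHandler& gen_handler, Store& store) {
        store.freeze();                                                  // publish frozen structures to readers
        store.assign_generation(gen_handler.getCurrentGeneration());     // tag held data with the current generation
        gen_handler.incGeneration();
        store.reclaim_memory(gen_handler.get_oldest_used_generation());  // free data no active reader can still see
    }
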
diff --git a/vespalib/CMakeLists.txt b/vespalib/CMakeLists.txt
index 7aafb7c364e..6ecac23d5fa 100644
--- a/vespalib/CMakeLists.txt
+++ b/vespalib/CMakeLists.txt
@@ -42,6 +42,8 @@ vespa_define_module(
src/tests/component
src/tests/compress
src/tests/compression
+ src/tests/coro/detached
+ src/tests/coro/lazy
src/tests/cpu_usage
src/tests/crc
src/tests/crypto
@@ -54,6 +56,7 @@ vespa_define_module(
src/tests/data/smart_buffer
src/tests/datastore/array_store
src/tests/datastore/array_store_config
+ src/tests/datastore/buffer_stats
src/tests/datastore/buffer_type
src/tests/datastore/compact_buffer_candidates
src/tests/datastore/datastore
@@ -180,9 +183,9 @@ vespa_define_module(
src/tests/util/bfloat16
src/tests/util/cgroup_resource_limits
src/tests/util/file_area_freelist
+ src/tests/util/generation_hold_list
src/tests/util/generationhandler
src/tests/util/generationhandler_stress
- src/tests/util/generation_holder
src/tests/util/hamming
src/tests/util/md5
src/tests/util/mmap_file_allocator
@@ -201,9 +204,13 @@ vespa_define_module(
src/tests/fastlib/text
LIBS
+ src/vespa/fastlib/io
+ src/vespa/fastlib/text
+ src/vespa/fastlib/text/apps
src/vespa/vespalib
src/vespa/vespalib/btree
src/vespa/vespalib/component
+ src/vespa/vespalib/coro
src/vespa/vespalib/crypto
src/vespa/vespalib/data
src/vespa/vespalib/data/slime
@@ -230,7 +237,4 @@ vespa_define_module(
src/vespa/vespalib/time
src/vespa/vespalib/trace
src/vespa/vespalib/util
- src/vespa/fastlib/io
- src/vespa/fastlib/text
- src/vespa/fastlib/text/apps
)
diff --git a/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp b/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp
index c68ff07491e..caed5c3543c 100644
--- a/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp
+++ b/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp
@@ -59,15 +59,13 @@ public:
AtomicEntryRef add_relaxed(uint32_t value) { return AtomicEntryRef(add(value)); }
void hold(const AtomicEntryRef& ref) { _store.holdElem(ref.load_relaxed(), 1); }
EntryRef move(EntryRef ref);
- void transfer_hold_lists(generation_t gen) { _store.transferHoldLists(gen); }
- void trim_hold_lists(generation_t gen) { _store.trimHoldLists(gen); }
+ void assign_generation(generation_t current_gen) { _store.assign_generation(current_gen); }
+ void reclaim_memory(generation_t gen) { _store.reclaim_memory(gen); }
uint32_t get(EntryRef ref) const { return _store.getEntry(ref); }
uint32_t get_acquire(const AtomicEntryRef& ref) const { return get(ref.load_acquire()); }
uint32_t get_relaxed(const AtomicEntryRef& ref) const { return get(ref.load_relaxed()); }
std::unique_ptr<vespalib::datastore::CompactingBuffers> start_compact();
static constexpr bool is_indirect = true;
- static uint32_t get_offset_bits() { return StoreRefType::offset_bits; }
- static uint32_t get_num_buffers() { return StoreRefType::numBuffers(); }
bool has_held_buffers() const noexcept { return _store.has_held_buffers(); }
};
@@ -82,7 +80,7 @@ std::unique_ptr<vespalib::datastore::CompactingBuffers>
RealIntStore::start_compact()
{
// Use a compaction strategy that will compact all active buffers
- CompactionStrategy compaction_strategy(0.0, 0.0, get_num_buffers(), 1.0);
+ auto compaction_strategy = CompactionStrategy::make_compact_all_active_buffers_strategy();
CompactionSpec compaction_spec(true, false);
return _store.start_compact_worst_buffers(compaction_spec, compaction_strategy);
}
@@ -120,8 +118,8 @@ public:
static uint32_t add(uint32_t value) noexcept { return value; }
static uint32_t add_relaxed(uint32_t value) noexcept { return value; }
static void hold(uint32_t) noexcept { }
- static void transfer_hold_lists(generation_t) noexcept { }
- static void trim_hold_lists(generation_t) noexcept { }
+ static void assign_generation(generation_t) noexcept { }
+ static void reclaim_memory(generation_t) noexcept { }
static uint32_t get(uint32_t value) noexcept { return value; }
static uint32_t get_acquire(uint32_t value) noexcept { return value; }
static uint32_t get_relaxed(uint32_t value) noexcept { return value; }
@@ -276,15 +274,15 @@ Fixture<Params>::commit()
auto &allocator = _tree.getAllocator();
allocator.freeze();
auto current_gen = _generationHandler.getCurrentGeneration();
- allocator.transferHoldLists(current_gen);
- _keys.transfer_hold_lists(current_gen);
- _values.transfer_hold_lists(current_gen);
- allocator.transferHoldLists(_generationHandler.getCurrentGeneration());
+ allocator.assign_generation(current_gen);
+ _keys.assign_generation(current_gen);
+ _values.assign_generation(current_gen);
+ allocator.assign_generation(_generationHandler.getCurrentGeneration());
_generationHandler.incGeneration();
- auto first_used_gen = _generationHandler.getFirstUsedGeneration();
- allocator.trimHoldLists(first_used_gen);
- _keys.trim_hold_lists(first_used_gen);
- _values.trim_hold_lists(first_used_gen);
+ auto oldest_used_gen = _generationHandler.get_oldest_used_generation();
+ allocator.reclaim_memory(oldest_used_gen);
+ _keys.reclaim_memory(oldest_used_gen);
+ _values.reclaim_memory(oldest_used_gen);
}
template <typename Params>
@@ -329,7 +327,7 @@ void
Fixture<Params>::compact_tree()
{
// Use a compaction strategy that will compact all active buffers
- CompactionStrategy compaction_strategy(0.0, 0.0, RefType::numBuffers(), 1.0);
+ auto compaction_strategy = CompactionStrategy::make_compact_all_active_buffers_strategy();
_tree.compact_worst(compaction_strategy);
_writeItr = _tree.begin();
_compact_tree.track_compacted();
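
Several call sites in this diff replace a hand-built CompactionStrategy(0.0, 0.0, num_buffers, 1.0) with a named factory. A sketch condensed from the hunks above and below (store stands for a RealIntStore-style wrapper; the factory and the finish() call both appear in this diff):

    // Compact every active buffer, regardless of dead-memory ratios.
    CompactionSpec spec(true, false); // compact memory, not address space
    auto strategy = CompactionStrategy::make_compact_all_active_buffers_strategy();
    auto compacting_buffers = store.start_compact_worst_buffers(spec, strategy);
    // ... move live entries to active buffers ...
    compacting_buffers->finish(); // hand the emptied buffers over to the hold list
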
diff --git a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
index 4da34c64ed9..0370b1ce2eb 100644
--- a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
+++ b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
@@ -31,9 +31,9 @@ protected:
void inc_generation()
{
_store.freeze();
- _store.transferHoldLists(_gen_handler.getCurrentGeneration());
+ _store.assign_generation(_gen_handler.getCurrentGeneration());
_gen_handler.incGeneration();
- _store.trimHoldLists(_gen_handler.getFirstUsedGeneration());
+ _store.reclaim_memory(_gen_handler.get_oldest_used_generation());
}
EntryRef add_sequence(int start_key, int end_key)
diff --git a/vespalib/src/tests/btree/btree_test.cpp b/vespalib/src/tests/btree/btree_test.cpp
index 92f55681c0f..f2896cb783c 100644
--- a/vespalib/src/tests/btree/btree_test.cpp
+++ b/vespalib/src/tests/btree/btree_test.cpp
@@ -163,9 +163,9 @@ void
cleanup(GenerationHandler & g, ManagerType & m)
{
m.freeze();
- m.transferHoldLists(g.getCurrentGeneration());
+ m.assign_generation(g.getCurrentGeneration());
g.incGeneration();
- m.trimHoldLists(g.getFirstUsedGeneration());
+ m.reclaim_memory(g.get_oldest_used_generation());
}
template <typename ManagerType, typename NodeType>
@@ -862,19 +862,21 @@ TEST_F(BTreeTest, require_that_we_can_insert_and_remove_from_tree)
}
// compact full tree by calling incremental compaction methods in a loop
{
+ // Use a compaction strategy that will compact all active buffers
+ auto compaction_strategy = CompactionStrategy::make_compact_all_active_buffers_strategy();
MyTree::NodeAllocatorType &manager = tree.getAllocator();
- std::vector<uint32_t> toHold = manager.startCompact();
+ auto compacting_buffers = manager.start_compact_worst(compaction_strategy);
MyTree::Iterator itr = tree.begin();
tree.setRoot(itr.moveFirstLeafNode(tree.getRoot()));
while (itr.valid()) {
// LOG(info, "Leaf moved to %d", UNWRAP(itr.getKey()));
itr.moveNextLeafNode();
}
- manager.finishCompact(toHold);
+ compacting_buffers->finish();
manager.freeze();
- manager.transferHoldLists(g.getCurrentGeneration());
+ manager.assign_generation(g.getCurrentGeneration());
g.incGeneration();
- manager.trimHoldLists(g.getFirstUsedGeneration());
+ manager.reclaim_memory(g.get_oldest_used_generation());
}
// remove entries
for (size_t i = 0; i < numEntries; ++i) {
@@ -1104,9 +1106,9 @@ TEST_F(BTreeTest, require_that_memory_usage_is_calculated)
EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage()));
// trim hold lists
- tm.transferHoldLists(gh.getCurrentGeneration());
+ tm.assign_generation(gh.getCurrentGeneration());
gh.incGeneration();
- tm.trimHoldLists(gh.getFirstUsedGeneration());
+ tm.reclaim_memory(gh.get_oldest_used_generation());
mu = vespalib::MemoryUsage();
mu.incAllocatedBytes(adjustAllocatedBytes(initialInternalNodes, sizeof(INode)));
mu.incAllocatedBytes(adjustAllocatedBytes(initialLeafNodes, sizeof(LNode)));
@@ -1280,9 +1282,9 @@ TEST_F(BTreeTest, require_that_small_nodes_works)
s.clear(root);
s.clearBuilder();
s.freeze();
- s.transferHoldLists(g.getCurrentGeneration());
+ s.assign_generation(g.getCurrentGeneration());
g.incGeneration();
- s.trimHoldLists(g.getFirstUsedGeneration());
+ s.reclaim_memory(g.get_oldest_used_generation());
}
namespace {
@@ -1414,9 +1416,9 @@ TEST_F(BTreeTest, require_that_apply_works)
s.clear(root);
s.clearBuilder();
s.freeze();
- s.transferHoldLists(g.getCurrentGeneration());
+ s.assign_generation(g.getCurrentGeneration());
g.incGeneration();
- s.trimHoldLists(g.getFirstUsedGeneration());
+ s.reclaim_memory(g.get_oldest_used_generation());
}
class MyTreeTestIterator : public MyTree::Iterator
@@ -1551,9 +1553,9 @@ inc_generation(GenerationHandler &g, Tree &t)
{
auto &s = t.getAllocator();
s.freeze();
- s.transferHoldLists(g.getCurrentGeneration());
+ s.assign_generation(g.getCurrentGeneration());
g.incGeneration();
- s.trimHoldLists(g.getFirstUsedGeneration());
+ s.reclaim_memory(g.get_oldest_used_generation());
}
template <typename Tree>
diff --git a/vespalib/src/tests/btree/btreeaggregation_test.cpp b/vespalib/src/tests/btree/btreeaggregation_test.cpp
index f4300499fcd..fb394df9861 100644
--- a/vespalib/src/tests/btree/btreeaggregation_test.cpp
+++ b/vespalib/src/tests/btree/btreeaggregation_test.cpp
@@ -15,6 +15,7 @@
#include <vespa/vespalib/btree/btreestore.hpp>
#include <vespa/vespalib/btree/btreeaggregator.hpp>
#include <vespa/vespalib/datastore/buffer_type.hpp>
+#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <vespa/vespalib/test/btree/btree_printer.h>
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/vespalib/util/rand48.h>
@@ -28,6 +29,7 @@
LOG_SETUP("btreeaggregation_test");
using vespalib::GenerationHandler;
+using vespalib::datastore::CompactionStrategy;
using vespalib::datastore::EntryRef;
namespace vespalib::btree {
@@ -270,9 +272,9 @@ void
freezeTree(GenerationHandler &g, ManagerType &m)
{
m.freeze();
- m.transferHoldLists(g.getCurrentGeneration());
+ m.assign_generation(g.getCurrentGeneration());
g.incGeneration();
- m.trimHoldLists(g.getFirstUsedGeneration());
+ m.reclaim_memory(g.get_oldest_used_generation());
}
template <typename ManagerType>
@@ -877,19 +879,21 @@ Test::requireThatWeCanInsertAndRemoveFromTree()
}
// compact full tree by calling incremental compaction methods in a loop
{
+ // Use a compaction strategy that will compact all active buffers
+ auto compaction_strategy = CompactionStrategy::make_compact_all_active_buffers_strategy();
MyTree::NodeAllocatorType &manager = tree.getAllocator();
- std::vector<uint32_t> toHold = manager.startCompact();
+ auto compacting_buffers = manager.start_compact_worst(compaction_strategy);
MyTree::Iterator itr = tree.begin();
tree.setRoot(itr.moveFirstLeafNode(tree.getRoot()));
while (itr.valid()) {
// LOG(info, "Leaf moved to %d", UNWRAP(itr.getKey()));
itr.moveNextLeafNode();
}
- manager.finishCompact(toHold);
+ compacting_buffers->finish();
manager.freeze();
- manager.transferHoldLists(g.getCurrentGeneration());
+ manager.assign_generation(g.getCurrentGeneration());
g.incGeneration();
- manager.trimHoldLists(g.getFirstUsedGeneration());
+ manager.reclaim_memory(g.get_oldest_used_generation());
}
// remove entries
for (size_t i = 0; i < numEntries; ++i) {
@@ -1186,9 +1190,9 @@ Test::requireThatSmallNodesWorks()
s.clear(root);
s.clearBuilder();
s.freeze();
- s.transferHoldLists(g.getCurrentGeneration());
+ s.assign_generation(g.getCurrentGeneration());
g.incGeneration();
- s.trimHoldLists(g.getFirstUsedGeneration());
+ s.reclaim_memory(g.get_oldest_used_generation());
}
void
diff --git a/vespalib/src/tests/btree/frozenbtree_test.cpp b/vespalib/src/tests/btree/frozenbtree_test.cpp
index 01748b9edeb..3471d5dc3df 100644
--- a/vespalib/src/tests/btree/frozenbtree_test.cpp
+++ b/vespalib/src/tests/btree/frozenbtree_test.cpp
@@ -114,7 +114,7 @@ FrozenBTreeTest::freeTree(bool verbose)
static_cast<uint64_t>(_intTree->getUsedMemory()),
static_cast<uint64_t>(_intTree->getHeldMemory()));
_intTree->dropFrozen();
- _intTree->removeOldGenerations(_intTree->getGeneration() + 1);
+ _intTree->reclaim_memory(_intTree->getGeneration() + 1);
LOG(info,
"freeTree after unhold: %" PRIu64 " (%" PRIu64 " held)",
static_cast<uint64_t>(_intTree->getUsedMemory()),
@@ -134,9 +134,9 @@ FrozenBTreeTest::freeTree(bool verbose)
(void) verbose;
_tree->clear(*_allocator);
_allocator->freeze();
- _allocator->transferHoldLists(_generationHandler->getCurrentGeneration());
+ _allocator->assign_generation(_generationHandler->getCurrentGeneration());
_generationHandler->incGeneration();
- _allocator->trimHoldLists(_generationHandler->getFirstUsedGeneration());
+ _allocator->reclaim_memory(_generationHandler->get_oldest_used_generation());
delete _tree;
_tree = NULL;
delete _allocator;
@@ -425,7 +425,7 @@ FrozenBTreeTest::Main()
EXPECT_TRUE(_tree->getFrozenView(*_allocator).empty());
_allocator->freeze();
EXPECT_FALSE(_tree->getFrozenView(*_allocator).empty());
- _allocator->transferHoldLists(_generationHandler->getCurrentGeneration());
+ _allocator->assign_generation(_generationHandler->getCurrentGeneration());
lookupFrozenRandomValues(*_tree, *_allocator, _randomValues);
traverseTreeIterator(*_tree,
*_allocator,
diff --git a/vespalib/src/tests/coro/detached/CMakeLists.txt b/vespalib/src/tests/coro/detached/CMakeLists.txt
new file mode 100644
index 00000000000..237b8615fec
--- /dev/null
+++ b/vespalib/src/tests/coro/detached/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_detached_test_app TEST
+ SOURCES
+ detached_test.cpp
+ DEPENDS
+ vespalib
+ GTest::GTest
+)
+vespa_add_test(NAME vespalib_detached_test_app COMMAND vespalib_detached_test_app)
diff --git a/vespalib/src/tests/coro/detached/detached_test.cpp b/vespalib/src/tests/coro/detached/detached_test.cpp
new file mode 100644
index 00000000000..f23d16cc75c
--- /dev/null
+++ b/vespalib/src/tests/coro/detached/detached_test.cpp
@@ -0,0 +1,19 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/coro/detached.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using vespalib::coro::Detached;
+
+Detached set_result(int &res, int value) {
+ res = value;
+ co_return;
+}
+
+TEST(DetachedTest, call_detached_coroutine) {
+ int result = 0;
+ set_result(result, 42);
+ EXPECT_EQ(result, 42);
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
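
The test above implies that a Detached coroutine starts eagerly and is never awaited: its side effects are visible as soon as the call returns. A hypothetical fire-and-forget use under that assumption (touch_counter is illustrative, not part of this diff):

    #include <vespa/vespalib/coro/detached.h>
    #include <atomic>

    using vespalib::coro::Detached;

    Detached touch_counter(std::atomic<int>& counter) {
        ++counter;  // observable before the call returns; the handle is never exposed
        co_return;
    }
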
diff --git a/vespalib/src/tests/coro/lazy/CMakeLists.txt b/vespalib/src/tests/coro/lazy/CMakeLists.txt
new file mode 100644
index 00000000000..daa11eb3576
--- /dev/null
+++ b/vespalib/src/tests/coro/lazy/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_lazy_test_app TEST
+ SOURCES
+ lazy_test.cpp
+ DEPENDS
+ vespalib
+ GTest::GTest
+)
+vespa_add_test(NAME vespalib_lazy_test_app COMMAND vespalib_lazy_test_app)
diff --git a/vespalib/src/tests/coro/lazy/lazy_test.cpp b/vespalib/src/tests/coro/lazy/lazy_test.cpp
new file mode 100644
index 00000000000..b838152249e
--- /dev/null
+++ b/vespalib/src/tests/coro/lazy/lazy_test.cpp
@@ -0,0 +1,121 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/coro/lazy.h>
+#include <vespa/vespalib/coro/sync_wait.h>
+#include <vespa/vespalib/util/require.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <mutex>
+
+#include <thread>
+
+using vespalib::coro::Lazy;
+using vespalib::coro::sync_wait;
+
+std::mutex thread_lock;
+std::vector<std::thread> threads;
+struct JoinThreads {
+ ~JoinThreads() {
+ for (auto &thread: threads) {
+ thread.join();
+ }
+ threads.clear();
+ }
+};
+
+auto run_in_other_thread() {
+ struct awaiter {
+ bool await_ready() const noexcept { return false; }
+ void await_suspend(std::coroutine_handle<> handle) const {
+ auto guard = std::lock_guard(thread_lock);
+ threads.push_back(std::thread(handle));
+ }
+ void await_resume() const noexcept {}
+ };
+ return awaiter();
+}
+
+Lazy<int> make_lazy(int value) {
+ co_return value;
+}
+
+Lazy<int> async_add_values(int a, int b) {
+ auto lazy_a = make_lazy(a);
+ auto lazy_b = make_lazy(b);
+ co_return (co_await lazy_a + co_await lazy_b);
+}
+
+Lazy<int> async_sum(Lazy<int> a, Lazy<int> b) {
+ co_return (co_await a + co_await b);
+}
+
+Lazy<std::unique_ptr<int>> move_only_int() {
+ co_return std::make_unique<int>(123);
+}
+
+Lazy<int> extract_rvalue() {
+ auto res = co_await move_only_int();
+ co_return *res;
+}
+
+Lazy<int> will_throw() {
+ REQUIRE_FAILED("failed on purpose");
+ co_return 123;
+}
+
+template<typename T>
+Lazy<T> forward_value(Lazy<T> value) {
+ co_return co_await std::move(value);
+}
+
+template <typename T>
+Lazy<T> switch_thread(Lazy<T> value) {
+ std::cerr << "switching from thread " << std::this_thread::get_id() << std::endl;
+ co_await run_in_other_thread();
+ std::cerr << "........... to thread " << std::this_thread::get_id() << std::endl;
+ co_return co_await value;
+}
+
+TEST(LazyTest, simple_lazy_value) {
+ auto lazy = make_lazy(42);
+ auto result = sync_wait(lazy);
+ EXPECT_EQ(result, 42);
+}
+
+TEST(LazyTest, async_sum_of_async_values) {
+ auto lazy = async_add_values(10, 20);
+ auto result = sync_wait(lazy);
+ EXPECT_EQ(result, 30);
+}
+
+TEST(LazyTest, async_sum_of_external_async_values) {
+ auto a = make_lazy(100);
+ auto b = make_lazy(200);
+ auto lazy = async_sum(std::move(a), std::move(b));
+ auto result = sync_wait(lazy);
+ EXPECT_EQ(result, 300);
+}
+
+TEST(LazyTest, extract_rvalue_from_lazy_in_coroutine) {
+ auto lazy = extract_rvalue();
+ auto result = sync_wait(lazy);
+ EXPECT_EQ(result, 123);
+}
+
+TEST(LazyTest, extract_rvalue_from_lazy_in_sync_wait) {
+ auto result = sync_wait(move_only_int());
+ EXPECT_EQ(*result, 123);
+}
+
+TEST(LazyTest, calculate_result_in_another_thread) {
+ JoinThreads thread_guard;
+ auto result = sync_wait(switch_thread(make_lazy(7)));
+ EXPECT_EQ(result, 7);
+}
+
+TEST(LazyTest, exceptions_are_propagated) {
+ JoinThreads thread_guard;
+ auto lazy = switch_thread(forward_value(will_throw()));
+ EXPECT_THROW(sync_wait(lazy), vespalib::RequireFailedException);
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
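
Taken together, these tests pin down the intended Lazy<T> contract: the coroutine body runs only when the value is co_awaited (or driven by sync_wait), and move-only results can be extracted exactly once. A condensed usage sketch based on the tests above:

    #include <vespa/vespalib/coro/lazy.h>
    #include <vespa/vespalib/coro/sync_wait.h>

    using vespalib::coro::Lazy;
    using vespalib::coro::sync_wait;

    Lazy<int> answer() { co_return 42; }

    int use_answer() {
        auto lazy = answer();   // nothing has executed yet
        return sync_wait(lazy); // drives the coroutine to completion and returns 42
    }
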
diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
index 1e8632aee95..e9ea1a67156 100644
--- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp
+++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
@@ -20,7 +20,7 @@ using vespalib::alloc::MemoryAllocator;
using vespalib::alloc::test::MemoryAllocatorObserver;
using AllocStats = MemoryAllocatorObserver::Stats;
-using BufferStats = vespalib::datastore::test::BufferStats;
+using TestBufferStats = vespalib::datastore::test::BufferStats;
using MemStats = vespalib::datastore::test::MemStats;
namespace {
@@ -98,16 +98,16 @@ struct ArrayStoreTest : public TestT
}
void assertBufferState(EntryRef ref, const MemStats& expStats) const {
EXPECT_EQ(expStats._used, store.bufferState(ref).size());
- EXPECT_EQ(expStats._hold, store.bufferState(ref).getHoldElems());
- EXPECT_EQ(expStats._dead, store.bufferState(ref).getDeadElems());
+ EXPECT_EQ(expStats._hold, store.bufferState(ref).stats().hold_elems());
+ EXPECT_EQ(expStats._dead, store.bufferState(ref).stats().dead_elems());
}
- void assert_buffer_stats(EntryRef ref, const BufferStats& exp_stats) const {
+ void assert_buffer_stats(EntryRef ref, const TestBufferStats& exp_stats) const {
auto& state = store.bufferState(ref);
EXPECT_EQ(exp_stats._used, state.size());
- EXPECT_EQ(exp_stats._hold, state.getHoldElems());
- EXPECT_EQ(exp_stats._dead, state.getDeadElems());
- EXPECT_EQ(exp_stats._extra_used, state.getExtraUsedBytes());
- EXPECT_EQ(exp_stats._extra_hold, state.getExtraHoldBytes());
+ EXPECT_EQ(exp_stats._hold, state.stats().hold_elems());
+ EXPECT_EQ(exp_stats._dead, state.stats().dead_elems());
+ EXPECT_EQ(exp_stats._extra_used, state.stats().extra_used_bytes());
+ EXPECT_EQ(exp_stats._extra_hold, state.stats().extra_hold_bytes());
}
void assertMemoryUsage(const MemStats expStats) const {
MemoryUsage act = store.getMemoryUsage();
@@ -123,7 +123,7 @@ struct ArrayStoreTest : public TestT
void assert_ref_reused(const EntryVector& first, const EntryVector& second, bool should_reuse) {
EntryRef ref1 = add(first);
remove(ref1);
- trimHoldLists();
+ reclaim_memory();
EntryRef ref2 = add(second);
EXPECT_EQ(should_reuse, (ref2 == ref1));
assertGet(ref2, second);
@@ -136,9 +136,9 @@ struct ArrayStoreTest : public TestT
}
return EntryRef();
}
- void trimHoldLists() {
- store.transferHoldLists(generation++);
- store.trimHoldLists(generation);
+ void reclaim_memory() {
+ store.assign_generation(generation++);
+ store.reclaim_memory(generation);
}
void compactWorst(bool compactMemory, bool compactAddressSpace) {
CompactionSpec compaction_spec(compactMemory, compactAddressSpace);
@@ -205,10 +205,10 @@ VESPA_GTEST_INSTANTIATE_TEST_SUITE_P(NumberStoreFreeListsDisabledMultiTest,
TEST_P(NumberStoreTest, control_static_sizes) {
#ifdef _LIBCPP_VERSION
- EXPECT_EQ(464u, sizeof(store));
+ EXPECT_EQ(472u, sizeof(store));
EXPECT_EQ(304u, sizeof(NumberStoreTest::ArrayStoreType::DataStoreType));
#else
- EXPECT_EQ(496u, sizeof(store));
+ EXPECT_EQ(504u, sizeof(store));
EXPECT_EQ(336u, sizeof(NumberStoreTest::ArrayStoreType::DataStoreType));
#endif
EXPECT_EQ(112u, sizeof(NumberStoreTest::ArrayStoreType::SmallBufferType));
@@ -280,13 +280,13 @@ TEST_P(NumberStoreFreeListsDisabledTest, large_arrays_are_NOT_allocated_from_fre
TEST_P(NumberStoreTest, track_size_of_large_array_allocations_with_free_lists_enabled) {
EntryRef ref = add({1,2,3,4});
- assert_buffer_stats(ref, BufferStats().used(2).hold(0).dead(1).extra_used(16));
+ assert_buffer_stats(ref, TestBufferStats().used(2).hold(0).dead(1).extra_used(16));
remove({1,2,3,4});
- assert_buffer_stats(ref, BufferStats().used(2).hold(1).dead(1).extra_hold(16).extra_used(16));
- trimHoldLists();
- assert_buffer_stats(ref, BufferStats().used(2).hold(0).dead(2).extra_used(0));
+ assert_buffer_stats(ref, TestBufferStats().used(2).hold(1).dead(1).extra_hold(16).extra_used(16));
+ reclaim_memory();
+ assert_buffer_stats(ref, TestBufferStats().used(2).hold(0).dead(2).extra_used(0));
add({5,6,7,8,9});
- assert_buffer_stats(ref, BufferStats().used(2).hold(0).dead(1).extra_used(20));
+ assert_buffer_stats(ref, TestBufferStats().used(2).hold(0).dead(1).extra_used(20));
}
TEST_F(SmallOffsetNumberStoreTest, new_underlying_buffer_is_allocated_when_current_is_full)
@@ -316,7 +316,7 @@ test_compaction(NumberStoreBasicTest &f)
EntryRef size2Ref = f.add({2,2});
EntryRef size3Ref = f.add({3,3,3});
f.remove(f.add({5,5}));
- f.trimHoldLists();
+ f.reclaim_memory();
f.assertBufferState(size1Ref, MemStats().used(1).dead(0));
f.assertBufferState(size2Ref, MemStats().used(4).dead(2));
f.assertBufferState(size3Ref, MemStats().used(2).dead(1)); // Note: First element is reserved
@@ -335,7 +335,7 @@ test_compaction(NumberStoreBasicTest &f)
EXPECT_NE(size2BufferId, f.getBufferId(f.getEntryRef({2,2})));
f.assertGet(size2Ref, {2,2}); // Old ref should still point to data.
EXPECT_TRUE(f.store.bufferState(size2Ref).isOnHold());
- f.trimHoldLists();
+ f.reclaim_memory();
EXPECT_TRUE(f.store.bufferState(size2Ref).isFree());
}
@@ -360,7 +360,7 @@ void testCompaction(NumberStoreTest &f, bool compactMemory, bool compactAddressS
f.remove(f.add({5,5,5}));
f.remove(f.add({6}));
f.remove(f.add({7}));
- f.trimHoldLists();
+ f.reclaim_memory();
f.assertBufferState(size1Ref, MemStats().used(3).dead(2));
f.assertBufferState(size2Ref, MemStats().used(2).dead(0));
f.assertBufferState(size3Ref, MemStats().used(6).dead(3));
@@ -397,7 +397,7 @@ void testCompaction(NumberStoreTest &f, bool compactMemory, bool compactAddressS
EXPECT_FALSE(f.store.bufferState(size1Ref).isOnHold());
}
EXPECT_FALSE(f.store.bufferState(size2Ref).isOnHold());
- f.trimHoldLists();
+ f.reclaim_memory();
if (compactMemory) {
EXPECT_TRUE(f.store.bufferState(size3Ref).isFree());
} else {
@@ -436,7 +436,7 @@ TEST_P(NumberStoreTest, used_onHold_and_dead_memory_usage_is_tracked_for_small_a
assertMemoryUsage(exp.used(entrySize() * 3));
remove({1,2,3});
assertMemoryUsage(exp.hold(entrySize() * 3));
- trimHoldLists();
+ reclaim_memory();
assertMemoryUsage(exp.holdToDead(entrySize() * 3));
}
@@ -447,7 +447,7 @@ TEST_P(NumberStoreTest, used_onHold_and_dead_memory_usage_is_tracked_for_large_a
assertMemoryUsage(exp.used(largeArraySize() + entrySize() * 4));
remove({1,2,3,4});
assertMemoryUsage(exp.hold(largeArraySize() + entrySize() * 4));
- trimHoldLists();
+ reclaim_memory();
assertMemoryUsage(exp.decUsed(entrySize() * 4).decHold(largeArraySize() + entrySize() * 4).
dead(largeArraySize()));
}
diff --git a/vespalib/src/tests/datastore/buffer_stats/CMakeLists.txt b/vespalib/src/tests/datastore/buffer_stats/CMakeLists.txt
new file mode 100644
index 00000000000..2463f584133
--- /dev/null
+++ b/vespalib/src/tests/datastore/buffer_stats/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_datastore_buffer_stats_test_app TEST
+ SOURCES
+ buffer_stats_test.cpp
+ DEPENDS
+ vespalib
+ GTest::GTest
+)
+vespa_add_test(NAME vespalib_datastore_buffer_stats_test_app COMMAND vespalib_datastore_buffer_stats_test_app)
diff --git a/vespalib/src/tests/datastore/buffer_stats/buffer_stats_test.cpp b/vespalib/src/tests/datastore/buffer_stats/buffer_stats_test.cpp
new file mode 100644
index 00000000000..09b2590a5f3
--- /dev/null
+++ b/vespalib/src/tests/datastore/buffer_stats/buffer_stats_test.cpp
@@ -0,0 +1,34 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/datastore/buffer_stats.h>
+#include <vespa/vespalib/datastore/memory_stats.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib::datastore;
+
+TEST(BufferStatsTest, buffer_stats_to_memory_stats)
+{
+ InternalBufferStats buf;
+ buf.set_alloc_elems(17);
+ buf.pushed_back(7);
+ buf.set_dead_elems(5);
+ buf.set_hold_elems(3);
+ buf.inc_extra_used_bytes(13);
+ buf.inc_extra_hold_bytes(11);
+
+ MemoryStats mem;
+ constexpr size_t es = 8;
+ buf.add_to_mem_stats(es, mem);
+
+ EXPECT_EQ(17, mem._allocElems);
+ EXPECT_EQ(7, mem._usedElems);
+ EXPECT_EQ(5, mem._deadElems);
+ EXPECT_EQ(3, mem._holdElems);
+ EXPECT_EQ(17 * es + 13, mem._allocBytes);
+ EXPECT_EQ(7 * es + 13, mem._usedBytes);
+ EXPECT_EQ(5 * es, mem._deadBytes);
+ EXPECT_EQ(3 * es + 11, mem._holdBytes);
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
+
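The assertions in this new test pin down how element counts fold into byte counts (es is the element size passed to add_to_mem_stats(); the relations below are inferred from the expectations above):

    allocBytes = alloc_elems * es + extra_used_bytes   // 17 * 8 + 13
    usedBytes  = used_elems  * es + extra_used_bytes   //  7 * 8 + 13
    deadBytes  = dead_elems  * es                      //  5 * 8
    holdBytes  = hold_elems  * es + extra_hold_bytes   //  3 * 8 + 11
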
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
index 9522aa1e0dc..645871d3ef6 100644
--- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -17,7 +17,6 @@ using vespalib::alloc::MemoryAllocator;
class MyStore : public DataStore<int, EntryRefT<3, 2> > {
private:
using ParentType = DataStore<int, EntryRefT<3, 2> >;
- using ParentType::_primary_buffer_ids;
public:
MyStore() {}
explicit MyStore(std::unique_ptr<BufferType<int>> type)
@@ -29,14 +28,11 @@ public:
void holdElem(EntryRef ref, uint64_t len) {
ParentType::holdElem(ref, len);
}
- void transferHoldLists(generation_t generation) {
- ParentType::transferHoldLists(generation);
+ void assign_generation(generation_t current_gen) {
+ ParentType::assign_generation(current_gen);
}
- void trimElemHoldList(generation_t usedGen) override {
- ParentType::trimElemHoldList(usedGen);
- }
- void incDead(EntryRef ref, uint64_t dead) {
- ParentType::incDead(ref, dead);
+ void reclaim_entry_refs(generation_t oldest_used_gen) override {
+ ParentType::reclaim_entry_refs(oldest_used_gen);
}
void ensureBufferCapacity(size_t sizeNeeded) {
ParentType::ensureBufferCapacity(0, sizeNeeded);
@@ -47,7 +43,7 @@ public:
void switch_primary_buffer() {
ParentType::switch_primary_buffer(0, 0u);
}
- size_t primary_buffer_id() const { return _primary_buffer_ids[0]; }
+ size_t primary_buffer_id() const { return get_primary_buffer_id(0); }
BufferState& get_active_buffer_state() {
return ParentType::getBufferState(primary_buffer_id());
}
@@ -55,7 +51,7 @@ public:
using GrowthStats = std::vector<int>;
-using BufferStats = std::vector<int>;
+using BufferIds = std::vector<int>;
constexpr float ALLOC_GROW_FACTOR = 0.4;
constexpr size_t HUGE_PAGE_ARRAY_SIZE = (MemoryAllocator::HUGEPAGE_SIZE / sizeof(int));
@@ -124,8 +120,8 @@ public:
++i;
}
}
- BufferStats getBuffers(size_t bufs) {
- BufferStats buffers;
+ BufferIds getBuffers(size_t bufs) {
+ BufferIds buffers;
while (buffers.size() < bufs) {
RefType iRef = (_type.getArraySize() == 1) ?
(_store.template allocator<DataType>(_typeId).alloc().ref) :
@@ -143,8 +139,8 @@ public:
using MyRef = MyStore::RefType;
void
-assertMemStats(const DataStoreBase::MemStats &exp,
- const DataStoreBase::MemStats &act)
+assertMemStats(const MemoryStats &exp,
+ const MemoryStats &act)
{
EXPECT_EQ(exp._allocElems, act._allocElems);
EXPECT_EQ(exp._usedElems, act._usedElems);
@@ -265,29 +261,29 @@ TEST(DataStoreTest, require_that_we_can_hold_and_trim_buffers)
s.switch_primary_buffer();
EXPECT_EQ(1u, s.primary_buffer_id());
s.holdBuffer(0); // hold last buffer
- s.transferHoldLists(10);
+ s.assign_generation(10);
EXPECT_EQ(1u, MyRef(s.addEntry(2)).bufferId());
s.switch_primary_buffer();
EXPECT_EQ(2u, s.primary_buffer_id());
s.holdBuffer(1); // hold last buffer
- s.transferHoldLists(20);
+ s.assign_generation(20);
EXPECT_EQ(2u, MyRef(s.addEntry(3)).bufferId());
s.switch_primary_buffer();
EXPECT_EQ(3u, s.primary_buffer_id());
s.holdBuffer(2); // hold last buffer
- s.transferHoldLists(30);
+ s.assign_generation(30);
EXPECT_EQ(3u, MyRef(s.addEntry(4)).bufferId());
s.holdBuffer(3); // hold current buffer
- s.transferHoldLists(40);
+ s.assign_generation(40);
EXPECT_TRUE(s.getBufferState(0).size() != 0);
EXPECT_TRUE(s.getBufferState(1).size() != 0);
EXPECT_TRUE(s.getBufferState(2).size() != 0);
EXPECT_TRUE(s.getBufferState(3).size() != 0);
- s.trimHoldLists(11);
+ s.reclaim_memory(11);
EXPECT_TRUE(s.getBufferState(0).size() == 0);
EXPECT_TRUE(s.getBufferState(1).size() != 0);
EXPECT_TRUE(s.getBufferState(2).size() != 0);
@@ -296,7 +292,7 @@ TEST(DataStoreTest, require_that_we_can_hold_and_trim_buffers)
s.switch_primary_buffer();
EXPECT_EQ(0u, s.primary_buffer_id());
EXPECT_EQ(0u, MyRef(s.addEntry(5)).bufferId());
- s.trimHoldLists(41);
+ s.reclaim_memory(41);
EXPECT_TRUE(s.getBufferState(0).size() != 0);
EXPECT_TRUE(s.getBufferState(1).size() == 0);
EXPECT_TRUE(s.getBufferState(2).size() == 0);
@@ -308,21 +304,21 @@ TEST(DataStoreTest, require_that_we_can_hold_and_trim_elements)
MyStore s;
MyRef r1 = s.addEntry(1);
s.holdElem(r1, 1);
- s.transferHoldLists(10);
+ s.assign_generation(10);
MyRef r2 = s.addEntry(2);
s.holdElem(r2, 1);
- s.transferHoldLists(20);
+ s.assign_generation(20);
MyRef r3 = s.addEntry(3);
s.holdElem(r3, 1);
- s.transferHoldLists(30);
+ s.assign_generation(30);
EXPECT_EQ(1, s.getEntry(r1));
EXPECT_EQ(2, s.getEntry(r2));
EXPECT_EQ(3, s.getEntry(r3));
- s.trimElemHoldList(11);
+ s.reclaim_entry_refs(11);
EXPECT_EQ(0, s.getEntry(r1));
EXPECT_EQ(2, s.getEntry(r2));
EXPECT_EQ(3, s.getEntry(r3));
- s.trimElemHoldList(31);
+ s.reclaim_entry_refs(31);
EXPECT_EQ(0, s.getEntry(r1));
EXPECT_EQ(0, s.getEntry(r2));
EXPECT_EQ(0, s.getEntry(r3));
@@ -362,17 +358,17 @@ TEST(DataStoreTest, require_that_we_can_use_free_lists)
s.enableFreeLists();
auto r1 = s.addEntry(1);
s.holdElem(r1, 1);
- s.transferHoldLists(10);
+ s.assign_generation(10);
auto r2 = s.addEntry(2);
expect_successive_refs(r1, r2);
s.holdElem(r2, 1);
- s.transferHoldLists(20);
- s.trimElemHoldList(11);
+ s.assign_generation(20);
+ s.reclaim_entry_refs(11);
auto r3 = s.addEntry(3); // reuse r1
EXPECT_EQ(r1, r3);
auto r4 = s.addEntry(4);
expect_successive_refs(r2, r4);
- s.trimElemHoldList(21);
+ s.reclaim_entry_refs(21);
auto r5 = s.addEntry(5); // reuse r2
EXPECT_EQ(r2, r5);
auto r6 = s.addEntry(6);
@@ -397,8 +393,8 @@ TEST(DataStoreTest, require_that_we_can_use_free_lists_with_raw_allocator)
expect_successive_handles(h1, h2);
s.holdElem(h1.ref, 3);
s.holdElem(h2.ref, 3);
- s.transferHoldLists(10);
- s.trimElemHoldList(11);
+ s.assign_generation(10);
+ s.reclaim_entry_refs(11);
auto h3 = allocator.alloc(3); // reuse h2.ref from free list
EXPECT_EQ(h2, h3);
@@ -414,7 +410,7 @@ TEST(DataStoreTest, require_that_we_can_use_free_lists_with_raw_allocator)
TEST(DataStoreTest, require_that_memory_stats_are_calculated)
{
MyStore s;
- DataStoreBase::MemStats m;
+ MemoryStats m;
m._allocElems = MyRef::offsetSize();
m._usedElems = 1; // ref = 0 is reserved
m._deadElems = 1; // ref = 0 is reserved
@@ -429,16 +425,11 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
m._usedElems++;
assertMemStats(m, s.getMemStats());
- // inc dead
- s.incDead(r, 1);
- m._deadElems++;
- assertMemStats(m, s.getMemStats());
-
// hold buffer
s.addEntry(20);
s.addEntry(30);
s.holdBuffer(r.bufferId());
- s.transferHoldLists(100);
+ s.assign_generation(100);
m._usedElems += 2;
m._holdElems = m._usedElems;
m._deadElems = 0;
@@ -455,7 +446,7 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
m._freeBuffers--;
// trim hold buffer
- s.trimHoldLists(101);
+ s.reclaim_memory(101);
m._allocElems -= MyRef::offsetSize();
m._usedElems = 1;
m._deadElems = 0;
@@ -466,7 +457,7 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
{ // increase extra used bytes
auto prev_stats = s.getMemStats();
- s.get_active_buffer_state().incExtraUsedBytes(50);
+ s.get_active_buffer_state().stats().inc_extra_used_bytes(50);
auto curr_stats = s.getMemStats();
EXPECT_EQ(prev_stats._allocBytes + 50, curr_stats._allocBytes);
EXPECT_EQ(prev_stats._usedBytes + 50, curr_stats._usedBytes);
@@ -474,7 +465,7 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
{ // increase extra hold bytes
auto prev_stats = s.getMemStats();
- s.get_active_buffer_state().incExtraHoldBytes(30);
+ s.get_active_buffer_state().hold_elems(0, 30);
auto curr_stats = s.getMemStats();
EXPECT_EQ(prev_stats._holdBytes + 30, curr_stats._holdBytes);
}
@@ -487,15 +478,14 @@ TEST(DataStoreTest, require_that_memory_usage_is_calculated)
s.addEntry(20);
s.addEntry(30);
s.addEntry(40);
- s.incDead(r, 1);
s.holdBuffer(r.bufferId());
- s.transferHoldLists(100);
+ s.assign_generation(100);
vespalib::MemoryUsage m = s.getMemoryUsage();
EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
EXPECT_EQ(5 * sizeof(int), m.usedBytes());
EXPECT_EQ(0 * sizeof(int), m.deadBytes());
EXPECT_EQ(5 * sizeof(int), m.allocatedBytesOnHold());
- s.trimHoldLists(101);
+ s.reclaim_memory(101);
}
TEST(DataStoreTest, require_that_we_can_disable_elemement_hold_list)
@@ -523,8 +513,8 @@ TEST(DataStoreTest, require_that_we_can_disable_elemement_hold_list)
EXPECT_EQ(4 * sizeof(int), m.usedBytes());
EXPECT_EQ(2 * sizeof(int), m.deadBytes());
EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
- s.transferHoldLists(100);
- s.trimHoldLists(101);
+ s.assign_generation(100);
+ s.reclaim_memory(101);
}
using IntGrowStore = GrowStore<int, EntryRefT<24>>;
@@ -644,9 +634,9 @@ TEST(DataStoreTest, can_set_memory_allocator)
s.switch_primary_buffer();
EXPECT_EQ(AllocStats(3, 0), stats);
s.holdBuffer(0);
- s.transferHoldLists(10);
+ s.assign_generation(10);
EXPECT_EQ(AllocStats(3, 0), stats);
- s.trimHoldLists(11);
+ s.reclaim_memory(11);
EXPECT_EQ(AllocStats(3, 2), stats);
}
EXPECT_EQ(AllocStats(3, 3), stats);
@@ -655,7 +645,7 @@ TEST(DataStoreTest, can_set_memory_allocator)
namespace {
void
-assertBuffers(BufferStats exp_buffers, size_t num_arrays_for_new_buffer)
+assertBuffers(BufferIds exp_buffers, size_t num_arrays_for_new_buffer)
{
EXPECT_EQ(exp_buffers, IntGrowStore(1, 1, 1024, num_arrays_for_new_buffer).getBuffers(exp_buffers.size()));
}
@@ -680,7 +670,7 @@ TEST(DataStoreTest, control_static_sizes) {
namespace {
-void test_free_element_to_held_buffer(bool direct, bool before_hold_buffer)
+void test_free_element_to_held_buffer(bool before_hold_buffer)
{
MyStore s;
auto ref = s.addEntry(1);
@@ -689,45 +679,27 @@ void test_free_element_to_held_buffer(bool direct, bool before_hold_buffer)
EXPECT_EQ(1u, s.primary_buffer_id());
if (before_hold_buffer) {
- if (direct) {
- s.freeElem(ref, 1);
- } else {
- s.holdElem(ref, 1);
- }
+ s.holdElem(ref, 1);
}
s.holdBuffer(0); // hold last buffer
if (!before_hold_buffer) {
- if (direct) {
- ASSERT_DEATH({ s.freeElem(ref, 1); }, "state.isOnHold\\(\\) && was_held");
- } else {
- ASSERT_DEATH({ s.holdElem(ref, 1); }, "state.isActive\\(\\)");
- }
+ ASSERT_DEATH({ s.holdElem(ref, 1); }, "isActive\\(\\)");
}
- s.transferHoldLists(100);
- s.trimHoldLists(101);
+ s.assign_generation(100);
+ s.reclaim_memory(101);
}
}
-TEST(DataStoreTest, free_to_active_then_held_buffer_is_ok)
-{
- test_free_element_to_held_buffer(true, true);
-}
-
TEST(DataStoreTest, hold_to_active_then_held_buffer_is_ok)
{
- test_free_element_to_held_buffer(false, true);
+ test_free_element_to_held_buffer(true);
}
#ifndef NDEBUG
-TEST(DataStoreDeathTest, free_to_held_buffer_is_not_ok)
-{
- test_free_element_to_held_buffer(true, false);
-}
-
TEST(DataStoreDeathTest, hold_to_held_buffer_is_not_ok)
{
- test_free_element_to_held_buffer(false, false);
+ test_free_element_to_held_buffer(false);
}
#endif
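
The reworked datastore test separates buffer-level reclamation (reclaim_memory()) from entry-level reclamation (reclaim_entry_refs()). A sketch of the entry-level cycle condensed from the hunks above, using the MyStore wrapper defined in this diff:

    MyStore s;
    auto ref = s.addEntry(1);
    s.holdElem(ref, 1);       // logically removed, but kept alive for concurrent readers
    s.assign_generation(10);  // held entry tagged with generation 10
    s.reclaim_entry_refs(11); // oldest used generation 11 > 10: the entry is reclaimed
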
diff --git a/vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp b/vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp
index 599cb209e6c..4f4c3ac94eb 100644
--- a/vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp
+++ b/vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp
@@ -88,13 +88,13 @@ DataStoreFixedSizeHashTest::~DataStoreFixedSizeHashTest()
void
DataStoreFixedSizeHashTest::commit()
{
- _store.transferHoldLists(_generation_handler.getCurrentGeneration());
- _hash_map->transfer_hold_lists(_generation_handler.getCurrentGeneration());
- _generation_holder.transferHoldLists(_generation_handler.getCurrentGeneration());
+ _store.assign_generation(_generation_handler.getCurrentGeneration());
+ _hash_map->assign_generation(_generation_handler.getCurrentGeneration());
+ _generation_holder.assign_generation(_generation_handler.getCurrentGeneration());
_generation_handler.incGeneration();
- _store.trimHoldLists(_generation_handler.getFirstUsedGeneration());
- _hash_map->trim_hold_lists(_generation_handler.getFirstUsedGeneration());
- _generation_holder.trimHoldLists(_generation_handler.getFirstUsedGeneration());
+ _store.reclaim_memory(_generation_handler.get_oldest_used_generation());
+ _hash_map->reclaim_memory(_generation_handler.get_oldest_used_generation());
+ _generation_holder.reclaim(_generation_handler.get_oldest_used_generation());
}
size_t
diff --git a/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp b/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp
index 13f9ae251b6..4c3fe1756c5 100644
--- a/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp
+++ b/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp
@@ -73,8 +73,8 @@ public:
}
~MyCompactable() override = default;
- EntryRef move(EntryRef ref) override {
- auto new_ref = _allocator.move(ref);
+ EntryRef move_on_compact(EntryRef ref) override {
+ auto new_ref = _allocator.move_on_compact(ref);
_allocator.hold(ref);
_new_refs.emplace_back(new_ref);
return new_ref;
@@ -168,11 +168,11 @@ DataStoreShardedHashTest::~DataStoreShardedHashTest()
void
DataStoreShardedHashTest::commit()
{
- _store.transferHoldLists(_generationHandler.getCurrentGeneration());
- _hash_map.transfer_hold_lists(_generationHandler.getCurrentGeneration());
+ _store.assign_generation(_generationHandler.getCurrentGeneration());
+ _hash_map.assign_generation(_generationHandler.getCurrentGeneration());
_generationHandler.incGeneration();
- _store.trimHoldLists(_generationHandler.getFirstUsedGeneration());
- _hash_map.trim_hold_lists(_generationHandler.getFirstUsedGeneration());
+ _store.reclaim_memory(_generationHandler.get_oldest_used_generation());
+ _hash_map.reclaim_memory(_generationHandler.get_oldest_used_generation());
}
void
@@ -395,7 +395,7 @@ TEST_F(DataStoreShardedHashTest, foreach_key_works)
}
}
-TEST_F(DataStoreShardedHashTest, move_keys_works)
+TEST_F(DataStoreShardedHashTest, move_keys_on_compact_works)
{
populate_sample_data(small_population);
std::vector<EntryRef> refs;
@@ -403,7 +403,7 @@ TEST_F(DataStoreShardedHashTest, move_keys_works)
std::vector<EntryRef> new_refs;
MyCompactable my_compactable(_allocator, new_refs);
auto filter = make_entry_ref_filter<RefT>(false);
- _hash_map.move_keys(my_compactable, filter);
+ _hash_map.move_keys_on_compact(my_compactable, filter);
std::vector<EntryRef> verify_new_refs;
_hash_map.foreach_key([&verify_new_refs](EntryRef ref) { verify_new_refs.emplace_back(ref); });
EXPECT_EQ(small_population, refs.size());
diff --git a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
index 92bd5502406..48a0ecafbc6 100644
--- a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
@@ -21,10 +21,10 @@ enum class DictionaryType { BTREE, HASH, BTREE_AND_HASH };
using namespace vespalib::datastore;
using vespalib::ArrayRef;
using generation_t = vespalib::GenerationHandler::generation_t;
-using vespalib::datastore::test::BufferStats;
using vespalib::alloc::MemoryAllocator;
using vespalib::alloc::test::MemoryAllocatorObserver;
using AllocStats = MemoryAllocatorObserver::Stats;
+using TestBufferStats = vespalib::datastore::test::BufferStats;
template <typename UniqueStoreT>
struct TestBaseValues {
@@ -94,10 +94,10 @@ struct TestBase : public ::testing::Test {
uint32_t getBufferId(EntryRef ref) const {
return EntryRefType(ref).bufferId();
}
- void assertBufferState(EntryRef ref, const BufferStats expStats) const {
+ void assertBufferState(EntryRef ref, const TestBufferStats expStats) const {
EXPECT_EQ(expStats._used, store.bufferState(ref).size());
- EXPECT_EQ(expStats._hold, store.bufferState(ref).getHoldElems());
- EXPECT_EQ(expStats._dead, store.bufferState(ref).getDeadElems());
+ EXPECT_EQ(expStats._hold, store.bufferState(ref).stats().hold_elems());
+ EXPECT_EQ(expStats._dead, store.bufferState(ref).stats().dead_elems());
}
void assertStoreContent() const {
for (const auto &elem : refStore) {
@@ -112,15 +112,15 @@ struct TestBase : public ::testing::Test {
}
return EntryRef();
}
- void trimHoldLists() {
+ void reclaim_memory() {
store.freeze();
- store.transferHoldLists(generation++);
- store.trimHoldLists(generation);
+ store.assign_generation(generation++);
+ store.reclaim_memory(generation);
}
void compactWorst() {
CompactionSpec compaction_spec(true, true);
// Use a compaction strategy that will compact all active buffers
- CompactionStrategy compaction_strategy(0.0, 0.0, EntryRefType::numBuffers(), 1.0);
+ auto compaction_strategy = CompactionStrategy::make_compact_all_active_buffers_strategy();
auto remapper = store.compact_worst(compaction_spec, compaction_strategy);
std::vector<AtomicEntryRef> refs;
for (const auto &elem : refStore) {
@@ -320,9 +320,9 @@ TYPED_TEST(TestBase, elements_are_put_on_hold_when_value_is_removed)
EntryRef ref = this->add(this->values()[0]);
size_t reserved = this->get_reserved(ref);
size_t array_size = this->get_array_size(ref);
- this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(0).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(0).dead(reserved));
this->store.remove(ref);
- this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(array_size).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(array_size).dead(reserved));
}
TYPED_TEST(TestBase, elements_are_reference_counted)
@@ -333,11 +333,11 @@ TYPED_TEST(TestBase, elements_are_reference_counted)
// Note: The first buffer have the first element reserved -> we expect 2 elements used here.
size_t reserved = this->get_reserved(ref);
size_t array_size = this->get_array_size(ref);
- this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(0).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(0).dead(reserved));
this->store.remove(ref);
- this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(0).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(0).dead(reserved));
this->store.remove(ref);
- this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(array_size).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(array_size).dead(reserved));
}
TEST_F(SmallOffsetNumberTest, new_underlying_buffer_is_allocated_when_current_is_full)
@@ -364,10 +364,10 @@ TYPED_TEST(TestBase, store_can_be_compacted)
EntryRef val0Ref = this->add(this->values()[0]);
EntryRef val1Ref = this->add(this->values()[1]);
this->remove(this->add(this->values()[2]));
- this->trimHoldLists();
+ this->reclaim_memory();
size_t reserved = this->get_reserved(val0Ref);
size_t array_size = this->get_array_size(val0Ref);
- this->assertBufferState(val0Ref, BufferStats().used(reserved + 3 * array_size).dead(reserved + array_size));
+ this->assertBufferState(val0Ref, TestBufferStats().used(reserved + 3 * array_size).dead(reserved + array_size));
uint32_t val1BufferId = this->getBufferId(val0Ref);
EXPECT_EQ(2u, this->refStore.size());
@@ -381,7 +381,7 @@ TYPED_TEST(TestBase, store_can_be_compacted)
this->assertGet(val0Ref, this->values()[0]);
this->assertGet(val1Ref, this->values()[1]);
EXPECT_TRUE(this->store.bufferState(val0Ref).isOnHold());
- this->trimHoldLists();
+ this->reclaim_memory();
EXPECT_TRUE(this->store.bufferState(val0Ref).isFree());
this->assertStoreContent();
}
@@ -396,7 +396,7 @@ TYPED_TEST(TestBase, store_can_be_instantiated_with_builder)
EntryRef val1Ref = builder.mapEnumValueToEntryRef(2);
size_t reserved = this->get_reserved(val0Ref);
size_t array_size = this->get_array_size(val0Ref);
- this->assertBufferState(val0Ref, BufferStats().used(2 * array_size + reserved).dead(reserved)); // Note: First element is reserved
+ this->assertBufferState(val0Ref, TestBufferStats().used(2 * array_size + reserved).dead(reserved)); // Note: First element is reserved
EXPECT_TRUE(val0Ref.valid());
EXPECT_TRUE(val1Ref.valid());
EXPECT_NE(val0Ref.ref(), val1Ref.ref());
@@ -415,7 +415,7 @@ TYPED_TEST(TestBase, store_can_be_enumerated)
EntryRef val0Ref = this->add(this->values()[0]);
EntryRef val1Ref = this->add(this->values()[1]);
this->remove(this->add(this->values()[2]));
- this->trimHoldLists();
+ this->reclaim_memory();
auto enumerator = this->getEnumerator(true);
std::vector<uint32_t> refs;
@@ -460,7 +460,7 @@ TEST_F(DoubleTest, nan_is_handled)
for (auto &value : myvalues) {
refs.emplace_back(add(value));
}
- trimHoldLists();
+ reclaim_memory();
EXPECT_TRUE(std::isnan(store.get(refs[1])));
EXPECT_TRUE(std::signbit(store.get(refs[1])));
EXPECT_TRUE(std::isinf(store.get(refs[2])));
diff --git a/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp b/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp
index d0fede5c550..496bc814d0d 100644
--- a/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp
@@ -62,9 +62,9 @@ struct UniqueStoreDictionaryTest : public ::testing::Test {
}
void inc_generation() {
dict.freeze();
- dict.transfer_hold_lists(gen_handler.getCurrentGeneration());
+ dict.assign_generation(gen_handler.getCurrentGeneration());
gen_handler.incGeneration();
- dict.trim_hold_lists(gen_handler.getFirstUsedGeneration());
+ dict.reclaim_memory(gen_handler.get_oldest_used_generation());
}
void take_snapshot() {
dict.freeze();
diff --git a/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp b/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp
index 777da0c2b16..e865239787b 100644
--- a/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp
@@ -11,7 +11,7 @@
using namespace vespalib::datastore;
using vespalib::MemoryUsage;
using generation_t = vespalib::GenerationHandler::generation_t;
-using BufferStats = vespalib::datastore::test::BufferStats;
+using TestBufferStats = vespalib::datastore::test::BufferStats;
using vespalib::alloc::MemoryAllocator;
using vespalib::alloc::test::MemoryAllocatorObserver;
using AllocStats = MemoryAllocatorObserver::Stats;
@@ -51,8 +51,8 @@ struct TestBase : public ::testing::Test {
void remove(EntryRef ref) {
allocator.hold(ref);
}
- EntryRef move(EntryRef ref) {
- return allocator.move(ref);
+ EntryRef move_on_compact(EntryRef ref) {
+ return allocator.move_on_compact(ref);
}
uint32_t get_buffer_id(EntryRef ref) const {
return EntryRefType(ref).bufferId();
@@ -60,16 +60,16 @@ struct TestBase : public ::testing::Test {
const BufferState &buffer_state(EntryRef ref) const {
return allocator.get_data_store().getBufferState(get_buffer_id(ref));
}
- void assert_buffer_state(EntryRef ref, const BufferStats expStats) const {
+ void assert_buffer_state(EntryRef ref, const TestBufferStats expStats) const {
EXPECT_EQ(expStats._used, buffer_state(ref).size());
- EXPECT_EQ(expStats._hold, buffer_state(ref).getHoldElems());
- EXPECT_EQ(expStats._dead, buffer_state(ref).getDeadElems());
- EXPECT_EQ(expStats._extra_used, buffer_state(ref).getExtraUsedBytes());
- EXPECT_EQ(expStats._extra_hold, buffer_state(ref).getExtraHoldBytes());
+ EXPECT_EQ(expStats._hold, buffer_state(ref).stats().hold_elems());
+ EXPECT_EQ(expStats._dead, buffer_state(ref).stats().dead_elems());
+ EXPECT_EQ(expStats._extra_used, buffer_state(ref).stats().extra_used_bytes());
+ EXPECT_EQ(expStats._extra_hold, buffer_state(ref).stats().extra_hold_bytes());
}
- void trim_hold_lists() {
- allocator.get_data_store().transferHoldLists(generation++);
- allocator.get_data_store().trimHoldLists(generation);
+ void reclaim_memory() {
+ allocator.get_data_store().assign_generation(generation++);
+ allocator.get_data_store().reclaim_memory(generation);
}
};
@@ -86,32 +86,32 @@ TEST_F(StringTest, can_add_and_get_values)
TEST_F(StringTest, elements_are_put_on_hold_when_value_is_removed)
{
EntryRef ref = add(small.c_str());
- assert_buffer_state(ref, BufferStats().used(16).hold(0).dead(0));
+ assert_buffer_state(ref, TestBufferStats().used(16).hold(0).dead(0));
remove(ref);
- assert_buffer_state(ref, BufferStats().used(16).hold(16).dead(0));
- trim_hold_lists();
- assert_buffer_state(ref, BufferStats().used(16).hold(0).dead(16));
+ assert_buffer_state(ref, TestBufferStats().used(16).hold(16).dead(0));
+ reclaim_memory();
+ assert_buffer_state(ref, TestBufferStats().used(16).hold(0).dead(16));
}
TEST_F(StringTest, extra_bytes_used_is_tracked)
{
EntryRef ref = add(spaces1000.c_str());
// Note: The first buffer has the first element reserved -> we expect 2 elements used here.
- assert_buffer_state(ref, BufferStats().used(2).hold(0).dead(1).extra_used(1001));
+ assert_buffer_state(ref, TestBufferStats().used(2).hold(0).dead(1).extra_used(1001));
remove(ref);
- assert_buffer_state(ref, BufferStats().used(2).hold(1).dead(1).extra_used(1001).extra_hold(1001));
- trim_hold_lists();
- assert_buffer_state(ref, BufferStats().used(2).hold(0).dead(2));
+ assert_buffer_state(ref, TestBufferStats().used(2).hold(1).dead(1).extra_used(1001).extra_hold(1001));
+ reclaim_memory();
+ assert_buffer_state(ref, TestBufferStats().used(2).hold(0).dead(2));
ref = add(spaces1000.c_str());
- assert_buffer_state(ref, BufferStats().used(2).hold(0).dead(1).extra_used(1001));
- EntryRef ref2 = move(ref);
+ assert_buffer_state(ref, TestBufferStats().used(2).hold(0).dead(1).extra_used(1001));
+ EntryRef ref2 = move_on_compact(ref);
assert_get(ref2, spaces1000.c_str());
- assert_buffer_state(ref, BufferStats().used(3).hold(0).dead(1).extra_used(2002));
+ assert_buffer_state(ref, TestBufferStats().used(3).hold(0).dead(1).extra_used(2002));
remove(ref);
remove(ref2);
- assert_buffer_state(ref, BufferStats().used(3).hold(2).dead(1).extra_used(2002).extra_hold(2002));
- trim_hold_lists();
- assert_buffer_state(ref, BufferStats().used(3).hold(0).dead(3));
+ assert_buffer_state(ref, TestBufferStats().used(3).hold(2).dead(1).extra_used(2002).extra_hold(2002));
+ reclaim_memory();
+ assert_buffer_state(ref, TestBufferStats().used(3).hold(0).dead(3));
}
TEST_F(StringTest, string_length_determines_buffer)
@@ -134,13 +134,13 @@ TEST_F(StringTest, free_list_is_used_when_enabled)
EntryRef ref2 = add(spaces1000.c_str());
remove(ref1);
remove(ref2);
- trim_hold_lists();
+ reclaim_memory();
EntryRef ref3 = add(small.c_str());
EntryRef ref4 = add(spaces1000.c_str());
EXPECT_EQ(ref1, ref3);
EXPECT_EQ(ref2, ref4);
- assert_buffer_state(ref1, BufferStats().used(16).hold(0).dead(0));
- assert_buffer_state(ref2, BufferStats().used(2).hold(0).dead(1).extra_used(1001));
+ assert_buffer_state(ref1, TestBufferStats().used(16).hold(0).dead(0));
+ assert_buffer_state(ref2, TestBufferStats().used(2).hold(0).dead(1).extra_used(1001));
}
TEST_F(StringTest, free_list_is_not_used_when_disabled)
@@ -150,16 +150,16 @@ TEST_F(StringTest, free_list_is_not_used_when_disabled)
EntryRef ref2 = add(spaces1000.c_str());
remove(ref1);
remove(ref2);
- trim_hold_lists();
+ reclaim_memory();
EntryRef ref3 = add(small.c_str());
EntryRef ref4 = add(spaces1000.c_str());
EXPECT_NE(ref1, ref3);
EXPECT_NE(ref2, ref4);
- assert_buffer_state(ref1, BufferStats().used(32).hold(0).dead(16));
- assert_buffer_state(ref2, BufferStats().used(3).hold(0).dead(2).extra_used(1001));
+ assert_buffer_state(ref1, TestBufferStats().used(32).hold(0).dead(16));
+ assert_buffer_state(ref2, TestBufferStats().used(3).hold(0).dead(2).extra_used(1001));
}
-TEST_F(StringTest, free_list_is_never_used_for_move)
+TEST_F(StringTest, free_list_is_never_used_for_move_on_compact)
{
// Free lists are enabled by default for UniqueStoreStringAllocator
EntryRef ref1 = add(small.c_str());
@@ -168,13 +168,13 @@ TEST_F(StringTest, free_list_is_never_used_for_move)
EntryRef ref4 = add(spaces1000.c_str());
remove(ref3);
remove(ref4);
- trim_hold_lists();
- EntryRef ref5 = move(ref1);
- EntryRef ref6 = move(ref2);
+ reclaim_memory();
+ EntryRef ref5 = move_on_compact(ref1);
+ EntryRef ref6 = move_on_compact(ref2);
EXPECT_NE(ref5, ref3);
EXPECT_NE(ref6, ref4);
- assert_buffer_state(ref1, BufferStats().used(48).hold(0).dead(16));
- assert_buffer_state(ref2, BufferStats().used(4).hold(0).dead(2).extra_used(2002));
+ assert_buffer_state(ref1, TestBufferStats().used(48).hold(0).dead(16));
+ assert_buffer_state(ref2, TestBufferStats().used(4).hold(0).dead(2).extra_used(2002));
}
TEST_F(StringTest, provided_memory_allocator_is_used)
diff --git a/vespalib/src/tests/util/generation_hold_list/CMakeLists.txt b/vespalib/src/tests/util/generation_hold_list/CMakeLists.txt
new file mode 100644
index 00000000000..c85b2537745
--- /dev/null
+++ b/vespalib/src/tests/util/generation_hold_list/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_generation_hold_list_test_app TEST
+ SOURCES
+ generation_hold_list_test.cpp
+ DEPENDS
+ vespalib
+ GTest::GTest
+)
+vespa_add_test(NAME vespalib_generation_hold_list_test_app COMMAND vespalib_generation_hold_list_test_app)
diff --git a/vespalib/src/tests/util/generation_hold_list/generation_hold_list_test.cpp b/vespalib/src/tests/util/generation_hold_list/generation_hold_list_test.cpp
new file mode 100644
index 00000000000..7e56e17990c
--- /dev/null
+++ b/vespalib/src/tests/util/generation_hold_list/generation_hold_list_test.cpp
@@ -0,0 +1,95 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/gtest/gtest.h>
+#include <vespa/vespalib/util/generation_hold_list.hpp>
+#include <vespa/vespalib/util/generationholder.h>
+#include <cstdint>
+
+using namespace vespalib;
+
+using MyElem = GenerationHeldBase;
+using generation_t = GenerationHandler::generation_t;
+
+TEST(GenerationHolderTest, holding_of_unique_ptr_elements_with_tracking_of_held_bytes)
+{
+ GenerationHolder h;
+ h.insert(std::make_unique<MyElem>(3));
+ h.assign_generation(0);
+ h.insert(std::make_unique<MyElem>(5));
+ h.assign_generation(1);
+ h.insert(std::make_unique<MyElem>(7));
+ h.assign_generation(2);
+ h.insert(std::make_unique<MyElem>(11));
+ h.assign_generation(4);
+ EXPECT_EQ(3 + 5 + 7 + 11, h.get_held_bytes());
+
+ h.reclaim(0);
+ EXPECT_EQ(3 + 5 + 7 + 11, h.get_held_bytes());
+ h.reclaim(1);
+ EXPECT_EQ(5 + 7 + 11, h.get_held_bytes());
+ h.reclaim(2);
+ EXPECT_EQ(7 + 11, h.get_held_bytes());
+
+ h.insert(std::make_unique<MyElem>(13));
+ h.assign_generation(6);
+ EXPECT_EQ(7 + 11 + 13, h.get_held_bytes());
+
+ h.reclaim(6);
+ EXPECT_EQ(13, h.get_held_bytes());
+ h.reclaim(7);
+ EXPECT_EQ(0, h.get_held_bytes());
+ h.reclaim(7);
+ EXPECT_EQ(0, h.get_held_bytes());
+}
+
+TEST(GenerationHolderTest, reclaim_all_clears_everything)
+{
+ GenerationHolder h;
+ h.insert(std::make_unique<MyElem>(3));
+ h.insert(std::make_unique<MyElem>(5));
+ h.assign_generation(1);
+ h.reclaim_all();
+ EXPECT_EQ(0, h.get_held_bytes());
+}
+
+using IntVector = std::vector<int32_t>;
+using IntHoldList = GenerationHoldList<int32_t, false, true>;
+
+struct IntHoldListTest : public testing::Test {
+ IntHoldList h;
+ IntHoldListTest() : h() {}
+ void assert_reclaim(const IntVector& exp, generation_t oldest_used_gen) {
+ IntVector act;
+ h.reclaim(oldest_used_gen, [&](int elem){ act.push_back(elem); });
+ EXPECT_EQ(exp, act);
+ }
+ void assert_reclaim_all(const IntVector& exp) {
+ IntVector act;
+ h.reclaim_all([&](int elem){ act.push_back(elem); });
+ EXPECT_EQ(exp, act);
+ }
+};
+
+TEST_F(IntHoldListTest, reclaim_calls_callback_for_reclaimed_elements)
+{
+ h.insert(3);
+ h.assign_generation(1);
+ h.insert(5);
+ h.insert(7);
+ h.assign_generation(2);
+
+ assert_reclaim({}, 1);
+ assert_reclaim({3}, 2);
+ assert_reclaim({5, 7}, 3);
+}
+
+TEST_F(IntHoldListTest, reclaim_all_calls_callback_for_all_elements)
+{
+ h.insert(3);
+ h.insert(5);
+ h.assign_generation(2);
+ assert_reclaim_all({3, 5});
+ assert_reclaim_all({});
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
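
Two reclaim flavours are exercised above: GenerationHolder owns its elements and simply frees them, while GenerationHoldList hands each reclaimed element to a callback so the caller decides how to recycle it. A compact sketch using the IntHoldList alias from the test above:

    IntHoldList h;
    h.insert(42);           // value stays readable for guards on older generations
    h.assign_generation(5); // tag the held value with the generation it was held in
    // once no reader can be at a generation < 6, the callback runs for the value:
    h.reclaim(6, [](int elem) { /* destroy or recycle elem */ });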
diff --git a/vespalib/src/tests/util/generation_holder/CMakeLists.txt b/vespalib/src/tests/util/generation_holder/CMakeLists.txt
deleted file mode 100644
index 8acf9fadaff..00000000000
--- a/vespalib/src/tests/util/generation_holder/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(vespalib_generation_holder_test_app TEST
- SOURCES
- generation_holder_test.cpp
- DEPENDS
- vespalib
- GTest::GTest
-)
-vespa_add_test(NAME vespalib_generation_holder_test_app COMMAND vespalib_generation_holder_test_app)
diff --git a/vespalib/src/tests/util/generation_holder/generation_holder_test.cpp b/vespalib/src/tests/util/generation_holder/generation_holder_test.cpp
deleted file mode 100644
index 97c3330ac9e..00000000000
--- a/vespalib/src/tests/util/generation_holder/generation_holder_test.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vespalib/gtest/gtest.h>
-#include <vespa/vespalib/util/generationholder.h>
-
-using vespalib::GenerationHolder;
-using MyHeld = vespalib::GenerationHeldBase;
-
-TEST(GenerationHolderTest, basic_tracking)
-{
- GenerationHolder gh;
- gh.hold(std::make_unique<MyHeld>(sizeof(int32_t)));
- gh.transferHoldLists(0);
- gh.hold(std::make_unique<MyHeld>(sizeof(int32_t)));
- gh.transferHoldLists(1);
- gh.hold(std::make_unique<MyHeld>(sizeof(int32_t)));
- gh.transferHoldLists(2);
- gh.hold(std::make_unique<MyHeld>(sizeof(int32_t)));
- gh.transferHoldLists(4);
- EXPECT_EQ(4u * sizeof(int32_t), gh.getHeldBytes());
- gh.trimHoldLists(0);
- EXPECT_EQ(4u * sizeof(int32_t), gh.getHeldBytes());
- gh.trimHoldLists(1);
- EXPECT_EQ(3u * sizeof(int32_t), gh.getHeldBytes());
- gh.trimHoldLists(2);
- EXPECT_EQ(2u * sizeof(int32_t), gh.getHeldBytes());
- gh.hold(std::make_unique<MyHeld>(sizeof(int32_t)));
- gh.transferHoldLists(6);
- EXPECT_EQ(3u * sizeof(int32_t), gh.getHeldBytes());
- gh.trimHoldLists(6);
- EXPECT_EQ(1u * sizeof(int32_t), gh.getHeldBytes());
- gh.trimHoldLists(7);
- EXPECT_EQ(0u * sizeof(int32_t), gh.getHeldBytes());
- gh.trimHoldLists(7);
- EXPECT_EQ(0u * sizeof(int32_t), gh.getHeldBytes());
-}
-
-GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/tests/util/generationhandler/generationhandler_test.cpp b/vespalib/src/tests/util/generationhandler/generationhandler_test.cpp
index 00da752a749..0bc72f93a9d 100644
--- a/vespalib/src/tests/util/generationhandler/generationhandler_test.cpp
+++ b/vespalib/src/tests/util/generationhandler/generationhandler_test.cpp
@@ -26,10 +26,10 @@ GenerationHandlerTest::~GenerationHandlerTest() = default;
TEST_F(GenerationHandlerTest, require_that_generation_can_be_increased)
{
EXPECT_EQ(0u, gh.getCurrentGeneration());
- EXPECT_EQ(0u, gh.getFirstUsedGeneration());
+ EXPECT_EQ(0u, gh.get_oldest_used_generation());
gh.incGeneration();
EXPECT_EQ(1u, gh.getCurrentGeneration());
- EXPECT_EQ(1u, gh.getFirstUsedGeneration());
+ EXPECT_EQ(1u, gh.get_oldest_used_generation());
}
TEST_F(GenerationHandlerTest, require_that_readers_can_take_guards)
@@ -87,34 +87,34 @@ TEST_F(GenerationHandlerTest, require_that_guards_can_be_copied)
TEST_F(GenerationHandlerTest, require_that_the_first_used_generation_is_correct)
{
- EXPECT_EQ(0u, gh.getFirstUsedGeneration());
+ EXPECT_EQ(0u, gh.get_oldest_used_generation());
gh.incGeneration();
- EXPECT_EQ(1u, gh.getFirstUsedGeneration());
+ EXPECT_EQ(1u, gh.get_oldest_used_generation());
{
GenGuard g1 = gh.takeGuard();
gh.incGeneration();
EXPECT_EQ(1u, gh.getGenerationRefCount());
- EXPECT_EQ(1u, gh.getFirstUsedGeneration());
+ EXPECT_EQ(1u, gh.get_oldest_used_generation());
}
- EXPECT_EQ(1u, gh.getFirstUsedGeneration());
- gh.updateFirstUsedGeneration(); // Only writer should call this
+ EXPECT_EQ(1u, gh.get_oldest_used_generation());
+ gh.update_oldest_used_generation(); // Only writer should call this
EXPECT_EQ(0u, gh.getGenerationRefCount());
- EXPECT_EQ(2u, gh.getFirstUsedGeneration());
+ EXPECT_EQ(2u, gh.get_oldest_used_generation());
{
GenGuard g1 = gh.takeGuard();
gh.incGeneration();
gh.incGeneration();
EXPECT_EQ(1u, gh.getGenerationRefCount());
- EXPECT_EQ(2u, gh.getFirstUsedGeneration());
+ EXPECT_EQ(2u, gh.get_oldest_used_generation());
{
GenGuard g2 = gh.takeGuard();
- EXPECT_EQ(2u, gh.getFirstUsedGeneration());
+ EXPECT_EQ(2u, gh.get_oldest_used_generation());
}
}
- EXPECT_EQ(2u, gh.getFirstUsedGeneration());
- gh.updateFirstUsedGeneration(); // Only writer should call this
+ EXPECT_EQ(2u, gh.get_oldest_used_generation());
+ gh.update_oldest_used_generation(); // Only writer should call this
EXPECT_EQ(0u, gh.getGenerationRefCount());
- EXPECT_EQ(4u, gh.getFirstUsedGeneration());
+ EXPECT_EQ(4u, gh.get_oldest_used_generation());
}
TEST_F(GenerationHandlerTest, require_that_generation_can_grow_large)
@@ -124,7 +124,7 @@ TEST_F(GenerationHandlerTest, require_that_generation_can_grow_large)
EXPECT_EQ(i, gh.getCurrentGeneration());
guards.push_back(gh.takeGuard()); // take guard on current generation
if (i >= 128) {
- EXPECT_EQ(i - 128, gh.getFirstUsedGeneration());
+ EXPECT_EQ(i - 128, gh.get_oldest_used_generation());
guards.pop_front();
EXPECT_EQ(128u, gh.getGenerationRefCount());
}
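
The renamed accessors make the reader/writer contract explicit: readers pin generations with takeGuard(), and only the writer recomputes the reclamation floor. A small sketch using just the methods exercised above:

    #include <vespa/vespalib/util/generationhandler.h>

    void writer_step(vespalib::GenerationHandler& gh) {
        gh.incGeneration();                 // publish a new generation
        gh.update_oldest_used_generation(); // writer-only: recompute the floor
        // anything held for generations < gh.get_oldest_used_generation()
        // is now safe to reclaim
    }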
diff --git a/vespalib/src/tests/util/generationhandler_stress/generation_handler_stress_test.cpp b/vespalib/src/tests/util/generationhandler_stress/generation_handler_stress_test.cpp
index 74af25b54a8..fd2769fd8b1 100644
--- a/vespalib/src/tests/util/generationhandler_stress/generation_handler_stress_test.cpp
+++ b/vespalib/src/tests/util/generationhandler_stress/generation_handler_stress_test.cpp
@@ -238,7 +238,7 @@ Fixture::write_indirect_work(uint64_t cnt, IndirectContext& context)
ReadStopper read_stopper(_stopRead);
uint32_t sleep_cnt = 0;
ASSERT_EQ(0, _generationHandler.getCurrentGeneration());
- auto oldest_gen = _generationHandler.getFirstUsedGeneration();
+ auto oldest_gen = _generationHandler.get_oldest_used_generation();
for (uint64_t i = 0; i < cnt; ++i) {
auto gen = _generationHandler.getCurrentGeneration();
// Hold data for gen, write new data for next_gen
@@ -248,7 +248,7 @@ Fixture::write_indirect_work(uint64_t cnt, IndirectContext& context)
*v_ptr = next_gen;
context._value_ptr.store(v_ptr, std::memory_order_release);
_generationHandler.incGeneration();
- auto first_used_gen = _generationHandler.getFirstUsedGeneration();
+ auto first_used_gen = _generationHandler.get_oldest_used_generation();
while (oldest_gen < first_used_gen) {
// Clear data that readers should no longer have access to.
*context.calc_value_ptr(oldest_gen) = 0;
@@ -258,8 +258,8 @@ Fixture::write_indirect_work(uint64_t cnt, IndirectContext& context)
// Sleep if writer gets too much ahead of readers.
std::this_thread::sleep_for(1ms);
++sleep_cnt;
- _generationHandler.updateFirstUsedGeneration();
- first_used_gen = _generationHandler.getFirstUsedGeneration();
+ _generationHandler.update_oldest_used_generation();
+ first_used_gen = _generationHandler.get_oldest_used_generation();
}
}
_doneWriteWork += cnt;
diff --git a/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp b/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp
index eb2b00f9e20..5d6ec3050da 100644
--- a/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp
+++ b/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp
@@ -102,15 +102,15 @@ TEST(RcuVectorTest, resize)
RcuVectorBase<int8_t> v(growStrategy(16, 1.0, 0), g);
v.push_back(1);
v.push_back(2);
- g.transferHoldLists(0);
- g.trimHoldLists(1);
+ g.assign_generation(0);
+ g.reclaim(1);
const int8_t *old = &v[0];
EXPECT_EQ(16u, v.capacity());
EXPECT_EQ(2u, v.size());
v.ensure_size(32, 3);
v[0] = 3;
v[1] = 3;
- g.transferHoldLists(1);
+ g.assign_generation(1);
EXPECT_EQ(1, old[0]);
EXPECT_EQ(2, old[1]);
EXPECT_EQ(3, v[0]);
@@ -119,7 +119,7 @@ TEST(RcuVectorTest, resize)
EXPECT_EQ(3, v[31]);
EXPECT_EQ(64u, v.capacity());
EXPECT_EQ(32u, v.size());
- g.trimHoldLists(2);
+ g.reclaim(2);
}
}
@@ -140,7 +140,7 @@ TEST(RcuVectorTest, generation_handling)
v.setGeneration(2);
v.push_back(50);
- v.removeOldGenerations(3);
+ v.reclaim_memory(3);
EXPECT_EQ(0u, v.getMemoryUsage().allocatedBytesOnHold());
v.push_back(60); // new array
EXPECT_EQ(24u, v.getMemoryUsage().allocatedBytesOnHold());
@@ -184,7 +184,7 @@ TEST(RcuVectorTest, memory_usage)
EXPECT_TRUE(assertUsage(MemoryUsage(6,6,0,2), v.getMemoryUsage()));
v.push_back(4);
EXPECT_TRUE(assertUsage(MemoryUsage(12,11,0,6), v.getMemoryUsage()));
- v.removeOldGenerations(1);
+ v.reclaim_memory(1);
EXPECT_TRUE(assertUsage(MemoryUsage(6,5,0,0), v.getMemoryUsage()));
}
@@ -197,11 +197,11 @@ void verify_shrink_with_buffer_copying(size_t initial_size, size_t absolute_mini
v.push_back(2);
v.push_back(3);
v.push_back(4);
- g.transferHoldLists(0);
- g.trimHoldLists(1);
+ g.assign_generation(0);
+ g.reclaim(1);
MemoryUsage mu;
mu = v.getMemoryUsage();
- mu.incAllocatedBytesOnHold(g.getHeldBytes());
+ mu.incAllocatedBytesOnHold(g.get_held_bytes());
EXPECT_TRUE(assertUsage(MemoryUsage(initial_capacity, 4, 0, 0), mu));
EXPECT_EQ(4u, v.size());
EXPECT_EQ(initial_capacity, v.capacity());
@@ -211,18 +211,18 @@ void verify_shrink_with_buffer_copying(size_t initial_size, size_t absolute_mini
EXPECT_EQ(4, v[3]);
const int8_t *old = &v[0];
v.shrink(2);
- g.transferHoldLists(1);
+ g.assign_generation(1);
EXPECT_EQ(2u, v.size());
EXPECT_EQ(minimal_capacity, v.capacity());
EXPECT_EQ(1, v[0]);
EXPECT_EQ(2, v[1]);
EXPECT_EQ(1, old[0]);
EXPECT_EQ(2, old[1]);
- g.trimHoldLists(2);
+ g.reclaim(2);
EXPECT_EQ(1, v[0]);
EXPECT_EQ(2, v[1]);
mu = v.getMemoryUsage();
- mu.incAllocatedBytesOnHold(g.getHeldBytes());
+ mu.incAllocatedBytesOnHold(g.get_held_bytes());
EXPECT_TRUE(assertUsage(MemoryUsage(minimal_capacity, 2, 0, 0), mu));
}
@@ -256,7 +256,7 @@ struct ShrinkFixture {
EXPECT_EQ(oldPtr, &vec[0]);
}
void assertEmptyHoldList() {
- EXPECT_EQ(0u, g.getHeldBytes());
+ EXPECT_EQ(0u, g.get_held_bytes());
}
static size_t page_ints() { return round_up_to_page_size(1) / sizeof(int); }
};
@@ -294,8 +294,8 @@ TEST(RcuVectorTest, small_expand)
v.push_back(2);
EXPECT_EQ(2u, v.capacity());
EXPECT_EQ(2u, v.size());
- g.transferHoldLists(1);
- g.trimHoldLists(2);
+ g.assign_generation(1);
+ g.reclaim(2);
}
struct FixtureBase {
@@ -325,10 +325,10 @@ struct Fixture : public FixtureBase {
Fixture();
~Fixture();
- void transfer_and_trim(generation_t transfer_gen, generation_t trim_gen)
+ void assign_and_reclaim(generation_t assign_gen, generation_t reclaim_gen)
{
- g.transferHoldLists(transfer_gen);
- g.trimHoldLists(trim_gen);
+ g.assign_generation(assign_gen);
+ g.reclaim(reclaim_gen);
}
};
@@ -345,7 +345,7 @@ TEST(RcuVectorTest, memory_allocator_can_be_set)
{
Fixture f;
EXPECT_EQ(AllocStats(2, 0), f.stats);
- f.transfer_and_trim(1, 2);
+ f.assign_and_reclaim(1, 2);
EXPECT_EQ(AllocStats(2, 1), f.stats);
}
@@ -355,7 +355,7 @@ TEST(RcuVectorTest, memory_allocator_is_preserved_across_reset)
f.arr.reset();
f.arr.reserve(100);
EXPECT_EQ(AllocStats(4, 1), f.stats);
- f.transfer_and_trim(1, 2);
+ f.assign_and_reclaim(1, 2);
EXPECT_EQ(AllocStats(4, 3), f.stats);
}
@@ -366,7 +366,7 @@ TEST(RcuVectorTest, created_replacement_vector_uses_same_memory_allocator)
EXPECT_EQ(AllocStats(2, 0), f.stats);
arr2.reserve(100);
EXPECT_EQ(AllocStats(3, 0), f.stats);
- f.transfer_and_trim(1, 2);
+ f.assign_and_reclaim(1, 2);
EXPECT_EQ(AllocStats(3, 1), f.stats);
}
@@ -377,7 +377,7 @@ TEST(RcuVectorTest, ensure_size_and_shrink_use_same_memory_allocator)
EXPECT_EQ(AllocStats(3, 0), f.stats);
f.arr.shrink(1000);
EXPECT_EQ(AllocStats(4, 0), f.stats);
- f.transfer_and_trim(1, 2);
+ f.assign_and_reclaim(1, 2);
EXPECT_EQ(AllocStats(4, 3), f.stats);
}
@@ -432,10 +432,10 @@ void
StressFixture::commit()
{
auto current_gen = generation_handler.getCurrentGeneration();
- g.transferHoldLists(current_gen);
+ g.assign_generation(current_gen);
generation_handler.incGeneration();
- auto first_used_gen = generation_handler.getFirstUsedGeneration();
- g.trimHoldLists(first_used_gen);
+ auto first_used_gen = generation_handler.get_oldest_used_generation();
+ g.reclaim(first_used_gen);
}
void
diff --git a/vespalib/src/vespa/vespalib/btree/btree.hpp b/vespalib/src/vespa/vespalib/btree/btree.hpp
index c6d8886254d..81687b6e62d 100644
--- a/vespalib/src/vespa/vespalib/btree/btree.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btree.hpp
@@ -20,7 +20,7 @@ BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::~BTree()
{
clear();
_alloc.freeze();
- _alloc.clearHoldLists();
+ _alloc.reclaim_all_memory();
}
template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
index 86c9621f869..77900edf848 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
@@ -101,7 +101,7 @@ public:
/**
* Try to free held nodes if nobody can be referencing them.
*/
- void trimHoldLists(generation_t usedGen);
+ void reclaim_memory(generation_t oldest_used_gen);
/**
* Transfer nodes from hold1 lists to hold2 lists, they are no
@@ -109,9 +109,9 @@ public:
* older versions of the frozen structure must leave before elements
* can be unheld.
*/
- void transferHoldLists(generation_t generation);
+ void assign_generation(generation_t current_gen);
- void clearHoldLists();
+ void reclaim_all_memory();
static bool isValidRef(BTreeNode::Ref ref) { return NodeStore::isValidRef(ref); }
@@ -164,14 +164,9 @@ public:
vespalib::string toString(const BTreeNode * node) const;
bool getCompacting(EntryRef ref) const { return _nodeStore.getCompacting(ref); }
- std::vector<uint32_t> startCompact() { return _nodeStore.startCompact(); }
std::unique_ptr<vespalib::datastore::CompactingBuffers> start_compact_worst(const CompactionStrategy& compaction_strategy) { return _nodeStore.start_compact_worst(compaction_strategy); }
- void finishCompact(const std::vector<uint32_t> &toHold) {
- return _nodeStore.finishCompact(toHold);
- }
-
template <typename FunctionType>
void foreach_key(EntryRef ref, FunctionType func) const {
_nodeStore.foreach_key(ref, func);
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp
index 81262f560c7..a38b68afe73 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp
@@ -34,7 +34,7 @@ BTreeNodeAllocator<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>::
assert(_treeToFreeze.empty());
assert(_internalHoldUntilFreeze.empty());
assert(_leafHoldUntilFreeze.empty());
- DataStoreBase::MemStats stats = _nodeStore.getMemStats();
+ auto stats = _nodeStore.getMemStats();
assert(stats._usedBytes == stats._deadBytes);
assert(stats._holdBytes == 0);
(void) stats;
@@ -235,7 +235,7 @@ freeze()
InternalNodeType *inode = mapInternalRef(i);
(void) inode;
assert(inode->getFrozen());
- _nodeStore.freeElem(i);
+ _nodeStore.holdElem(i);
}
_internalHoldUntilFreeze.clear();
}
@@ -245,7 +245,7 @@ freeze()
LeafNodeType *lnode = mapLeafRef(i);
(void) lnode;
assert(lnode->getFrozen());
- _nodeStore.freeElem(i);
+ _nodeStore.holdElem(i);
}
_leafHoldUntilFreeze.clear();
}
@@ -266,18 +266,18 @@ template <typename KeyT, typename DataT, typename AggrT,
size_t INTERNAL_SLOTS, size_t LEAF_SLOTS>
void
BTreeNodeAllocator<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>::
-trimHoldLists(generation_t usedGen)
+reclaim_memory(generation_t oldest_used_gen)
{
- _nodeStore.trimHoldLists(usedGen);
+ _nodeStore.reclaim_memory(oldest_used_gen);
}
template <typename KeyT, typename DataT, typename AggrT,
size_t INTERNAL_SLOTS, size_t LEAF_SLOTS>
void
BTreeNodeAllocator<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>::
-transferHoldLists(generation_t generation)
+assign_generation(generation_t current_gen)
{
- _nodeStore.transferHoldLists(generation);
+ _nodeStore.assign_generation(current_gen);
}
@@ -285,9 +285,9 @@ template <typename KeyT, typename DataT, typename AggrT,
size_t INTERNAL_SLOTS, size_t LEAF_SLOTS>
void
BTreeNodeAllocator<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>::
-clearHoldLists()
+reclaim_all_memory()
{
- _nodeStore.clearHoldLists();
+ _nodeStore.reclaim_all_memory();
}
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.h b/vespalib/src/vespa/vespalib/btree/btreenodestore.h
index d05ec840f83..a63a4d20170 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.h
@@ -156,32 +156,24 @@ public:
_store.holdElem(ref, 1);
}
- void freeElem(EntryRef ref) {
- _store.freeElem(ref, 1);
- }
-
- std::vector<uint32_t> startCompact();
-
std::unique_ptr<vespalib::datastore::CompactingBuffers> start_compact_worst(const CompactionStrategy& compaction_strategy);
- void finishCompact(const std::vector<uint32_t> &toHold);
-
- void transferHoldLists(generation_t generation) {
- _store.transferHoldLists(generation);
+ void assign_generation(generation_t current_gen) {
+ _store.assign_generation(current_gen);
}
// Inherit doc from DataStoreBase
- datastore::DataStoreBase::MemStats getMemStats() const {
+ datastore::MemoryStats getMemStats() const {
return _store.getMemStats();
}
// Inherit doc from DataStoreBase
- void trimHoldLists(generation_t usedGen) {
- _store.trimHoldLists(usedGen);
+ void reclaim_memory(generation_t oldest_used_gen) {
+ _store.reclaim_memory(oldest_used_gen);
}
- void clearHoldLists() {
- _store.clearHoldLists();
+ void reclaim_all_memory() {
+ _store.reclaim_all_memory();
}
// Inherit doc from DataStoreBase
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
index 0f9eeb9daec..a1ffb4d445d 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
@@ -55,20 +55,6 @@ BTreeNodeStore<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>::
_store.dropBuffers(); // Drop buffers before type handlers are dropped
}
-
-template <typename KeyT, typename DataT, typename AggrT,
- size_t INTERNAL_SLOTS, size_t LEAF_SLOTS>
-std::vector<uint32_t>
-BTreeNodeStore<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>::
-startCompact()
-{
- std::vector<uint32_t> iToHold = _store.startCompact(NODETYPE_INTERNAL);
- std::vector<uint32_t> lToHold = _store.startCompact(NODETYPE_LEAF);
- std::vector<uint32_t> ret = iToHold;
- ret.insert(ret.end(), lToHold.begin(), lToHold.end());
- return ret;
-}
-
template <typename KeyT, typename DataT, typename AggrT,
size_t INTERNAL_SLOTS, size_t LEAF_SLOTS>
std::unique_ptr<vespalib::datastore::CompactingBuffers>
@@ -78,15 +64,6 @@ start_compact_worst(const CompactionStrategy &compaction_strategy)
return _store.start_compact_worst_buffers(datastore::CompactionSpec(true, false), compaction_strategy);
}
-template <typename KeyT, typename DataT, typename AggrT,
- size_t INTERNAL_SLOTS, size_t LEAF_SLOTS>
-void
-BTreeNodeStore<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>::
-finishCompact(const std::vector<uint32_t> &toHold)
-{
- _store.finishCompact(toHold);
-}
-
}
#define VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_INTERNALNODE(K, A, S) \
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.h b/vespalib/src/vespa/vespalib/btree/btreestore.h
index 54bc397175d..e5c55d5775d 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.h
@@ -332,25 +332,25 @@ public:
// Inherit doc from DataStoreBase
void
- trimHoldLists(generation_t usedGen)
+ reclaim_memory(generation_t oldest_used_gen)
{
- _allocator.trimHoldLists(usedGen);
- _store.trimHoldLists(usedGen);
+ _allocator.reclaim_memory(oldest_used_gen);
+ _store.reclaim_memory(oldest_used_gen);
}
// Inherit doc from DataStoreBase
void
- transferHoldLists(generation_t generation)
+ assign_generation(generation_t current_gen)
{
- _allocator.transferHoldLists(generation);
- _store.transferHoldLists(generation);
+ _allocator.assign_generation(current_gen);
+ _store.assign_generation(current_gen);
}
void
- clearHoldLists()
+ reclaim_all_memory()
{
- _allocator.clearHoldLists();
- _store.clearHoldLists();
+ _allocator.reclaim_all_memory();
+ _store.reclaim_all_memory();
}
diff --git a/vespalib/src/vespa/vespalib/coro/CMakeLists.txt b/vespalib/src/vespa/vespalib/coro/CMakeLists.txt
new file mode 100644
index 00000000000..d190c2e8ddc
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/coro/CMakeLists.txt
@@ -0,0 +1,5 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(vespalib_vespalib_coro OBJECT
+ SOURCES
+ DEPENDS
+)
diff --git a/vespalib/src/vespa/vespalib/coro/detached.h b/vespalib/src/vespa/vespalib/coro/detached.h
new file mode 100644
index 00000000000..5e3fa1452fa
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/coro/detached.h
@@ -0,0 +1,32 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <coroutine>
+#include <exception>
+
+namespace vespalib::coro {
+
+/**
+ * coroutine return type
+ *
+ * The coroutine is eager (will not suspend in initial_suspend) and
+ * self-destroying (will not suspend in final_suspend). The return
+ * value gives no way of interacting with the coroutine. Without any
+ * co_await operations this acts similarly to a normal subroutine. Note
+ * that letting a detached coroutine wait for a Lazy<T> will
+ * essentially attach it to the Lazy<T> as a continuation and resume
+ * it, but will require the Lazy<T> not to be deleted mid-flight
+ * (started but not completed).
+ **/
+struct Detached {
+ struct promise_type {
+ Detached get_return_object() { return {}; }
+ static std::suspend_never initial_suspend() noexcept { return {}; }
+ static std::suspend_never final_suspend() noexcept { return {}; }
+ static void unhandled_exception() { std::terminate(); }
+ void return_void() noexcept {}
+ };
+};
+
+}
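
A Detached coroutine therefore behaves like a plain function call until its first real suspension point. A minimal fire-and-forget sketch, assuming only the header added above (the function name is illustrative):

    #include <vespa/vespalib/coro/detached.h>
    #include <cstdio>

    vespalib::coro::Detached log_detached(const char* msg) {
        std::printf("%s\n", msg); // runs eagerly; the frame destroys itself on completion
        co_return;
    }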
diff --git a/vespalib/src/vespa/vespalib/coro/lazy.h b/vespalib/src/vespa/vespalib/coro/lazy.h
new file mode 100644
index 00000000000..5a10c05bc24
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/coro/lazy.h
@@ -0,0 +1,111 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <concepts>
+#include <coroutine>
+#include <optional>
+#include <exception>
+#include <utility>
+
+namespace vespalib::coro {
+
+/**
+ * coroutine return type
+ *
+ * The coroutine is lazy (will suspend in initial_suspend) and
+ * destroyed from the outside (will suspend in final_suspend). Waiting
+ * for a Lazy<T> using co_await will use symmetric transfer to suspend
+ * the waiting coroutine and resume this one. The waiting coroutine
+ * is registered as a continuation and will be resumed again once the
+ * result is available (also using symmetric transfer). The result is
+ * assumed to be produced asynchronously. If you need to access it
+ * from the outside (in that specific thread), use sync_wait.
+ **/
+template <std::movable T>
+class [[nodiscard]] Lazy {
+public:
+ struct promise_type {
+ Lazy<T> get_return_object() { return Lazy(Handle::from_promise(*this)); }
+ static std::suspend_always initial_suspend() noexcept { return {}; }
+ static auto final_suspend() noexcept {
+ struct awaiter {
+ bool await_ready() const noexcept { return false; }
+ std::coroutine_handle<> await_suspend(Handle handle) const noexcept {
+ return handle.promise().waiter;
+ }
+ void await_resume() const noexcept {}
+ };
+ return awaiter();
+ }
+ template <typename RET>
+ requires std::is_convertible_v<RET&&,T>
+ void return_value(RET &&ret_value) noexcept(std::is_nothrow_constructible_v<T,RET&&>) {
+ value = std::forward<RET>(ret_value);
+ }
+ void unhandled_exception() noexcept {
+ exception = std::current_exception();
+ }
+ std::optional<T> value;
+ std::exception_ptr exception;
+ std::coroutine_handle<> waiter;
+ promise_type(promise_type &&) = delete;
+ promise_type(const promise_type &) = delete;
+ promise_type() noexcept : value(std::nullopt), exception(), waiter(std::noop_coroutine()) {}
+ T &result() & {
+ if (exception) {
+ std::rethrow_exception(exception);
+ }
+ return *value;
+ }
+ T &&result() && {
+ if (exception) {
+ std::rethrow_exception(exception);
+ }
+ return std::move(*value);
+ }
+ };
+ using Handle = std::coroutine_handle<promise_type>;
+
+private:
+ Handle _handle;
+
+ struct awaiter_base {
+ Handle handle;
+ awaiter_base(Handle handle_in) noexcept : handle(handle_in) {}
+ bool await_ready() const noexcept { return handle.done(); }
+ Handle await_suspend(std::coroutine_handle<> waiter) const noexcept {
+ handle.promise().waiter = waiter;
+ return handle;
+ }
+ };
+
+public:
+ Lazy(const Lazy &) = delete;
+ Lazy &operator=(const Lazy &) = delete;
+ explicit Lazy(Handle handle_in) noexcept : _handle(handle_in) {}
+ Lazy(Lazy &&rhs) noexcept : _handle(std::exchange(rhs._handle, nullptr)) {}
+ auto operator co_await() & noexcept {
+ struct awaiter : awaiter_base {
+ using awaiter_base::handle;
+ awaiter(Handle handle_in) noexcept : awaiter_base(handle_in) {}
+ decltype(auto) await_resume() const { return handle.promise().result(); }
+ };
+ return awaiter(_handle);
+ }
+ auto operator co_await() && noexcept {
+ struct awaiter : awaiter_base {
+ using awaiter_base::handle;
+ awaiter(Handle handle_in) noexcept : awaiter_base(handle_in) {}
+ decltype(auto) await_resume() const { return std::move(handle.promise()).result(); }
+ };
+ return awaiter(_handle);
+ }
+ ~Lazy() {
+ if (_handle) {
+ _handle.destroy();
+ }
+ }
+};
+
+}
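
In practice a Lazy<T> body only runs once something co_awaits it, and awaiting uses symmetric transfer in both directions. A sketch mirroring the usage style of the accompanying lazy_test.cpp (helper names are illustrative):

    #include <vespa/vespalib/coro/lazy.h>

    vespalib::coro::Lazy<int> make_value(int seed) {
        co_return seed + 1; // body is deferred until awaited
    }

    vespalib::coro::Lazy<int> sum_values() {
        int a = co_await make_value(1); // suspends, resumes the child, resumes here with its result
        int b = co_await make_value(2);
        co_return a + b;
    }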
diff --git a/vespalib/src/vespa/vespalib/coro/sync_wait.h b/vespalib/src/vespa/vespalib/coro/sync_wait.h
new file mode 100644
index 00000000000..bdea2dfc7f0
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/coro/sync_wait.h
@@ -0,0 +1,59 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "detached.h"
+#include "lazy.h"
+#include <vespa/vespalib/util/gate.h>
+
+#include <coroutine>
+#include <exception>
+
+namespace vespalib::coro {
+
+template <typename T, typename S>
+Detached signal_when_done(Lazy<T> &value, S &sink) {
+ try {
+ sink(co_await value);
+ } catch (...) {
+ sink(std::current_exception());
+ }
+}
+
+/**
+ * Wait for a lazy value to be calculated (note that waiting for a
+ * value will also start calculating it). Make sure the waiting thread
+ * is not needed in the calculation of the value, or you will end up
+ * with a deadlock.
+ **/
+template <typename T>
+T &sync_wait(Lazy<T> &value) {
+ struct MySink {
+ Gate gate;
+ T *result;
+ std::exception_ptr exception;
+ void operator()(T &result_in) {
+ result = &result_in;
+ gate.countDown();
+ }
+ void operator()(std::exception_ptr exception_in) {
+ exception = exception_in;
+ gate.countDown();
+ }
+ MySink() : gate(), result(nullptr), exception() {}
+ };
+ MySink sink;
+ signal_when_done(value, sink);
+ sink.gate.await();
+ if (sink.exception) {
+ std::rethrow_exception(sink.exception);
+ }
+ return *sink.result;
+}
+
+template <typename T>
+T &&sync_wait(Lazy<T> &&value) {
+ return std::move(sync_wait(value));
+}
+
+}
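
sync_wait is the bridge for non-coroutine callers: it attaches a Detached continuation to the Lazy<T>, blocks on a Gate, and rethrows any captured exception. A usage sketch, reusing the illustrative make_value helper from the Lazy<T> sketch above:

    #include <vespa/vespalib/coro/sync_wait.h>

    int use_from_plain_code() {
        auto lazy = make_value(41);             // nothing has run yet
        return vespalib::coro::sync_wait(lazy); // starts the coroutine, blocks for the result
    }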
diff --git a/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt b/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt
index 9990e3f5764..f11004363f8 100644
--- a/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt
@@ -5,6 +5,7 @@ vespa_add_library(vespalib_vespalib_datastore OBJECT
array_store_config.cpp
atomic_entry_ref.cpp
buffer_free_list.cpp
+ buffer_stats.cpp
buffer_type.cpp
bufferstate.cpp
compact_buffer_candidates.cpp
@@ -19,6 +20,7 @@ vespa_add_library(vespalib_vespalib_datastore OBJECT
fixed_size_hash_map.cpp
free_list.cpp
large_array_buffer_type.cpp
+ memory_stats.cpp
sharded_hash_map.cpp
small_array_buffer_type.cpp
unique_store.cpp
diff --git a/vespalib/src/vespa/vespalib/datastore/allocator.hpp b/vespalib/src/vespa/vespalib/datastore/allocator.hpp
index 9b69be49b8e..a65fd8a2352 100644
--- a/vespalib/src/vespa/vespalib/datastore/allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/allocator.hpp
@@ -28,7 +28,7 @@ Allocator<EntryT, RefT>::alloc(Args && ... args)
RefT ref(oldBufferSize, buffer_id);
EntryT *entry = _store.getEntry<EntryT>(ref);
new (static_cast<void *>(entry)) EntryT(std::forward<Args>(args)...);
- state.pushed_back(1);
+ state.stats().pushed_back(1);
return HandleType(ref, entry);
}
@@ -48,7 +48,7 @@ Allocator<EntryT, RefT>::allocArray(ConstArrayRef array)
for (size_t i = 0; i < array.size(); ++i) {
new (static_cast<void *>(buf + i)) EntryT(array[i]);
}
- state.pushed_back(array.size());
+ state.stats().pushed_back(array.size());
return HandleType(ref, buf);
}
@@ -68,7 +68,7 @@ Allocator<EntryT, RefT>::allocArray(size_t size)
for (size_t i = 0; i < size; ++i) {
new (static_cast<void *>(buf + i)) EntryT();
}
- state.pushed_back(size);
+ state.stats().pushed_back(size);
return HandleType(ref, buf);
}
diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.h b/vespalib/src/vespa/vespalib/datastore/array_store.h
index db037ee12fb..95d632d9603 100644
--- a/vespalib/src/vespa/vespalib/datastore/array_store.h
+++ b/vespalib/src/vespa/vespalib/datastore/array_store.h
@@ -10,6 +10,7 @@
#include "entryref.h"
#include "atomic_entry_ref.h"
#include "i_compaction_context.h"
+#include "i_compactable.h"
#include "large_array_buffer_type.h"
#include "small_array_buffer_type.h"
#include <vespa/vespalib/util/array.h>
@@ -28,7 +29,7 @@ namespace vespalib::datastore {
* The max value of maxSmallArrayTypeId is (2^bufferBits - 1).
*/
template <typename EntryT, typename RefT = EntryRefT<19>, typename TypeMapperT = ArrayStoreTypeMapper<EntryT> >
-class ArrayStore
+class ArrayStore : public ICompactable
{
public:
using AllocSpec = ArrayStoreConfig::AllocSpec;
@@ -66,7 +67,7 @@ private:
public:
ArrayStore(const ArrayStoreConfig &cfg, std::shared_ptr<alloc::MemoryAllocator> memory_allocator);
ArrayStore(const ArrayStoreConfig &cfg, std::shared_ptr<alloc::MemoryAllocator> memory_allocator, TypeMapper&& mapper);
- ~ArrayStore();
+ ~ArrayStore() override;
EntryRef add(const ConstArrayRef &array);
ConstArrayRef get(EntryRef ref) const {
if (!ref.valid()) {
@@ -104,6 +105,7 @@ public:
}
void remove(EntryRef ref);
+ EntryRef move_on_compact(EntryRef ref) override;
ICompactionContext::UP compactWorst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy);
vespalib::MemoryUsage getMemoryUsage() const { return _store.getMemoryUsage(); }
@@ -114,8 +116,8 @@ public:
vespalib::AddressSpace addressSpaceUsage() const;
// Pass on hold list management to underlying store
- void transferHoldLists(generation_t generation) { _store.transferHoldLists(generation); }
- void trimHoldLists(generation_t firstUsed) { _store.trimHoldLists(firstUsed); }
+ void assign_generation(generation_t current_gen) { _store.assign_generation(current_gen); }
+ void reclaim_memory(generation_t oldest_used_gen) { _store.reclaim_memory(oldest_used_gen); }
vespalib::GenerationHolder &getGenerationHolder() { return _store.getGenerationHolder(); }
void setInitializing(bool initializing) { _store.setInitializing(initializing); }
diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.hpp b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
index e79398271fb..95f4a3c4155 100644
--- a/vespalib/src/vespa/vespalib/datastore/array_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
@@ -4,13 +4,14 @@
#include "array_store.h"
#include "compacting_buffers.h"
+#include "compaction_context.h"
#include "compaction_spec.h"
-#include "entry_ref_filter.h"
#include "datastore.hpp"
+#include "entry_ref_filter.h"
#include "large_array_buffer_type.hpp"
#include "small_array_buffer_type.hpp"
-#include <atomic>
#include <algorithm>
+#include <atomic>
namespace vespalib::datastore {
@@ -57,7 +58,7 @@ ArrayStore<EntryT, RefT, TypeMapperT>::ArrayStore(const ArrayStoreConfig &cfg, s
template <typename EntryT, typename RefT, typename TypeMapperT>
ArrayStore<EntryT, RefT, TypeMapperT>::~ArrayStore()
{
- _store.clearHoldLists();
+ _store.reclaim_all_memory();
_store.dropBuffers();
}
@@ -114,7 +115,7 @@ ArrayStore<EntryT, RefT, TypeMapperT>::addLargeArray(const ConstArrayRef &array)
auto handle = _store.template freeListAllocator<LargeArray, NoOpReclaimer>(_largeArrayTypeId)
.alloc(array.cbegin(), array.cend());
auto& state = _store.getBufferState(RefT(handle.ref).bufferId());
- state.incExtraUsedBytes(sizeof(EntryT) * array.size());
+ state.stats().inc_extra_used_bytes(sizeof(EntryT) * array.size());
return handle.ref;
}
@@ -125,7 +126,7 @@ ArrayStore<EntryT, RefT, TypeMapperT>::allocate_large_array(size_t array_size)
using NoOpReclaimer = DefaultReclaimer<LargeArray>;
auto handle = _store.template freeListAllocator<LargeArray, NoOpReclaimer>(_largeArrayTypeId).alloc(array_size);
auto& state = _store.getBufferState(RefT(handle.ref).bufferId());
- state.incExtraUsedBytes(sizeof(EntryT) * array_size);
+ state.stats().inc_extra_used_bytes(sizeof(EntryT) * array_size);
return handle.ref;
}
@@ -145,38 +146,11 @@ ArrayStore<EntryT, RefT, TypeMapperT>::remove(EntryRef ref)
}
}
-namespace arraystore {
-
template <typename EntryT, typename RefT, typename TypeMapperT>
-class CompactionContext : public ICompactionContext {
-private:
- using ArrayStoreType = ArrayStore<EntryT, RefT, TypeMapperT>;
- ArrayStoreType &_store;
- std::unique_ptr<vespalib::datastore::CompactingBuffers> _compacting_buffers;
- EntryRefFilter _filter;
-
-public:
- CompactionContext(ArrayStoreType &store,
- std::unique_ptr<vespalib::datastore::CompactingBuffers> compacting_buffers)
- : _store(store),
- _compacting_buffers(std::move(compacting_buffers)),
- _filter(_compacting_buffers->make_entry_ref_filter())
- {
- }
- ~CompactionContext() override {
- _compacting_buffers->finish();
- }
- void compact(vespalib::ArrayRef<AtomicEntryRef> refs) override {
- for (auto &atomic_entry_ref : refs) {
- auto ref = atomic_entry_ref.load_relaxed();
- if (ref.valid() && _filter.has(ref)) {
- EntryRef newRef = _store.add(_store.get(ref));
- atomic_entry_ref.store_release(newRef);
- }
- }
- }
-};
-
+EntryRef
+ArrayStore<EntryT, RefT, TypeMapperT>::move_on_compact(EntryRef ref)
+{
+ return add(get(ref));
}
template <typename EntryT, typename RefT, typename TypeMapperT>
@@ -184,8 +158,7 @@ ICompactionContext::UP
ArrayStore<EntryT, RefT, TypeMapperT>::compactWorst(CompactionSpec compaction_spec, const CompactionStrategy &compaction_strategy)
{
auto compacting_buffers = _store.start_compact_worst_buffers(compaction_spec, compaction_strategy);
- return std::make_unique<arraystore::CompactionContext<EntryT, RefT, TypeMapperT>>
- (*this, std::move(compacting_buffers));
+ return std::make_unique<CompactionContext>(*this, std::move(compacting_buffers));
}
template <typename EntryT, typename RefT, typename TypeMapperT>
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp b/vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp
new file mode 100644
index 00000000000..8d97414626e
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp
@@ -0,0 +1,57 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "buffer_stats.h"
+#include <cassert>
+
+namespace vespalib::datastore {
+
+BufferStats::BufferStats()
+ : _alloc_elems(0),
+ _used_elems(0),
+ _hold_elems(0),
+ _dead_elems(0),
+ _extra_used_bytes(0),
+ _extra_hold_bytes(0)
+{
+}
+
+void
+BufferStats::add_to_mem_stats(size_t element_size, MemoryStats& stats) const
+{
+ size_t extra_used = extra_used_bytes();
+ stats._allocElems += capacity();
+ stats._usedElems += size();
+ stats._deadElems += dead_elems();
+ stats._holdElems += hold_elems();
+ stats._allocBytes += (capacity() * element_size) + extra_used;
+ stats._usedBytes += (size() * element_size) + extra_used;
+ stats._deadBytes += dead_elems() * element_size;
+ stats._holdBytes += (hold_elems() * element_size) + extra_hold_bytes();
+}
+
+InternalBufferStats::InternalBufferStats()
+ : BufferStats()
+{
+}
+
+void
+InternalBufferStats::clear()
+{
+ _alloc_elems.store(0, std::memory_order_relaxed);
+ _used_elems.store(0, std::memory_order_relaxed);
+ _hold_elems.store(0, std::memory_order_relaxed);
+ _dead_elems.store(0, std::memory_order_relaxed);
+ _extra_used_bytes.store(0, std::memory_order_relaxed);
+ _extra_hold_bytes.store(0, std::memory_order_relaxed);
+}
+
+void
+InternalBufferStats::dec_hold_elems(size_t value)
+{
+ ElemCount elems = hold_elems();
+ assert(elems >= value);
+ _hold_elems.store(elems - value, std::memory_order_relaxed);
+}
+
+}
+
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_stats.h b/vespalib/src/vespa/vespalib/datastore/buffer_stats.h
new file mode 100644
index 00000000000..66f8b532c41
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_stats.h
@@ -0,0 +1,76 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "buffer_type.h"
+#include "memory_stats.h"
+#include <atomic>
+
+namespace vespalib::datastore {
+
+/**
+ * Represents statistics for a given buffer in a data store.
+ */
+class BufferStats {
+protected:
+ // The number of elements that are allocated in the buffer.
+ std::atomic<ElemCount> _alloc_elems;
+ // The number of elements (of the allocated) that are used: _used_elems <= _alloc_elems.
+ std::atomic<ElemCount> _used_elems;
+ // The number of elements (of the used) that are on hold: _hold_elems <= _used_elems.
+ // "On hold" is a transitional state used when removing elements.
+ std::atomic<ElemCount> _hold_elems;
+ // The number of elements (of the used) that are dead: _dead_elems <= _used_elems.
+ // A dead element was first on hold, and is now available for reuse in the free list (if enabled).
+ std::atomic<ElemCount> _dead_elems;
+
+ // Number of bytes that are heap allocated (and used) by elements that are stored in this buffer.
+ // For simple types this is always 0.
+ std::atomic<size_t> _extra_used_bytes;
+ // Number of bytes that are heap allocated (and used) by elements that are stored in this buffer and are now on hold.
+ // For simple types this is always 0.
+ std::atomic<size_t> _extra_hold_bytes;
+
+public:
+ BufferStats();
+
+ size_t size() const { return _used_elems.load(std::memory_order_relaxed); }
+ size_t capacity() const { return _alloc_elems.load(std::memory_order_relaxed); }
+ size_t remaining() const { return capacity() - size(); }
+
+ void pushed_back(size_t num_elems) {
+ _used_elems.store(size() + num_elems, std::memory_order_relaxed);
+ }
+
+ size_t dead_elems() const { return _dead_elems.load(std::memory_order_relaxed); }
+ size_t hold_elems() const { return _hold_elems.load(std::memory_order_relaxed); }
+ size_t extra_used_bytes() const { return _extra_used_bytes.load(std::memory_order_relaxed); }
+ size_t extra_hold_bytes() const { return _extra_hold_bytes.load(std::memory_order_relaxed); }
+
+ void inc_extra_used_bytes(size_t value) { _extra_used_bytes.store(extra_used_bytes() + value, std::memory_order_relaxed); }
+
+ void add_to_mem_stats(size_t element_size, MemoryStats& stats) const;
+};
+
+/**
+ * Provides low-level access to buffer stats for integration in BufferState.
+ */
+class InternalBufferStats : public BufferStats {
+public:
+ InternalBufferStats();
+ void clear();
+ void set_alloc_elems(size_t value) { _alloc_elems.store(value, std::memory_order_relaxed); }
+ void set_dead_elems(size_t value) { _dead_elems.store(value, std::memory_order_relaxed); }
+ void set_hold_elems(size_t value) { _hold_elems.store(value, std::memory_order_relaxed); }
+ void inc_dead_elems(size_t value) { _dead_elems.store(dead_elems() + value, std::memory_order_relaxed); }
+ void inc_hold_elems(size_t value) { _hold_elems.store(hold_elems() + value, std::memory_order_relaxed); }
+ void dec_hold_elems(size_t value);
+ void inc_extra_hold_bytes(size_t value) { _extra_hold_bytes.store(extra_hold_bytes() + value, std::memory_order_relaxed); }
+ std::atomic<ElemCount>& used_elems_ref() { return _used_elems; }
+ std::atomic<ElemCount>& dead_elems_ref() { return _dead_elems; }
+ std::atomic<size_t>& extra_used_bytes_ref() { return _extra_used_bytes; }
+ std::atomic<size_t>& extra_hold_bytes_ref() { return _extra_hold_bytes; }
+};
+
+}
+
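
Callers that previously read getHoldElems()/getDeadElems() off BufferState now go through stats(), as the test updates earlier in this diff show. A small sketch of the read side and the invariants documented above:

    #include <vespa/vespalib/datastore/bufferstate.h>
    #include <cassert>

    void check_stats(const vespalib::datastore::BufferState& state) {
        const auto& stats = state.stats();
        assert(stats.hold_elems() <= stats.size());  // _hold_elems <= _used_elems
        assert(stats.dead_elems() <= stats.size());  // _dead_elems <= _used_elems
        assert(stats.size() <= stats.capacity());    // _used_elems <= _alloc_elems
    }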
diff --git a/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp b/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
index d24d8336131..45a94693eeb 100644
--- a/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
@@ -11,13 +11,8 @@ using vespalib::alloc::MemoryAllocator;
namespace vespalib::datastore {
BufferState::BufferState()
- : _usedElems(0),
- _allocElems(0),
- _deadElems(0u),
- _holdElems(0u),
- _extraUsedBytes(0),
- _extraHoldBytes(0),
- _free_list(_deadElems),
+ : _stats(),
+ _free_list(_stats.dead_elems_ref()),
_typeHandler(nullptr),
_buffer(Alloc::alloc(0, MemoryAllocator::HUGEPAGE_SIZE)),
_arraySize(0),
@@ -33,14 +28,7 @@ BufferState::~BufferState()
assert(getState() == State::FREE);
assert(!_free_list.enabled());
assert(_free_list.empty());
- assert(_holdElems == 0);
-}
-
-void
-BufferState::decHoldElems(size_t value) {
- ElemCount hold_elems = getHoldElems();
- assert(hold_elems >= value);
- _holdElems.store(hold_elems - value, std::memory_order_relaxed);
+ assert(_stats.hold_elems() == 0);
}
namespace {
@@ -100,10 +88,10 @@ BufferState::onActive(uint32_t bufferId, uint32_t typeId,
assert(_typeHandler == nullptr);
assert(capacity() == 0);
assert(size() == 0);
- assert(getDeadElems() == 0u);
- assert(getHoldElems() == 0);
- assert(getExtraUsedBytes() == 0);
- assert(getExtraHoldBytes() == 0);
+ assert(_stats.dead_elems() == 0u);
+ assert(_stats.hold_elems() == 0);
+ assert(_stats.extra_used_bytes() == 0);
+ assert(_stats.extra_hold_bytes() == 0);
assert(_free_list.empty());
size_t reservedElements = typeHandler->getReservedElements(bufferId);
@@ -115,14 +103,15 @@ BufferState::onActive(uint32_t bufferId, uint32_t typeId,
_buffer.create(alloc.bytes).swap(_buffer);
assert(_buffer.get() != nullptr || alloc.elements == 0u);
buffer.store(_buffer.get(), std::memory_order_release);
- _allocElems.store(alloc.elements, std::memory_order_relaxed);
+ _stats.set_alloc_elems(alloc.elements);
_typeHandler.store(typeHandler, std::memory_order_release);
assert(typeId <= std::numeric_limits<uint16_t>::max());
_typeId = typeId;
_arraySize = typeHandler->getArraySize();
_free_list.set_array_size(_arraySize);
_state.store(State::ACTIVE, std::memory_order_release);
- typeHandler->onActive(bufferId, &_usedElems, &_deadElems, buffer.load(std::memory_order::relaxed));
+ typeHandler->onActive(bufferId, &_stats.used_elems_ref(), &_stats.dead_elems_ref(),
+ buffer.load(std::memory_order::relaxed));
}
void
@@ -132,11 +121,11 @@ BufferState::onHold(uint32_t buffer_id)
assert(getTypeHandler() != nullptr);
_state.store(State::HOLD, std::memory_order_release);
_compacting = false;
- assert(getDeadElems() <= size());
- assert(getHoldElems() <= (size() - getDeadElems()));
- _deadElems.store(0, std::memory_order_relaxed);
- _holdElems.store(size(), std::memory_order_relaxed); // Put everyting on hold
- getTypeHandler()->onHold(buffer_id, &_usedElems, &_deadElems);
+ assert(_stats.dead_elems() <= size());
+ assert(_stats.hold_elems() <= (size() - _stats.dead_elems()));
+ _stats.set_dead_elems(0);
+ _stats.set_hold_elems(size());
+ getTypeHandler()->onHold(buffer_id, &_stats.used_elems_ref(), &_stats.dead_elems_ref());
_free_list.disable();
}
@@ -146,18 +135,13 @@ BufferState::onFree(std::atomic<void*>& buffer)
assert(buffer.load(std::memory_order_relaxed) == _buffer.get());
assert(getState() == State::HOLD);
assert(_typeHandler != nullptr);
- assert(getDeadElems() <= size());
- assert(getHoldElems() == size() - getDeadElems());
+ assert(_stats.dead_elems() <= size());
+ assert(_stats.hold_elems() == (size() - _stats.dead_elems()));
getTypeHandler()->destroyElements(buffer, size());
Alloc::alloc().swap(_buffer);
getTypeHandler()->onFree(size());
buffer.store(nullptr, std::memory_order_release);
- _usedElems.store(0, std::memory_order_relaxed);
- _allocElems.store(0, std::memory_order_relaxed);
- _deadElems.store(0, std::memory_order_relaxed);
- _holdElems.store(0, std::memory_order_relaxed);
- _extraUsedBytes.store(0, std::memory_order_relaxed);
- _extraHoldBytes.store(0, std::memory_order_relaxed);
+ _stats.clear();
_state.store(State::FREE, std::memory_order_release);
_typeHandler = nullptr;
_arraySize = 0;
@@ -192,6 +176,36 @@ BufferState::disableElemHoldList()
_disableElemHoldList = true;
}
+bool
+BufferState::hold_elems(size_t num_elems, size_t extra_bytes)
+{
+ assert(isActive());
+ if (_disableElemHoldList) {
+ // The elements are marked as dead immediately, since they are not put on hold.
+ _stats.inc_dead_elems(num_elems);
+ return true;
+ }
+ _stats.inc_hold_elems(num_elems);
+ _stats.inc_extra_hold_bytes(extra_bytes);
+ return false;
+}
+
+void
+BufferState::free_elems(EntryRef ref, size_t num_elems, size_t ref_offset)
+{
+ if (isActive()) {
+ if (_free_list.enabled() && (num_elems == getArraySize())) {
+ _free_list.push_entry(ref);
+ }
+ } else {
+ assert(isOnHold());
+ }
+ _stats.inc_dead_elems(num_elems);
+ _stats.dec_hold_elems(num_elems);
+ getTypeHandler()->cleanHold(_buffer.get(), (ref_offset * _arraySize), num_elems,
+ BufferTypeBase::CleanContext(_stats.extra_used_bytes_ref(),
+ _stats.extra_hold_bytes_ref()));
+}
void
BufferState::fallbackResize(uint32_t bufferId,
@@ -211,13 +225,13 @@ BufferState::fallbackResize(uint32_t bufferId,
std::atomic_thread_fence(std::memory_order_release);
_buffer = std::move(newBuffer);
buffer.store(_buffer.get(), std::memory_order_release);
- _allocElems.store(alloc.elements, std::memory_order_relaxed);
+ _stats.set_alloc_elems(alloc.elements);
}
void
BufferState::resume_primary_buffer(uint32_t buffer_id)
{
- getTypeHandler()->resume_primary_buffer(buffer_id, &_usedElems, &_deadElems);
+ getTypeHandler()->resume_primary_buffer(buffer_id, &_stats.used_elems_ref(), &_stats.dead_elems_ref());
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/bufferstate.h b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
index 8f32a93b487..3f023b41c51 100644
--- a/vespalib/src/vespa/vespalib/datastore/bufferstate.h
+++ b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
@@ -3,6 +3,7 @@
#pragma once
#include "buffer_free_list.h"
+#include "buffer_stats.h"
#include "buffer_type.h"
#include "entryref.h"
#include <vespa/vespalib/util/generationhandler.h>
@@ -38,17 +39,7 @@ public:
};
private:
- std::atomic<ElemCount> _usedElems;
- std::atomic<ElemCount> _allocElems;
- std::atomic<ElemCount> _deadElems;
- std::atomic<ElemCount> _holdElems;
- // Number of bytes that are heap allocated by elements that are stored in this buffer.
- // For simple types this is 0.
- std::atomic<size_t> _extraUsedBytes;
- // Number of bytes that are heap allocated by elements that are stored in this buffer and is now on hold.
- // For simple types this is 0.
- std::atomic<size_t> _extraHoldBytes;
-
+ InternalBufferStats _stats;
BufferFreeList _free_list;
std::atomic<BufferTypeBase*> _typeHandler;
Alloc _buffer;
@@ -91,36 +82,38 @@ public:
*/
void onFree(std::atomic<void*>& buffer);
-
/**
- * Disable hold of elements, just mark then as dead without cleanup.
+ * Disable hold of elements, just mark elements as dead without cleanup.
* Typically used when tearing down a data structure in a controlled manner.
*/
void disableElemHoldList();
- BufferFreeList& free_list() { return _free_list; }
- const BufferFreeList& free_list() const { return _free_list; }
+ /**
+ * Update stats to reflect that the given elements are put on hold.
+ * Returns true if the element hold list is disabled for this buffer.
+ */
+ bool hold_elems(size_t num_elems, size_t extra_bytes);
- size_t size() const { return _usedElems.load(std::memory_order_relaxed); }
- size_t capacity() const { return _allocElems.load(std::memory_order_relaxed); }
- size_t remaining() const { return capacity() - size(); }
- void pushed_back(size_t numElems) {
- pushed_back(numElems, 0);
- }
- void pushed_back(size_t numElems, size_t extraBytes) {
- _usedElems.store(size() + numElems, std::memory_order_relaxed);
- _extraUsedBytes.store(getExtraUsedBytes() + extraBytes, std::memory_order_relaxed);
- }
- void cleanHold(void *buffer, size_t offset, ElemCount numElems) {
- getTypeHandler()->cleanHold(buffer, offset, numElems, BufferTypeBase::CleanContext(_extraUsedBytes, _extraHoldBytes));
- }
+ /**
+ * Free the given elements and update stats accordingly.
+ *
+ * The given entry ref is put on the free list (if enabled).
+ * Hold cleaning of elements is executed on the buffer type.
+ */
+ void free_elems(EntryRef ref, size_t num_elems, size_t ref_offset);
+
+ BufferStats& stats() { return _stats; }
+ const BufferStats& stats() const { return _stats; }
+
+ void enable_free_list(FreeList& type_free_list) { _free_list.enable(type_free_list); }
+ void disable_free_list() { _free_list.disable(); }
+
+ size_t size() const { return _stats.size(); }
+ size_t capacity() const { return _stats.capacity(); }
+ size_t remaining() const { return _stats.remaining(); }
void dropBuffer(uint32_t buffer_id, std::atomic<void*>& buffer);
uint32_t getTypeId() const { return _typeId; }
uint32_t getArraySize() const { return _arraySize; }
- size_t getDeadElems() const { return _deadElems.load(std::memory_order_relaxed); }
- size_t getHoldElems() const { return _holdElems.load(std::memory_order_relaxed); }
- size_t getExtraUsedBytes() const { return _extraUsedBytes.load(std::memory_order_relaxed); }
- size_t getExtraHoldBytes() const { return _extraHoldBytes.load(std::memory_order_relaxed); }
bool getCompacting() const { return _compacting; }
void setCompacting() { _compacting = true; }
uint32_t get_used_arrays() const noexcept { return size() / _arraySize; }
@@ -136,15 +129,6 @@ public:
const BufferTypeBase *getTypeHandler() const { return _typeHandler.load(std::memory_order_relaxed); }
BufferTypeBase *getTypeHandler() { return _typeHandler.load(std::memory_order_relaxed); }
- void incDeadElems(size_t value) { _deadElems.store(getDeadElems() + value, std::memory_order_relaxed); }
- void incHoldElems(size_t value) { _holdElems.store(getHoldElems() + value, std::memory_order_relaxed); }
- void decHoldElems(size_t value);
- void incExtraUsedBytes(size_t value) { _extraUsedBytes.store(getExtraUsedBytes() + value, std::memory_order_relaxed); }
- void incExtraHoldBytes(size_t value) {
- _extraHoldBytes.store(getExtraHoldBytes() + value, std::memory_order_relaxed);
- }
-
- bool hasDisabledElemHoldList() const { return _disableElemHoldList; }
void resume_primary_buffer(uint32_t buffer_id);
};
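The net effect of the bufferstate changes above is that all counter bookkeeping now goes through a single stats object, and the free/hold logic moved behind hold_elems()/free_elems(). A minimal standalone sketch of that control flow, with toy types standing in for the real vespalib classes (MiniBufferStats/MiniBufferState are illustrative names, not part of the patch):

    // Standalone analog of the new BufferState counter flow (simplified).
    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    struct MiniBufferStats {
        std::atomic<size_t> used{0}, dead{0}, hold{0};
        void pushed_back(size_t n) { used.store(used.load(std::memory_order_relaxed) + n, std::memory_order_relaxed); }
        void inc_dead(size_t n)    { dead.store(dead.load(std::memory_order_relaxed) + n, std::memory_order_relaxed); }
        void inc_hold(size_t n)    { hold.store(hold.load(std::memory_order_relaxed) + n, std::memory_order_relaxed); }
        void dec_hold(size_t n)    { hold.store(hold.load(std::memory_order_relaxed) - n, std::memory_order_relaxed); }
    };

    struct MiniBufferState {
        MiniBufferStats stats;
        bool hold_list_disabled = false;
        // Mirrors hold_elems(): returns true when holding is disabled,
        // in which case the elements are accounted as dead immediately.
        bool hold_elems(size_t n) {
            if (hold_list_disabled) { stats.inc_dead(n); return true; }
            stats.inc_hold(n);
            return false;
        }
        // Mirrors free_elems(): held elements leave the hold count and become dead.
        void free_elems(size_t n) { stats.inc_dead(n); stats.dec_hold(n); }
    };

    int main() {
        MiniBufferState state;
        state.stats.pushed_back(4);   // allocate 4 elements
        if (!state.hold_elems(4)) {   // remove them -> put on hold
            state.free_elems(4);      // later, once no reader can see them
        }
        std::printf("used=%zu dead=%zu hold=%zu\n",
                    state.stats.used.load(), state.stats.dead.load(), state.stats.hold.load());
    }

The boolean return matches the caller in datastore.hpp below: only when hold_elems() returns false is the entry ref inserted into the hold list for deferred freeing.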
diff --git a/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidates.cpp b/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidates.cpp
index 41c216a9684..dd47a159e9b 100644
--- a/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidates.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidates.cpp
@@ -13,7 +13,7 @@ CompactBufferCandidates::CompactBufferCandidates(uint32_t num_buffers, uint32_t
_max_buffers(std::max(max_buffers, 1u)),
_active_buffers_ratio(std::min(1.0, std::max(0.0001, active_buffers_ratio))),
_ratio(ratio),
- _slack(slack),
+ _slack(_ratio == 0.0 ? 0u : slack),
_free_buffers(0)
{
_candidates.reserve(num_buffers);
diff --git a/vespalib/src/vespa/vespalib/datastore/compaction_context.cpp b/vespalib/src/vespa/vespalib/datastore/compaction_context.cpp
index 65e028119a2..1ce6401605e 100644
--- a/vespalib/src/vespa/vespalib/datastore/compaction_context.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/compaction_context.cpp
@@ -25,7 +25,7 @@ CompactionContext::compact(vespalib::ArrayRef<AtomicEntryRef> refs)
for (auto &atomic_entry_ref : refs) {
auto ref = atomic_entry_ref.load_relaxed();
if (ref.valid() && _filter.has(ref)) {
- EntryRef newRef = _store.move(ref);
+ EntryRef newRef = _store.move_on_compact(ref);
atomic_entry_ref.store_release(newRef);
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp
index 2dbd501f78e..4eb4ff16864 100644
--- a/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp
@@ -5,6 +5,7 @@
#include <vespa/vespalib/util/memoryusage.h>
#include <vespa/vespalib/util/address_space.h>
#include <iostream>
+#include <limits>
namespace vespalib::datastore {
@@ -34,4 +35,10 @@ std::ostream& operator<<(std::ostream& os, const CompactionStrategy& compaction_
return os;
}
+CompactionStrategy
+CompactionStrategy::make_compact_all_active_buffers_strategy()
+{
+ return CompactionStrategy(0.0, 0.0, std::numeric_limits<uint32_t>::max(), 1.0);
+}
+
}
diff --git a/vespalib/src/vespa/vespalib/datastore/compaction_strategy.h b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.h
index 2bcf30fc6fc..f78e123e5de 100644
--- a/vespalib/src/vespa/vespalib/datastore/compaction_strategy.h
+++ b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.h
@@ -74,6 +74,7 @@ public:
bool should_compact_memory(const MemoryUsage& memory_usage) const;
bool should_compact_address_space(const AddressSpace& address_space) const;
CompactionSpec should_compact(const MemoryUsage& memory_usage, const AddressSpace& address_space) const;
+ static CompactionStrategy make_compact_all_active_buffers_strategy();
};
std::ostream& operator<<(std::ostream& os, const CompactionStrategy& compaction_strategy);
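make_compact_all_active_buffers_strategy() sets the dead-ratio knobs to 0.0, and the CompactBufferCandidates change above forces the slack to 0 whenever the ratio is 0.0. Assuming a qualification test of the shape dead >= used * ratio + slack (the exact test lives in CompactBufferCandidates and is not shown in this diff), the sketch below shows why both knobs must be zeroed for every active buffer to qualify:

    // Why slack must follow ratio down to zero (assumed test shape).
    #include <cstddef>
    #include <cstdio>

    bool qualifies(size_t used, size_t dead, double ratio, size_t slack) {
        return dead >= static_cast<size_t>(used * ratio) + slack;
    }

    int main() {
        // Normal strategy: a buffer with little dead space is skipped.
        std::printf("%d\n", qualifies(1000, 10, 0.05, 64)); // 0: 10 < 50 + 64
        // Compact-all strategy: ratio 0.0 and slack forced to 0,
        // so every active buffer qualifies.
        std::printf("%d\n", qualifies(1000, 10, 0.0, 0));   // 1
    }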
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.cpp b/vespalib/src/vespa/vespalib/datastore/datastore.cpp
index 15686a9f79d..76d622f3704 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.cpp
@@ -1,7 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "datastore.hpp"
-#include <vespa/vespalib/util/array.hpp>
#include <vespa/vespalib/util/rcuvector.hpp>
namespace vespalib::datastore {
@@ -10,7 +9,6 @@ template class DataStoreT<EntryRefT<22> >;
}
-template void vespalib::Array<vespalib::datastore::DataStoreBase::ElemHold1ListElem>::increase(size_t);
template class vespalib::RcuVector<vespalib::datastore::EntryRef>;
template class vespalib::RcuVectorBase<vespalib::datastore::EntryRef>;
template class vespalib::RcuVector<vespalib::datastore::AtomicEntryRef>;
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.h b/vespalib/src/vespa/vespalib/datastore/datastore.h
index 86c39b547d3..95f47e98ef5 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.h
@@ -27,7 +27,7 @@ template <typename RefT = EntryRefT<22> >
class DataStoreT : public DataStoreBase
{
private:
- void free_elem_internal(EntryRef ref, size_t numElems, bool was_held);
+ void free_elem_internal(EntryRef ref, size_t numElems);
public:
typedef RefT RefType;
@@ -38,22 +38,6 @@ public:
~DataStoreT() override;
/**
- * Increase number of dead elements in buffer.
- *
- * @param ref Reference to dead stored features
- * @param dead Number of newly dead elements
- */
- void incDead(EntryRef ref, size_t deadElems) {
- RefType intRef(ref);
- DataStoreBase::incDead(intRef.bufferId(), deadElems);
- }
-
- /**
- * Free element(s).
- */
- void freeElem(EntryRef ref, size_t numElems) { free_elem_internal(ref, numElems, false); }
-
- /**
* Hold element(s).
*/
void holdElem(EntryRef ref, size_t numElems) {
@@ -61,14 +45,9 @@ public:
}
void holdElem(EntryRef ref, size_t numElems, size_t extraBytes);
- /**
- * Trim elem hold list, freeing elements that no longer needs to be held.
- *
- * @param usedGen lowest generation that is still used.
- */
- void trimElemHoldList(generation_t usedGen) override;
+ void reclaim_entry_refs(generation_t oldest_used_gen) override;
- void clearElemHoldList() override;
+ void reclaim_all_entry_refs() override;
bool getCompacting(EntryRef ref) const {
return getBufferState(RefType(ref).bufferId()).getCompacting();
@@ -97,7 +76,6 @@ class DataStore : public DataStoreT<RefT>
protected:
typedef DataStoreT<RefT> ParentType;
using ParentType::ensureBufferCapacity;
- using ParentType::_primary_buffer_ids;
using ParentType::getEntry;
using ParentType::dropBuffers;
using ParentType::init_primary_buffers;
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.hpp b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
index 5b8df719915..bfb63954875 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
@@ -7,7 +7,7 @@
#include "free_list_allocator.hpp"
#include "free_list_raw_allocator.hpp"
#include "raw_allocator.hpp"
-#include <vespa/vespalib/util/array.hpp>
+#include <vespa/vespalib/util/generation_hold_list.hpp>
namespace vespalib::datastore {
@@ -22,23 +22,11 @@ DataStoreT<RefT>::~DataStoreT() = default;
template <typename RefT>
void
-DataStoreT<RefT>::free_elem_internal(EntryRef ref, size_t numElems, bool was_held)
+DataStoreT<RefT>::free_elem_internal(EntryRef ref, size_t numElems)
{
RefType intRef(ref);
BufferState &state = getBufferState(intRef.bufferId());
- if (state.isActive()) {
- if (state.free_list().enabled() && (numElems == state.getArraySize())) {
- state.free_list().push_entry(ref);
- }
- } else {
- assert(state.isOnHold() && was_held);
- }
- state.incDeadElems(numElems);
- if (was_held) {
- state.decHoldElems(numElems);
- }
- state.cleanHold(getBuffer(intRef.bufferId()),
- intRef.offset() * state.getArraySize(), numElems);
+ state.free_elems(ref, numElems, intRef.offset());
}
template <typename RefT>
@@ -47,48 +35,27 @@ DataStoreT<RefT>::holdElem(EntryRef ref, size_t numElems, size_t extraBytes)
{
RefType intRef(ref);
BufferState &state = getBufferState(intRef.bufferId());
- assert(state.isActive());
- if (state.hasDisabledElemHoldList()) {
- state.incDeadElems(numElems);
- return;
+ if (!state.hold_elems(numElems, extraBytes)) {
+ _entry_ref_hold_list.insert({ref, numElems});
}
- _elemHold1List.push_back(ElemHold1ListElem(ref, numElems));
- state.incHoldElems(numElems);
- state.incExtraHoldBytes(extraBytes);
}
template <typename RefT>
void
-DataStoreT<RefT>::trimElemHoldList(generation_t usedGen)
+DataStoreT<RefT>::reclaim_entry_refs(generation_t oldest_used_gen)
{
- ElemHold2List &elemHold2List = _elemHold2List;
-
- ElemHold2List::iterator it(elemHold2List.begin());
- ElemHold2List::iterator ite(elemHold2List.end());
- uint32_t freed = 0;
- for (; it != ite; ++it) {
- if (static_cast<sgeneration_t>(it->_generation - usedGen) >= 0)
- break;
- free_elem_internal(it->_ref, it->_len, true);
- ++freed;
- }
- if (freed != 0) {
- elemHold2List.erase(elemHold2List.begin(), it);
- }
+ _entry_ref_hold_list.reclaim(oldest_used_gen, [this](const auto& elem) {
+ free_elem_internal(elem.ref, elem.num_elems);
+ });
}
template <typename RefT>
void
-DataStoreT<RefT>::clearElemHoldList()
+DataStoreT<RefT>::reclaim_all_entry_refs()
{
- ElemHold2List &elemHold2List = _elemHold2List;
-
- ElemHold2List::iterator it(elemHold2List.begin());
- ElemHold2List::iterator ite(elemHold2List.end());
- for (; it != ite; ++it) {
- free_elem_internal(it->_ref, it->_len, true);
- }
- elemHold2List.clear();
+ _entry_ref_hold_list.reclaim_all([this](const auto& elem) {
+ free_elem_internal(elem.ref, elem.num_elems);
+ });
}
template <typename RefT>
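The two hand-written loops over _elemHold2List above collapse into GenerationHoldList::reclaim with a free callback. A minimal standalone analog of the two-phase hold list being adopted (an assumed shape for illustration; the real template lives in vespalib/util/generation_hold_list.h):

    // Simplified two-phase hold list: insert -> assign_generation -> reclaim.
    #include <cstdint>
    #include <cstdio>
    #include <deque>
    #include <functional>
    #include <utility>
    #include <vector>

    template <typename T>
    class MiniHoldList {
        std::vector<T> _phase1;                      // inserted, no generation yet
        std::deque<std::pair<T, uint64_t>> _phase2;  // tagged with a generation
    public:
        void insert(T elem) { _phase1.push_back(elem); }
        void assign_generation(uint64_t current_gen) {
            for (auto& e : _phase1) _phase2.emplace_back(e, current_gen);
            _phase1.clear();
        }
        void reclaim(uint64_t oldest_used_gen, const std::function<void(const T&)>& free_fn) {
            // Free everything tagged with a generation older than the
            // oldest generation any reader still uses.
            while (!_phase2.empty() && _phase2.front().second < oldest_used_gen) {
                free_fn(_phase2.front().first);
                _phase2.pop_front();
            }
        }
    };

    int main() {
        MiniHoldList<int> holds;
        holds.insert(42);
        holds.assign_generation(7);          // writer publishes generation 7
        holds.reclaim(7, [](const int&) {}); // a reader may still be at 7: nothing freed
        holds.reclaim(8, [](const int& v) { std::printf("freed %d\n", v); });
    }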
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
index 67749ee913d..a14082e2d5c 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
@@ -5,7 +5,7 @@
#include "compacting_buffers.h"
#include "compaction_spec.h"
#include "compaction_strategy.h"
-#include <vespa/vespalib/util/array.hpp>
+#include <vespa/vespalib/util/generation_hold_list.hpp>
#include <vespa/vespalib/util/stringfmt.h>
#include <algorithm>
#include <limits>
@@ -16,6 +16,10 @@ LOG_SETUP(".vespalib.datastore.datastorebase");
using vespalib::GenerationHeldBase;
+namespace vespalib {
+template class GenerationHoldList<datastore::DataStoreBase::EntryRefHoldElem, false, true>;
+}
+
namespace vespalib::datastore {
namespace {
@@ -36,7 +40,7 @@ constexpr size_t TOO_DEAD_SLACK = 0x4000u;
bool
primary_buffer_too_dead(const BufferState &state)
{
- size_t deadElems = state.getDeadElems();
+ size_t deadElems = state.stats().dead_elems();
size_t deadBytes = deadElems * state.getArraySize();
return ((deadBytes >= TOO_DEAD_SLACK) && (deadElems * 2 >= state.size()));
}
@@ -88,8 +92,7 @@ DataStoreBase::DataStoreBase(uint32_t numBuffers, uint32_t offset_bits, size_t m
_free_lists(),
_freeListsEnabled(false),
_initializing(false),
- _elemHold1List(),
- _elemHold2List(),
+ _entry_ref_hold_list(),
_numBuffers(numBuffers),
_offset_bits(offset_bits),
_hold_buffer_count(0u),
@@ -102,9 +105,6 @@ DataStoreBase::DataStoreBase(uint32_t numBuffers, uint32_t offset_bits, size_t m
DataStoreBase::~DataStoreBase()
{
disableFreeLists();
-
- assert(_elemHold1List.empty());
- assert(_elemHold2List.empty());
}
void
@@ -221,22 +221,10 @@ DataStoreBase::addType(BufferTypeBase *typeHandler)
}
void
-DataStoreBase::transferElemHoldList(generation_t generation)
-{
- ElemHold2List &elemHold2List = _elemHold2List;
- for (const ElemHold1ListElem & elemHold1 : _elemHold1List) {
- elemHold2List.push_back(ElemHold2ListElem(elemHold1, generation));
- }
- _elemHold1List.clear();
-}
-
-void
-DataStoreBase::transferHoldLists(generation_t generation)
+DataStoreBase::assign_generation(generation_t current_gen)
{
- _genHolder.transferHoldLists(generation);
- if (hasElemHold1()) {
- transferElemHoldList(generation);
- }
+ _genHolder.assign_generation(current_gen);
+ _entry_ref_hold_list.assign_generation(current_gen);
}
void
@@ -248,18 +236,18 @@ DataStoreBase::doneHoldBuffer(uint32_t bufferId)
}
void
-DataStoreBase::trimHoldLists(generation_t usedGen)
+DataStoreBase::reclaim_memory(generation_t oldest_used_gen)
{
- trimElemHoldList(usedGen); // Trim entries before trimming buffers
- _genHolder.trimHoldLists(usedGen);
+ reclaim_entry_refs(oldest_used_gen); // Reclaim entry refs before reclaiming buffers
+ _genHolder.reclaim(oldest_used_gen);
}
void
-DataStoreBase::clearHoldLists()
+DataStoreBase::reclaim_all_memory()
{
- transferElemHoldList(0);
- clearElemHoldList();
- _genHolder.clearHoldLists();
+ _entry_ref_hold_list.assign_generation(0);
+ reclaim_all_entry_refs();
+ _genHolder.reclaim_all();
}
void
@@ -269,13 +257,13 @@ DataStoreBase::dropBuffers()
for (uint32_t bufferId = 0; bufferId < numBuffers; ++bufferId) {
_states[bufferId].dropBuffer(bufferId, _buffers[bufferId].get_atomic_buffer());
}
- _genHolder.clearHoldLists();
+ _genHolder.reclaim_all();
}
vespalib::MemoryUsage
DataStoreBase::getMemoryUsage() const
{
- MemStats stats = getMemStats();
+ auto stats = getMemStats();
vespalib::MemoryUsage usage;
usage.setAllocatedBytes(stats._allocBytes);
usage.setUsedBytes(stats._usedBytes);
@@ -289,18 +277,18 @@ DataStoreBase::holdBuffer(uint32_t bufferId)
{
_states[bufferId].onHold(bufferId);
size_t holdBytes = 0u; // getMemStats() still accounts held buffers
- GenerationHeldBase::UP hold(new BufferHold(holdBytes, *this, bufferId));
- _genHolder.hold(std::move(hold));
+ auto hold = std::make_unique<BufferHold>(holdBytes, *this, bufferId);
+ _genHolder.insert(std::move(hold));
}
void
DataStoreBase::enableFreeLists()
{
- for (BufferState & bState : _states) {
+ for (auto& bState : _states) {
if (!bState.isActive() || bState.getCompacting()) {
continue;
}
- bState.free_list().enable(_free_lists[bState.getTypeId()]);
+ bState.enable_free_list(_free_lists[bState.getTypeId()]);
}
_freeListsEnabled = true;
}
@@ -308,8 +296,8 @@ DataStoreBase::enableFreeLists()
void
DataStoreBase::disableFreeLists()
{
- for (BufferState & bState : _states) {
- bState.free_list().disable();
+ for (auto& bState : _states) {
+ bState.disable_free_list();
}
_freeListsEnabled = false;
}
@@ -321,17 +309,11 @@ DataStoreBase::enableFreeList(uint32_t bufferId)
if (_freeListsEnabled &&
state.isActive() &&
!state.getCompacting()) {
- state.free_list().enable(_free_lists[state.getTypeId()]);
+ state.enable_free_list(_free_lists[state.getTypeId()]);
}
}
void
-DataStoreBase::disableFreeList(uint32_t bufferId)
-{
- _states[bufferId].free_list().disable();
-}
-
-void
DataStoreBase::disableElemHoldList()
{
for (auto &state : _states) {
@@ -341,47 +323,29 @@ DataStoreBase::disableElemHoldList()
}
}
-namespace {
-
-void
-add_buffer_state_to_mem_stats(const BufferState& state, size_t elementSize, DataStoreBase::MemStats& stats)
-{
- size_t extra_used_bytes = state.getExtraUsedBytes();
- stats._allocElems += state.capacity();
- stats._usedElems += state.size();
- stats._deadElems += state.getDeadElems();
- stats._holdElems += state.getHoldElems();
- stats._allocBytes += (state.capacity() * elementSize) + extra_used_bytes;
- stats._usedBytes += (state.size() * elementSize) + extra_used_bytes;
- stats._deadBytes += state.getDeadElems() * elementSize;
- stats._holdBytes += (state.getHoldElems() * elementSize) + state.getExtraHoldBytes();
-}
-
-}
-
-DataStoreBase::MemStats
+MemoryStats
DataStoreBase::getMemStats() const
{
- MemStats stats;
+ MemoryStats stats;
- for (const BufferState & bState: _states) {
+ for (const auto& bState: _states) {
auto typeHandler = bState.getTypeHandler();
- BufferState::State state = bState.getState();
+ auto state = bState.getState();
if ((state == BufferState::State::FREE) || (typeHandler == nullptr)) {
++stats._freeBuffers;
} else if (state == BufferState::State::ACTIVE) {
size_t elementSize = typeHandler->elementSize();
++stats._activeBuffers;
- add_buffer_state_to_mem_stats(bState, elementSize, stats);
+ bState.stats().add_to_mem_stats(elementSize, stats);
} else if (state == BufferState::State::HOLD) {
size_t elementSize = typeHandler->elementSize();
++stats._holdBuffers;
- add_buffer_state_to_mem_stats(bState, elementSize, stats);
+ bState.stats().add_to_mem_stats(elementSize, stats);
} else {
LOG_ABORT("should not be reached");
}
}
- size_t genHolderHeldBytes = _genHolder.getHeldBytes();
+ size_t genHolderHeldBytes = _genHolder.get_held_bytes();
stats._holdBytes += genHolderHeldBytes;
stats._allocBytes += genHolderHeldBytes;
stats._usedBytes += genHolderHeldBytes;
@@ -394,11 +358,11 @@ DataStoreBase::getAddressSpaceUsage() const
size_t usedArrays = 0;
size_t deadArrays = 0;
size_t limitArrays = 0;
- for (const BufferState & bState: _states) {
+ for (const auto& bState: _states) {
if (bState.isActive()) {
uint32_t arraySize = bState.getArraySize();
usedArrays += bState.size() / arraySize;
- deadArrays += bState.getDeadElems() / arraySize;
+ deadArrays += bState.stats().dead_elems() / arraySize;
limitArrays += bState.capacity() / arraySize;
} else if (bState.isOnHold()) {
uint32_t arraySize = bState.getArraySize();
@@ -410,7 +374,7 @@ DataStoreBase::getAddressSpaceUsage() const
LOG_ABORT("should not be reached");
}
}
- return vespalib::AddressSpace(usedArrays, deadArrays, limitArrays);
+ return {usedArrays, deadArrays, limitArrays};
}
void
@@ -427,26 +391,6 @@ DataStoreBase::onActive(uint32_t bufferId, uint32_t typeId, size_t elemsNeeded)
enableFreeList(bufferId);
}
-std::vector<uint32_t>
-DataStoreBase::startCompact(uint32_t typeId)
-{
- std::vector<uint32_t> toHold;
-
- for (uint32_t bufferId = 0; bufferId < _numBuffers; ++bufferId) {
- BufferState &state = getBufferState(bufferId);
- if (state.isActive() &&
- state.getTypeId() == typeId &&
- !state.getCompacting()) {
- state.setCompacting();
- toHold.push_back(bufferId);
- disableFreeList(bufferId);
- }
- }
- switch_primary_buffer(typeId, 0u);
- inc_compaction_count();
- return toHold;
-}
-
void
DataStoreBase::finishCompact(const std::vector<uint32_t> &toHold)
{
@@ -467,52 +411,14 @@ DataStoreBase::fallbackResize(uint32_t bufferId, size_t elemsNeeded)
state.fallbackResize(bufferId, elemsNeeded,
_buffers[bufferId].get_atomic_buffer(),
toHoldBuffer);
- GenerationHeldBase::UP
- hold(new FallbackHold(oldAllocElems * elementSize,
- std::move(toHoldBuffer),
- oldUsedElems,
- state.getTypeHandler(),
- state.getTypeId()));
+ auto hold = std::make_unique<FallbackHold>(oldAllocElems * elementSize,
+ std::move(toHoldBuffer),
+ oldUsedElems,
+ state.getTypeHandler(),
+ state.getTypeId());
if (!_initializing) {
- _genHolder.hold(std::move(hold));
- }
-}
-
-uint32_t
-DataStoreBase::startCompactWorstBuffer(uint32_t typeId)
-{
- uint32_t buffer_id = get_primary_buffer_id(typeId);
- const BufferTypeBase *typeHandler = _typeHandlers[typeId];
- assert(typeHandler->get_active_buffers_count() >= 1u);
- if (typeHandler->get_active_buffers_count() == 1u) {
- // Single active buffer for type, no need for scan
- markCompacting(buffer_id);
- return buffer_id;
- }
- // Multiple active buffers for type, must perform full scan
- return startCompactWorstBuffer(buffer_id,
- [=](const BufferState &state) { return state.isActive(typeId); });
-}
-
-template <typename BufferStateActiveFilter>
-uint32_t
-DataStoreBase::startCompactWorstBuffer(uint32_t initWorstBufferId, BufferStateActiveFilter &&filterFunc)
-{
- uint32_t worstBufferId = initWorstBufferId;
- size_t worstDeadElems = 0;
- for (uint32_t bufferId = 0; bufferId < _numBuffers; ++bufferId) {
- const auto &state = getBufferState(bufferId);
- if (filterFunc(state)) {
- assert(!state.getCompacting());
- size_t deadElems = state.getDeadElems() - state.getTypeHandler()->getReservedElements(bufferId);
- if (deadElems > worstDeadElems) {
- worstBufferId = bufferId;
- worstDeadElems = deadElems;
- }
- }
+ _genHolder.insert(std::move(hold));
}
- markCompacting(worstBufferId);
- return worstBufferId;
}
void
@@ -527,7 +433,7 @@ DataStoreBase::markCompacting(uint32_t bufferId)
assert(!state.getCompacting());
state.setCompacting();
state.disableElemHoldList();
- state.free_list().disable();
+ state.disable_free_list();
inc_compaction_count();
}
@@ -535,9 +441,15 @@ std::unique_ptr<CompactingBuffers>
DataStoreBase::start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
{
// compact memory usage
- CompactBufferCandidates elem_buffers(_numBuffers, compaction_strategy.get_max_buffers(), compaction_strategy.get_active_buffers_ratio(), compaction_strategy.getMaxDeadBytesRatio() / 2, CompactionStrategy::DEAD_BYTES_SLACK);
+ CompactBufferCandidates elem_buffers(_numBuffers, compaction_strategy.get_max_buffers(),
+ compaction_strategy.get_active_buffers_ratio(),
+ compaction_strategy.getMaxDeadBytesRatio() / 2,
+ CompactionStrategy::DEAD_BYTES_SLACK);
// compact address space
- CompactBufferCandidates array_buffers(_numBuffers, compaction_strategy.get_max_buffers(), compaction_strategy.get_active_buffers_ratio(), compaction_strategy.getMaxDeadAddressSpaceRatio() / 2, CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK);
+ CompactBufferCandidates array_buffers(_numBuffers, compaction_strategy.get_max_buffers(),
+ compaction_strategy.get_active_buffers_ratio(),
+ compaction_strategy.getMaxDeadAddressSpaceRatio() / 2,
+ CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK);
uint32_t free_buffers = 0;
for (uint32_t bufferId = 0; bufferId < _numBuffers; ++bufferId) {
const auto &state = getBufferState(bufferId);
@@ -546,7 +458,7 @@ DataStoreBase::start_compact_worst_buffers(CompactionSpec compaction_spec, const
uint32_t arraySize = typeHandler->getArraySize();
uint32_t reservedElements = typeHandler->getReservedElements(bufferId);
size_t used_elems = state.size();
- size_t deadElems = state.getDeadElems() - reservedElements;
+ size_t deadElems = state.stats().dead_elems() - reservedElements;
if (compaction_spec.compact_memory()) {
elem_buffers.add(bufferId, used_elems, deadElems);
}
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
index d83b3a84847..598f0872253 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
@@ -4,12 +4,14 @@
#include "bufferstate.h"
#include "free_list.h"
+#include "memory_stats.h"
#include <vespa/vespalib/util/address_space.h>
#include <vespa/vespalib/util/generationholder.h>
+#include <vespa/vespalib/util/generation_hold_list.h>
#include <vespa/vespalib/util/memoryusage.h>
-#include <vector>
-#include <deque>
#include <atomic>
+#include <deque>
+#include <vector>
namespace vespalib::datastore {
@@ -24,25 +26,19 @@ class CompactionStrategy;
*/
class DataStoreBase
{
-public:
- /**
- * Hold list before freeze, before knowing how long elements must be held.
- */
- class ElemHold1ListElem
- {
- public:
- EntryRef _ref;
- size_t _len; // Aligned length
-
- ElemHold1ListElem(EntryRef ref, size_t len)
- : _ref(ref),
- _len(len)
- { }
+protected:
+ struct EntryRefHoldElem {
+ EntryRef ref;
+ size_t num_elems;
+
+ EntryRefHoldElem(EntryRef ref_in, size_t num_elems_in)
+ : ref(ref_in),
+ num_elems(num_elems_in)
+ {}
};
-protected:
+ using EntryRefHoldList = GenerationHoldList<EntryRefHoldElem, false, true>;
using generation_t = vespalib::GenerationHandler::generation_t;
- using sgeneration_t = vespalib::GenerationHandler::sgeneration_t;
private:
class BufferAndTypeId {
@@ -59,32 +55,16 @@ private:
uint32_t _typeId;
};
std::vector<BufferAndTypeId> _buffers; // For fast mapping with known types
-protected:
+
// Provides a mapping from typeId -> primary buffer for that type.
// The primary buffer is used for allocations of new element(s) if no available slots are found in free lists.
std::vector<uint32_t> _primary_buffer_ids;
+protected:
void* getBuffer(uint32_t bufferId) { return _buffers[bufferId].get_buffer_relaxed(); }
/**
- * Hold list at freeze, when knowing how long elements must be held
- */
- class ElemHold2ListElem : public ElemHold1ListElem
- {
- public:
- generation_t _generation;
-
- ElemHold2ListElem(const ElemHold1ListElem &hold1, generation_t generation)
- : ElemHold1ListElem(hold1),
- _generation(generation)
- { }
- };
-
- using ElemHold1List = vespalib::Array<ElemHold1ListElem>;
- using ElemHold2List = std::deque<ElemHold2ListElem>;
-
- /**
- * Class used to hold the old buffer as part of fallbackResize().
+ * Class used to hold the entire old buffer as part of fallbackResize().
*/
class FallbackHold : public vespalib::GenerationHeldBase
{
@@ -102,52 +82,6 @@ protected:
class BufferHold;
-public:
- class MemStats
- {
- public:
- size_t _allocElems;
- size_t _usedElems;
- size_t _deadElems;
- size_t _holdElems;
- size_t _allocBytes;
- size_t _usedBytes;
- size_t _deadBytes;
- size_t _holdBytes;
- uint32_t _freeBuffers;
- uint32_t _activeBuffers;
- uint32_t _holdBuffers;
-
- MemStats()
- : _allocElems(0),
- _usedElems(0),
- _deadElems(0),
- _holdElems(0),
- _allocBytes(0),
- _usedBytes(0),
- _deadBytes(0),
- _holdBytes(0),
- _freeBuffers(0),
- _activeBuffers(0),
- _holdBuffers(0)
- { }
-
- MemStats& operator+=(const MemStats &rhs) {
- _allocElems += rhs._allocElems;
- _usedElems += rhs._usedElems;
- _deadElems += rhs._deadElems;
- _holdElems += rhs._holdElems;
- _allocBytes += rhs._allocBytes;
- _usedBytes += rhs._usedBytes;
- _deadBytes += rhs._deadBytes;
- _holdBytes += rhs._holdBytes;
- _freeBuffers += rhs._freeBuffers;
- _activeBuffers += rhs._activeBuffers;
- _holdBuffers += rhs._holdBuffers;
- return *this;
- }
- };
-
private:
std::vector<BufferState> _states;
protected:
@@ -156,10 +90,7 @@ protected:
std::vector<FreeList> _free_lists;
bool _freeListsEnabled;
bool _initializing;
-
- ElemHold1List _elemHold1List;
- ElemHold2List _elemHold2List;
-
+ EntryRefHoldList _entry_ref_hold_list;
const uint32_t _numBuffers;
const uint32_t _offset_bits;
uint32_t _hold_buffer_count;
@@ -174,6 +105,7 @@ protected:
virtual ~DataStoreBase();
+private:
/**
* Get the next buffer id after the given buffer id.
*/
@@ -183,6 +115,7 @@ protected:
ret = 0;
return ret;
}
+protected:
/**
* Get the primary buffer for the given type id.
@@ -194,15 +127,14 @@ protected:
/**
 * Trim elem hold list, freeing elements that no longer need to be held.
*
- * @param usedGen lowest generation that is still used.
+ * @param oldest_used_gen the oldest generation that is still used.
*/
- virtual void trimElemHoldList(generation_t usedGen) = 0;
+ virtual void reclaim_entry_refs(generation_t oldest_used_gen) = 0;
- virtual void clearElemHoldList() = 0;
+ virtual void reclaim_all_entry_refs() = 0;
- template <typename BufferStateActiveFilter>
- uint32_t startCompactWorstBuffer(uint32_t initWorstBufferId, BufferStateActiveFilter &&filterFunc);
void markCompacting(uint32_t bufferId);
+
public:
uint32_t addType(BufferTypeBase *typeHandler);
void init_primary_buffers();
@@ -238,9 +170,11 @@ public:
*/
void switch_primary_buffer(uint32_t typeId, size_t elemsNeeded);
+private:
bool consider_grow_active_buffer(uint32_t type_id, size_t elems_needed);
void switch_or_grow_primary_buffer(uint32_t typeId, size_t elemsNeeded);
+public:
vespalib::MemoryUsage getMemoryUsage() const;
vespalib::AddressSpace getAddressSpaceUsage() const;
@@ -252,31 +186,28 @@ public:
const BufferState &getBufferState(uint32_t bufferId) const { return _states[bufferId]; }
BufferState &getBufferState(uint32_t bufferId) { return _states[bufferId]; }
uint32_t getNumBuffers() const { return _numBuffers; }
- bool hasElemHold1() const { return !_elemHold1List.empty(); }
-
- /**
- * Transfer element holds from hold1 list to hold2 list.
- */
- void transferElemHoldList(generation_t generation);
+public:
/**
- * Transfer holds from hold1 to hold2 lists, assigning generation.
+ * Assign the current generation to data elements put on hold lists since the last time this function was called.
*/
- void transferHoldLists(generation_t generation);
+ void assign_generation(generation_t current_gen);
+private:
/**
* Hold of buffer has ended.
*/
void doneHoldBuffer(uint32_t bufferId);
+public:
/**
- * Trim hold lists, freeing buffers that no longer needs to be held.
+ * Reclaim memory from hold lists, freeing buffers and entry refs that no longer need to be held.
*
- * @param usedGen lowest generation that is still used.
+ * @param oldest_used_gen oldest generation that is still used.
*/
- void trimHoldLists(generation_t usedGen);
+ void reclaim_memory(generation_t oldest_used_gen);
- void clearHoldLists();
+ void reclaim_all_memory();
template <typename EntryType, typename RefType>
EntryType *getEntry(RefType ref) {
@@ -300,12 +231,6 @@ public:
void dropBuffers();
-
- void incDead(uint32_t bufferId, size_t deadElems) {
- BufferState &state = _states[bufferId];
- state.incDeadElems(deadElems);
- }
-
/**
* Enable free list management.
* This only works for fixed size elements.
@@ -317,16 +242,14 @@ public:
*/
void disableFreeLists();
+private:
/**
* Enable free list management.
* This only works for fixed size elements.
*/
void enableFreeList(uint32_t bufferId);
- /**
- * Disable free list management.
- */
- void disableFreeList(uint32_t bufferId);
+public:
void disableElemHoldList();
bool has_free_lists_enabled() const { return _freeListsEnabled; }
@@ -341,7 +264,7 @@ public:
/**
* Returns aggregated memory statistics for all buffers in this data store.
*/
- MemStats getMemStats() const;
+ MemoryStats getMemStats() const;
/**
* Assume that no readers are present while data structure is being initialized.
@@ -359,16 +282,18 @@ private:
void onActive(uint32_t bufferId, uint32_t typeId, size_t elemsNeeded);
void inc_hold_buffer_count();
+
public:
uint32_t getTypeId(uint32_t bufferId) const {
return _buffers[bufferId].getTypeId();
}
- std::vector<uint32_t> startCompact(uint32_t typeId);
-
void finishCompact(const std::vector<uint32_t> &toHold);
+
+private:
void fallbackResize(uint32_t bufferId, size_t elementsNeeded);
+public:
vespalib::GenerationHolder &getGenerationHolder() {
return _genHolder;
}
@@ -378,7 +303,6 @@ public:
return self._genHolder;
}
- uint32_t startCompactWorstBuffer(uint32_t typeId);
std::unique_ptr<CompactingBuffers> start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy &compaction_strategy);
uint64_t get_compaction_count() const { return _compaction_count.load(std::memory_order_relaxed); }
void inc_compaction_count() const { ++_compaction_count; }
@@ -386,3 +310,7 @@ public:
};
}
+
+namespace vespalib {
+extern template class GenerationHoldList<datastore::DataStoreBase::EntryRefHoldElem, false, true>;
+}
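Taken together, the renames in this header map transferHoldLists to assign_generation, trimHoldLists to reclaim_memory, and clearHoldLists to reclaim_all_memory. A toy sketch of the write-side call sequence those names describe (ToyStore and the generation bookkeeping around it are assumptions for illustration only):

    // Write-side lifecycle: remove -> assign_generation -> bump gen -> reclaim.
    #include <cstdint>
    #include <cstdio>

    struct ToyStore {
        uint64_t pending = 0, reclaimable = 0;
        void remove_entry()                          { ++pending; }               // entry goes on hold
        void assign_generation(uint64_t /*gen*/)     { reclaimable += pending; pending = 0; }
        void reclaim_memory(uint64_t /*oldest_gen*/) {
            std::printf("reclaimed %lu entries\n", (unsigned long)reclaimable);
            reclaimable = 0;
        }
    };

    int main() {
        ToyStore store;
        uint64_t current_gen = 1;
        store.remove_entry();                  // readers may still see the entry
        store.assign_generation(current_gen);  // tag pending holds with this generation
        ++current_gen;                         // publish a new generation to readers
        uint64_t oldest_used_gen = current_gen; // no readers left on old generations
        store.reclaim_memory(oldest_used_gen);  // now safe to free the held entries
    }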
diff --git a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp
index 6f001ce3c94..47c64722785 100644
--- a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp
@@ -5,10 +5,15 @@
#include "entry_ref_filter.h"
#include "i_compactable.h"
#include <vespa/vespalib/util/array.hpp>
+#include <vespa/vespalib/util/generation_hold_list.hpp>
#include <vespa/vespalib/util/memoryusage.h>
#include <cassert>
#include <stdexcept>
+namespace vespalib {
+template class GenerationHoldList<uint32_t, false, true>;
+}
+
namespace vespalib::datastore {
FixedSizeHashMap::Node::Node(Node&&)
@@ -30,8 +35,7 @@ FixedSizeHashMap::FixedSizeHashMap(uint32_t modulo, uint32_t capacity, uint32_t
_free_head(no_node_idx),
_free_count(0u),
_hold_count(0u),
- _hold_1_list(),
- _hold_2_list(),
+ _hold_list(),
_num_shards(num_shards)
{
_nodes.reserve(capacity);
@@ -49,7 +53,10 @@ FixedSizeHashMap::FixedSizeHashMap(uint32_t modulo, uint32_t capacity, uint32_t
}
}
-FixedSizeHashMap::~FixedSizeHashMap() = default;
+FixedSizeHashMap::~FixedSizeHashMap()
+{
+ _hold_list.reclaim_all();
+}
void
FixedSizeHashMap::force_add(const EntryComparator& comp, const KvType& kv)
@@ -96,37 +103,6 @@ FixedSizeHashMap::add(const ShardedHashComparator & comp, std::function<EntryRef
return _nodes[node_idx].get_kv();
}
-void
-FixedSizeHashMap::transfer_hold_lists_slow(generation_t generation)
-{
- auto &hold_2_list = _hold_2_list;
- for (uint32_t node_idx : _hold_1_list) {
- hold_2_list.push_back(std::make_pair(generation, node_idx));
- }
- _hold_1_list.clear();
-
-}
-
-
-void
-FixedSizeHashMap::trim_hold_lists_slow(generation_t first_used)
-{
- while (!_hold_2_list.empty()) {
- auto& first = _hold_2_list.front();
- if (static_cast<sgeneration_t>(first.first - first_used) >= 0) {
- break;
- }
- uint32_t node_idx = first.second;
- auto& node = _nodes[node_idx];
- node.get_next_node_idx().store(_free_head, std::memory_order_relaxed);
- _free_head = node_idx;
- ++_free_count;
- --_hold_count;
- node.on_free();
- _hold_2_list.erase(_hold_2_list.begin());
- }
-}
-
FixedSizeHashMap::KvType*
FixedSizeHashMap::remove(const ShardedHashComparator & comp)
{
@@ -145,7 +121,7 @@ FixedSizeHashMap::remove(const ShardedHashComparator & comp)
}
--_count;
++_hold_count;
- _hold_1_list.push_back(node_idx);
+ _hold_list.insert(node_idx);
return &_nodes[node_idx].get_kv();
}
prev_node_idx = node_idx;
@@ -154,6 +130,19 @@ FixedSizeHashMap::remove(const ShardedHashComparator & comp)
return nullptr;
}
+void
+FixedSizeHashMap::reclaim_memory(generation_t oldest_used_gen)
+{
+ _hold_list.reclaim(oldest_used_gen, [this](uint32_t node_idx) {
+ auto& node = _nodes[node_idx];
+ node.get_next_node_idx().store(_free_head, std::memory_order_relaxed);
+ _free_head = node_idx;
+ ++_free_count;
+ --_hold_count;
+ node.on_free();
+ });
+}
+
MemoryUsage
FixedSizeHashMap::get_memory_usage() const
{
@@ -183,7 +172,7 @@ FixedSizeHashMap::foreach_key(const std::function<void(EntryRef)>& callback) con
}
void
-FixedSizeHashMap::move_keys(ICompactable& compactable, const EntryRefFilter &compacting_buffers)
+FixedSizeHashMap::move_keys_on_compact(ICompactable& compactable, const EntryRefFilter &compacting_buffers)
{
for (auto& chain_head : _chain_heads) {
uint32_t node_idx = chain_head.load_relaxed();
@@ -192,7 +181,7 @@ FixedSizeHashMap::move_keys(ICompactable& compactable, const EntryRefFilter &com
EntryRef old_ref = node.get_kv().first.load_relaxed();
assert(old_ref.valid());
if (compacting_buffers.has(old_ref)) {
- EntryRef new_ref = compactable.move(old_ref);
+ EntryRef new_ref = compactable.move_on_compact(old_ref);
node.get_kv().first.store_release(new_ref);
}
node_idx = node.get_next_node_idx().load(std::memory_order_relaxed);
diff --git a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h
index c522bcc3c33..1e13f206adb 100644
--- a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h
+++ b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h
@@ -6,11 +6,12 @@
#include "entry_comparator.h"
#include <vespa/vespalib/util/array.h>
#include <vespa/vespalib/util/arrayref.h>
+#include <vespa/vespalib/util/generation_hold_list.h>
#include <vespa/vespalib/util/generationhandler.h>
-#include <limits>
#include <atomic>
#include <deque>
#include <functional>
+#include <limits>
namespace vespalib {
class GenerationHolder;
@@ -56,8 +57,8 @@ private:
* A reader must own an appropriate GenerationHandler::Guard to ensure
* that memory is held while it can be accessed by reader.
*
- * The writer must update generation and call transfer_hold_lists and
- * trim_hold_lists as needed to free up memory no longer needed by any
+ * The writer must update generation and call assign_generation and
+ * reclaim_memory as needed to free up memory no longer needed by any
* readers.
*/
class FixedSizeHashMap {
@@ -65,7 +66,6 @@ public:
static constexpr uint32_t no_node_idx = std::numeric_limits<uint32_t>::max();
using KvType = std::pair<AtomicEntryRef, AtomicEntryRef>;
using generation_t = GenerationHandler::generation_t;
- using sgeneration_t = GenerationHandler::sgeneration_t;
private:
class ChainHead {
std::atomic<uint32_t> _node_idx;
@@ -103,6 +103,8 @@ private:
const KvType& get_kv() const noexcept { return _kv; }
};
+ using NodeIdxHoldList = GenerationHoldList<uint32_t, false, true>;
+
Array<ChainHead> _chain_heads;
Array<Node> _nodes;
uint32_t _modulo;
@@ -110,12 +112,9 @@ private:
uint32_t _free_head;
uint32_t _free_count;
uint32_t _hold_count;
- Array<uint32_t> _hold_1_list;
- std::deque<std::pair<generation_t, uint32_t>> _hold_2_list;
+ NodeIdxHoldList _hold_list;
uint32_t _num_shards;
- void transfer_hold_lists_slow(generation_t generation);
- void trim_hold_lists_slow(generation_t first_used);
void force_add(const EntryComparator& comp, const KvType& kv);
public:
FixedSizeHashMap(uint32_t module, uint32_t capacity, uint32_t num_shards);
@@ -143,23 +142,17 @@ public:
return nullptr;
}
- void transfer_hold_lists(generation_t generation) {
- if (!_hold_1_list.empty()) {
- transfer_hold_lists_slow(generation);
- }
+ void assign_generation(generation_t current_gen) {
+ _hold_list.assign_generation(current_gen);
}
- void trim_hold_lists(generation_t first_used) {
- if (!_hold_2_list.empty() && static_cast<sgeneration_t>(_hold_2_list.front().first - first_used) < 0) {
- trim_hold_lists_slow(first_used);
- }
- }
+ void reclaim_memory(generation_t oldest_used_gen);
bool full() const noexcept { return _nodes.size() == _nodes.capacity() && _free_count == 0u; }
size_t size() const noexcept { return _count; }
MemoryUsage get_memory_usage() const;
void foreach_key(const std::function<void(EntryRef)>& callback) const;
- void move_keys(ICompactable& compactable, const EntryRefFilter &compacting_buffers);
+ void move_keys_on_compact(ICompactable& compactable, const EntryRefFilter &compacting_buffers);
/*
* Scan dictionary and call normalize function for each value. If
* returned value is different then write back the modified value to
@@ -182,3 +175,7 @@ public:
};
}
+
+namespace vespalib {
+extern template class GenerationHoldList<uint32_t, false, true>;
+}
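FixedSizeHashMap now parks removed node indexes on a GenerationHoldList<uint32_t> and relinks them onto the intrusive free list inside reclaim_memory(), as shown in the .cpp hunk above. A simplified standalone analog of that deferred node reuse (toy names, no sharding or atomics):

    // Deferred reuse of node indexes via a generation-tagged hold queue.
    #include <cstdint>
    #include <cstdio>
    #include <deque>
    #include <utility>
    #include <vector>

    constexpr uint32_t no_node_idx = UINT32_MAX;

    struct MiniNodeMap {
        std::vector<uint32_t> next;                      // per-node free-list link
        uint32_t free_head = no_node_idx;
        std::deque<std::pair<uint32_t, uint64_t>> hold;  // (node index, generation)

        void remove_node(uint32_t idx, uint64_t gen) { hold.emplace_back(idx, gen); }

        void reclaim_memory(uint64_t oldest_used_gen) {
            while (!hold.empty() && hold.front().second < oldest_used_gen) {
                uint32_t idx = hold.front().first;
                next[idx] = free_head;   // relink node onto the free list
                free_head = idx;
                hold.pop_front();
            }
        }
    };

    int main() {
        MiniNodeMap map;
        map.next.resize(4, no_node_idx);
        map.remove_node(2, /*gen=*/5);
        map.reclaim_memory(5);                         // readers may still be at gen 5
        std::printf("free_head=%u\n", map.free_head);  // still no_node_idx
        map.reclaim_memory(6);
        std::printf("free_head=%u\n", map.free_head);  // 2: node is reusable
    }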
diff --git a/vespalib/src/vespa/vespalib/datastore/free_list.cpp b/vespalib/src/vespa/vespalib/datastore/free_list.cpp
index 6c96e51241c..44e49b68df9 100644
--- a/vespalib/src/vespa/vespalib/datastore/free_list.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/free_list.cpp
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "free_list.h"
+#include <algorithm>
#include <cassert>
namespace vespalib::datastore {
diff --git a/vespalib/src/vespa/vespalib/datastore/i_compactable.h b/vespalib/src/vespa/vespalib/datastore/i_compactable.h
index 069d32bb481..31c082e4371 100644
--- a/vespalib/src/vespa/vespalib/datastore/i_compactable.h
+++ b/vespalib/src/vespa/vespalib/datastore/i_compactable.h
@@ -8,12 +8,13 @@ namespace vespalib::datastore {
* Interface for moving an entry as part of compaction of data in old
* buffers into new buffers.
*
- * Old entry is unchanged and not placed on any hold lists since we
- * expect the old buffers to be freed soon anyway.
+ * A copy of the old entry is created and a reference to the new copy is
+ * returned. The old entry is unchanged and not placed on any hold
+ * lists since we expect the old buffers to be freed soon anyway.
*/
struct ICompactable {
virtual ~ICompactable() = default;
- virtual EntryRef move(EntryRef ref) = 0;
+ virtual EntryRef move_on_compact(EntryRef ref) = 0;
};
}
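The rename from move() to move_on_compact() makes the contract in the comment explicit: copy the entry into a new buffer, return the new reference, and leave the old entry untouched because its whole buffer is about to be freed. A toy illustration of that contract (illustrative types, not the vespalib API):

    // move_on_compact(): copy out, return new ref, never touch the source.
    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct ToyCompactable {
        std::vector<std::string> old_buf{"a", "b"};
        std::vector<std::string> new_buf;
        // Analog of ICompactable::move_on_compact(EntryRef)
        size_t move_on_compact(size_t old_ref) {
            new_buf.push_back(old_buf[old_ref]);  // copy; do not modify the source
            return new_buf.size() - 1;            // reference into the new buffer
        }
    };

    int main() {
        ToyCompactable c;
        size_t new_ref = c.move_on_compact(1);
        // Old entry stays readable until the old buffer is dropped.
        std::printf("old=%s new=%s\n", c.old_buf[1].c_str(), c.new_buf[new_ref].c_str());
    }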
diff --git a/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h b/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h
index bb105d41519..5a75a30d182 100644
--- a/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h
+++ b/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h
@@ -25,12 +25,12 @@ public:
using generation_t = vespalib::GenerationHandler::generation_t;
virtual ~IUniqueStoreDictionary() = default;
virtual void freeze() = 0;
- virtual void transfer_hold_lists(generation_t generation) = 0;
- virtual void trim_hold_lists(generation_t firstUsed) = 0;
+ virtual void assign_generation(generation_t current_gen) = 0;
+ virtual void reclaim_memory(generation_t oldest_used_gen) = 0;
virtual UniqueStoreAddResult add(const EntryComparator& comp, std::function<EntryRef(void)> insertEntry) = 0;
virtual EntryRef find(const EntryComparator& comp) = 0;
virtual void remove(const EntryComparator& comp, EntryRef ref) = 0;
- virtual void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) = 0;
+ virtual void move_keys_on_compact(ICompactable& compactable, const EntryRefFilter& compacting_buffers) = 0;
virtual uint32_t get_num_uniques() const = 0;
virtual vespalib::MemoryUsage get_memory_usage() const = 0;
virtual void build(vespalib::ConstArrayRef<EntryRef>, vespalib::ConstArrayRef<uint32_t> ref_counts, std::function<void(EntryRef)> hold) = 0;
diff --git a/vespalib/src/vespa/vespalib/datastore/memory_stats.cpp b/vespalib/src/vespa/vespalib/datastore/memory_stats.cpp
new file mode 100644
index 00000000000..8e060b4cfb4
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/memory_stats.cpp
@@ -0,0 +1,40 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "memory_stats.h"
+
+namespace vespalib::datastore {
+
+MemoryStats::MemoryStats()
+ : _allocElems(0),
+ _usedElems(0),
+ _deadElems(0),
+ _holdElems(0),
+ _allocBytes(0),
+ _usedBytes(0),
+ _deadBytes(0),
+ _holdBytes(0),
+ _freeBuffers(0),
+ _activeBuffers(0),
+ _holdBuffers(0)
+{
+}
+
+MemoryStats&
+MemoryStats::operator+=(const MemoryStats& rhs)
+{
+ _allocElems += rhs._allocElems;
+ _usedElems += rhs._usedElems;
+ _deadElems += rhs._deadElems;
+ _holdElems += rhs._holdElems;
+ _allocBytes += rhs._allocBytes;
+ _usedBytes += rhs._usedBytes;
+ _deadBytes += rhs._deadBytes;
+ _holdBytes += rhs._holdBytes;
+ _freeBuffers += rhs._freeBuffers;
+ _activeBuffers += rhs._activeBuffers;
+ _holdBuffers += rhs._holdBuffers;
+ return *this;
+}
+
+}
+
diff --git a/vespalib/src/vespa/vespalib/datastore/memory_stats.h b/vespalib/src/vespa/vespalib/datastore/memory_stats.h
new file mode 100644
index 00000000000..18d7dd77559
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/memory_stats.h
@@ -0,0 +1,32 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+
+namespace vespalib::datastore {
+
+/**
+ * Represents aggregated memory statistics for all buffers in a data store.
+ */
+class MemoryStats
+{
+public:
+ size_t _allocElems;
+ size_t _usedElems;
+ size_t _deadElems;
+ size_t _holdElems;
+ size_t _allocBytes;
+ size_t _usedBytes;
+ size_t _deadBytes;
+ size_t _holdBytes;
+ uint32_t _freeBuffers;
+ uint32_t _activeBuffers;
+ uint32_t _holdBuffers;
+
+ MemoryStats();
+ MemoryStats& operator+=(const MemoryStats& rhs);
+};
+
+}
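MemoryStats is the old nested MemStats extracted verbatim into its own header. A small usage sketch showing how per-buffer stats compose with operator+=, the way getMemStats() aggregates them (a subset of the fields, reproduced so the snippet is self-contained):

    // Aggregating per-buffer stats into a total with operator+=.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct MemoryStats {
        size_t _usedBytes = 0, _deadBytes = 0, _holdBytes = 0;
        uint32_t _activeBuffers = 0;
        MemoryStats& operator+=(const MemoryStats& rhs) {
            _usedBytes += rhs._usedBytes;
            _deadBytes += rhs._deadBytes;
            _holdBytes += rhs._holdBytes;
            _activeBuffers += rhs._activeBuffers;
            return *this;
        }
    };

    int main() {
        MemoryStats total, per_buffer;
        per_buffer._usedBytes = 4096;
        per_buffer._deadBytes = 512;
        per_buffer._activeBuffers = 1;
        total += per_buffer;  // add_to_mem_stats() feeds per-buffer stats like this
        total += per_buffer;
        std::printf("used=%zu dead=%zu active=%u\n",
                    total._usedBytes, total._deadBytes, total._activeBuffers);
    }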
diff --git a/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
index 0d67bf71c20..7395ef68a73 100644
--- a/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
@@ -28,7 +28,7 @@ RawAllocator<EntryT, RefT>::alloc(size_t numElems, size_t extraElems)
assert((numElems % arraySize) == 0u);
RefT ref((oldBufferSize / arraySize), buffer_id);
EntryT *buffer = _store.getEntryArray<EntryT>(ref, arraySize);
- state.pushed_back(numElems);
+ state.stats().pushed_back(numElems);
return HandleType(ref, buffer);
}
diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp
index 2ae22084472..a28c3071646 100644
--- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp
@@ -32,7 +32,7 @@ ShardedHashMap::ShardedHashMap(std::unique_ptr<const EntryComparator> comp)
ShardedHashMap::~ShardedHashMap()
{
- _gen_holder.clearHoldLists();
+ _gen_holder.reclaim_all();
for (size_t i = 0; i < num_shards; ++i) {
auto map = _maps[i].load(std::memory_order_relaxed);
delete map;
@@ -58,7 +58,7 @@ ShardedHashMap::hold_shard(std::unique_ptr<const FixedSizeHashMap> map)
{
auto usage = map->get_memory_usage();
auto hold = std::make_unique<ShardedHashMapShardHeld>(usage.allocatedBytes(), std::move(map));
- _gen_holder.hold(std::move(hold));
+ _gen_holder.insert(std::move(hold));
}
ShardedHashMap::KvType&
@@ -107,27 +107,27 @@ ShardedHashMap::find(const EntryComparator& comp, EntryRef key_ref) const
}
void
-ShardedHashMap::transfer_hold_lists(generation_t generation)
+ShardedHashMap::assign_generation(generation_t current_gen)
{
for (size_t i = 0; i < num_shards; ++i) {
auto map = _maps[i].load(std::memory_order_relaxed);
if (map != nullptr) {
- map->transfer_hold_lists(generation);
+ map->assign_generation(current_gen);
}
}
- _gen_holder.transferHoldLists(generation);
+ _gen_holder.assign_generation(current_gen);
}
void
-ShardedHashMap::trim_hold_lists(generation_t first_used)
+ShardedHashMap::reclaim_memory(generation_t oldest_used_gen)
{
for (size_t i = 0; i < num_shards; ++i) {
auto map = _maps[i].load(std::memory_order_relaxed);
if (map != nullptr) {
- map->trim_hold_lists(first_used);
+ map->reclaim_memory(oldest_used_gen);
}
}
- _gen_holder.trimHoldLists(first_used);
+ _gen_holder.reclaim(oldest_used_gen);
}
size_t
@@ -153,7 +153,7 @@ ShardedHashMap::get_memory_usage() const
memory_usage.merge(map->get_memory_usage());
}
}
- size_t gen_holder_held_bytes = _gen_holder.getHeldBytes();
+ size_t gen_holder_held_bytes = _gen_holder.get_held_bytes();
memory_usage.incAllocatedBytes(gen_holder_held_bytes);
memory_usage.incAllocatedBytesOnHold(gen_holder_held_bytes);
return memory_usage;
@@ -171,12 +171,12 @@ ShardedHashMap::foreach_key(std::function<void(EntryRef)> callback) const
}
void
-ShardedHashMap::move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers)
+ShardedHashMap::move_keys_on_compact(ICompactable& compactable, const EntryRefFilter& compacting_buffers)
{
for (size_t i = 0; i < num_shards; ++i) {
auto map = _maps[i].load(std::memory_order_relaxed);
if (map != nullptr) {
- map->move_keys(compactable, compacting_buffers);
+ map->move_keys_on_compact(compactable, compacting_buffers);
}
}
}
@@ -222,7 +222,7 @@ ShardedHashMap::foreach_value(std::function<void(const std::vector<EntryRef>&)>
bool
ShardedHashMap::has_held_buffers() const
{
- return _gen_holder.getHeldBytes() != 0;
+ return _gen_holder.get_held_bytes() != 0;
}
void
diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h
index e0ba9488351..572a8790828 100644
--- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h
+++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h
@@ -28,8 +28,8 @@ struct ICompactable;
* A reader must own an appropriate GenerationHandler::Guard to ensure
* that memory is held while it can be accessed by reader.
*
- * The writer must update generation and call transfer_hold_lists and
- * trim_hold_lists as needed to free up memory no longer needed by any
+ * The writer must update generation and call assign_generation and
+ * reclaim_memory as needed to free up memory no longer needed by any
* readers.
*/
class ShardedHashMap {
@@ -52,13 +52,13 @@ public:
KvType* remove(const EntryComparator& comp, EntryRef key_ref);
KvType* find(const EntryComparator& comp, EntryRef key_ref);
const KvType* find(const EntryComparator& comp, EntryRef key_ref) const;
- void transfer_hold_lists(generation_t generation);
- void trim_hold_lists(generation_t first_used);
+ void assign_generation(generation_t current_gen);
+ void reclaim_memory(generation_t oldest_used_gen);
size_t size() const noexcept;
const EntryComparator &get_default_comparator() const noexcept { return *_comp; }
MemoryUsage get_memory_usage() const;
void foreach_key(std::function<void(EntryRef)> callback) const;
- void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers);
+ void move_keys_on_compact(ICompactable& compactable, const EntryRefFilter& compacting_buffers);
bool normalize_values(std::function<EntryRef(EntryRef)> normalize);
bool normalize_values(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter);
void foreach_value(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter);
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.h b/vespalib/src/vespa/vespalib/datastore/unique_store.h
index e7c374985a7..1313d57fbab 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store.h
@@ -70,8 +70,8 @@ public:
inline const DataStoreType& get_data_store() const noexcept { return _allocator.get_data_store(); }
// Pass on hold list management to underlying store
- void transferHoldLists(generation_t generation);
- void trimHoldLists(generation_t firstUsed);
+ void assign_generation(generation_t current_gen);
+ void reclaim_memory(generation_t oldest_used_gen);
vespalib::GenerationHolder &getGenerationHolder() { return _store.getGenerationHolder(); }
void setInitializing(bool initializing) { _store.setInitializing(initializing); }
void freeze();
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
index 37a56bf2561..b8493017020 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
@@ -109,20 +109,20 @@ private:
}
}
- EntryRef move(EntryRef oldRef) override {
+ EntryRef move_on_compact(EntryRef oldRef) override {
RefT iRef(oldRef);
uint32_t buffer_id = iRef.bufferId();
auto &inner_mapping = _mapping[buffer_id];
assert(iRef.offset() < inner_mapping.size());
EntryRef &mappedRef = inner_mapping[iRef.offset()];
assert(!mappedRef.valid());
- EntryRef newRef = _store.move(oldRef);
+ EntryRef newRef = _store.move_on_compact(oldRef);
mappedRef = newRef;
return newRef;
}
void fillMapping() {
- _dict.move_keys(*this, _filter);
+ _dict.move_keys_on_compact(*this, _filter);
}
public:
@@ -190,18 +190,18 @@ UniqueStore<EntryT, RefT, Compare, Allocator>::bufferState(EntryRef ref) const
template <typename EntryT, typename RefT, typename Compare, typename Allocator>
void
-UniqueStore<EntryT, RefT, Compare, Allocator>::transferHoldLists(generation_t generation)
+UniqueStore<EntryT, RefT, Compare, Allocator>::assign_generation(generation_t current_gen)
{
- _dict->transfer_hold_lists(generation);
- _store.transferHoldLists(generation);
+ _dict->assign_generation(current_gen);
+ _store.assign_generation(current_gen);
}
template <typename EntryT, typename RefT, typename Compare, typename Allocator>
void
-UniqueStore<EntryT, RefT, Compare, Allocator>::trimHoldLists(generation_t firstUsed)
+UniqueStore<EntryT, RefT, Compare, Allocator>::reclaim_memory(generation_t oldest_used_gen)
{
- _dict->trim_hold_lists(firstUsed);
- _store.trimHoldLists(firstUsed);
+ _dict->reclaim_memory(oldest_used_gen);
+ _store.reclaim_memory(oldest_used_gen);
}
template <typename EntryT, typename RefT, typename Compare, typename Allocator>
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h
index 04df88ab4b9..0f6d9ddfc9b 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h
@@ -35,7 +35,7 @@ public:
~UniqueStoreAllocator() override;
EntryRef allocate(const EntryType& value);
void hold(EntryRef ref);
- EntryRef move(EntryRef ref) override;
+ EntryRef move_on_compact(EntryRef ref) override;
const WrappedEntryType& get_wrapped(EntryRef ref) const {
RefType iRef(ref);
return *_store.template getEntry<WrappedEntryType>(iRef);
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp
index 04a229d4ffa..8ad11b18218 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp
@@ -28,7 +28,7 @@ UniqueStoreAllocator<EntryT, RefT>::UniqueStoreAllocator(std::shared_ptr<alloc::
template <typename EntryT, typename RefT>
UniqueStoreAllocator<EntryT, RefT>::~UniqueStoreAllocator()
{
- _store.clearHoldLists();
+ _store.reclaim_all_memory();
_store.dropBuffers();
}
@@ -48,7 +48,7 @@ UniqueStoreAllocator<EntryT, RefT>::hold(EntryRef ref)
template <typename EntryT, typename RefT>
EntryRef
-UniqueStoreAllocator<EntryT, RefT>::move(EntryRef ref)
+UniqueStoreAllocator<EntryT, RefT>::move_on_compact(EntryRef ref)
{
return _store.template allocator<WrappedEntryType>(0).alloc(get_wrapped(ref)).ref;
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h
index 702bae38e7c..8c5f284bb14 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h
@@ -74,12 +74,12 @@ public:
UniqueStoreDictionary(std::unique_ptr<EntryComparator> compare);
~UniqueStoreDictionary() override;
void freeze() override;
- void transfer_hold_lists(generation_t generation) override;
- void trim_hold_lists(generation_t firstUsed) override;
+ void assign_generation(generation_t current_gen) override;
+ void reclaim_memory(generation_t oldest_used_gen) override;
UniqueStoreAddResult add(const EntryComparator& comp, std::function<EntryRef(void)> insertEntry) override;
EntryRef find(const EntryComparator& comp) override;
void remove(const EntryComparator& comp, EntryRef ref) override;
- void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) override;
+ void move_keys_on_compact(ICompactable& compactable, const EntryRefFilter& compacting_buffers) override;
uint32_t get_num_uniques() const override;
vespalib::MemoryUsage get_memory_usage() const override;
void build(vespalib::ConstArrayRef<EntryRef>, vespalib::ConstArrayRef<uint32_t> ref_counts, std::function<void(EntryRef)> hold) override;
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp
index 8029b66309d..6708b4c1448 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp
@@ -41,25 +41,25 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::freeze()
template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT>
void
-UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::transfer_hold_lists(generation_t generation)
+UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::assign_generation(generation_t current_gen)
{
if constexpr (has_btree_dictionary) {
- this->_btree_dict.getAllocator().transferHoldLists(generation);
+ this->_btree_dict.getAllocator().assign_generation(current_gen);
}
if constexpr (has_hash_dictionary) {
- this->_hash_dict.transfer_hold_lists(generation);
+ this->_hash_dict.assign_generation(current_gen);
}
}
template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT>
void
-UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::trim_hold_lists(generation_t firstUsed)
+UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::reclaim_memory(generation_t oldest_used_gen)
{
if constexpr (has_btree_dictionary) {
- this->_btree_dict.getAllocator().trimHoldLists(firstUsed);
+ this->_btree_dict.getAllocator().reclaim_memory(oldest_used_gen);
}
if constexpr (has_hash_dictionary) {
- this->_hash_dict.trim_hold_lists(firstUsed);
+ this->_hash_dict.reclaim_memory(oldest_used_gen);
}
}
@@ -140,7 +140,7 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::remove(const
template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT>
void
-UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICompactable &compactable, const EntryRefFilter& compacting_buffers)
+UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys_on_compact(ICompactable &compactable, const EntryRefFilter& compacting_buffers)
{
if constexpr (has_btree_dictionary) {
auto itr = this->_btree_dict.begin();
@@ -148,7 +148,7 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICo
EntryRef oldRef(itr.getKey().load_relaxed());
assert(oldRef.valid());
if (compacting_buffers.has(oldRef)) {
- EntryRef newRef(compactable.move(oldRef));
+ EntryRef newRef(compactable.move_on_compact(oldRef));
this->_btree_dict.thaw(itr);
itr.writeKey(AtomicEntryRef(newRef));
if constexpr (has_hash_dictionary) {
@@ -160,7 +160,7 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICo
++itr;
}
} else {
- this->_hash_dict.move_keys(compactable, compacting_buffers);
+ this->_hash_dict.move_keys_on_compact(compactable, compacting_buffers);
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h
index be5fa8f6c1e..8977fd1cce8 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h
@@ -111,7 +111,7 @@ public:
~UniqueStoreStringAllocator() override;
EntryRef allocate(const char *value);
void hold(EntryRef ref);
- EntryRef move(EntryRef ref) override;
+ EntryRef move_on_compact(EntryRef ref) override;
const UniqueStoreEntryBase& get_wrapped(EntryRef ref) const {
RefType iRef(ref);
auto &state = _store.getBufferState(iRef.bufferId());
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
index 71ea16bcde2..65cab4850ba 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
@@ -30,7 +30,7 @@ UniqueStoreStringAllocator<RefT>::UniqueStoreStringAllocator(std::shared_ptr<all
template <typename RefT>
UniqueStoreStringAllocator<RefT>::~UniqueStoreStringAllocator()
{
- _store.clearHoldLists();
+ _store.reclaim_all_memory();
_store.dropBuffers();
}
@@ -49,7 +49,7 @@ UniqueStoreStringAllocator<RefT>::allocate(const char *value)
auto handle = _store.template freeListAllocator<WrappedExternalEntryType, UniqueStoreEntryReclaimer<WrappedExternalEntryType>>(0).alloc(std::string(value));
RefT iRef(handle.ref);
auto &state = _store.getBufferState(iRef.bufferId());
- state.incExtraUsedBytes(value_len + 1);
+ state.stats().inc_extra_used_bytes(value_len + 1);
return handle.ref;
}
}
@@ -71,7 +71,7 @@ UniqueStoreStringAllocator<RefT>::hold(EntryRef ref)
template <typename RefT>
EntryRef
-UniqueStoreStringAllocator<RefT>::move(EntryRef ref)
+UniqueStoreStringAllocator<RefT>::move_on_compact(EntryRef ref)
{
RefT iRef(ref);
uint32_t type_id = _store.getTypeId(iRef.bufferId());
@@ -87,7 +87,7 @@ UniqueStoreStringAllocator<RefT>::move(EntryRef ref)
auto handle = _store.template allocator<WrappedExternalEntryType>(0).alloc(*_store.template getEntry<WrappedExternalEntryType>(iRef));
auto &state = _store.getBufferState(RefT(handle.ref).bufferId());
auto &value = static_cast<const WrappedExternalEntryType *>(handle.data)->value();
- state.incExtraUsedBytes(value.size() + 1);
+ state.stats().inc_extra_used_bytes(value.size() + 1);
return handle.ref;
}
}
diff --git a/vespalib/src/vespa/vespalib/util/generation_hold_list.h b/vespalib/src/vespa/vespalib/util/generation_hold_list.h
new file mode 100644
index 00000000000..bdb58afb504
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/util/generation_hold_list.h
@@ -0,0 +1,106 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "generationhandler.h"
+#include <atomic>
+#include <deque>
+#include <vector>
+
+namespace vespalib {
+
+/**
+ * Class used to hold data elements until they are no longer accessed by readers and can be safely reclaimed.
+ *
+ * This class must be used in cooperation with a GenerationHandler, which tracks the generations that readers can still observe.
+ */
+template <typename T, bool track_bytes_held, bool use_deque>
+class GenerationHoldList {
+private:
+ using generation_t = vespalib::GenerationHandler::generation_t;
+
+ struct ElemWithGen {
+ T elem;
+ generation_t gen;
+ ElemWithGen(T elem_in, generation_t gen_in)
+ : elem(std::move(elem_in)),
+ gen(gen_in)
+ {}
+ size_t byte_size() const {
+ if constexpr (track_bytes_held) {
+ return elem->byte_size();
+ }
+ return 0;
+ }
+ };
+
+ struct NoopFunc { void operator()(const T&){} };
+
+ using ElemList = std::vector<T>;
+ using ElemWithGenList = std::conditional_t<use_deque,
+ std::deque<ElemWithGen>,
+ std::vector<ElemWithGen>>;
+
+ ElemList _phase_1_list;
+ ElemWithGenList _phase_2_list;
+ std::atomic<size_t> _held_bytes;
+
+ /**
+ * Transfer elements from phase 1 to phase 2 list, assigning the current generation.
+ */
+ void assign_generation_internal(generation_t current_gen);
+
+ template<typename Func>
+ void reclaim_internal(generation_t oldest_used_gen, Func callback);
+
+public:
+ GenerationHoldList();
+ ~GenerationHoldList();
+
+ /**
+ * Insert the given data element on this hold list.
+ */
+ void insert(T data);
+
+ /**
+ * Assign the current generation to all data elements inserted on the hold list
+ * since the last time this function was called.
+ */
+ void assign_generation(generation_t current_gen) {
+ if (!_phase_1_list.empty()) {
+ assign_generation_internal(current_gen);
+ }
+ }
+
+ /**
+ * Reclaim all data elements where the assigned generation < oldest used generation.
+ * The callback function is called for each data element reclaimed.
+ */
+ template<typename Func>
+ void reclaim(generation_t oldest_used_gen, Func callback) {
+ if (!_phase_2_list.empty() && (_phase_2_list.front().gen < oldest_used_gen)) {
+ reclaim_internal(oldest_used_gen, callback);
+ }
+ }
+
+ void reclaim(generation_t oldest_used_gen) {
+ reclaim(oldest_used_gen, NoopFunc());
+ }
+
+ /**
+ * Reclaim all data elements from this hold list.
+ */
+ void reclaim_all();
+
+ /**
+ * Reclaim all data elements from this hold list.
+ * The callback function is called for each data element reclaimed.
+ */
+ template<typename Func>
+ void reclaim_all(Func callback);
+
+ size_t get_held_bytes() const { return _held_bytes.load(std::memory_order_relaxed); }
+
+};
+
+}
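
The header above encodes the two-phase reclaim protocol: elements are first parked in the phase 1 list, stamped with the current generation by assign_generation(), and destroyed once no reader guards that generation. A minimal usage sketch, assuming the writer drives a GenerationHandler alongside the list (the template definitions live in the .hpp below, so that is the header to include):

    #include <vespa/vespalib/util/generation_hold_list.hpp>
    #include <vespa/vespalib/util/generationhandler.h>
    #include <memory>

    using vespalib::GenerationHandler;
    using HoldList = vespalib::GenerationHoldList<std::unique_ptr<int>, false, false>;

    void writer_cycle(GenerationHandler& handler, HoldList& hold_list) {
        // Retire an element that readers may still reference.
        hold_list.insert(std::make_unique<int>(42));
        // Stamp everything held since the last cycle with the current generation.
        hold_list.assign_generation(handler.getCurrentGeneration());
        // Bump the generation; new readers will guard the newer one.
        handler.incGeneration();
        // Destroy elements that no reader can still observe.
        hold_list.reclaim(handler.get_oldest_used_generation());
    }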
diff --git a/vespalib/src/vespa/vespalib/util/generation_hold_list.hpp b/vespalib/src/vespa/vespalib/util/generation_hold_list.hpp
new file mode 100644
index 00000000000..4855d1c651d
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/util/generation_hold_list.hpp
@@ -0,0 +1,88 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "generation_hold_list.h"
+#include <cassert>
+
+namespace vespalib {
+
+template <typename T, bool track_bytes_held, bool use_deque>
+void
+GenerationHoldList<T, track_bytes_held, use_deque>::assign_generation_internal(generation_t current_gen)
+{
+ for (auto& elem : _phase_1_list) {
+ _phase_2_list.push_back(ElemWithGen(std::move(elem), current_gen));
+ }
+ _phase_1_list.clear();
+}
+
+template <typename T, bool track_bytes_held, bool use_deque>
+template <typename Func>
+void
+GenerationHoldList<T, track_bytes_held, use_deque>::reclaim_internal(generation_t oldest_used_gen, Func func)
+{
+ auto itr = _phase_2_list.begin();
+ auto ite = _phase_2_list.end();
+ for (; itr != ite; ++itr) {
+ if (itr->gen >= oldest_used_gen) {
+ break;
+ }
+ const auto& elem = itr->elem;
+ func(elem);
+ if constexpr (track_bytes_held) {
+ _held_bytes.store(get_held_bytes() - itr->byte_size(), std::memory_order_relaxed);
+ }
+ }
+ if (itr != _phase_2_list.begin()) {
+ _phase_2_list.erase(_phase_2_list.begin(), itr);
+ }
+}
+
+template <typename T, bool track_bytes_held, bool use_deque>
+GenerationHoldList<T, track_bytes_held, use_deque>::GenerationHoldList()
+ : _phase_1_list(),
+ _phase_2_list(),
+ _held_bytes()
+{
+}
+
+template <typename T, bool track_bytes_held, bool use_deque>
+GenerationHoldList<T, track_bytes_held, use_deque>::~GenerationHoldList()
+{
+ assert(_phase_1_list.empty());
+ assert(_phase_2_list.empty());
+ assert(get_held_bytes() == 0);
+}
+
+template <typename T, bool track_bytes_held, bool use_deque>
+void
+GenerationHoldList<T, track_bytes_held, use_deque>::insert(T data)
+{
+ _phase_1_list.push_back(std::move(data));
+ if constexpr (track_bytes_held) {
+ _held_bytes.store(get_held_bytes() + _phase_1_list.back()->byte_size(), std::memory_order_relaxed);
+ }
+}
+
+template <typename T, bool track_bytes_held, bool use_deque>
+void
+GenerationHoldList<T, track_bytes_held, use_deque>::reclaim_all()
+{
+ _phase_1_list.clear();
+ _phase_2_list.clear();
+ _held_bytes = 0;
+}
+
+template <typename T, bool track_bytes_held, bool use_deque>
+template <typename Func>
+void
+GenerationHoldList<T, track_bytes_held, use_deque>::reclaim_all(Func func)
+{
+ for (const auto& elem_with_gen : _phase_2_list) {
+ func(elem_with_gen.elem);
+ }
+ reclaim_all();
+}
+
+}
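
When the held type does not manage its own lifetime, the callback overloads carry the cleanup; an illustrative sketch with raw pointers, using the deque-backed variant without byte tracking (the generations are assumed to come from a GenerationHandler as above):

    #include <vespa/vespalib/util/generation_hold_list.hpp>

    using generation_t = vespalib::GenerationHandler::generation_t;

    void hold_and_reclaim(generation_t current_gen, generation_t oldest_used_gen) {
        vespalib::GenerationHoldList<int*, false, true> hold_list;
        hold_list.insert(new int(7));              // element readers may still see
        hold_list.assign_generation(current_gen);  // stamp with the current generation
        // Free everything assigned a generation older than the oldest still in use.
        hold_list.reclaim(oldest_used_gen, [](int* elem) { delete elem; });
        // Drain the rest before destruction; the destructor asserts empty lists.
        hold_list.reclaim_all([](int* elem) { delete elem; });
    }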
diff --git a/vespalib/src/vespa/vespalib/util/generationhandler.cpp b/vespalib/src/vespa/vespalib/util/generationhandler.cpp
index d1cc0271068..3562926d88d 100644
--- a/vespalib/src/vespa/vespalib/util/generationhandler.cpp
+++ b/vespalib/src/vespa/vespalib/util/generationhandler.cpp
@@ -111,7 +111,7 @@ GenerationHandler::Guard::operator=(Guard &&rhs)
}
void
-GenerationHandler::updateFirstUsedGeneration()
+GenerationHandler::update_oldest_used_generation()
{
for (;;) {
if (_first == _last.load(std::memory_order_relaxed))
@@ -125,12 +125,12 @@ GenerationHandler::updateFirstUsedGeneration()
toFree->_next = _free;
_free = toFree;
}
- _firstUsedGeneration.store(_first->_generation, std::memory_order_relaxed);
+ _oldest_used_generation.store(_first->_generation, std::memory_order_relaxed);
}
GenerationHandler::GenerationHandler()
: _generation(0),
- _firstUsedGeneration(0),
+ _oldest_used_generation(0),
_last(nullptr),
_first(nullptr),
_free(nullptr),
@@ -144,7 +144,7 @@ GenerationHandler::GenerationHandler()
GenerationHandler::~GenerationHandler(void)
{
- updateFirstUsedGeneration();
+ update_oldest_used_generation();
assert(_first == _last.load(std::memory_order_relaxed));
while (_free != nullptr) {
GenerationHold *toFree = _free;
@@ -190,7 +190,7 @@ GenerationHandler::incGeneration()
// reader
set_generation(ngen);
last->_generation.store(ngen, std::memory_order_relaxed);
- updateFirstUsedGeneration();
+ update_oldest_used_generation();
return;
}
GenerationHold *nhold = nullptr;
@@ -207,7 +207,7 @@ GenerationHandler::incGeneration()
last->_next = nhold;
set_generation(ngen);
_last.store(nhold, std::memory_order_release);
- updateFirstUsedGeneration();
+ update_oldest_used_generation();
}
uint32_t
@@ -215,7 +215,7 @@ GenerationHandler::getGenerationRefCount(generation_t gen) const
{
if (static_cast<sgeneration_t>(gen - getCurrentGeneration()) > 0)
return 0u;
- if (static_cast<sgeneration_t>(getFirstUsedGeneration() - gen) > 0)
+ if (static_cast<sgeneration_t>(get_oldest_used_generation() - gen) > 0)
return 0u;
for (GenerationHold *hold = _first; hold != nullptr; hold = hold->_next) {
if (hold->_generation.load(std::memory_order_relaxed) == gen)
diff --git a/vespalib/src/vespa/vespalib/util/generationhandler.h b/vespalib/src/vespa/vespalib/util/generationhandler.h
index 9637ad0e414..6ba71b7f5fb 100644
--- a/vespalib/src/vespa/vespalib/util/generationhandler.h
+++ b/vespalib/src/vespa/vespalib/util/generationhandler.h
@@ -73,7 +73,7 @@ public:
private:
std::atomic<generation_t> _generation;
- std::atomic<generation_t> _firstUsedGeneration;
+ std::atomic<generation_t> _oldest_used_generation;
std::atomic<GenerationHold *> _last; // Points to "current generation" entry
- GenerationHold *_first; // Points to "firstUsedGeneration" entry
+ GenerationHold *_first; // Points to the "oldest used generation" entry
GenerationHold *_free; // List of free entries
@@ -101,17 +101,17 @@ public:
void incGeneration();
/**
- * Update first used generation.
+ * Update the oldest used generation.
* Should be called by the writer thread.
*/
- void updateFirstUsedGeneration();
+ void update_oldest_used_generation();
/**
- * Returns the first generation guarded by a reader. It might be too low
- * if writer hasn't updated first used generation after last reader left.
+ * Returns the oldest generation guarded by a reader.
+ * It might be too low if the writer hasn't updated the oldest used generation after the last reader left.
*/
- generation_t getFirstUsedGeneration() const noexcept {
- return _firstUsedGeneration.load(std::memory_order_relaxed);
+ generation_t get_oldest_used_generation() const noexcept {
+ return _oldest_used_generation.load(std::memory_order_relaxed);
}
/**
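
With the renames in place, the reader/writer protocol reads naturally. A sketch of both sides; takeGuard() is assumed from the surrounding class (it is not part of this hunk), and note that incGeneration() already calls update_oldest_used_generation() internally, as seen in the .cpp hunk above:

    #include <vespa/vespalib/util/generationhandler.h>

    vespalib::GenerationHandler handler;

    void reader() {
        auto guard = handler.takeGuard();   // pin the current generation
        // ... read shared structures; held data stays alive while guarded ...
    }                                       // guard released on scope exit

    void writer() {
        // ... publish new data, put replaced data on hold lists ...
        handler.incGeneration();            // retire the generation readers may hold
        // Anything held with generation < oldest can now be reclaimed.
        auto oldest = handler.get_oldest_used_generation();
        (void) oldest;
    }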
diff --git a/vespalib/src/vespa/vespalib/util/generationholder.cpp b/vespalib/src/vespa/vespalib/util/generationholder.cpp
index 5bfa7d152ce..07bde82f007 100644
--- a/vespalib/src/vespa/vespalib/util/generationholder.cpp
+++ b/vespalib/src/vespa/vespalib/util/generationholder.cpp
@@ -1,66 +1,20 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "generationholder.h"
-#include <cassert>
+#include "generation_hold_list.hpp"
namespace vespalib {
-GenerationHeldBase::~GenerationHeldBase() = default;
-
-GenerationHolder::GenerationHolder()
- : _hold1List(),
- _hold2List(),
- _heldBytes(0)
-{ }
-
-GenerationHolder::~GenerationHolder()
-{
- assert(_hold1List.empty());
- assert(_hold2List.empty());
- assert(getHeldBytes() == 0);
-}
+template class GenerationHoldList<GenerationHeldBase::UP, true, false>;
-void
-GenerationHolder::hold(GenerationHeldBase::UP data)
-{
- _hold1List.push_back(GenerationHeldBase::SP(data.release()));
- _heldBytes.store(getHeldBytes() + _hold1List.back()->getSize(), std::memory_order_relaxed);
-}
+template void GenerationHolderParent::reclaim_internal
+ <GenerationHolderParent::NoopFunc>(generation_t oldest_used_gen, NoopFunc func);
-void
-GenerationHolder::transferHoldListsSlow(generation_t generation)
-{
- HoldList::iterator it(_hold1List.begin());
- HoldList::iterator ite(_hold1List.end());
- HoldList &hold2List = _hold2List;
- for (; it != ite; ++it) {
- assert((*it)->_generation == 0u);
- (*it)->_generation = generation;
- hold2List.push_back(*it);
- }
- _hold1List.clear();
-}
-
-void
-GenerationHolder::trimHoldListsSlow(generation_t usedGen)
-{
- for (;;) {
- if (_hold2List.empty())
- break;
- GenerationHeldBase &first = *_hold2List.front();
- if (static_cast<sgeneration_t>(first._generation - usedGen) >= 0)
- break;
- _heldBytes.store(getHeldBytes() - first.getSize(), std::memory_order_relaxed);
- _hold2List.erase(_hold2List.begin());
- }
-}
+GenerationHeldBase::~GenerationHeldBase() = default;
-void
-GenerationHolder::clearHoldLists()
+GenerationHolder::GenerationHolder()
+ : GenerationHolderParent()
{
- _hold1List.clear();
- _hold2List.clear();
- _heldBytes = 0;
}
}
diff --git a/vespalib/src/vespa/vespalib/util/generationholder.h b/vespalib/src/vespa/vespalib/util/generationholder.h
index ed68a80a308..86d402f9b3b 100644
--- a/vespalib/src/vespa/vespalib/util/generationholder.h
+++ b/vespalib/src/vespa/vespalib/util/generationholder.h
@@ -2,8 +2,8 @@
#pragma once
+#include "generation_hold_list.h"
#include "generationhandler.h"
-#include <vector>
#include <memory>
namespace vespalib {
@@ -11,79 +11,31 @@ namespace vespalib {
class GenerationHeldBase
{
public:
- typedef GenerationHandler::generation_t generation_t;
- typedef std::unique_ptr<GenerationHeldBase> UP;
- typedef std::shared_ptr<GenerationHeldBase> SP;
+ using generation_t = GenerationHandler::generation_t;
+ using UP = std::unique_ptr<GenerationHeldBase>;
+ using SP = std::shared_ptr<GenerationHeldBase>;
- generation_t _generation;
private:
- size_t _size;
+ size_t _byte_size;
public:
- GenerationHeldBase(size_t size)
- : _generation(0u),
- _size(size)
+ GenerationHeldBase(size_t byte_size_in)
+ : _byte_size(byte_size_in)
{ }
virtual ~GenerationHeldBase();
- size_t getSize() const { return _size; }
+ size_t byte_size() const { return _byte_size; }
};
+using GenerationHolderParent = GenerationHoldList<GenerationHeldBase::UP, true, false>;
+
/*
* GenerationHolder is meant to hold large elements until readers can
* no longer access them.
*/
-class GenerationHolder
-{
-private:
- typedef GenerationHandler::generation_t generation_t;
- typedef GenerationHandler::sgeneration_t sgeneration_t;
-
- typedef std::vector<GenerationHeldBase::SP> HoldList;
-
- HoldList _hold1List;
- HoldList _hold2List;
- std::atomic<size_t> _heldBytes;
-
- /**
- * Transfer holds from hold1 to hold2 lists, assigning generation.
- */
- void transferHoldListsSlow(generation_t generation);
-
- /**
- * Remove all data elements from this holder where generation < usedGen.
- **/
- void trimHoldListsSlow(generation_t usedGen);
-
+class GenerationHolder : public GenerationHolderParent {
public:
GenerationHolder();
- ~GenerationHolder();
-
- /**
- * Add the given data pointer to this holder.
- **/
- void hold(GenerationHeldBase::UP data);
-
- /**
- * Transfer holds from hold1 to hold2 lists, assigning generation.
- */
- void transferHoldLists(generation_t generation) {
- if (!_hold1List.empty()) {
- transferHoldListsSlow(generation);
- }
- }
-
- /**
- * Remove all data elements from this holder where generation < usedGen.
- **/
- void trimHoldLists(generation_t usedGen) {
- if (!_hold2List.empty() && static_cast<sgeneration_t>(_hold2List.front()->_generation - usedGen) < 0) {
- trimHoldListsSlow(usedGen);
- }
- }
-
- void clearHoldLists();
- size_t getHeldBytes() const { return _heldBytes.load(std::memory_order_relaxed); }
};
}
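
After this refactor, GenerationHolder is just the byte-tracked, vector-backed instantiation of GenerationHoldList, so holding a large object is a matter of wrapping it in a GenerationHeldBase subclass whose byte_size() feeds get_held_bytes(). An illustrative sketch (HeldBuffer and retire_buffer are made up for the example):

    #include <vespa/vespalib/util/generationholder.h>
    #include <memory>
    #include <vector>

    // Wrapper that keeps a retired buffer alive and reports its size.
    struct HeldBuffer : vespalib::GenerationHeldBase {
        std::vector<char> buf;
        explicit HeldBuffer(std::vector<char> buf_in)
            : GenerationHeldBase(buf_in.capacity()), // base is initialized first
              buf(std::move(buf_in))
        {}
    };

    void retire_buffer(vespalib::GenerationHolder& holder, std::vector<char> old_buf) {
        holder.insert(std::make_unique<HeldBuffer>(std::move(old_buf)));
        // holder.get_held_bytes() now includes the buffer until reclaim() frees it.
    }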
diff --git a/vespalib/src/vespa/vespalib/util/rcuvector.h b/vespalib/src/vespa/vespalib/util/rcuvector.h
index 5d084fe3815..b0929303692 100644
--- a/vespalib/src/vespa/vespalib/util/rcuvector.h
+++ b/vespalib/src/vespa/vespalib/util/rcuvector.h
@@ -182,7 +182,7 @@ public:
/**
- * Remove all old data vectors where generation < firstUsed.
+ * Reclaim all old data vectors where the assigned generation < oldest used generation.
**/
- void removeOldGenerations(generation_t firstUsed);
+ void reclaim_memory(generation_t oldest_used_gen);
MemoryUsage getMemoryUsage() const override;
};
diff --git a/vespalib/src/vespa/vespalib/util/rcuvector.hpp b/vespalib/src/vespa/vespalib/util/rcuvector.hpp
index 97a73a73cc9..eadda8ac1e9 100644
--- a/vespalib/src/vespa/vespalib/util/rcuvector.hpp
+++ b/vespalib/src/vespa/vespalib/util/rcuvector.hpp
@@ -80,7 +80,7 @@ RcuVectorBase<T>::replaceVector(ArrayType replacement) {
replacement.swap(_data); // atomic switch of underlying data
size_t holdSize = replacement.capacity() * sizeof(T);
auto hold = std::make_unique<RcuVectorHeld<ArrayType>>(holdSize, std::move(replacement));
- _genHolder.hold(std::move(hold));
+ _genHolder.insert(std::move(hold));
onReallocation();
}
@@ -116,7 +116,7 @@ RcuVectorBase<T>::shrink(size_t newSize)
tmpData.swap(_data); // atomic switch of underlying data
size_t holdSize = tmpData.capacity() * sizeof(T);
auto hold = std::make_unique<RcuVectorHeld<ArrayType>>(holdSize, std::move(tmpData));
- _genHolder.hold(std::move(hold));
+ _genHolder.insert(std::move(hold));
onReallocation();
}
}
@@ -162,7 +162,7 @@ template <typename T>
void
RcuVector<T>::onReallocation() {
RcuVectorBase<T>::onReallocation();
- _genHolderStore.transferHoldLists(_generation);
+ _genHolderStore.assign_generation(_generation);
}
template <typename T>
@@ -182,14 +182,14 @@ RcuVector<T>::RcuVector(GrowStrategy growStrategy)
template <typename T>
RcuVector<T>::~RcuVector()
{
- _genHolderStore.clearHoldLists();
+ _genHolderStore.reclaim_all();
}
template <typename T>
void
-RcuVector<T>::removeOldGenerations(generation_t firstUsed)
+RcuVector<T>::reclaim_memory(generation_t oldest_used_gen)
{
- _genHolderStore.trimHoldLists(firstUsed);
+ _genHolderStore.reclaim(oldest_used_gen);
}
template <typename T>
@@ -197,7 +197,7 @@ MemoryUsage
RcuVector<T>::getMemoryUsage() const
{
MemoryUsage retval(RcuVectorBase<T>::getMemoryUsage());
- retval.mergeGenerationHeldBytes(_genHolderStore.getHeldBytes());
+ retval.mergeGenerationHeldBytes(_genHolderStore.get_held_bytes());
return retval;
}
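
The RcuVector changes tie the pieces together: a reallocation puts the old buffer on the internal GenerationHolder, and the renamed reclaim_memory() frees buffers no reader can reach. A sketch of the writer-side maintenance cycle, assuming push_back() from the surrounding class and an external GenerationHandler kept in sync with the vector's generation (neither is shown in this hunk):

    #include <vespa/vespalib/util/generationhandler.h>
    #include <vespa/vespalib/util/rcuvector.h>

    void maintain(vespalib::GenerationHandler& handler, vespalib::RcuVector<int>& vec) {
        vec.push_back(42);      // may reallocate; the old buffer goes on hold
        handler.incGeneration();
        // Free held buffers that no reader can still observe.
        vec.reclaim_memory(handler.get_oldest_used_generation());
    }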