summary refs log tree commit diff stats
path: root/vespalib
diff options
context:
space:
mode:
authorGeir Storli <geirst@yahooinc.com>2022-10-06 15:47:55 +0000
committerGeir Storli <geirst@yahooinc.com>2022-10-06 15:52:12 +0000
commit015fa522fd1b8a85a490bde41262a6401d8dfd20 (patch)
treee86b210f0f25f432908aa30b0a178156ddf8d925 /vespalib
parente058823899d4575cf3cd544c3fd0f739c0d085e3 (diff)
Move tracking of datastore buffer statistics to separate classes.
Diffstat (limited to 'vespalib')
-rw-r--r--vespalib/src/tests/datastore/array_store/array_store_test.cpp24
-rw-r--r--vespalib/src/tests/datastore/datastore/datastore_test.cpp18
-rw-r--r--vespalib/src/tests/datastore/unique_store/unique_store_test.cpp22
-rw-r--r--vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp44
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp2
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreenodestore.h2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/CMakeLists.txt2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/allocator.hpp6
-rw-r--r--vespalib/src/vespa/vespalib/datastore/array_store.hpp4
-rw-r--r--vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp57
-rw-r--r--vespalib/src/vespa/vespalib/datastore/buffer_stats.h76
-rw-r--r--vespalib/src/vespa/vespalib/datastore/bufferstate.cpp57
-rw-r--r--vespalib/src/vespa/vespalib/datastore/bufferstate.h45
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastore.hpp10
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.cpp34
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.h55
-rw-r--r--vespalib/src/vespa/vespalib/datastore/memory_stats.cpp40
-rw-r--r--vespalib/src/vespa/vespalib/datastore/memory_stats.h32
-rw-r--r--vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp4
20 files changed, 319 insertions, 217 deletions
diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
index 1e8632aee95..1708b0fd948 100644
--- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp
+++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
@@ -20,7 +20,7 @@ using vespalib::alloc::MemoryAllocator;
using vespalib::alloc::test::MemoryAllocatorObserver;
using AllocStats = MemoryAllocatorObserver::Stats;
-using BufferStats = vespalib::datastore::test::BufferStats;
+using TestBufferStats = vespalib::datastore::test::BufferStats;
using MemStats = vespalib::datastore::test::MemStats;
namespace {
@@ -98,16 +98,16 @@ struct ArrayStoreTest : public TestT
}
void assertBufferState(EntryRef ref, const MemStats& expStats) const {
EXPECT_EQ(expStats._used, store.bufferState(ref).size());
- EXPECT_EQ(expStats._hold, store.bufferState(ref).getHoldElems());
- EXPECT_EQ(expStats._dead, store.bufferState(ref).getDeadElems());
+ EXPECT_EQ(expStats._hold, store.bufferState(ref).stats().hold_elems());
+ EXPECT_EQ(expStats._dead, store.bufferState(ref).stats().dead_elems());
}
- void assert_buffer_stats(EntryRef ref, const BufferStats& exp_stats) const {
+ void assert_buffer_stats(EntryRef ref, const TestBufferStats& exp_stats) const {
auto& state = store.bufferState(ref);
EXPECT_EQ(exp_stats._used, state.size());
- EXPECT_EQ(exp_stats._hold, state.getHoldElems());
- EXPECT_EQ(exp_stats._dead, state.getDeadElems());
- EXPECT_EQ(exp_stats._extra_used, state.getExtraUsedBytes());
- EXPECT_EQ(exp_stats._extra_hold, state.getExtraHoldBytes());
+ EXPECT_EQ(exp_stats._hold, state.stats().hold_elems());
+ EXPECT_EQ(exp_stats._dead, state.stats().dead_elems());
+ EXPECT_EQ(exp_stats._extra_used, state.stats().extra_used_bytes());
+ EXPECT_EQ(exp_stats._extra_hold, state.stats().extra_hold_bytes());
}
void assertMemoryUsage(const MemStats expStats) const {
MemoryUsage act = store.getMemoryUsage();
@@ -280,13 +280,13 @@ TEST_P(NumberStoreFreeListsDisabledTest, large_arrays_are_NOT_allocated_from_fre
TEST_P(NumberStoreTest, track_size_of_large_array_allocations_with_free_lists_enabled) {
EntryRef ref = add({1,2,3,4});
- assert_buffer_stats(ref, BufferStats().used(2).hold(0).dead(1).extra_used(16));
+ assert_buffer_stats(ref, TestBufferStats().used(2).hold(0).dead(1).extra_used(16));
remove({1,2,3,4});
- assert_buffer_stats(ref, BufferStats().used(2).hold(1).dead(1).extra_hold(16).extra_used(16));
+ assert_buffer_stats(ref, TestBufferStats().used(2).hold(1).dead(1).extra_hold(16).extra_used(16));
trimHoldLists();
- assert_buffer_stats(ref, BufferStats().used(2).hold(0).dead(2).extra_used(0));
+ assert_buffer_stats(ref, TestBufferStats().used(2).hold(0).dead(2).extra_used(0));
add({5,6,7,8,9});
- assert_buffer_stats(ref, BufferStats().used(2).hold(0).dead(1).extra_used(20));
+ assert_buffer_stats(ref, TestBufferStats().used(2).hold(0).dead(1).extra_used(20));
}
TEST_F(SmallOffsetNumberStoreTest, new_underlying_buffer_is_allocated_when_current_is_full)
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
index 9522aa1e0dc..bf389e5e78e 100644
--- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -55,7 +55,7 @@ public:
using GrowthStats = std::vector<int>;
-using BufferStats = std::vector<int>;
+using BufferIds = std::vector<int>;
constexpr float ALLOC_GROW_FACTOR = 0.4;
constexpr size_t HUGE_PAGE_ARRAY_SIZE = (MemoryAllocator::HUGEPAGE_SIZE / sizeof(int));
@@ -124,8 +124,8 @@ public:
++i;
}
}
- BufferStats getBuffers(size_t bufs) {
- BufferStats buffers;
+ BufferIds getBuffers(size_t bufs) {
+ BufferIds buffers;
while (buffers.size() < bufs) {
RefType iRef = (_type.getArraySize() == 1) ?
(_store.template allocator<DataType>(_typeId).alloc().ref) :
@@ -143,8 +143,8 @@ public:
using MyRef = MyStore::RefType;
void
-assertMemStats(const DataStoreBase::MemStats &exp,
- const DataStoreBase::MemStats &act)
+assertMemStats(const MemoryStats &exp,
+ const MemoryStats &act)
{
EXPECT_EQ(exp._allocElems, act._allocElems);
EXPECT_EQ(exp._usedElems, act._usedElems);
@@ -414,7 +414,7 @@ TEST(DataStoreTest, require_that_we_can_use_free_lists_with_raw_allocator)
TEST(DataStoreTest, require_that_memory_stats_are_calculated)
{
MyStore s;
- DataStoreBase::MemStats m;
+ MemoryStats m;
m._allocElems = MyRef::offsetSize();
m._usedElems = 1; // ref = 0 is reserved
m._deadElems = 1; // ref = 0 is reserved
@@ -466,7 +466,7 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
{ // increase extra used bytes
auto prev_stats = s.getMemStats();
- s.get_active_buffer_state().incExtraUsedBytes(50);
+ s.get_active_buffer_state().stats().inc_extra_used_bytes(50);
auto curr_stats = s.getMemStats();
EXPECT_EQ(prev_stats._allocBytes + 50, curr_stats._allocBytes);
EXPECT_EQ(prev_stats._usedBytes + 50, curr_stats._usedBytes);
@@ -474,7 +474,7 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
{ // increase extra hold bytes
auto prev_stats = s.getMemStats();
- s.get_active_buffer_state().incExtraHoldBytes(30);
+ s.get_active_buffer_state().stats().inc_extra_hold_bytes(30);
auto curr_stats = s.getMemStats();
EXPECT_EQ(prev_stats._holdBytes + 30, curr_stats._holdBytes);
}
@@ -655,7 +655,7 @@ TEST(DataStoreTest, can_set_memory_allocator)
namespace {
void
-assertBuffers(BufferStats exp_buffers, size_t num_arrays_for_new_buffer)
+assertBuffers(BufferIds exp_buffers, size_t num_arrays_for_new_buffer)
{
EXPECT_EQ(exp_buffers, IntGrowStore(1, 1, 1024, num_arrays_for_new_buffer).getBuffers(exp_buffers.size()));
}
diff --git a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
index 56c1d0c0f63..6612ef998c5 100644
--- a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
@@ -21,10 +21,10 @@ enum class DictionaryType { BTREE, HASH, BTREE_AND_HASH };
using namespace vespalib::datastore;
using vespalib::ArrayRef;
using generation_t = vespalib::GenerationHandler::generation_t;
-using vespalib::datastore::test::BufferStats;
using vespalib::alloc::MemoryAllocator;
using vespalib::alloc::test::MemoryAllocatorObserver;
using AllocStats = MemoryAllocatorObserver::Stats;
+using TestBufferStats = vespalib::datastore::test::BufferStats;
template <typename UniqueStoreT>
struct TestBaseValues {
@@ -94,10 +94,10 @@ struct TestBase : public ::testing::Test {
uint32_t getBufferId(EntryRef ref) const {
return EntryRefType(ref).bufferId();
}
- void assertBufferState(EntryRef ref, const BufferStats expStats) const {
+ void assertBufferState(EntryRef ref, const TestBufferStats expStats) const {
EXPECT_EQ(expStats._used, store.bufferState(ref).size());
- EXPECT_EQ(expStats._hold, store.bufferState(ref).getHoldElems());
- EXPECT_EQ(expStats._dead, store.bufferState(ref).getDeadElems());
+ EXPECT_EQ(expStats._hold, store.bufferState(ref).stats().hold_elems());
+ EXPECT_EQ(expStats._dead, store.bufferState(ref).stats().dead_elems());
}
void assertStoreContent() const {
for (const auto &elem : refStore) {
@@ -320,9 +320,9 @@ TYPED_TEST(TestBase, elements_are_put_on_hold_when_value_is_removed)
EntryRef ref = this->add(this->values()[0]);
size_t reserved = this->get_reserved(ref);
size_t array_size = this->get_array_size(ref);
- this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(0).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(0).dead(reserved));
this->store.remove(ref);
- this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(array_size).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(array_size).dead(reserved));
}
TYPED_TEST(TestBase, elements_are_reference_counted)
@@ -333,11 +333,11 @@ TYPED_TEST(TestBase, elements_are_reference_counted)
// Note: The first buffer have the first element reserved -> we expect 2 elements used here.
size_t reserved = this->get_reserved(ref);
size_t array_size = this->get_array_size(ref);
- this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(0).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(0).dead(reserved));
this->store.remove(ref);
- this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(0).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(0).dead(reserved));
this->store.remove(ref);
- this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(array_size).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(array_size).dead(reserved));
}
TEST_F(SmallOffsetNumberTest, new_underlying_buffer_is_allocated_when_current_is_full)
@@ -367,7 +367,7 @@ TYPED_TEST(TestBase, store_can_be_compacted)
this->trimHoldLists();
size_t reserved = this->get_reserved(val0Ref);
size_t array_size = this->get_array_size(val0Ref);
- this->assertBufferState(val0Ref, BufferStats().used(reserved + 3 * array_size).dead(reserved + array_size));
+ this->assertBufferState(val0Ref, TestBufferStats().used(reserved + 3 * array_size).dead(reserved + array_size));
uint32_t val1BufferId = this->getBufferId(val0Ref);
EXPECT_EQ(2u, this->refStore.size());
@@ -396,7 +396,7 @@ TYPED_TEST(TestBase, store_can_be_instantiated_with_builder)
EntryRef val1Ref = builder.mapEnumValueToEntryRef(2);
size_t reserved = this->get_reserved(val0Ref);
size_t array_size = this->get_array_size(val0Ref);
- this->assertBufferState(val0Ref, BufferStats().used(2 * array_size + reserved).dead(reserved)); // Note: First element is reserved
+ this->assertBufferState(val0Ref, TestBufferStats().used(2 * array_size + reserved).dead(reserved)); // Note: First element is reserved
EXPECT_TRUE(val0Ref.valid());
EXPECT_TRUE(val1Ref.valid());
EXPECT_NE(val0Ref.ref(), val1Ref.ref());
diff --git a/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp b/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp
index 777da0c2b16..f68dd4dde66 100644
--- a/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp
@@ -11,7 +11,7 @@
using namespace vespalib::datastore;
using vespalib::MemoryUsage;
using generation_t = vespalib::GenerationHandler::generation_t;
-using BufferStats = vespalib::datastore::test::BufferStats;
+using TestBufferStats = vespalib::datastore::test::BufferStats;
using vespalib::alloc::MemoryAllocator;
using vespalib::alloc::test::MemoryAllocatorObserver;
using AllocStats = MemoryAllocatorObserver::Stats;
@@ -60,12 +60,12 @@ struct TestBase : public ::testing::Test {
const BufferState &buffer_state(EntryRef ref) const {
return allocator.get_data_store().getBufferState(get_buffer_id(ref));
}
- void assert_buffer_state(EntryRef ref, const BufferStats expStats) const {
+ void assert_buffer_state(EntryRef ref, const TestBufferStats expStats) const {
EXPECT_EQ(expStats._used, buffer_state(ref).size());
- EXPECT_EQ(expStats._hold, buffer_state(ref).getHoldElems());
- EXPECT_EQ(expStats._dead, buffer_state(ref).getDeadElems());
- EXPECT_EQ(expStats._extra_used, buffer_state(ref).getExtraUsedBytes());
- EXPECT_EQ(expStats._extra_hold, buffer_state(ref).getExtraHoldBytes());
+ EXPECT_EQ(expStats._hold, buffer_state(ref).stats().hold_elems());
+ EXPECT_EQ(expStats._dead, buffer_state(ref).stats().dead_elems());
+ EXPECT_EQ(expStats._extra_used, buffer_state(ref).stats().extra_used_bytes());
+ EXPECT_EQ(expStats._extra_hold, buffer_state(ref).stats().extra_hold_bytes());
}
void trim_hold_lists() {
allocator.get_data_store().transferHoldLists(generation++);
@@ -86,32 +86,32 @@ TEST_F(StringTest, can_add_and_get_values)
TEST_F(StringTest, elements_are_put_on_hold_when_value_is_removed)
{
EntryRef ref = add(small.c_str());
- assert_buffer_state(ref, BufferStats().used(16).hold(0).dead(0));
+ assert_buffer_state(ref, TestBufferStats().used(16).hold(0).dead(0));
remove(ref);
- assert_buffer_state(ref, BufferStats().used(16).hold(16).dead(0));
+ assert_buffer_state(ref, TestBufferStats().used(16).hold(16).dead(0));
trim_hold_lists();
- assert_buffer_state(ref, BufferStats().used(16).hold(0).dead(16));
+ assert_buffer_state(ref, TestBufferStats().used(16).hold(0).dead(16));
}
TEST_F(StringTest, extra_bytes_used_is_tracked)
{
EntryRef ref = add(spaces1000.c_str());
// Note: The first buffer have the first element reserved -> we expect 2 elements used here.
- assert_buffer_state(ref, BufferStats().used(2).hold(0).dead(1).extra_used(1001));
+ assert_buffer_state(ref, TestBufferStats().used(2).hold(0).dead(1).extra_used(1001));
remove(ref);
- assert_buffer_state(ref, BufferStats().used(2).hold(1).dead(1).extra_used(1001).extra_hold(1001));
+ assert_buffer_state(ref, TestBufferStats().used(2).hold(1).dead(1).extra_used(1001).extra_hold(1001));
trim_hold_lists();
- assert_buffer_state(ref, BufferStats().used(2).hold(0).dead(2));
+ assert_buffer_state(ref, TestBufferStats().used(2).hold(0).dead(2));
ref = add(spaces1000.c_str());
- assert_buffer_state(ref, BufferStats().used(2).hold(0).dead(1).extra_used(1001));
+ assert_buffer_state(ref, TestBufferStats().used(2).hold(0).dead(1).extra_used(1001));
EntryRef ref2 = move(ref);
assert_get(ref2, spaces1000.c_str());
- assert_buffer_state(ref, BufferStats().used(3).hold(0).dead(1).extra_used(2002));
+ assert_buffer_state(ref, TestBufferStats().used(3).hold(0).dead(1).extra_used(2002));
remove(ref);
remove(ref2);
- assert_buffer_state(ref, BufferStats().used(3).hold(2).dead(1).extra_used(2002).extra_hold(2002));
+ assert_buffer_state(ref, TestBufferStats().used(3).hold(2).dead(1).extra_used(2002).extra_hold(2002));
trim_hold_lists();
- assert_buffer_state(ref, BufferStats().used(3).hold(0).dead(3));
+ assert_buffer_state(ref, TestBufferStats().used(3).hold(0).dead(3));
}
TEST_F(StringTest, string_length_determines_buffer)
@@ -139,8 +139,8 @@ TEST_F(StringTest, free_list_is_used_when_enabled)
EntryRef ref4 = add(spaces1000.c_str());
EXPECT_EQ(ref1, ref3);
EXPECT_EQ(ref2, ref4);
- assert_buffer_state(ref1, BufferStats().used(16).hold(0).dead(0));
- assert_buffer_state(ref2, BufferStats().used(2).hold(0).dead(1).extra_used(1001));
+ assert_buffer_state(ref1, TestBufferStats().used(16).hold(0).dead(0));
+ assert_buffer_state(ref2, TestBufferStats().used(2).hold(0).dead(1).extra_used(1001));
}
TEST_F(StringTest, free_list_is_not_used_when_disabled)
@@ -155,8 +155,8 @@ TEST_F(StringTest, free_list_is_not_used_when_disabled)
EntryRef ref4 = add(spaces1000.c_str());
EXPECT_NE(ref1, ref3);
EXPECT_NE(ref2, ref4);
- assert_buffer_state(ref1, BufferStats().used(32).hold(0).dead(16));
- assert_buffer_state(ref2, BufferStats().used(3).hold(0).dead(2).extra_used(1001));
+ assert_buffer_state(ref1, TestBufferStats().used(32).hold(0).dead(16));
+ assert_buffer_state(ref2, TestBufferStats().used(3).hold(0).dead(2).extra_used(1001));
}
TEST_F(StringTest, free_list_is_never_used_for_move)
@@ -173,8 +173,8 @@ TEST_F(StringTest, free_list_is_never_used_for_move)
EntryRef ref6 = move(ref2);
EXPECT_NE(ref5, ref3);
EXPECT_NE(ref6, ref4);
- assert_buffer_state(ref1, BufferStats().used(48).hold(0).dead(16));
- assert_buffer_state(ref2, BufferStats().used(4).hold(0).dead(2).extra_used(2002));
+ assert_buffer_state(ref1, TestBufferStats().used(48).hold(0).dead(16));
+ assert_buffer_state(ref2, TestBufferStats().used(4).hold(0).dead(2).extra_used(2002));
}
TEST_F(StringTest, provided_memory_allocator_is_used)
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp
index 81262f560c7..8976d73379c 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp
@@ -34,7 +34,7 @@ BTreeNodeAllocator<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>::
assert(_treeToFreeze.empty());
assert(_internalHoldUntilFreeze.empty());
assert(_leafHoldUntilFreeze.empty());
- DataStoreBase::MemStats stats = _nodeStore.getMemStats();
+ auto stats = _nodeStore.getMemStats();
assert(stats._usedBytes == stats._deadBytes);
assert(stats._holdBytes == 0);
(void) stats;
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.h b/vespalib/src/vespa/vespalib/btree/btreenodestore.h
index 7a03c236637..20f80e07a6b 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.h
@@ -167,7 +167,7 @@ public:
}
// Inherit doc from DataStoreBase
- datastore::DataStoreBase::MemStats getMemStats() const {
+ datastore::MemoryStats getMemStats() const {
return _store.getMemStats();
}
diff --git a/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt b/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt
index 9990e3f5764..f11004363f8 100644
--- a/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt
@@ -5,6 +5,7 @@ vespa_add_library(vespalib_vespalib_datastore OBJECT
array_store_config.cpp
atomic_entry_ref.cpp
buffer_free_list.cpp
+ buffer_stats.cpp
buffer_type.cpp
bufferstate.cpp
compact_buffer_candidates.cpp
@@ -19,6 +20,7 @@ vespa_add_library(vespalib_vespalib_datastore OBJECT
fixed_size_hash_map.cpp
free_list.cpp
large_array_buffer_type.cpp
+ memory_stats.cpp
sharded_hash_map.cpp
small_array_buffer_type.cpp
unique_store.cpp
diff --git a/vespalib/src/vespa/vespalib/datastore/allocator.hpp b/vespalib/src/vespa/vespalib/datastore/allocator.hpp
index 9b69be49b8e..a65fd8a2352 100644
--- a/vespalib/src/vespa/vespalib/datastore/allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/allocator.hpp
@@ -28,7 +28,7 @@ Allocator<EntryT, RefT>::alloc(Args && ... args)
RefT ref(oldBufferSize, buffer_id);
EntryT *entry = _store.getEntry<EntryT>(ref);
new (static_cast<void *>(entry)) EntryT(std::forward<Args>(args)...);
- state.pushed_back(1);
+ state.stats().pushed_back(1);
return HandleType(ref, entry);
}
@@ -48,7 +48,7 @@ Allocator<EntryT, RefT>::allocArray(ConstArrayRef array)
for (size_t i = 0; i < array.size(); ++i) {
new (static_cast<void *>(buf + i)) EntryT(array[i]);
}
- state.pushed_back(array.size());
+ state.stats().pushed_back(array.size());
return HandleType(ref, buf);
}
@@ -68,7 +68,7 @@ Allocator<EntryT, RefT>::allocArray(size_t size)
for (size_t i = 0; i < size; ++i) {
new (static_cast<void *>(buf + i)) EntryT();
}
- state.pushed_back(size);
+ state.stats().pushed_back(size);
return HandleType(ref, buf);
}
diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.hpp b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
index e79398271fb..4df8505e927 100644
--- a/vespalib/src/vespa/vespalib/datastore/array_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
@@ -114,7 +114,7 @@ ArrayStore<EntryT, RefT, TypeMapperT>::addLargeArray(const ConstArrayRef &array)
auto handle = _store.template freeListAllocator<LargeArray, NoOpReclaimer>(_largeArrayTypeId)
.alloc(array.cbegin(), array.cend());
auto& state = _store.getBufferState(RefT(handle.ref).bufferId());
- state.incExtraUsedBytes(sizeof(EntryT) * array.size());
+ state.stats().inc_extra_used_bytes(sizeof(EntryT) * array.size());
return handle.ref;
}
@@ -125,7 +125,7 @@ ArrayStore<EntryT, RefT, TypeMapperT>::allocate_large_array(size_t array_size)
using NoOpReclaimer = DefaultReclaimer<LargeArray>;
auto handle = _store.template freeListAllocator<LargeArray, NoOpReclaimer>(_largeArrayTypeId).alloc(array_size);
auto& state = _store.getBufferState(RefT(handle.ref).bufferId());
- state.incExtraUsedBytes(sizeof(EntryT) * array_size);
+ state.stats().inc_extra_used_bytes(sizeof(EntryT) * array_size);
return handle.ref;
}
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp b/vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp
new file mode 100644
index 00000000000..0d367cf9835
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp
@@ -0,0 +1,57 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "buffer_stats.h"
+#include <cassert>
+
+namespace vespalib::datastore {
+
+BufferStats::BufferStats()
+ : _alloc_elems(0),
+ _used_elems(0),
+ _hold_elems(0),
+ _dead_elems(0),
+ _extra_used_bytes(0),
+ _extra_hold_bytes(0)
+{
+}
+
+void
+BufferStats::dec_hold_elems(size_t value)
+{
+ ElemCount elems = hold_elems();
+ assert(elems >= value);
+ _hold_elems.store(elems - value, std::memory_order_relaxed);
+}
+
+void
+BufferStats::add_to_mem_stats(size_t element_size, MemoryStats& stats) const
+{
+ size_t extra_used = extra_used_bytes();
+ stats._allocElems += capacity();
+ stats._usedElems += size();
+ stats._deadElems += dead_elems();
+ stats._holdElems += hold_elems();
+ stats._allocBytes += (capacity() * element_size) + extra_used;
+ stats._usedBytes += (size() * element_size) + extra_used;
+ stats._deadBytes += dead_elems() * element_size;
+ stats._holdBytes += (hold_elems() * element_size) + extra_hold_bytes();
+}
+
+MutableBufferStats::MutableBufferStats()
+ : BufferStats()
+{
+}
+
+void
+MutableBufferStats::clear()
+{
+ _alloc_elems.store(0, std::memory_order_relaxed);
+ _used_elems.store(0, std::memory_order_relaxed);
+ _hold_elems.store(0, std::memory_order_relaxed);
+ _dead_elems.store(0, std::memory_order_relaxed);
+ _extra_used_bytes.store(0, std::memory_order_relaxed);
+ _extra_hold_bytes.store(0, std::memory_order_relaxed);
+}
+
+}
+
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_stats.h b/vespalib/src/vespa/vespalib/datastore/buffer_stats.h
new file mode 100644
index 00000000000..0df74c0a79d
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_stats.h
@@ -0,0 +1,76 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "buffer_type.h"
+#include "memory_stats.h"
+#include <atomic>
+
+namespace vespalib::datastore {
+
+/**
+ * Represents statistics for a given buffer in a data store.
+ */
+class BufferStats {
+protected:
+ // The number of elements that are allocated in the buffer.
+ std::atomic<ElemCount> _alloc_elems;
+ // The number of elements (of the allocated) that are used: _used_elems <= _alloc_elems.
+ std::atomic<ElemCount> _used_elems;
+ // The number of elements (of the used) that are on hold: _hold_elems <= _used_elems.
+ // "On hold" is a transitionary state used when removing elements.
+ std::atomic<ElemCount> _hold_elems;
+ // The number of elements (of the used) that are dead: _dead_elems <= _used_elems.
+ // A dead element was first on hold, and is now available for reuse in the free list (if enabled).
+ std::atomic<ElemCount> _dead_elems;
+
+ // Number of bytes that are heap allocated (and used) by elements that are stored in this buffer.
+ // For simple types this is always 0.
+ std::atomic<size_t> _extra_used_bytes;
+ // Number of bytes that are heap allocated (and used) by elements that are stored in this buffer and is now on hold.
+ // For simple types this is always 0.
+ std::atomic<size_t> _extra_hold_bytes;
+
+public:
+ BufferStats();
+
+ size_t size() const { return _used_elems.load(std::memory_order_relaxed); }
+ size_t capacity() const { return _alloc_elems.load(std::memory_order_relaxed); }
+ size_t remaining() const { return capacity() - size(); }
+
+ void pushed_back(size_t num_elems) {
+ _used_elems.store(size() + num_elems, std::memory_order_relaxed);
+ }
+
+ size_t dead_elems() const { return _dead_elems.load(std::memory_order_relaxed); }
+ size_t hold_elems() const { return _hold_elems.load(std::memory_order_relaxed); }
+ size_t extra_used_bytes() const { return _extra_used_bytes.load(std::memory_order_relaxed); }
+ size_t extra_hold_bytes() const { return _extra_hold_bytes.load(std::memory_order_relaxed); }
+
+ void inc_dead_elems(size_t value) { _dead_elems.store(dead_elems() + value, std::memory_order_relaxed); }
+ void inc_hold_elems(size_t value) { _hold_elems.store(hold_elems() + value, std::memory_order_relaxed); }
+ void dec_hold_elems(size_t value);
+ void inc_extra_used_bytes(size_t value) { _extra_used_bytes.store(extra_used_bytes() + value, std::memory_order_relaxed); }
+ void inc_extra_hold_bytes(size_t value) { _extra_hold_bytes.store(extra_hold_bytes() + value, std::memory_order_relaxed); }
+
+ void add_to_mem_stats(size_t element_size, MemoryStats& stats) const;
+};
+
+/**
+ * Provides low-level access to buffer stats for integration in BufferState.
+ */
+class MutableBufferStats : public BufferStats {
+public:
+ MutableBufferStats();
+ void clear();
+ void set_alloc_elems(size_t value) { _alloc_elems.store(value, std::memory_order_relaxed); }
+ void set_dead_elems(size_t value) { _dead_elems.store(value, std::memory_order_relaxed); }
+ void set_hold_elems(size_t value) { _hold_elems.store(value, std::memory_order_relaxed); }
+ std::atomic<ElemCount>& used_elems_ref() { return _used_elems; }
+ std::atomic<ElemCount>& dead_elems_ref() { return _dead_elems; }
+ std::atomic<size_t>& extra_used_bytes_ref() { return _extra_used_bytes; }
+ std::atomic<size_t>& extra_hold_bytes_ref() { return _extra_hold_bytes; }
+};
+
+}
+
diff --git a/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp b/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
index d24d8336131..35455a193d2 100644
--- a/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
@@ -11,13 +11,8 @@ using vespalib::alloc::MemoryAllocator;
namespace vespalib::datastore {
BufferState::BufferState()
- : _usedElems(0),
- _allocElems(0),
- _deadElems(0u),
- _holdElems(0u),
- _extraUsedBytes(0),
- _extraHoldBytes(0),
- _free_list(_deadElems),
+ : _stats(),
+ _free_list(_stats.dead_elems_ref()),
_typeHandler(nullptr),
_buffer(Alloc::alloc(0, MemoryAllocator::HUGEPAGE_SIZE)),
_arraySize(0),
@@ -33,14 +28,7 @@ BufferState::~BufferState()
assert(getState() == State::FREE);
assert(!_free_list.enabled());
assert(_free_list.empty());
- assert(_holdElems == 0);
-}
-
-void
-BufferState::decHoldElems(size_t value) {
- ElemCount hold_elems = getHoldElems();
- assert(hold_elems >= value);
- _holdElems.store(hold_elems - value, std::memory_order_relaxed);
+ assert(_stats.hold_elems() == 0);
}
namespace {
@@ -100,10 +88,10 @@ BufferState::onActive(uint32_t bufferId, uint32_t typeId,
assert(_typeHandler == nullptr);
assert(capacity() == 0);
assert(size() == 0);
- assert(getDeadElems() == 0u);
- assert(getHoldElems() == 0);
- assert(getExtraUsedBytes() == 0);
- assert(getExtraHoldBytes() == 0);
+ assert(_stats.dead_elems() == 0u);
+ assert(_stats.hold_elems() == 0);
+ assert(_stats.extra_used_bytes() == 0);
+ assert(_stats.extra_hold_bytes() == 0);
assert(_free_list.empty());
size_t reservedElements = typeHandler->getReservedElements(bufferId);
@@ -115,14 +103,15 @@ BufferState::onActive(uint32_t bufferId, uint32_t typeId,
_buffer.create(alloc.bytes).swap(_buffer);
assert(_buffer.get() != nullptr || alloc.elements == 0u);
buffer.store(_buffer.get(), std::memory_order_release);
- _allocElems.store(alloc.elements, std::memory_order_relaxed);
+ _stats.set_alloc_elems(alloc.elements);
_typeHandler.store(typeHandler, std::memory_order_release);
assert(typeId <= std::numeric_limits<uint16_t>::max());
_typeId = typeId;
_arraySize = typeHandler->getArraySize();
_free_list.set_array_size(_arraySize);
_state.store(State::ACTIVE, std::memory_order_release);
- typeHandler->onActive(bufferId, &_usedElems, &_deadElems, buffer.load(std::memory_order::relaxed));
+ typeHandler->onActive(bufferId, &_stats.used_elems_ref(), &_stats.dead_elems_ref(),
+ buffer.load(std::memory_order::relaxed));
}
void
@@ -132,11 +121,11 @@ BufferState::onHold(uint32_t buffer_id)
assert(getTypeHandler() != nullptr);
_state.store(State::HOLD, std::memory_order_release);
_compacting = false;
- assert(getDeadElems() <= size());
- assert(getHoldElems() <= (size() - getDeadElems()));
- _deadElems.store(0, std::memory_order_relaxed);
- _holdElems.store(size(), std::memory_order_relaxed); // Put everyting on hold
- getTypeHandler()->onHold(buffer_id, &_usedElems, &_deadElems);
+ assert(_stats.dead_elems() <= size());
+ assert(_stats.hold_elems() <= (size() - _stats.dead_elems()));
+ _stats.set_dead_elems(0);
+ _stats.set_hold_elems(size());
+ getTypeHandler()->onHold(buffer_id, &_stats.used_elems_ref(), &_stats.dead_elems_ref());
_free_list.disable();
}
@@ -146,18 +135,13 @@ BufferState::onFree(std::atomic<void*>& buffer)
assert(buffer.load(std::memory_order_relaxed) == _buffer.get());
assert(getState() == State::HOLD);
assert(_typeHandler != nullptr);
- assert(getDeadElems() <= size());
- assert(getHoldElems() == size() - getDeadElems());
+ assert(_stats.dead_elems() <= size());
+ assert(_stats.hold_elems() == (size() - _stats.dead_elems()));
getTypeHandler()->destroyElements(buffer, size());
Alloc::alloc().swap(_buffer);
getTypeHandler()->onFree(size());
buffer.store(nullptr, std::memory_order_release);
- _usedElems.store(0, std::memory_order_relaxed);
- _allocElems.store(0, std::memory_order_relaxed);
- _deadElems.store(0, std::memory_order_relaxed);
- _holdElems.store(0, std::memory_order_relaxed);
- _extraUsedBytes.store(0, std::memory_order_relaxed);
- _extraHoldBytes.store(0, std::memory_order_relaxed);
+ _stats.clear();
_state.store(State::FREE, std::memory_order_release);
_typeHandler = nullptr;
_arraySize = 0;
@@ -192,7 +176,6 @@ BufferState::disableElemHoldList()
_disableElemHoldList = true;
}
-
void
BufferState::fallbackResize(uint32_t bufferId,
size_t elementsNeeded,
@@ -211,13 +194,13 @@ BufferState::fallbackResize(uint32_t bufferId,
std::atomic_thread_fence(std::memory_order_release);
_buffer = std::move(newBuffer);
buffer.store(_buffer.get(), std::memory_order_release);
- _allocElems.store(alloc.elements, std::memory_order_relaxed);
+ _stats.set_alloc_elems(alloc.elements);
}
void
BufferState::resume_primary_buffer(uint32_t buffer_id)
{
- getTypeHandler()->resume_primary_buffer(buffer_id, &_usedElems, &_deadElems);
+ getTypeHandler()->resume_primary_buffer(buffer_id, &_stats.used_elems_ref(), &_stats.dead_elems_ref());
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/bufferstate.h b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
index 8f32a93b487..c35a51b0c99 100644
--- a/vespalib/src/vespa/vespalib/datastore/bufferstate.h
+++ b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
@@ -3,6 +3,7 @@
#pragma once
#include "buffer_free_list.h"
+#include "buffer_stats.h"
#include "buffer_type.h"
#include "entryref.h"
#include <vespa/vespalib/util/generationhandler.h>
@@ -38,17 +39,7 @@ public:
};
private:
- std::atomic<ElemCount> _usedElems;
- std::atomic<ElemCount> _allocElems;
- std::atomic<ElemCount> _deadElems;
- std::atomic<ElemCount> _holdElems;
- // Number of bytes that are heap allocated by elements that are stored in this buffer.
- // For simple types this is 0.
- std::atomic<size_t> _extraUsedBytes;
- // Number of bytes that are heap allocated by elements that are stored in this buffer and is now on hold.
- // For simple types this is 0.
- std::atomic<size_t> _extraHoldBytes;
-
+ MutableBufferStats _stats;
BufferFreeList _free_list;
std::atomic<BufferTypeBase*> _typeHandler;
Alloc _buffer;
@@ -91,36 +82,28 @@ public:
*/
void onFree(std::atomic<void*>& buffer);
-
/**
* Disable hold of elements, just mark then as dead without cleanup.
* Typically used when tearing down data structure in a controlled manner.
*/
void disableElemHoldList();
+ BufferStats& stats() { return _stats; }
+ const BufferStats& stats() const { return _stats; }
BufferFreeList& free_list() { return _free_list; }
const BufferFreeList& free_list() const { return _free_list; }
- size_t size() const { return _usedElems.load(std::memory_order_relaxed); }
- size_t capacity() const { return _allocElems.load(std::memory_order_relaxed); }
- size_t remaining() const { return capacity() - size(); }
- void pushed_back(size_t numElems) {
- pushed_back(numElems, 0);
- }
- void pushed_back(size_t numElems, size_t extraBytes) {
- _usedElems.store(size() + numElems, std::memory_order_relaxed);
- _extraUsedBytes.store(getExtraUsedBytes() + extraBytes, std::memory_order_relaxed);
- }
+ size_t size() const { return _stats.size(); }
+ size_t capacity() const { return _stats.capacity(); }
+ size_t remaining() const { return _stats.remaining(); }
void cleanHold(void *buffer, size_t offset, ElemCount numElems) {
- getTypeHandler()->cleanHold(buffer, offset, numElems, BufferTypeBase::CleanContext(_extraUsedBytes, _extraHoldBytes));
+ getTypeHandler()->cleanHold(buffer, offset, numElems,
+ BufferTypeBase::CleanContext(_stats.extra_used_bytes_ref(),
+ _stats.extra_hold_bytes_ref()));
}
void dropBuffer(uint32_t buffer_id, std::atomic<void*>& buffer);
uint32_t getTypeId() const { return _typeId; }
uint32_t getArraySize() const { return _arraySize; }
- size_t getDeadElems() const { return _deadElems.load(std::memory_order_relaxed); }
- size_t getHoldElems() const { return _holdElems.load(std::memory_order_relaxed); }
- size_t getExtraUsedBytes() const { return _extraUsedBytes.load(std::memory_order_relaxed); }
- size_t getExtraHoldBytes() const { return _extraHoldBytes.load(std::memory_order_relaxed); }
bool getCompacting() const { return _compacting; }
void setCompacting() { _compacting = true; }
uint32_t get_used_arrays() const noexcept { return size() / _arraySize; }
@@ -136,14 +119,6 @@ public:
const BufferTypeBase *getTypeHandler() const { return _typeHandler.load(std::memory_order_relaxed); }
BufferTypeBase *getTypeHandler() { return _typeHandler.load(std::memory_order_relaxed); }
- void incDeadElems(size_t value) { _deadElems.store(getDeadElems() + value, std::memory_order_relaxed); }
- void incHoldElems(size_t value) { _holdElems.store(getHoldElems() + value, std::memory_order_relaxed); }
- void decHoldElems(size_t value);
- void incExtraUsedBytes(size_t value) { _extraUsedBytes.store(getExtraUsedBytes() + value, std::memory_order_relaxed); }
- void incExtraHoldBytes(size_t value) {
- _extraHoldBytes.store(getExtraHoldBytes() + value, std::memory_order_relaxed);
- }
-
bool hasDisabledElemHoldList() const { return _disableElemHoldList; }
void resume_primary_buffer(uint32_t buffer_id);
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.hpp b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
index 5b8df719915..90f7507f80f 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
@@ -33,9 +33,9 @@ DataStoreT<RefT>::free_elem_internal(EntryRef ref, size_t numElems, bool was_hel
} else {
assert(state.isOnHold() && was_held);
}
- state.incDeadElems(numElems);
+ state.stats().inc_dead_elems(numElems);
if (was_held) {
- state.decHoldElems(numElems);
+ state.stats().dec_hold_elems(numElems);
}
state.cleanHold(getBuffer(intRef.bufferId()),
intRef.offset() * state.getArraySize(), numElems);
@@ -49,12 +49,12 @@ DataStoreT<RefT>::holdElem(EntryRef ref, size_t numElems, size_t extraBytes)
BufferState &state = getBufferState(intRef.bufferId());
assert(state.isActive());
if (state.hasDisabledElemHoldList()) {
- state.incDeadElems(numElems);
+ state.stats().inc_dead_elems(numElems);
return;
}
_elemHold1List.push_back(ElemHold1ListElem(ref, numElems));
- state.incHoldElems(numElems);
- state.incExtraHoldBytes(extraBytes);
+ state.stats().inc_hold_elems(numElems);
+ state.stats().inc_extra_hold_bytes(extraBytes);
}
template <typename RefT>
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
index fbf19972f80..302a1b49219 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
@@ -36,7 +36,7 @@ constexpr size_t TOO_DEAD_SLACK = 0x4000u;
bool
primary_buffer_too_dead(const BufferState &state)
{
- size_t deadElems = state.getDeadElems();
+ size_t deadElems = state.stats().dead_elems();
size_t deadBytes = deadElems * state.getArraySize();
return ((deadBytes >= TOO_DEAD_SLACK) && (deadElems * 2 >= state.size()));
}
@@ -275,7 +275,7 @@ DataStoreBase::dropBuffers()
vespalib::MemoryUsage
DataStoreBase::getMemoryUsage() const
{
- MemStats stats = getMemStats();
+ auto stats = getMemStats();
vespalib::MemoryUsage usage;
usage.setAllocatedBytes(stats._allocBytes);
usage.setUsedBytes(stats._usedBytes);
@@ -341,28 +341,10 @@ DataStoreBase::disableElemHoldList()
}
}
-namespace {
-
-void
-add_buffer_state_to_mem_stats(const BufferState& state, size_t elementSize, DataStoreBase::MemStats& stats)
-{
- size_t extra_used_bytes = state.getExtraUsedBytes();
- stats._allocElems += state.capacity();
- stats._usedElems += state.size();
- stats._deadElems += state.getDeadElems();
- stats._holdElems += state.getHoldElems();
- stats._allocBytes += (state.capacity() * elementSize) + extra_used_bytes;
- stats._usedBytes += (state.size() * elementSize) + extra_used_bytes;
- stats._deadBytes += state.getDeadElems() * elementSize;
- stats._holdBytes += (state.getHoldElems() * elementSize) + state.getExtraHoldBytes();
-}
-
-}
-
-DataStoreBase::MemStats
+MemoryStats
DataStoreBase::getMemStats() const
{
- MemStats stats;
+ MemoryStats stats;
for (const BufferState & bState: _states) {
auto typeHandler = bState.getTypeHandler();
@@ -372,11 +354,11 @@ DataStoreBase::getMemStats() const
} else if (state == BufferState::State::ACTIVE) {
size_t elementSize = typeHandler->elementSize();
++stats._activeBuffers;
- add_buffer_state_to_mem_stats(bState, elementSize, stats);
+ bState.stats().add_to_mem_stats(elementSize, stats);
} else if (state == BufferState::State::HOLD) {
size_t elementSize = typeHandler->elementSize();
++stats._holdBuffers;
- add_buffer_state_to_mem_stats(bState, elementSize, stats);
+ bState.stats().add_to_mem_stats(elementSize, stats);
} else {
LOG_ABORT("should not be reached");
}
@@ -398,7 +380,7 @@ DataStoreBase::getAddressSpaceUsage() const
if (bState.isActive()) {
uint32_t arraySize = bState.getArraySize();
usedArrays += bState.size() / arraySize;
- deadArrays += bState.getDeadElems() / arraySize;
+ deadArrays += bState.stats().dead_elems() / arraySize;
limitArrays += bState.capacity() / arraySize;
} else if (bState.isOnHold()) {
uint32_t arraySize = bState.getArraySize();
@@ -489,7 +471,7 @@ DataStoreBase::start_compact_worst_buffers(CompactionSpec compaction_spec, const
uint32_t arraySize = typeHandler->getArraySize();
uint32_t reservedElements = typeHandler->getReservedElements(bufferId);
size_t used_elems = state.size();
- size_t deadElems = state.getDeadElems() - reservedElements;
+ size_t deadElems = state.stats().dead_elems() - reservedElements;
if (compaction_spec.compact_memory()) {
elem_buffers.add(bufferId, used_elems, deadElems);
}
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
index 8351527f9a2..4038e12efee 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
@@ -4,12 +4,13 @@
#include "bufferstate.h"
#include "free_list.h"
+#include "memory_stats.h"
#include <vespa/vespalib/util/address_space.h>
#include <vespa/vespalib/util/generationholder.h>
#include <vespa/vespalib/util/memoryusage.h>
-#include <vector>
-#include <deque>
#include <atomic>
+#include <deque>
+#include <vector>
namespace vespalib::datastore {
@@ -102,52 +103,6 @@ protected:
class BufferHold;
-public:
- class MemStats
- {
- public:
- size_t _allocElems;
- size_t _usedElems;
- size_t _deadElems;
- size_t _holdElems;
- size_t _allocBytes;
- size_t _usedBytes;
- size_t _deadBytes;
- size_t _holdBytes;
- uint32_t _freeBuffers;
- uint32_t _activeBuffers;
- uint32_t _holdBuffers;
-
- MemStats()
- : _allocElems(0),
- _usedElems(0),
- _deadElems(0),
- _holdElems(0),
- _allocBytes(0),
- _usedBytes(0),
- _deadBytes(0),
- _holdBytes(0),
- _freeBuffers(0),
- _activeBuffers(0),
- _holdBuffers(0)
- { }
-
- MemStats& operator+=(const MemStats &rhs) {
- _allocElems += rhs._allocElems;
- _usedElems += rhs._usedElems;
- _deadElems += rhs._deadElems;
- _holdElems += rhs._holdElems;
- _allocBytes += rhs._allocBytes;
- _usedBytes += rhs._usedBytes;
- _deadBytes += rhs._deadBytes;
- _holdBytes += rhs._holdBytes;
- _freeBuffers += rhs._freeBuffers;
- _activeBuffers += rhs._activeBuffers;
- _holdBuffers += rhs._holdBuffers;
- return *this;
- }
- };
-
private:
std::vector<BufferState> _states;
protected:
@@ -301,7 +256,7 @@ public:
void incDead(uint32_t bufferId, size_t deadElems) {
BufferState &state = _states[bufferId];
- state.incDeadElems(deadElems);
+ state.stats().inc_dead_elems(deadElems);
}
/**
@@ -339,7 +294,7 @@ public:
/**
* Returns aggregated memory statistics for all buffers in this data store.
*/
- MemStats getMemStats() const;
+ MemoryStats getMemStats() const;
/**
* Assume that no readers are present while data structure is being initialized.
diff --git a/vespalib/src/vespa/vespalib/datastore/memory_stats.cpp b/vespalib/src/vespa/vespalib/datastore/memory_stats.cpp
new file mode 100644
index 00000000000..8e060b4cfb4
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/memory_stats.cpp
@@ -0,0 +1,40 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "memory_stats.h"
+
+namespace vespalib::datastore {
+
+MemoryStats::MemoryStats()
+ : _allocElems(0),
+ _usedElems(0),
+ _deadElems(0),
+ _holdElems(0),
+ _allocBytes(0),
+ _usedBytes(0),
+ _deadBytes(0),
+ _holdBytes(0),
+ _freeBuffers(0),
+ _activeBuffers(0),
+ _holdBuffers(0)
+{
+}
+
+MemoryStats&
+MemoryStats::operator+=(const MemoryStats& rhs)
+{
+ _allocElems += rhs._allocElems;
+ _usedElems += rhs._usedElems;
+ _deadElems += rhs._deadElems;
+ _holdElems += rhs._holdElems;
+ _allocBytes += rhs._allocBytes;
+ _usedBytes += rhs._usedBytes;
+ _deadBytes += rhs._deadBytes;
+ _holdBytes += rhs._holdBytes;
+ _freeBuffers += rhs._freeBuffers;
+ _activeBuffers += rhs._activeBuffers;
+ _holdBuffers += rhs._holdBuffers;
+ return *this;
+}
+
+}
+
diff --git a/vespalib/src/vespa/vespalib/datastore/memory_stats.h b/vespalib/src/vespa/vespalib/datastore/memory_stats.h
new file mode 100644
index 00000000000..18d7dd77559
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/memory_stats.h
@@ -0,0 +1,32 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+
+namespace vespalib::datastore {
+
+/**
+ * Represents aggregated memory statistics for all buffers in a data store.
+ */
+class MemoryStats
+{
+public:
+ size_t _allocElems;
+ size_t _usedElems;
+ size_t _deadElems;
+ size_t _holdElems;
+ size_t _allocBytes;
+ size_t _usedBytes;
+ size_t _deadBytes;
+ size_t _holdBytes;
+ uint32_t _freeBuffers;
+ uint32_t _activeBuffers;
+ uint32_t _holdBuffers;
+
+ MemoryStats();
+ MemoryStats& operator+=(const MemoryStats& rhs);
+};
+
+}
diff --git a/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
index 0d67bf71c20..7395ef68a73 100644
--- a/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
@@ -28,7 +28,7 @@ RawAllocator<EntryT, RefT>::alloc(size_t numElems, size_t extraElems)
assert((numElems % arraySize) == 0u);
RefT ref((oldBufferSize / arraySize), buffer_id);
EntryT *buffer = _store.getEntryArray<EntryT>(ref, arraySize);
- state.pushed_back(numElems);
+ state.stats().pushed_back(numElems);
return HandleType(ref, buffer);
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
index 71ea16bcde2..b5405cd22b5 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
@@ -49,7 +49,7 @@ UniqueStoreStringAllocator<RefT>::allocate(const char *value)
auto handle = _store.template freeListAllocator<WrappedExternalEntryType, UniqueStoreEntryReclaimer<WrappedExternalEntryType>>(0).alloc(std::string(value));
RefT iRef(handle.ref);
auto &state = _store.getBufferState(iRef.bufferId());
- state.incExtraUsedBytes(value_len + 1);
+ state.stats().inc_extra_used_bytes(value_len + 1);
return handle.ref;
}
}
@@ -87,7 +87,7 @@ UniqueStoreStringAllocator<RefT>::move(EntryRef ref)
auto handle = _store.template allocator<WrappedExternalEntryType>(0).alloc(*_store.template getEntry<WrappedExternalEntryType>(iRef));
auto &state = _store.getBufferState(RefT(handle.ref).bufferId());
auto &value = static_cast<const WrappedExternalEntryType *>(handle.data)->value();
- state.incExtraUsedBytes(value.size() + 1);
+ state.stats().inc_extra_used_bytes(value.size() + 1);
return handle.ref;
}
}