-rw-r--r--  searchlib/src/tests/attribute/attribute_test.cpp | 40
-rw-r--r--  searchlib/src/tests/memoryindex/field_index/field_index_test.cpp | 36
-rw-r--r--  searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp | 4
-rw-r--r--  searchlib/src/tests/predicate/document_features_store_test.cpp | 14
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/postinglistattribute.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/postingstore.cpp | 12
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/field_index.cpp | 4
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/document_features_store.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/simple_index.hpp | 6
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp | 8
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/direct_tensor_store.cpp | 6
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/direct_tensor_store.h | 4
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.cpp | 20
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.h | 8
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.cpp | 28
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.h | 8
-rw-r--r--  storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp | 2
-rw-r--r--  vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp | 2
-rw-r--r--  vespalib/src/tests/btree/btree_test.cpp | 2
-rw-r--r--  vespalib/src/tests/datastore/array_store/array_store_test.cpp | 26
-rw-r--r--  vespalib/src/tests/datastore/buffer_stats/buffer_stats_test.cpp | 14
-rw-r--r--  vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp | 146
-rw-r--r--  vespalib/src/tests/datastore/datastore/datastore_test.cpp | 84
-rw-r--r--  vespalib/src/tests/datastore/free_list/free_list_test.cpp | 10
-rw-r--r--  vespalib/src/tests/datastore/unique_store/unique_store_test.cpp | 41
-rw-r--r--  vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp | 18
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btree.h | 4
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodeallocator.h | 4
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp | 8
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodestore.h | 12
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodestore.hpp | 10
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreestore.h | 6
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreestore.hpp | 22
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/allocator.hpp | 18
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/array_store.hpp | 5
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/buffer_free_list.cpp | 6
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/buffer_free_list.h | 4
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp | 40
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/buffer_stats.h | 50
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/buffer_type.cpp | 113
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/buffer_type.h | 81
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/buffer_type.hpp | 22
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/bufferstate.cpp | 120
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/bufferstate.h | 37
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastore.h | 12
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastore.hpp | 14
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastorebase.cpp | 96
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastorebase.h | 20
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/large_array_buffer_type.h | 2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/large_array_buffer_type.hpp | 4
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/memory_stats.cpp | 16
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/memory_stats.h | 8
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp | 9
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store.hpp | 2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp | 2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp | 2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.cpp | 18
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h | 8
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp | 5
65 files changed, 656 insertions, 683 deletions
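
The changes below rename the datastore hold and accounting API from element-based to entry-based terms (holdElem, ElemCount and allocElems become hold_entry, EntryCount and alloc_entries). A minimal sketch of the new calling pattern, mirroring the updated datastore_test.cpp hunks further down (MyStore stands in for that test's DataStore wrapper and is assumed here rather than defined):

    MyStore s;
    auto r1 = s.addEntry(1);     // allocate one entry holding the value 1
    s.hold_entry(r1);            // was s.holdElem(r1, 1); holds are now per entry, no length argument
    s.assign_generation(10);     // tag held entries with the current generation
    s.reclaim_entry_refs(11);    // reclaim entries held before generation 11
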
diff --git a/searchlib/src/tests/attribute/attribute_test.cpp b/searchlib/src/tests/attribute/attribute_test.cpp
index 7eb43faac18..4549291b80f 100644
--- a/searchlib/src/tests/attribute/attribute_test.cpp
+++ b/searchlib/src/tests/attribute/attribute_test.cpp
@@ -912,8 +912,8 @@ AttributeTest::testSingle()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("sv-post-int32", cfg);
ptr->updateStat(true);
- EXPECT_EQ(339020u, ptr->getStatus().getAllocated());
- EXPECT_EQ(101852u, ptr->getStatus().getUsed());
+ EXPECT_EQ(338972u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(101612u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testSingle<IntegerAttribute, AttributeVector::largeint_t, int32_t>(ptr, values);
}
@@ -934,8 +934,8 @@ AttributeTest::testSingle()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("sv-post-float", cfg);
ptr->updateStat(true);
- EXPECT_EQ(339020, ptr->getStatus().getAllocated());
- EXPECT_EQ(101852u, ptr->getStatus().getUsed());
+ EXPECT_EQ(338972u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(101612u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testSingle<FloatingPointAttribute, double, float>(ptr, values);
}
@@ -947,8 +947,8 @@ AttributeTest::testSingle()
{
AttributePtr ptr = createAttribute("sv-string", Config(BasicType::STRING, CollectionType::SINGLE));
ptr->updateStat(true);
- EXPECT_EQ(117256u + sizeof_large_string_entry, ptr->getStatus().getAllocated());
- EXPECT_EQ(53240u + sizeof_large_string_entry, ptr->getStatus().getUsed());
+ EXPECT_EQ(116528u + sizeof_large_string_entry, ptr->getStatus().getAllocated());
+ EXPECT_EQ(52920u + sizeof_large_string_entry, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testSingle<StringAttribute, string, string>(ptr, values);
}
@@ -957,8 +957,8 @@ AttributeTest::testSingle()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("sv-fs-string", cfg);
ptr->updateStat(true);
- EXPECT_EQ(345624u + sizeof_large_string_entry, ptr->getStatus().getAllocated());
- EXPECT_EQ(105176u + sizeof_large_string_entry, ptr->getStatus().getUsed());
+ EXPECT_EQ(344848u + sizeof_large_string_entry, ptr->getStatus().getAllocated());
+ EXPECT_EQ(104664u + sizeof_large_string_entry, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testSingle<StringAttribute, string, string>(ptr, values);
}
@@ -1089,8 +1089,8 @@ AttributeTest::testArray()
{
AttributePtr ptr = createAttribute("a-int32", Config(BasicType::INT32, CollectionType::ARRAY));
ptr->updateStat(true);
- EXPECT_EQ(512056u, ptr->getStatus().getAllocated());
- EXPECT_EQ(504392u, ptr->getStatus().getUsed());
+ EXPECT_EQ(495672u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(487944u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testArray<IntegerAttribute, AttributeVector::largeint_t>(ptr, values);
}
@@ -1099,8 +1099,8 @@ AttributeTest::testArray()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("flags", cfg);
ptr->updateStat(true);
- EXPECT_EQ(512056u, ptr->getStatus().getAllocated());
- EXPECT_EQ(504392u, ptr->getStatus().getUsed());
+ EXPECT_EQ(495672u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(487944u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testArray<IntegerAttribute, AttributeVector::largeint_t>(ptr, values);
}
@@ -1109,8 +1109,8 @@ AttributeTest::testArray()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("a-fs-int32", cfg);
ptr->updateStat(true);
- EXPECT_EQ(868788u, ptr->getStatus().getAllocated());
- EXPECT_EQ(606264u, ptr->getStatus().getUsed());
+ EXPECT_EQ(852308u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(589576u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testArray<IntegerAttribute, AttributeVector::largeint_t>(ptr, values);
}
@@ -1128,8 +1128,8 @@ AttributeTest::testArray()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("a-fs-float", cfg);
ptr->updateStat(true);
- EXPECT_EQ(868788u, ptr->getStatus().getAllocated());
- EXPECT_EQ(606264u, ptr->getStatus().getUsed());
+ EXPECT_EQ(852308u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(589576u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testArray<FloatingPointAttribute, double>(ptr, values);
}
@@ -1140,8 +1140,8 @@ AttributeTest::testArray()
{
AttributePtr ptr = createAttribute("a-string", Config(BasicType::STRING, CollectionType::ARRAY));
ptr->updateStat(true);
- EXPECT_EQ(625088u + sizeof_large_string_entry, ptr->getStatus().getAllocated());
- EXPECT_EQ(557632u + sizeof_large_string_entry, ptr->getStatus().getUsed());
+ EXPECT_EQ(607976u + sizeof_large_string_entry, ptr->getStatus().getAllocated());
+ EXPECT_EQ(540864u + sizeof_large_string_entry, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testArray<StringAttribute, string>(ptr, values);
}
@@ -1150,8 +1150,8 @@ AttributeTest::testArray()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("afs-string", cfg);
ptr->updateStat(true);
- EXPECT_EQ(875392u + sizeof_large_string_entry, ptr->getStatus().getAllocated());
- EXPECT_EQ(609588u + sizeof_large_string_entry, ptr->getStatus().getUsed());
+ EXPECT_EQ(858184u + sizeof_large_string_entry, ptr->getStatus().getAllocated());
+ EXPECT_EQ(592628u + sizeof_large_string_entry, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testArray<StringAttribute, string>(ptr, values);
}
diff --git a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
index b57a2f42ea7..69478c09a25 100644
--- a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
+++ b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
@@ -1032,14 +1032,14 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
auto beforeStats = getFeatureStoreMemStats(_fic);
LOG(info,
- "Before feature compaction: allocElems=%zu, usedElems=%zu"
- ", deadElems=%zu, holdElems=%zu"
+ "Before feature compaction: alloc_entries=%zu, used_entries=%zu"
+ ", dead_entries=%zu, hold_entries=%zu"
", freeBuffers=%u, activeBuffers=%u"
", holdBuffers=%u",
- beforeStats._allocElems,
- beforeStats._usedElems,
- beforeStats._deadElems,
- beforeStats._holdElems,
+ beforeStats._alloc_entries,
+ beforeStats._used_entries,
+ beforeStats._dead_entries,
+ beforeStats._hold_entries,
beforeStats._freeBuffers,
beforeStats._activeBuffers,
beforeStats._holdBuffers);
@@ -1052,14 +1052,14 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
myCommit(_fic, *_pushThreads);
auto duringStats = getFeatureStoreMemStats(_fic);
LOG(info,
- "During feature compaction: allocElems=%zu, usedElems=%zu"
- ", deadElems=%zu, holdElems=%zu"
+ "During feature compaction: alloc_entries=%zu, used_entries=%zu"
+ ", dead_entries=%zu, hold_entries=%zu"
", freeBuffers=%u, activeBuffers=%u"
", holdBuffers=%u",
- duringStats._allocElems,
- duringStats._usedElems,
- duringStats._deadElems,
- duringStats._holdElems,
+ duringStats._alloc_entries,
+ duringStats._used_entries,
+ duringStats._dead_entries,
+ duringStats._hold_entries,
duringStats._freeBuffers,
duringStats._activeBuffers,
duringStats._holdBuffers);
@@ -1067,14 +1067,14 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
myCommit(_fic, *_pushThreads);
auto afterStats = getFeatureStoreMemStats(_fic);
LOG(info,
- "After feature compaction: allocElems=%zu, usedElems=%zu"
- ", deadElems=%zu, holdElems=%zu"
+ "After feature compaction: alloc_entries=%zu, used_entries=%zu"
+ ", dead_entries=%zu, hold_entries=%zu"
", freeBuffers=%u, activeBuffers=%u"
", holdBuffers=%u",
- afterStats._allocElems,
- afterStats._usedElems,
- afterStats._deadElems,
- afterStats._holdElems,
+ afterStats._alloc_entries,
+ afterStats._used_entries,
+ afterStats._dead_entries,
+ afterStats._hold_entries,
afterStats._freeBuffers,
afterStats._activeBuffers,
afterStats._holdBuffers);
diff --git a/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp b/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp
index 8073fb8d232..388b708f9aa 100644
--- a/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp
+++ b/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp
@@ -462,8 +462,8 @@ TEST(MemoryIndexTest, require_that_num_docs_and_doc_id_limit_is_returned)
TEST(MemoryIndexTest, require_that_we_understand_the_memory_footprint)
{
- constexpr size_t BASE_ALLOCATED = 361032u;
- constexpr size_t BASE_USED = 151188u;
+ constexpr size_t BASE_ALLOCATED = 360936u;
+ constexpr size_t BASE_USED = 150932u;
{
MySetup setup;
Index index(setup);
diff --git a/searchlib/src/tests/predicate/document_features_store_test.cpp b/searchlib/src/tests/predicate/document_features_store_test.cpp
index 4ac4bdc32f0..ee247f8c74e 100644
--- a/searchlib/src/tests/predicate/document_features_store_test.cpp
+++ b/searchlib/src/tests/predicate/document_features_store_test.cpp
@@ -165,17 +165,17 @@ TEST("require that both features and ranges are removed by 'remove'") {
TEST("require that both features and ranges counts towards memory usage") {
DocumentFeaturesStore features_store(10);
- EXPECT_EQUAL(50136u, features_store.getMemoryUsage().usedBytes());
+ EXPECT_EQUAL(50088u, features_store.getMemoryUsage().usedBytes());
PredicateTreeAnnotations annotations;
annotations.features.push_back(PredicateHash::hash64("foo=100-199"));
features_store.insert(annotations, doc_id);
- EXPECT_EQUAL(50144u, features_store.getMemoryUsage().usedBytes());
+ EXPECT_EQUAL(50096u, features_store.getMemoryUsage().usedBytes());
annotations.features.clear();
annotations.range_features.push_back({"foo", 100, 199});
features_store.insert(annotations, doc_id + 1);
- EXPECT_EQUAL(50240u, features_store.getMemoryUsage().usedBytes());
+ EXPECT_EQUAL(50192u, features_store.getMemoryUsage().usedBytes());
}
TEST("require that DocumentFeaturesStore can be serialized") {
@@ -205,17 +205,17 @@ TEST("require that serialization cleans up wordstore") {
PredicateTreeAnnotations annotations;
annotations.range_features.push_back({"foo", 100, 199});
features_store.insert(annotations, doc_id);
- EXPECT_EQUAL(50232u, features_store.getMemoryUsage().usedBytes());
+ EXPECT_EQUAL(50184u, features_store.getMemoryUsage().usedBytes());
annotations.range_features.push_back({"bar", 100, 199});
features_store.insert(annotations, doc_id + 1);
- EXPECT_EQUAL(50620u, features_store.getMemoryUsage().usedBytes());
- features_store.remove(doc_id + 1);
EXPECT_EQUAL(50572u, features_store.getMemoryUsage().usedBytes());
+ features_store.remove(doc_id + 1);
+ EXPECT_EQUAL(50524u, features_store.getMemoryUsage().usedBytes());
vespalib::DataBuffer buffer;
features_store.serialize(buffer);
DocumentFeaturesStore features_store2(buffer);
- EXPECT_EQUAL(50232u, features_store2.getMemoryUsage().usedBytes());
+ EXPECT_EQUAL(50184u, features_store2.getMemoryUsage().usedBytes());
}
diff --git a/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp
index ea449300aef..1009fa2fb5f 100644
--- a/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp
@@ -51,7 +51,7 @@ template <typename B, typename M>
MultiValueNumericPostingAttribute<B, M>::~MultiValueNumericPostingAttribute()
{
this->disableFreeLists();
- this->disableElemHoldList();
+ this->disable_entry_hold_list();
clearAllPostings();
}
diff --git a/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp
index cd46bbb5a8a..19840b5a474 100644
--- a/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp
@@ -27,7 +27,7 @@ template <typename B, typename T>
MultiValueStringPostingAttributeT<B, T>::~MultiValueStringPostingAttributeT()
{
this->disableFreeLists();
- this->disableElemHoldList();
+ this->disable_entry_hold_list();
clearAllPostings();
}
diff --git a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h
index 29440b6ce43..ecf7a46f21e 100644
--- a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h
@@ -58,7 +58,7 @@ protected:
void updatePostings(PostingMap &changePost, const vespalib::datastore::EntryComparator &cmp);
void clearAllPostings();
void disableFreeLists() { _postingList.disableFreeLists(); }
- void disableElemHoldList() { _postingList.disableElemHoldList(); }
+ void disable_entry_hold_list() { _postingList.disable_entry_hold_list(); }
void handle_load_posting_lists_and_update_enum_store(enumstore::EnumeratedPostingsLoader& loader);
bool forwardedOnAddDoc(DocId doc, size_t wantSize, size_t wantCapacity);
diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
index 94720212faf..2703201b292 100644
--- a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
@@ -231,7 +231,7 @@ PostingStore<DataT>::dropBitVector(EntryRef &ref)
(void) tree;
(void) docFreq;
_bvs.erase(ref.ref());
- _store.holdElem(iRef, 1);
+ _store.hold_entry(iRef);
_status.decBitVectors();
_bvExtraBytes -= bv->writer().extraByteSize();
ref = ref2;
@@ -267,7 +267,7 @@ PostingStore<DataT>::makeBitVector(EntryRef &ref)
if (_enableOnlyBitVector) {
BTreeType *tree = getWTreeEntry(iRef);
tree->clear(_allocator);
- _store.holdElem(ref, 1);
+ _store.hold_entry(ref);
} else {
bve->_tree = ref;
}
@@ -590,19 +590,19 @@ PostingStore<DataT>::clear(const EntryRef ref)
assert(isBTree(iRef2));
BTreeType *tree = getWTreeEntry(iRef2);
tree->clear(_allocator);
- _store.holdElem(iRef2, 1);
+ _store.hold_entry(iRef2);
}
_bvs.erase(ref.ref());
_status.decBitVectors();
_bvExtraBytes -= bve->_bv->writer().extraByteSize();
- _store.holdElem(ref, 1);
+ _store.hold_entry(ref);
} else {
BTreeType *tree = getWTreeEntry(iRef);
tree->clear(_allocator);
- _store.holdElem(ref, 1);
+ _store.hold_entry(ref);
}
} else {
- _store.holdElem(ref, clusterSize);
+ _store.hold_entry(ref);
}
}
diff --git a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp
index 1775774171d..de4a7157dae 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp
@@ -13,7 +13,7 @@ template <typename B>
SingleValueNumericPostingAttribute<B>::~SingleValueNumericPostingAttribute()
{
this->disableFreeLists();
- this->disableElemHoldList();
+ this->disable_entry_hold_list();
clearAllPostings();
}
diff --git a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp
index eef72984e79..1ec9b54a73b 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp
@@ -27,7 +27,7 @@ template <typename B>
SingleValueStringPostingAttributeT<B>::~SingleValueStringPostingAttributeT()
{
this->disableFreeLists();
- this->disableElemHoldList();
+ this->disable_entry_hold_list();
clearAllPostings();
}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp
index 4be3031303e..8dd76a90b14 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp
@@ -55,9 +55,9 @@ template <bool interleaved_features>
FieldIndex<interleaved_features>::~FieldIndex()
{
_postingListStore.disableFreeLists();
- _postingListStore.disableElemHoldList();
+ _postingListStore.disable_entry_hold_list();
_dict.disableFreeLists();
- _dict.disableElemHoldList();
+ _dict.disable_entry_hold_list();
// XXX: Kludge
for (DictionaryTree::Iterator it = _dict.begin();
it.valid(); ++it) {
diff --git a/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp b/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp
index 604a467a6e6..a6a82ec09f8 100644
--- a/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp
@@ -102,7 +102,7 @@ DocumentFeaturesStore::DocumentFeaturesStore(DataBuffer &buffer)
DocumentFeaturesStore::~DocumentFeaturesStore() {
_word_index.disableFreeLists();
- _word_index.disableElemHoldList();
+ _word_index.disable_entry_hold_list();
_word_index.getAllocator().freeze();
_word_index.clear();
}
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp
index af809b2fa69..af5aae6e519 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp
@@ -95,7 +95,7 @@ PredicateIntervalStore::remove(EntryRef ref) {
// BufferState &state = _store.getBufferState(buffer_id);
// uint32_t type_id = state.getTypeId();
// uint32_t size = type_id <= MAX_ARRAY_SIZE ? type_id : 1;
- // _store.holdElem(ref, size);
+ // _store.hold_entries(ref, size);
}
}
diff --git a/searchlib/src/vespa/searchlib/predicate/simple_index.hpp b/searchlib/src/vespa/searchlib/predicate/simple_index.hpp
index c6f640d72ed..9320488f88e 100644
--- a/searchlib/src/vespa/searchlib/predicate/simple_index.hpp
+++ b/searchlib/src/vespa/searchlib/predicate/simple_index.hpp
@@ -41,7 +41,7 @@ SimpleIndex<Posting, Key, DocId>::insertIntoVectorPosting(vespalib::datastore::E
template <typename Posting, typename Key, typename DocId>
SimpleIndex<Posting, Key, DocId>::~SimpleIndex() {
_btree_posting_lists.disableFreeLists();
- _btree_posting_lists.disableElemHoldList();
+ _btree_posting_lists.disable_entry_hold_list();
for (auto it = _dictionary.begin(); it.valid(); ++it) {
vespalib::datastore::EntryRef ref(it.getData());
@@ -51,13 +51,13 @@ SimpleIndex<Posting, Key, DocId>::~SimpleIndex() {
}
_vector_posting_lists.disableFreeLists();
- _vector_posting_lists.disableElemHoldList();
+ _vector_posting_lists.disable_entry_hold_list();
_vector_posting_lists.clear();
_vector_posting_lists.getAllocator().freeze();
_vector_posting_lists.getAllocator().reclaim_all_memory();
_dictionary.disableFreeLists();
- _dictionary.disableElemHoldList();
+ _dictionary.disable_entry_hold_list();
_dictionary.clear();
_dictionary.getAllocator().freeze();
_dictionary.getAllocator().reclaim_all_memory();
diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
index a2bbbe231c3..c51d0ec7fd3 100644
--- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
@@ -62,10 +62,10 @@ DenseTensorStore::BufferType::BufferType(const TensorSizeCalc &tensorSizeCalc, s
DenseTensorStore::BufferType::~BufferType() = default;
void
-DenseTensorStore::BufferType::cleanHold(void *buffer, size_t offset,
- ElemCount numElems, CleanContext)
+DenseTensorStore::BufferType::clean_hold(void *buffer, size_t offset, EntryCount num_entries, CleanContext)
{
- memset(static_cast<char *>(buffer) + offset, 0, numElems);
+ auto num_elems = num_entries * getArraySize();
+ memset(static_cast<char *>(buffer) + offset * getArraySize(), 0, num_elems);
}
const vespalib::alloc::MemoryAllocator*
@@ -118,7 +118,7 @@ DenseTensorStore::holdTensor(EntryRef ref)
if (!ref.valid()) {
return;
}
- _concreteStore.holdElem(ref, _tensorSizeCalc.alignedSize());
+ _concreteStore.hold_entry(ref);
}
TensorStore::EntryRef
diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h
index 9e326e0ab1e..0dd483e7f08 100644
--- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h
+++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h
@@ -44,7 +44,7 @@ public:
public:
BufferType(const TensorSizeCalc &tensorSizeCalc, std::shared_ptr<vespalib::alloc::MemoryAllocator> allocator);
~BufferType() override;
- void cleanHold(void *buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx) override;
+ void clean_hold(void *buffer, size_t offset, EntryCount num_entries, CleanContext cleanCtx) override;
const vespalib::alloc::MemoryAllocator* get_memory_allocator() const override;
};
private:
diff --git a/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.cpp
index fa13ab6303c..8526138fd31 100644
--- a/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.cpp
@@ -30,11 +30,11 @@ DirectTensorStore::TensorBufferType::TensorBufferType()
}
void
-DirectTensorStore::TensorBufferType::cleanHold(void* buffer, size_t offset, ElemCount num_elems, CleanContext clean_ctx)
+DirectTensorStore::TensorBufferType::clean_hold(void* buffer, size_t offset, EntryCount num_entries, CleanContext clean_ctx)
{
TensorSP* elem = static_cast<TensorSP*>(buffer) + offset;
const auto& empty = empty_entry();
- for (size_t i = 0; i < num_elems; ++i) {
+ for (size_t i = 0; i < num_entries; ++i) {
clean_ctx.extraBytesCleaned((*elem)->get_memory_usage().allocatedBytes());
*elem = empty;
++elem;
@@ -69,7 +69,7 @@ DirectTensorStore::holdTensor(EntryRef ref)
}
const auto& tensor = _tensor_store.getEntry(ref);
assert(tensor);
- _tensor_store.holdElem(ref, 1, tensor->get_memory_usage().allocatedBytes());
+ _tensor_store.hold_entry(ref, tensor->get_memory_usage().allocatedBytes());
}
EntryRef
diff --git a/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.h b/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.h
index 01084e89776..1230494fe41 100644
--- a/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.h
+++ b/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.h
@@ -20,7 +20,7 @@ namespace search::tensor {
*/
class DirectTensorStore : public TensorStore {
private:
- // Note: Must use SP (instead of UP) because of fallbackCopy() and initializeReservedElements() in BufferType,
+ // Note: Must use SP (instead of UP) because of fallback_copy() and initialize_reserved_entries() in BufferType,
// and implementation of move().
using TensorSP = std::shared_ptr<vespalib::eval::Value>;
using TensorStoreType = vespalib::datastore::DataStore<TensorSP>;
@@ -32,7 +32,7 @@ private:
using CleanContext = typename ParentType::CleanContext;
public:
TensorBufferType();
- void cleanHold(void* buffer, size_t offset, ElemCount num_elems, CleanContext clean_ctx) override;
+ void clean_hold(void* buffer, size_t offset, EntryCount num_entries, CleanContext clean_ctx) override;
};
TensorStoreType _tensor_store;
diff --git a/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.cpp b/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.cpp
index 18eba079045..5d3b2206703 100644
--- a/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.cpp
@@ -21,10 +21,10 @@ LargeSubspacesBufferType::LargeSubspacesBufferType(const AllocSpec& spec, std::s
LargeSubspacesBufferType::~LargeSubspacesBufferType() = default;
void
-LargeSubspacesBufferType::cleanHold(void* buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx)
+LargeSubspacesBufferType::clean_hold(void* buffer, size_t offset, EntryCount num_entries, CleanContext cleanCtx)
{
auto elem = static_cast<ArrayType*>(buffer) + offset;
- for (size_t i = 0; i < numElems; ++i) {
+ for (size_t i = 0; i < num_entries; ++i) {
if (!elem->empty()) {
cleanCtx.extraBytesCleaned(elem->size());
_ops.reclaim_labels({elem->data(), elem->size()});
@@ -35,10 +35,10 @@ LargeSubspacesBufferType::cleanHold(void* buffer, size_t offset, ElemCount numEl
}
void
-LargeSubspacesBufferType::destroyElements(void *buffer, ElemCount numElems)
+LargeSubspacesBufferType::destroy_entries(void *buffer, EntryCount num_entries)
{
auto elem = static_cast<ArrayType*>(buffer);
- for (size_t i = 0; i < numElems; ++i) {
+ for (size_t i = 0; i < num_entries; ++i) {
if (!elem->empty()) {
_ops.reclaim_labels({elem->data(), elem->size()});
ArrayType().swap(*elem);
@@ -48,11 +48,11 @@ LargeSubspacesBufferType::destroyElements(void *buffer, ElemCount numElems)
}
void
-LargeSubspacesBufferType::fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems)
+LargeSubspacesBufferType::fallback_copy(void *newBuffer, const void *oldBuffer, EntryCount num_entries)
{
auto old_elems = static_cast<const ArrayType*>(oldBuffer);
auto new_elems = static_cast<ArrayType*>(newBuffer);
- for (size_t i = 0; i < numElems; ++i) {
+ for (size_t i = 0; i < num_entries; ++i) {
auto& old_elem = old_elems[i];
new (new_elems + i) ArrayType(old_elem);
if (!old_elem.empty()) {
@@ -62,12 +62,12 @@ LargeSubspacesBufferType::fallbackCopy(void *newBuffer, const void *oldBuffer, E
}
void
-LargeSubspacesBufferType::initializeReservedElements(void *buffer, ElemCount reservedElements)
+LargeSubspacesBufferType::initialize_reserved_entries(void *buffer, EntryCount reserved_entries)
{
- auto new_elems = static_cast<ArrayType*>(buffer);
+ auto new_entries = static_cast<ArrayType*>(buffer);
const auto& empty = empty_entry();
- for (size_t i = 0; i < reservedElements; ++i) {
- new (new_elems + i) ArrayType(empty);
+ for (size_t i = 0; i < reserved_entries; ++i) {
+ new (new_entries + i) ArrayType(empty);
}
}
diff --git a/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.h b/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.h
index cfab8ef20af..8cce08e9d81 100644
--- a/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.h
+++ b/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.h
@@ -30,10 +30,10 @@ class LargeSubspacesBufferType : public vespalib::datastore::BufferType<vespalib
public:
LargeSubspacesBufferType(const AllocSpec& spec, std::shared_ptr<vespalib::alloc::MemoryAllocator> memory_allocator, TensorBufferTypeMapper& type_mapper) noexcept;
~LargeSubspacesBufferType() override;
- void cleanHold(void* buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx) override;
- void destroyElements(void *buffer, ElemCount numElems) override;
- void fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems) override;
- void initializeReservedElements(void *buffer, ElemCount reservedElements) override;
+ void clean_hold(void* buffer, size_t offset, EntryCount num_entries, CleanContext cleanCtx) override;
+ void destroy_entries(void *buffer, EntryCount num_entries) override;
+ void fallback_copy(void *newBuffer, const void *oldBuffer, EntryCount num_entries) override;
+ void initialize_reserved_entries(void *buffer, EntryCount reserved_entries) override;
const vespalib::alloc::MemoryAllocator* get_memory_allocator() const override;
};
diff --git a/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.cpp b/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.cpp
index 8ab42980548..7b54182f062 100644
--- a/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.cpp
@@ -19,45 +19,45 @@ SmallSubspacesBufferType::SmallSubspacesBufferType(uint32_t array_size, const Al
SmallSubspacesBufferType::~SmallSubspacesBufferType() = default;
void
-SmallSubspacesBufferType::cleanHold(void* buffer, size_t offset, ElemCount numElems, CleanContext)
+SmallSubspacesBufferType::clean_hold(void* buffer, size_t offset, EntryCount num_entries, CleanContext)
{
- char* elem = static_cast<char *>(buffer) + offset;
- while (numElems >= getArraySize()) {
+ char* elem = static_cast<char *>(buffer) + offset * getArraySize();
+ while (num_entries >= 1) {
_ops.reclaim_labels(vespalib::ArrayRef<char>(elem, getArraySize()));
elem += getArraySize();
- numElems -= getArraySize();
+ --num_entries;
}
}
void
-SmallSubspacesBufferType::destroyElements(void *buffer, ElemCount numElems)
+SmallSubspacesBufferType::destroy_entries(void *buffer, EntryCount num_entries)
{
char* elem = static_cast<char *>(buffer);
- while (numElems >= getArraySize()) {
+ while (num_entries >= 1) {
_ops.reclaim_labels(vespalib::ArrayRef<char>(elem, getArraySize()));
elem += getArraySize();
- numElems -= getArraySize();
+ --num_entries;
}
}
void
-SmallSubspacesBufferType::fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems)
+SmallSubspacesBufferType::fallback_copy(void *newBuffer, const void *oldBuffer, EntryCount num_entries)
{
- if (numElems > 0) {
- memcpy(newBuffer, oldBuffer, numElems);
+ if (num_entries > 0) {
+ memcpy(newBuffer, oldBuffer, num_entries * getArraySize());
}
const char *elem = static_cast<const char *>(oldBuffer);
- while (numElems >= getArraySize()) {
+ while (num_entries >= 1) {
_ops.copied_labels(unconstify(vespalib::ConstArrayRef<char>(elem, getArraySize())));
elem += getArraySize();
- numElems -= getArraySize();
+ --num_entries;
}
}
void
-SmallSubspacesBufferType::initializeReservedElements(void *buffer, ElemCount reservedElements)
+SmallSubspacesBufferType::initialize_reserved_entries(void *buffer, EntryCount reserved_entries)
{
- memset(buffer, 0, reservedElements);
+ memset(buffer, 0, reserved_entries * getArraySize());
}
const vespalib::alloc::MemoryAllocator*
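
In the tensor buffer types above, offsets and lengths now arrive as entry counts, so they are scaled by getArraySize() before any byte-level work, as in DenseTensorStore::BufferType::clean_hold and the small-subspaces loops. A condensed sketch of that scaling, with a plain array_size parameter standing in for the getArraySize() call:

    #include <cstddef>
    #include <cstring>

    // Each entry spans array_size chars, so both the start offset and the length
    // are converted from entries to bytes before clearing the held region.
    void clean_hold_sketch(void* buffer, size_t offset_entries, size_t num_entries, size_t array_size) {
        std::memset(static_cast<char*>(buffer) + offset_entries * array_size, 0,
                    num_entries * array_size);
    }
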
diff --git a/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.h b/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.h
index 5622e9970b8..2f287ef1f3d 100644
--- a/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.h
+++ b/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.h
@@ -30,10 +30,10 @@ public:
SmallSubspacesBufferType& operator=(SmallSubspacesBufferType&&) noexcept = delete;
SmallSubspacesBufferType(uint32_t array_size, const AllocSpec& spec, std::shared_ptr<vespalib::alloc::MemoryAllocator> memory_allocator, TensorBufferTypeMapper& type_mapper) noexcept;
~SmallSubspacesBufferType() override;
- void cleanHold(void* buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx) override;
- void destroyElements(void *buffer, ElemCount numElems) override;
- void fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems) override;
- void initializeReservedElements(void *buffer, ElemCount reservedElements) override;
+ void clean_hold(void* buffer, size_t offset, EntryCount num_entries, CleanContext cleanCtx) override;
+ void destroy_entries(void *buffer, EntryCount num_entries) override;
+ void fallback_copy(void *newBuffer, const void *oldBuffer, EntryCount num_entries) override;
+ void initialize_reserved_entries(void *buffer, EntryCount reserved_entries) override;
const vespalib::alloc::MemoryAllocator* get_memory_allocator() const override;
};
diff --git a/storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp b/storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp
index 1c0f9ec7a59..e24e97ff2a4 100644
--- a/storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp
+++ b/storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp
@@ -49,7 +49,7 @@ struct BTreeLockableMap<T>::ValueTraits {
return store.addEntry(value).ref();
}
static void remove_by_wrapped_value(DataStoreType& store, uint64_t value) noexcept {
- store.holdElem(entry_ref_from_value(value), 1);
+ store.hold_entry(entry_ref_from_value(value));
}
static ValueType unwrap_from_key_value(const DataStoreType& store, [[maybe_unused]] uint64_t key, uint64_t value) {
return store.getEntry(entry_ref_from_value(value));
diff --git a/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp b/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp
index 44decb9bf91..31113f2b4f2 100644
--- a/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp
+++ b/vespalib/src/tests/btree/btree-stress/btree_stress_test.cpp
@@ -53,7 +53,7 @@ public:
~RealIntStore();
EntryRef add(uint32_t value) { return _store.addEntry(value); }
AtomicEntryRef add_relaxed(uint32_t value) { return AtomicEntryRef(add(value)); }
- void hold(const AtomicEntryRef& ref) { _store.holdElem(ref.load_relaxed(), 1); }
+ void hold(const AtomicEntryRef& ref) { _store.hold_entry(ref.load_relaxed()); }
EntryRef move(EntryRef ref);
void assign_generation(generation_t current_gen) { _store.assign_generation(current_gen); }
void reclaim_memory(generation_t gen) { _store.reclaim_memory(gen); }
diff --git a/vespalib/src/tests/btree/btree_test.cpp b/vespalib/src/tests/btree/btree_test.cpp
index b8da9ea6042..386d38bac29 100644
--- a/vespalib/src/tests/btree/btree_test.cpp
+++ b/vespalib/src/tests/btree/btree_test.cpp
@@ -1065,7 +1065,7 @@ adjustAllocatedBytes(size_t nodeCount, size_t nodeSize)
TEST_F(BTreeTest, require_that_memory_usage_is_calculated)
{
constexpr size_t BASE_ALLOCATED = 28744u;
- constexpr size_t BASE_USED = 24984;
+ constexpr size_t BASE_USED = 24952;
typedef BTreeNodeAllocator<int32_t, int8_t,
btree::NoAggregated,
MyTraits::INTERNAL_SLOTS, MyTraits::LEAF_SLOTS> NodeAllocator;
diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
index f1dbb5b132e..8b368d05d90 100644
--- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp
+++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
@@ -99,14 +99,14 @@ struct ArrayStoreTest : public TestT
}
void assertBufferState(EntryRef ref, const MemStats& expStats) {
EXPECT_EQ(expStats._used, store.bufferState(ref).size());
- EXPECT_EQ(expStats._hold, store.bufferState(ref).stats().hold_elems());
- EXPECT_EQ(expStats._dead, store.bufferState(ref).stats().dead_elems());
+ EXPECT_EQ(expStats._hold, store.bufferState(ref).stats().hold_entries());
+ EXPECT_EQ(expStats._dead, store.bufferState(ref).stats().dead_entries());
}
void assert_buffer_stats(EntryRef ref, const TestBufferStats& exp_stats) {
const auto& state = store.bufferState(ref);
EXPECT_EQ(exp_stats._used, state.size());
- EXPECT_EQ(exp_stats._hold, state.stats().hold_elems());
- EXPECT_EQ(exp_stats._dead, state.stats().dead_elems());
+ EXPECT_EQ(exp_stats._hold, state.stats().hold_entries());
+ EXPECT_EQ(exp_stats._dead, state.stats().dead_entries());
EXPECT_EQ(exp_stats._extra_used, state.stats().extra_used_bytes());
EXPECT_EQ(exp_stats._extra_hold, state.stats().extra_hold_bytes());
}
@@ -214,8 +214,8 @@ TEST_P(NumberStoreTest, control_static_sizes) {
EXPECT_EQ(240u + sizeof_deque, sizeof(NumberStoreTest::ArrayStoreType::DataStoreType));
EXPECT_EQ(104u, sizeof(NumberStoreTest::ArrayStoreType::SmallBufferType));
MemoryUsage usage = store.getMemoryUsage();
- EXPECT_EQ(202120u, usage.allocatedBytes());
- EXPECT_EQ(197752u, usage.usedBytes());
+ EXPECT_EQ(202116u, usage.allocatedBytes());
+ EXPECT_EQ(197688u, usage.usedBytes());
}
TEST_P(NumberStoreTest, add_and_get_small_arrays_of_trivial_type)
@@ -246,15 +246,15 @@ TEST_F(StringStoreTest, add_and_get_large_arrays_of_non_trivial_type)
assertAdd({"ddd", "eee", "ffff", "gggg", "hhhh"});
}
-TEST_P(NumberStoreTest, elements_are_put_on_hold_when_a_small_array_is_removed)
+TEST_P(NumberStoreTest, entries_are_put_on_hold_when_a_small_array_is_removed)
{
EntryRef ref = add({1,2,3});
- assertBufferState(ref, MemStats().used(3).hold(0));
+ assertBufferState(ref, MemStats().used(1).hold(0));
store.remove(ref);
- assertBufferState(ref, MemStats().used(3).hold(3));
+ assertBufferState(ref, MemStats().used(1).hold(1));
}
-TEST_P(NumberStoreTest, elements_are_put_on_hold_when_a_large_array_is_removed)
+TEST_P(NumberStoreTest, entries_are_put_on_hold_when_a_large_array_is_removed)
{
EntryRef ref = add({1,2,3,4});
// Note: The first buffer has the first element reserved -> we expect 2 elements used here.
@@ -319,7 +319,7 @@ test_compaction(NumberStoreBasicTest &f)
f.remove(f.add({5,5}));
f.reclaim_memory();
f.assertBufferState(size1Ref, MemStats().used(1).dead(0));
- f.assertBufferState(size2Ref, MemStats().used(4).dead(2));
+ f.assertBufferState(size2Ref, MemStats().used(2).dead(1));
f.assertBufferState(size3Ref, MemStats().used(2).dead(1)); // Note: First element is reserved
uint32_t size1BufferId = f.getBufferId(size1Ref);
uint32_t size2BufferId = f.getBufferId(size2Ref);
@@ -363,8 +363,8 @@ void testCompaction(NumberStoreTest &f, bool compactMemory, bool compactAddressS
f.remove(f.add({7}));
f.reclaim_memory();
f.assertBufferState(size1Ref, MemStats().used(3).dead(2));
- f.assertBufferState(size2Ref, MemStats().used(2).dead(0));
- f.assertBufferState(size3Ref, MemStats().used(6).dead(3));
+ f.assertBufferState(size2Ref, MemStats().used(1).dead(0));
+ f.assertBufferState(size3Ref, MemStats().used(2).dead(1));
uint32_t size1BufferId = f.getBufferId(size1Ref);
uint32_t size2BufferId = f.getBufferId(size2Ref);
uint32_t size3BufferId = f.getBufferId(size3Ref);
diff --git a/vespalib/src/tests/datastore/buffer_stats/buffer_stats_test.cpp b/vespalib/src/tests/datastore/buffer_stats/buffer_stats_test.cpp
index 09b2590a5f3..fec8d5949f8 100644
--- a/vespalib/src/tests/datastore/buffer_stats/buffer_stats_test.cpp
+++ b/vespalib/src/tests/datastore/buffer_stats/buffer_stats_test.cpp
@@ -9,10 +9,10 @@ using namespace vespalib::datastore;
TEST(BufferStatsTest, buffer_stats_to_memory_stats)
{
InternalBufferStats buf;
- buf.set_alloc_elems(17);
+ buf.set_alloc_entries(17);
buf.pushed_back(7);
- buf.set_dead_elems(5);
- buf.set_hold_elems(3);
+ buf.set_dead_entries(5);
+ buf.set_hold_entries(3);
buf.inc_extra_used_bytes(13);
buf.inc_extra_hold_bytes(11);
@@ -20,10 +20,10 @@ TEST(BufferStatsTest, buffer_stats_to_memory_stats)
constexpr size_t es = 8;
buf.add_to_mem_stats(es, mem);
- EXPECT_EQ(17, mem._allocElems);
- EXPECT_EQ(7, mem._usedElems);
- EXPECT_EQ(5, mem._deadElems);
- EXPECT_EQ(3, mem._holdElems);
+ EXPECT_EQ(17, mem._alloc_entries);
+ EXPECT_EQ(7, mem._used_entries);
+ EXPECT_EQ(5, mem._dead_entries);
+ EXPECT_EQ(3, mem._hold_entries);
EXPECT_EQ(17 * es + 13, mem._allocBytes);
EXPECT_EQ(7 * es + 13, mem._usedBytes);
EXPECT_EQ(5 * es, mem._deadBytes);
diff --git a/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
index c6824ef0e18..9f7535a3676 100644
--- a/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
+++ b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
@@ -12,35 +12,35 @@ constexpr uint32_t NUM_ENTRIES_FOR_NEW_BUFFER(0);
struct Setup {
uint32_t _min_entries;
- std::atomic<ElemCount> _usedElems;
- ElemCount _neededElems;
- std::atomic<ElemCount> _deadElems;
+ std::atomic<EntryCount> _used_entries;
+ EntryCount _needed_entries;
+ std::atomic<EntryCount> _dead_entries;
uint32_t _bufferId;
float _allocGrowFactor;
bool _resizing;
Setup()
: _min_entries(0),
- _usedElems(0),
- _neededElems(0),
- _deadElems(0),
+ _used_entries(0),
+ _needed_entries(0),
+ _dead_entries(0),
_bufferId(1),
_allocGrowFactor(0.5),
_resizing(false)
{}
Setup(const Setup& rhs) noexcept;
Setup &min_entries(uint32_t value) { _min_entries = value; return *this; }
- Setup &used(size_t value) { _usedElems = value; return *this; }
- Setup &needed(size_t value) { _neededElems = value; return *this; }
- Setup &dead(size_t value) { _deadElems = value; return *this; }
+ Setup &used(size_t value) { _used_entries = value; return *this; }
+ Setup &needed(size_t value) { _needed_entries = value; return *this; }
+ Setup &dead(size_t value) { _dead_entries = value; return *this; }
Setup &bufferId(uint32_t value) { _bufferId = value; return *this; }
Setup &resizing(bool value) { _resizing = value; return *this; }
};
Setup::Setup(const Setup& rhs) noexcept
: _min_entries(rhs._min_entries),
- _usedElems(rhs._usedElems.load(std::memory_order_relaxed)),
- _neededElems(rhs._neededElems),
- _deadElems(rhs._deadElems.load(std::memory_order_relaxed)),
+ _used_entries(rhs._used_entries.load(std::memory_order_relaxed)),
+ _needed_entries(rhs._needed_entries),
+ _dead_entries(rhs._dead_entries.load(std::memory_order_relaxed)),
_bufferId(rhs._bufferId),
_allocGrowFactor(rhs._allocGrowFactor),
_resizing(rhs._resizing)
@@ -61,121 +61,121 @@ struct Fixture {
}
~Fixture() {
for (auto& setup : setups) {
- bufferType.onHold(setup._bufferId, &setup._usedElems, &setup._deadElems);
- bufferType.onFree(setup._usedElems);
+ bufferType.on_hold(setup._bufferId, &setup._used_entries, &setup._dead_entries);
+ bufferType.on_free(setup._used_entries);
}
}
Setup& curr_setup() {
return setups.back();
}
void add_setup(const Setup& setup_in) {
- // The buffer type stores pointers to ElemCount (from Setup) and we must ensure these do not move in memory.
+ // The buffer type stores pointers to EntryCount (from Setup) and we must ensure these do not move in memory.
assert(setups.size() < setups.capacity());
setups.push_back(setup_in);
}
void onActive() {
- bufferType.onActive(curr_setup()._bufferId, &curr_setup()._usedElems, &curr_setup()._deadElems, &buffer[0]);
+ bufferType.on_active(curr_setup()._bufferId, &curr_setup()._used_entries, &curr_setup()._dead_entries, &buffer[0]);
}
- size_t arraysToAlloc() {
- return bufferType.calcArraysToAlloc(curr_setup()._bufferId, curr_setup()._neededElems, curr_setup()._resizing);
+ size_t entries_to_alloc() {
+ return bufferType.calc_entries_to_alloc(curr_setup()._bufferId, curr_setup()._needed_entries, curr_setup()._resizing);
}
- void assertArraysToAlloc(size_t exp) {
+ void assert_entries_to_alloc(size_t exp) {
onActive();
- EXPECT_EQUAL(exp, arraysToAlloc());
+ EXPECT_EQUAL(exp, entries_to_alloc());
}
};
void
-assertArraysToAlloc(size_t exp, const Setup &setup)
+assert_entries_to_alloc(size_t exp, const Setup &setup)
{
Fixture f(setup);
- f.assertArraysToAlloc(exp);
+ f.assert_entries_to_alloc(exp);
}
-TEST("require that complete arrays are allocated")
+TEST("require that entries are allocated")
{
- TEST_DO(assertArraysToAlloc(1, Setup().needed(1)));
- TEST_DO(assertArraysToAlloc(1, Setup().needed(2)));
- TEST_DO(assertArraysToAlloc(1, Setup().needed(3)));
- TEST_DO(assertArraysToAlloc(1, Setup().needed(4)));
- TEST_DO(assertArraysToAlloc(2, Setup().needed(5)));
+ TEST_DO(assert_entries_to_alloc(1, Setup().needed(1)));
+ TEST_DO(assert_entries_to_alloc(2, Setup().needed(2)));
+ TEST_DO(assert_entries_to_alloc(3, Setup().needed(3)));
+ TEST_DO(assert_entries_to_alloc(4, Setup().needed(4)));
+ TEST_DO(assert_entries_to_alloc(5, Setup().needed(5)));
}
-TEST("require that reserved elements are taken into account when not resizing")
+TEST("require that reserved entries are taken into account when not resizing")
{
- TEST_DO(assertArraysToAlloc(2, Setup().needed(1).bufferId(0)));
- TEST_DO(assertArraysToAlloc(2, Setup().needed(4).bufferId(0)));
- TEST_DO(assertArraysToAlloc(3, Setup().needed(5).bufferId(0)));
+ TEST_DO(assert_entries_to_alloc(2, Setup().needed(1).bufferId(0)));
+ TEST_DO(assert_entries_to_alloc(5, Setup().needed(4).bufferId(0)));
+ TEST_DO(assert_entries_to_alloc(6, Setup().needed(5).bufferId(0)));
}
-TEST("require that arrays to alloc is based on currently used elements (no resizing)")
+TEST("require that entries to alloc is based on currently used entries (no resizing)")
{
- TEST_DO(assertArraysToAlloc(2, Setup().used(4 * 4).needed(4)));
- TEST_DO(assertArraysToAlloc(4, Setup().used(8 * 4).needed(4)));
+ TEST_DO(assert_entries_to_alloc(2, Setup().used(4).needed(1)));
+ TEST_DO(assert_entries_to_alloc(4, Setup().used(8).needed(1)));
}
-TEST("require that arrays to alloc is based on currently used elements (with resizing)")
+TEST("require that entries to alloc is based on currently used entries (with resizing)")
{
- TEST_DO(assertArraysToAlloc(4 + 2, Setup().used(4 * 4).needed(4).resizing(true)));
- TEST_DO(assertArraysToAlloc(8 + 4, Setup().used(8 * 4).needed(4).resizing(true)));
- TEST_DO(assertArraysToAlloc(4 + 3, Setup().used(4 * 4).needed(3 * 4).resizing(true)));
+ TEST_DO(assert_entries_to_alloc(4 + 2, Setup().used(4).needed(1).resizing(true)));
+ TEST_DO(assert_entries_to_alloc(8 + 4, Setup().used(8).needed(1).resizing(true)));
+ TEST_DO(assert_entries_to_alloc(4 + 3, Setup().used(4).needed(3).resizing(true)));
}
-TEST("require that arrays to alloc always contain elements needed")
+TEST("require that entries to alloc always contain entries needed")
{
- TEST_DO(assertArraysToAlloc(2, Setup().used(4 * 4).needed(2 * 4)));
- TEST_DO(assertArraysToAlloc(3, Setup().used(4 * 4).needed(3 * 4)));
- TEST_DO(assertArraysToAlloc(4, Setup().used(4 * 4).needed(4 * 4)));
+ TEST_DO(assert_entries_to_alloc(2, Setup().used(4).needed(2)));
+ TEST_DO(assert_entries_to_alloc(3, Setup().used(4).needed(3)));
+ TEST_DO(assert_entries_to_alloc(4, Setup().used(4).needed(4)));
}
-TEST("require that arrays to alloc is capped to max arrays")
+TEST("require that entries to alloc is capped to max entries")
{
- TEST_DO(assertArraysToAlloc(127, Setup().used(254 * 4).needed(4)));
- TEST_DO(assertArraysToAlloc(128, Setup().used(256 * 4).needed(4)));
- TEST_DO(assertArraysToAlloc(128, Setup().used(258 * 4).needed(8)));
+ TEST_DO(assert_entries_to_alloc(127, Setup().used(254).needed(1)));
+ TEST_DO(assert_entries_to_alloc(128, Setup().used(256).needed(1)));
+ TEST_DO(assert_entries_to_alloc(128, Setup().used(258).needed(2)));
}
TEST("require that arrays to alloc is capped to min arrays")
{
- TEST_DO(assertArraysToAlloc(16, Setup().used(30 * 4).needed(4).min_entries(16)));
- TEST_DO(assertArraysToAlloc(16, Setup().used(32 * 4).needed(4).min_entries(16)));
- TEST_DO(assertArraysToAlloc(17, Setup().used(34 * 4).needed(4).min_entries(16)));
+ TEST_DO(assert_entries_to_alloc(16, Setup().used(30).needed(1).min_entries(16)));
+ TEST_DO(assert_entries_to_alloc(16, Setup().used(32).needed(1).min_entries(16)));
+ TEST_DO(assert_entries_to_alloc(17, Setup().used(34).needed(1).min_entries(16)));
}
-TEST("arrays to alloc considers used elements across all active buffers of same type (no resizing)")
+TEST("entries to alloc considers used entries across all active buffers of same type (no resizing)")
{
- Fixture f(Setup().used(6 * 4));
- f.assertArraysToAlloc(6 * 0.5);
- f.add_setup(Setup().used(8 * 4).bufferId(2));
- f.assertArraysToAlloc((6 + 8) * 0.5);
- f.add_setup(Setup().used(10 * 4).bufferId(3));
- f.assertArraysToAlloc((6 + 8 + 10) * 0.5);
+ Fixture f(Setup().used(6));
+ f.assert_entries_to_alloc(6 * 0.5);
+ f.add_setup(Setup().used(8).bufferId(2));
+ f.assert_entries_to_alloc((6 + 8) * 0.5);
+ f.add_setup(Setup().used(10).bufferId(3));
+ f.assert_entries_to_alloc((6 + 8 + 10) * 0.5);
}
-TEST("arrays to alloc considers used elements across all active buffers of same type when resizing")
+TEST("entries to alloc considers used entries across all active buffers of same type when resizing")
{
- Fixture f(Setup().used(6 * 4));
- f.assertArraysToAlloc(6 * 0.5);
- f.add_setup(Setup().used(8 * 4).resizing(true).bufferId(2));
- f.assertArraysToAlloc(8 + (6 + 8) * 0.5);
+ Fixture f(Setup().used(6));
+ f.assert_entries_to_alloc(6 * 0.5);
+ f.add_setup(Setup().used(8).resizing(true).bufferId(2));
+ f.assert_entries_to_alloc(8 + (6 + 8) * 0.5);
}
-TEST("arrays to alloc considers (and subtracts) dead elements across all active buffers of same type (no resizing)")
+TEST("entries to alloc considers (and subtracts) dead entries across all active buffers of same type (no resizing)")
{
- Fixture f(Setup().used(6 * 4).dead(2 * 4));
- f.assertArraysToAlloc((6 - 2) * 0.5);
- f.add_setup(Setup().used(12 * 4).dead(4 * 4).bufferId(2));
- f.assertArraysToAlloc((6 - 2 + 12 - 4) * 0.5);
- f.add_setup(Setup().used(20 * 4).dead(6 * 4).bufferId(3));
- f.assertArraysToAlloc((6 - 2 + 12 - 4 + 20 - 6) * 0.5);
+ Fixture f(Setup().used(6).dead(2));
+ f.assert_entries_to_alloc((6 - 2) * 0.5);
+ f.add_setup(Setup().used(12).dead(4).bufferId(2));
+ f.assert_entries_to_alloc((6 - 2 + 12 - 4) * 0.5);
+ f.add_setup(Setup().used(20).dead(6).bufferId(3));
+ f.assert_entries_to_alloc((6 - 2 + 12 - 4 + 20 - 6) * 0.5);
}
TEST("arrays to alloc considers (and subtracts) dead elements across all active buffers of same type when resizing")
{
- Fixture f(Setup().used(6 * 4).dead(2 * 4));
- f.assertArraysToAlloc((6 - 2) * 0.5);
- f.add_setup(Setup().used(12 * 4).dead(4 * 4).resizing(true).bufferId(2));
- f.assertArraysToAlloc(12 + (6 - 2 + 12 - 4) * 0.5);
+ Fixture f(Setup().used(6).dead(2));
+ f.assert_entries_to_alloc((6 - 2) * 0.5);
+ f.add_setup(Setup().used(12).dead(4).resizing(true).bufferId(2));
+ f.assert_entries_to_alloc(12 + (6 - 2 + 12 - 4) * 0.5);
}
TEST_MAIN() { TEST_RUN_ALL(); }
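
The buffer_type_test.cpp rewrite above tracks sizes in entries instead of elements, which is why the setups drop the old "* 4" array-size factors. A rough model of calc_entries_to_alloc() as these tests exercise it, covering the non-resizing case and ignoring the reserved entry of buffer 0 (this sketch is illustrative, not the library implementation):

    #include <algorithm>
    #include <cstddef>

    // live = used minus dead entries summed over all active buffers of the same type.
    size_t entries_to_alloc_sketch(size_t live, size_t needed, float grow_factor,
                                   size_t min_entries, size_t max_entries) {
        size_t grown  = static_cast<size_t>(live * grow_factor);    // e.g. 4 used * 0.5 -> 2
        size_t wanted = std::max(grown, needed);                     // always cover what is needed
        return std::min(std::max(wanted, min_entries), max_entries); // clamp to min/max entries
    }
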
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
index 533c06634b1..f108c15c98c 100644
--- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -26,8 +26,8 @@ public:
void holdBuffer(uint32_t bufferId) {
ParentType::holdBuffer(bufferId);
}
- void holdElem(EntryRef ref, uint64_t len) {
- ParentType::holdElem(ref, len);
+ void hold_entry(EntryRef ref) {
+ ParentType::hold_entry(ref);
}
void assign_generation(generation_t current_gen) {
ParentType::assign_generation(current_gen);
@@ -143,10 +143,10 @@ void
assertMemStats(const MemoryStats &exp,
const MemoryStats &act)
{
- EXPECT_EQ(exp._allocElems, act._allocElems);
- EXPECT_EQ(exp._usedElems, act._usedElems);
- EXPECT_EQ(exp._deadElems, act._deadElems);
- EXPECT_EQ(exp._holdElems, act._holdElems);
+ EXPECT_EQ(exp._alloc_entries, act._alloc_entries);
+ EXPECT_EQ(exp._used_entries, act._used_entries);
+ EXPECT_EQ(exp._dead_entries, act._dead_entries);
+ EXPECT_EQ(exp._hold_entries, act._hold_entries);
EXPECT_EQ(exp._freeBuffers, act._freeBuffers);
EXPECT_EQ(exp._activeBuffers, act._activeBuffers);
EXPECT_EQ(exp._holdBuffers, act._holdBuffers);
@@ -304,13 +304,13 @@ TEST(DataStoreTest, require_that_we_can_hold_and_trim_elements)
{
MyStore s;
MyRef r1 = s.addEntry(1);
- s.holdElem(r1, 1);
+ s.hold_entry(r1);
s.assign_generation(10);
MyRef r2 = s.addEntry(2);
- s.holdElem(r2, 1);
+ s.hold_entry(r2);
s.assign_generation(20);
MyRef r3 = s.addEntry(3);
- s.holdElem(r3, 1);
+ s.hold_entry(r3);
s.assign_generation(30);
EXPECT_EQ(1, s.getEntry(r1));
EXPECT_EQ(2, s.getEntry(r2));
@@ -358,11 +358,11 @@ TEST(DataStoreTest, require_that_we_can_use_free_lists)
MyStore s;
s.enableFreeLists();
auto r1 = s.addEntry(1);
- s.holdElem(r1, 1);
+ s.hold_entry(r1);
s.assign_generation(10);
auto r2 = s.addEntry(2);
expect_successive_refs(r1, r2);
- s.holdElem(r2, 1);
+ s.hold_entry(r2);
s.assign_generation(20);
s.reclaim_entry_refs(11);
auto r3 = s.addEntry(3); // reuse r1
@@ -392,8 +392,8 @@ TEST(DataStoreTest, require_that_we_can_use_free_lists_with_raw_allocator)
auto h1 = allocator.alloc(1);
auto h2 = allocator.alloc(1);
expect_successive_handles(h1, h2);
- s.holdElem(h1.ref, 3);
- s.holdElem(h2.ref, 3);
+ s.hold_entry(h1.ref);
+ s.hold_entry(h2.ref);
s.assign_generation(10);
s.reclaim_entry_refs(11);
@@ -412,10 +412,10 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
{
MyStore s;
MemoryStats m;
- m._allocElems = MyRef::offsetSize();
- m._usedElems = 1; // ref = 0 is reserved
- m._deadElems = 1; // ref = 0 is reserved
- m._holdElems = 0;
+ m._alloc_entries = MyRef::offsetSize();
+ m._used_entries = 1; // ref = 0 is reserved
+ m._dead_entries = 1; // ref = 0 is reserved
+ m._hold_entries = 0;
m._activeBuffers = 1;
m._freeBuffers = MyRef::numBuffers() - 1;
m._holdBuffers = 0;
@@ -423,7 +423,7 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
// add entry
MyRef r = s.addEntry(10);
- m._usedElems++;
+ m._used_entries++;
assertMemStats(m, s.getMemStats());
// hold buffer
@@ -431,9 +431,9 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
s.addEntry(30);
s.holdBuffer(r.bufferId());
s.assign_generation(100);
- m._usedElems += 2;
- m._holdElems = m._usedElems;
- m._deadElems = 0;
+ m._used_entries += 2;
+ m._hold_entries = m._used_entries;
+ m._dead_entries = 0;
m._activeBuffers--;
m._holdBuffers++;
assertMemStats(m, s.getMemStats());
@@ -441,17 +441,17 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
// new active buffer
s.switch_primary_buffer();
s.addEntry(40);
- m._allocElems += MyRef::offsetSize();
- m._usedElems++;
+ m._alloc_entries += MyRef::offsetSize();
+ m._used_entries++;
m._activeBuffers++;
m._freeBuffers--;
// trim hold buffer
s.reclaim_memory(101);
- m._allocElems -= MyRef::offsetSize();
- m._usedElems = 1;
- m._deadElems = 0;
- m._holdElems = 0;
+ m._alloc_entries -= MyRef::offsetSize();
+ m._used_entries = 1;
+ m._dead_entries = 0;
+ m._hold_entries = 0;
m._freeBuffers = MyRef::numBuffers() - 1;
m._holdBuffers = 0;
assertMemStats(m, s.getMemStats());
@@ -466,7 +466,7 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
{ // increase extra hold bytes
auto prev_stats = s.getMemStats();
- s.get_active_buffer_state().hold_elems(0, 30);
+ s.get_active_buffer_state().hold_entries(0, 30);
auto curr_stats = s.getMemStats();
EXPECT_EQ(prev_stats._holdBytes + 30, curr_stats._holdBytes);
}
@@ -475,7 +475,7 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
TEST(DataStoreTest, require_that_memory_usage_is_calculated)
{
constexpr size_t BASE_ALLOCATED = 4228;
- constexpr size_t BASE_USED = 308;
+ constexpr size_t BASE_USED = 292;
MyStore s;
MyRef r = s.addEntry(10);
s.addEntry(20);
@@ -494,7 +494,7 @@ TEST(DataStoreTest, require_that_memory_usage_is_calculated)
TEST(DataStoreTest, require_that_we_can_disable_elemement_hold_list)
{
constexpr size_t BASE_ALLOCATED = 4228;
- constexpr size_t BASE_USED = 308;
+ constexpr size_t BASE_USED = 292;
MyStore s;
MyRef r1 = s.addEntry(10);
MyRef r2 = s.addEntry(20);
@@ -505,14 +505,14 @@ TEST(DataStoreTest, require_that_we_can_disable_elemement_hold_list)
EXPECT_EQ(4 * sizeof(int) + BASE_USED, m.usedBytes());
EXPECT_EQ(1 * sizeof(int), m.deadBytes());
EXPECT_EQ(0 * sizeof(int), m.allocatedBytesOnHold());
- s.holdElem(r1, 1);
+ s.hold_entry(r1);
m = s.getMemoryUsage();
EXPECT_EQ(MyRef::offsetSize() * sizeof(int) + BASE_ALLOCATED, m.allocatedBytes());
EXPECT_EQ(4 * sizeof(int) + BASE_USED, m.usedBytes());
EXPECT_EQ(1 * sizeof(int), m.deadBytes());
EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
- s.disableElemHoldList();
- s.holdElem(r2, 1);
+ s.disable_entry_hold_list();
+ s.hold_entry(r2);
m = s.getMemoryUsage();
EXPECT_EQ(MyRef::offsetSize() * sizeof(int) + BASE_ALLOCATED, m.allocatedBytes());
EXPECT_EQ(4 * sizeof(int) + BASE_USED, m.usedBytes());
@@ -596,12 +596,12 @@ TEST(DataStoreTest, require_that_offset_in_EntryRefT_is_within_bounds_when_alloc
* 4) Cap bytes to alloc to the max offset EntryRef can handle.
* The max bytes to alloc is: max_entries * arraySize * elementSize.
*/
- assertGrowStats<uint8_t>({8192,16384,16384,65536,65536,98304,98304,98304,98304,98304,98304,98304}, 3);
- assertGrowStats<uint8_t>({16384,16384,65536,65536,131072,131072,163840,163840,163840,163840,163840,163840}, 5);
- assertGrowStats<uint8_t>({16384,32768,32768,131072,131072,229376,229376,229376,229376,229376,229376,229376}, 7);
- assertGrowStats<uint32_t>({8192,16384,16384,65536,65536,98304,98304,98304,98304,98304,98304,98304}, 3);
- assertGrowStats<uint32_t>({16384,16384,65536,65536,131072,131072,163840,163840,163840,163840,163840,163840}, 5);
- assertGrowStats<uint32_t>({16384,32768,32768,131072,131072,229376,229376,229376,229376,229376,229376,229376}, 7);
+ assertGrowStats<uint8_t>({2730,5461,5461,21845,21845,32768,32768,32768,32768,32768,32768,32768}, 3);
+ assertGrowStats<uint8_t>({3276,3276,13107,13107,26214,26214,32768,32768,32768,32768,32768,32768}, 5);
+ assertGrowStats<uint8_t>({2340,4681,4681,18724,18724,32768,32768,32768,32768,32768,32768,32768}, 7);
+ assertGrowStats<uint32_t>({2730,5461,5461,21845,21845,32768,32768,32768,32768,32768,32768,32768}, 3);
+ assertGrowStats<uint32_t>({3276,3276,13107,13107,26214,26214,32768,32768,32768,32768,32768,32768}, 5);
+ assertGrowStats<uint32_t>({2340,4681,4681,18724,18724,32768,32768,32768,32768,32768,32768,32768}, 7);
}
namespace {
@@ -669,7 +669,7 @@ TEST(DataStoreTest, control_static_sizes) {
EXPECT_EQ(24, sizeof(FreeList));
EXPECT_EQ(56, sizeof(BufferFreeList));
EXPECT_EQ(1, sizeof(BufferState::State));
- EXPECT_EQ(144, sizeof(BufferState));
+ EXPECT_EQ(128, sizeof(BufferState));
BufferState bs;
EXPECT_EQ(0, bs.size());
}
@@ -685,11 +685,11 @@ void test_free_element_to_held_buffer(bool before_hold_buffer)
EXPECT_EQ(1u, s.primary_buffer_id());
if (before_hold_buffer) {
- s.holdElem(ref, 1);
+ s.hold_entry(ref);
}
s.holdBuffer(0); // hold last buffer
if (!before_hold_buffer) {
- ASSERT_DEATH({ s.holdElem(ref, 1); }, "isActive\\(\\)");
+ ASSERT_DEATH({ s.hold_entry(ref); }, "isActive\\(\\)");
}
s.assign_generation(100);
s.reclaim_memory(101);
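
The test updates above track the switch from element-based to entry-based hold accounting: holdElem(ref, len) took an element count, while hold_entry(ref) always holds exactly one entry (one allocation unit), so the length argument disappears. The drop in sizeof(BufferState) from 144 to 128 is consistent with the four buffer counters narrowing from 64-bit ElemCount to 32-bit EntryCount (4 counters x 4 bytes saved = 16 bytes). A minimal self-contained sketch of that size arithmetic, using stand-in structs rather than the real BufferState/BufferStats (which contain more members, so only the delta is meaningful):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Stand-ins for the old and new counter blocks inside the buffer stats.
    struct OldCounters { std::atomic<uint64_t> alloc, used, hold, dead; };
    struct NewCounters { std::atomic<uint32_t> alloc, used, hold, dead; };

    int main() {
        // 4 counters * (8 - 4) bytes = 16 bytes saved, matching 144 -> 128.
        std::printf("old=%zu new=%zu delta=%zu\n",
                    sizeof(OldCounters), sizeof(NewCounters),
                    sizeof(OldCounters) - sizeof(NewCounters));
    }
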
diff --git a/vespalib/src/tests/datastore/free_list/free_list_test.cpp b/vespalib/src/tests/datastore/free_list/free_list_test.cpp
index 44e11b2316b..8532c7bc4c7 100644
--- a/vespalib/src/tests/datastore/free_list/free_list_test.cpp
+++ b/vespalib/src/tests/datastore/free_list/free_list_test.cpp
@@ -13,14 +13,14 @@ constexpr uint32_t array_size = 6;
struct FreeListTest : public testing::Test
{
FreeList list;
- std::atomic<ElemCount> dead_elems;
+ std::atomic<EntryCount> dead_entries;
std::vector<BufferFreeList> bufs;
FreeListTest()
: list(),
bufs()
{
for (size_t i = 0; i < 3; ++i) {
- bufs.emplace_back(dead_elems);
+ bufs.emplace_back(dead_entries);
bufs.back().set_array_size(array_size);
}
}
@@ -126,13 +126,13 @@ TEST_F(FreeListTest, buffer_free_list_can_be_disabled_and_detached_when_not_curr
EXPECT_TRUE(list.empty());
}
-TEST_F(FreeListTest, dead_elems_count_is_updated_when_popping_an_entry)
+TEST_F(FreeListTest, dead_entries_count_is_updated_when_popping_an_entry)
{
enable(0);
push_entry({10, 0});
- dead_elems.store(18, std::memory_order_relaxed);
+ dead_entries.store(18, std::memory_order_relaxed);
pop_entry();
- EXPECT_EQ(18 - array_size, dead_elems.load(std::memory_order_relaxed));
+ EXPECT_EQ(17, dead_entries.load(std::memory_order_relaxed));
}
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
index 5ccf9a8908c..4f917079516 100644
--- a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
@@ -96,8 +96,8 @@ struct TestBase : public ::testing::Test {
}
void assertBufferState(EntryRef ref, const TestBufferStats expStats) const {
EXPECT_EQ(expStats._used, store.bufferState(ref).size());
- EXPECT_EQ(expStats._hold, store.bufferState(ref).stats().hold_elems());
- EXPECT_EQ(expStats._dead, store.bufferState(ref).stats().dead_elems());
+ EXPECT_EQ(expStats._hold, store.bufferState(ref).stats().hold_entries());
+ EXPECT_EQ(expStats._dead, store.bufferState(ref).stats().dead_entries());
}
void assertStoreContent() const {
for (const auto &elem : refStore) {
@@ -147,10 +147,7 @@ struct TestBase : public ::testing::Test {
auto getBuilder(uint32_t uniqueValuesHint) { return store.getBuilder(uniqueValuesHint); }
auto getEnumerator(bool sort_unique_values) { return store.getEnumerator(sort_unique_values); }
size_t get_reserved(EntryRef ref) {
- return store.bufferState(ref).getTypeHandler()->getReservedElements(getBufferId(ref));
- }
- size_t get_array_size(EntryRef ref) {
- return store.bufferState(ref).getArraySize();
+ return store.bufferState(ref).getTypeHandler()->get_reserved_entries(getBufferId(ref));
}
};
@@ -309,29 +306,27 @@ TYPED_TEST(TestBase, can_add_and_get_values)
}
}
-TYPED_TEST(TestBase, elements_are_put_on_hold_when_value_is_removed)
+TYPED_TEST(TestBase, entries_are_put_on_hold_when_value_is_removed)
{
EntryRef ref = this->add(this->values()[0]);
size_t reserved = this->get_reserved(ref);
- size_t array_size = this->get_array_size(ref);
- this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(0).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(1 + reserved).hold(0).dead(reserved));
this->store.remove(ref);
- this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(array_size).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(1 + reserved).hold(1).dead(reserved));
}
-TYPED_TEST(TestBase, elements_are_reference_counted)
+TYPED_TEST(TestBase, entries_are_reference_counted)
{
EntryRef ref = this->add(this->values()[0]);
EntryRef ref2 = this->add(this->values()[0]);
EXPECT_EQ(ref.ref(), ref2.ref());
- // Note: The first buffer have the first element reserved -> we expect 2 elements used here.
+    // Note: The first buffer has the first entry reserved -> we expect 2 entries used here.
size_t reserved = this->get_reserved(ref);
- size_t array_size = this->get_array_size(ref);
- this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(0).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(1 + reserved).hold(0).dead(reserved));
this->store.remove(ref);
- this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(0).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(1 + reserved).hold(0).dead(reserved));
this->store.remove(ref);
- this->assertBufferState(ref, TestBufferStats().used(array_size + reserved).hold(array_size).dead(reserved));
+ this->assertBufferState(ref, TestBufferStats().used(1 + reserved).hold(1).dead(reserved));
}
TEST_F(SmallOffsetNumberTest, new_underlying_buffer_is_allocated_when_current_is_full)
@@ -360,8 +355,7 @@ TYPED_TEST(TestBase, store_can_be_compacted)
this->remove(this->add(this->values()[2]));
this->reclaim_memory();
size_t reserved = this->get_reserved(val0Ref);
- size_t array_size = this->get_array_size(val0Ref);
- this->assertBufferState(val0Ref, TestBufferStats().used(reserved + 3 * array_size).dead(reserved + array_size));
+ this->assertBufferState(val0Ref, TestBufferStats().used(reserved + 3).dead(reserved + 1));
uint32_t val1BufferId = this->getBufferId(val0Ref);
EXPECT_EQ(2u, this->refStore.size());
@@ -389,8 +383,7 @@ TYPED_TEST(TestBase, store_can_be_instantiated_with_builder)
EntryRef val0Ref = builder.mapEnumValueToEntryRef(1);
EntryRef val1Ref = builder.mapEnumValueToEntryRef(2);
size_t reserved = this->get_reserved(val0Ref);
- size_t array_size = this->get_array_size(val0Ref);
- this->assertBufferState(val0Ref, TestBufferStats().used(2 * array_size + reserved).dead(reserved)); // Note: First element is reserved
+ this->assertBufferState(val0Ref, TestBufferStats().used(2 + reserved).dead(reserved)); // Note: First entry is reserved
EXPECT_TRUE(val0Ref.valid());
EXPECT_TRUE(val1Ref.valid());
EXPECT_NE(val0Ref.ref(), val1Ref.ref());
@@ -472,13 +465,13 @@ TEST_F(DoubleTest, nan_is_handled)
TEST_F(DoubleTest, control_memory_usage) {
static constexpr size_t sizeof_deque = vespalib::datastore::DataStoreBase::sizeof_entry_ref_hold_list_deque;
EXPECT_EQ(368u + sizeof_deque, sizeof(store));
- EXPECT_EQ(144u, sizeof(BufferState));
+ EXPECT_EQ(128u, sizeof(BufferState));
EXPECT_EQ(28740u, store.get_values_memory_usage().allocatedBytes());
- EXPECT_EQ(24804u, store.get_values_memory_usage().usedBytes());
+ EXPECT_EQ(24788u, store.get_values_memory_usage().usedBytes());
EXPECT_EQ(126952u, store.get_dictionary_memory_usage().allocatedBytes());
- EXPECT_EQ(25248u, store.get_dictionary_memory_usage().usedBytes());
+ EXPECT_EQ(25216u, store.get_dictionary_memory_usage().usedBytes());
EXPECT_EQ(155692u, store.getMemoryUsage().allocatedBytes());
- EXPECT_EQ(50052, store.getMemoryUsage().usedBytes());
+ EXPECT_EQ(50004, store.getMemoryUsage().usedBytes());
}
GTEST_MAIN_RUN_ALL_TESTS()
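
The unique store expectations change because buffer statistics are now expressed in entries rather than elements, so the used/hold/dead figures no longer scale with getArraySize(): adding one value to buffer 0 gives used = 1 (the value) + 1 (the reserved entry), and removing it moves one entry to hold, which reclaim later turns into a dead entry. A small model of that bookkeeping, with hypothetical counter names rather than the unique store API:

    #include <cassert>
    #include <cstdint>

    // Hypothetical per-buffer counters, mirroring how the TestBufferStats
    // expectations are computed after the entries rename.
    struct Counters { uint32_t used = 0, hold = 0, dead = 0; };

    int main() {
        Counters c;
        c.used += 1; c.dead += 1;       // reserved entry in buffer 0
        c.used += 1;                    // add one unique value -> one entry
        assert(c.used == 2 && c.hold == 0 && c.dead == 1);
        c.hold += 1;                    // remove() puts the entry on hold
        assert(c.used == 2 && c.hold == 1 && c.dead == 1);
        c.hold -= 1; c.dead += 1;       // reclaim turns the held entry into a dead one
        assert(c.hold == 0 && c.dead == 2);
    }
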
diff --git a/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp b/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp
index 7d4451556c8..8ea7f807f56 100644
--- a/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp
@@ -62,8 +62,8 @@ struct TestBase : public ::testing::Test {
void assert_buffer_state(EntryRef ref, const TestBufferStats expStats) {
auto & stats = buffer_state(ref).stats();
EXPECT_EQ(expStats._used, buffer_state(ref).size());
- EXPECT_EQ(expStats._hold, stats.hold_elems());
- EXPECT_EQ(expStats._dead, stats.dead_elems());
+ EXPECT_EQ(expStats._hold, stats.hold_entries());
+ EXPECT_EQ(expStats._dead, stats.dead_entries());
EXPECT_EQ(expStats._extra_used, stats.extra_used_bytes());
EXPECT_EQ(expStats._extra_hold, stats.extra_hold_bytes());
}
@@ -83,14 +83,14 @@ TEST_F(StringTest, can_add_and_get_values)
assert_add(spaces1000.c_str());
}
-TEST_F(StringTest, elements_are_put_on_hold_when_value_is_removed)
+TEST_F(StringTest, entries_are_put_on_hold_when_value_is_removed)
{
EntryRef ref = add(small.c_str());
- assert_buffer_state(ref, TestBufferStats().used(16).hold(0).dead(0));
+ assert_buffer_state(ref, TestBufferStats().used(1).hold(0).dead(0));
remove(ref);
- assert_buffer_state(ref, TestBufferStats().used(16).hold(16).dead(0));
+ assert_buffer_state(ref, TestBufferStats().used(1).hold(1).dead(0));
reclaim_memory();
- assert_buffer_state(ref, TestBufferStats().used(16).hold(0).dead(16));
+ assert_buffer_state(ref, TestBufferStats().used(1).hold(0).dead(1));
}
TEST_F(StringTest, extra_bytes_used_is_tracked)
@@ -139,7 +139,7 @@ TEST_F(StringTest, free_list_is_used_when_enabled)
EntryRef ref4 = add(spaces1000.c_str());
EXPECT_EQ(ref1, ref3);
EXPECT_EQ(ref2, ref4);
- assert_buffer_state(ref1, TestBufferStats().used(16).hold(0).dead(0));
+ assert_buffer_state(ref1, TestBufferStats().used(1).hold(0).dead(0));
assert_buffer_state(ref2, TestBufferStats().used(2).hold(0).dead(1).extra_used(1001));
}
@@ -155,7 +155,7 @@ TEST_F(StringTest, free_list_is_not_used_when_disabled)
EntryRef ref4 = add(spaces1000.c_str());
EXPECT_NE(ref1, ref3);
EXPECT_NE(ref2, ref4);
- assert_buffer_state(ref1, TestBufferStats().used(32).hold(0).dead(16));
+ assert_buffer_state(ref1, TestBufferStats().used(2).hold(0).dead(1));
assert_buffer_state(ref2, TestBufferStats().used(3).hold(0).dead(2).extra_used(1001));
}
@@ -173,7 +173,7 @@ TEST_F(StringTest, free_list_is_never_used_for_move_on_compact)
EntryRef ref6 = move_on_compact(ref2);
EXPECT_NE(ref5, ref3);
EXPECT_NE(ref6, ref4);
- assert_buffer_state(ref1, TestBufferStats().used(48).hold(0).dead(16));
+ assert_buffer_state(ref1, TestBufferStats().used(3).hold(0).dead(1));
assert_buffer_state(ref2, TestBufferStats().used(4).hold(0).dead(2).extra_used(2002));
}
diff --git a/vespalib/src/vespa/vespalib/btree/btree.h b/vespalib/src/vespa/vespalib/btree/btree.h
index 32b538b65ec..c2f5aac01b7 100644
--- a/vespalib/src/vespa/vespalib/btree/btree.h
+++ b/vespalib/src/vespa/vespalib/btree/btree.h
@@ -61,9 +61,9 @@ public:
}
void
- disableElemHoldList()
+ disable_entry_hold_list()
{
- _alloc.disableElemHoldList();
+ _alloc.disable_entry_hold_list();
}
// Inherit doc from BTreeRoot
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
index 784e95e3817..b537602c703 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
@@ -60,8 +60,8 @@ public:
_nodeStore.disableFreeLists();
}
- void disableElemHoldList() {
- _nodeStore.disableElemHoldList();
+ void disable_entry_hold_list() {
+ _nodeStore.disable_entry_hold_list();
}
/**
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp
index a38b68afe73..d23c8fc2054 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.hpp
@@ -162,7 +162,7 @@ holdNode(BTreeNode::Ref nodeRef,
InternalNodeType *node)
{
if (node->getFrozen()) {
- _nodeStore.holdElem(nodeRef);
+ _nodeStore.hold_entry(nodeRef);
} else {
node->clean();
_internalHoldUntilFreeze.push_back(nodeRef);
@@ -178,7 +178,7 @@ holdNode(BTreeNode::Ref nodeRef,
LeafNodeType *node)
{
if (node->getFrozen()) {
- _nodeStore.holdElem(nodeRef);
+ _nodeStore.hold_entry(nodeRef);
} else {
node->clean();
_leafHoldUntilFreeze.push_back(nodeRef);
@@ -235,7 +235,7 @@ freeze()
InternalNodeType *inode = mapInternalRef(i);
(void) inode;
assert(inode->getFrozen());
- _nodeStore.holdElem(i);
+ _nodeStore.hold_entry(i);
}
_internalHoldUntilFreeze.clear();
}
@@ -245,7 +245,7 @@ freeze()
LeafNodeType *lnode = mapLeafRef(i);
(void) lnode;
assert(lnode->getFrozen());
- _nodeStore.holdElem(i);
+ _nodeStore.hold_entry(i);
}
_leafHoldUntilFreeze.clear();
}
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.h b/vespalib/src/vespa/vespalib/btree/btreenodestore.h
index 77ebfc96546..38bf4e5ed4e 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.h
@@ -29,16 +29,16 @@ class BTreeNodeBufferType : public datastore::BufferType<EntryType, FrozenBtreeN
using ParentType = datastore::BufferType<EntryType, FrozenBtreeNode<EntryType>>;
using ParentType::empty_entry;
using ParentType::_arraySize;
- using ElemCount = typename ParentType::ElemCount;
+ using EntryCount = typename ParentType::EntryCount;
using CleanContext = typename ParentType::CleanContext;
public:
BTreeNodeBufferType(uint32_t min_entries, uint32_t max_entries)
: ParentType(1, min_entries, max_entries)
{ }
- void initializeReservedElements(void *buffer, ElemCount reservedElements) override;
+ void initialize_reserved_entries(void *buffer, EntryCount reserved_entries) override;
- void cleanHold(void *buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx) override;
+ void clean_hold(void *buffer, size_t offset, EntryCount num_entries, CleanContext cleanCtx) override;
};
@@ -79,7 +79,7 @@ public:
~BTreeNodeStore();
void disableFreeLists() { _store.disableFreeLists(); }
- void disableElemHoldList() { _store.disableElemHoldList(); }
+ void disable_entry_hold_list() { _store.disable_entry_hold_list(); }
static bool isValidRef(EntryRef ref) { return ref.valid(); }
@@ -152,8 +152,8 @@ public:
return _store.freeListAllocator<InternalNodeType, BTreeNodeReclaimer>(NODETYPE_INTERNAL).alloc(rhs);
}
- void holdElem(EntryRef ref) {
- _store.holdElem(ref, 1);
+ void hold_entry(EntryRef ref) {
+ _store.hold_entry(ref);
}
std::unique_ptr<vespalib::datastore::CompactingBuffers> start_compact_worst(const CompactionStrategy& compaction_strategy);
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
index a1ffb4d445d..99054f35d61 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
@@ -11,11 +11,11 @@ namespace vespalib::btree {
template <typename EntryType>
void
-BTreeNodeBufferType<EntryType>::initializeReservedElements(void *buffer, ElemCount reservedElements)
+BTreeNodeBufferType<EntryType>::initialize_reserved_entries(void *buffer, EntryCount reserved_entries)
{
- ParentType::initializeReservedElements(buffer, reservedElements);
+ ParentType::initialize_reserved_entries(buffer, reserved_entries);
EntryType *e = static_cast<EntryType *>(buffer);
- for (size_t j = reservedElements; j != 0; --j) {
+ for (size_t j = reserved_entries; j != 0; --j) {
e->freeze();
++e;
}
@@ -24,10 +24,10 @@ BTreeNodeBufferType<EntryType>::initializeReservedElements(void *buffer, ElemCou
template <typename EntryType>
void
-BTreeNodeBufferType<EntryType>::cleanHold(void *buffer, size_t offset, ElemCount numElems, CleanContext)
+BTreeNodeBufferType<EntryType>::clean_hold(void *buffer, size_t offset, EntryCount num_entries, CleanContext)
{
EntryType *e = static_cast<EntryType *>(buffer) + offset;
- for (size_t j = numElems; j != 0; --j) {
+ for (size_t j = num_entries; j != 0; --j) {
e->cleanFrozen();
++e;
}
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.h b/vespalib/src/vespa/vespalib/btree/btreestore.h
index fc991b2e295..7dd839f1529 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.h
@@ -105,9 +105,9 @@ public:
_allocator.disableFreeLists();
}
- void disableElemHoldList() {
- _store.disableElemHoldList();
- _allocator.disableElemHoldList();
+ void disable_entry_hold_list() {
+ _store.disable_entry_hold_list();
+ _allocator.disable_entry_hold_list();
}
BTreeTypeRefPair allocNewBTree() {
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.hpp b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
index 7a5334593b8..90c302af5e4 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
@@ -156,7 +156,7 @@ makeTree(EntryRef &ref,
lNode->freeze();
BTreeTypeRefPair tPair(allocBTree());
tPair.data->setRoots(lPair.ref);
- _store.holdElem(ref, clusterSize);
+ _store.hold_entry(ref);
ref = tPair.ref;
}
@@ -176,7 +176,7 @@ makeArray(EntryRef &ref, EntryRef root, LeafNodeType *leafNode)
kd->setData(leafNode->getData(idx));
}
assert(kd == kPair.data + clusterSize);
- _store.holdElem(ref, 1);
+ _store.hold_entry(ref);
if (!leafNode->getFrozen()) {
leafNode->freeze();
}
@@ -255,7 +255,7 @@ insert(EntryRef &ref,
kd->setData(i->getData());
}
assert(kd == kPair.data + clusterSize + 1);
- _store.holdElem(ref, clusterSize);
+ _store.hold_entry(ref);
ref = kPair.ref;
return true;
}
@@ -284,7 +284,7 @@ insert(EntryRef &ref,
lNode->freeze();
BTreeTypeRefPair tPair(allocBTree());
tPair.data->setRoots(lPair.ref); // allow immediate access to readers
- _store.holdElem(ref, clusterSize);
+ _store.hold_entry(ref);
ref = tPair.ref;
return true;
#endif
@@ -339,7 +339,7 @@ remove(EntryRef &ref,
if (oldi == olde || comp(key, oldi->_key))
return false; // not found
if (clusterSize == 1) {
- _store.holdElem(ref, 1);
+ _store.hold_entry(ref);
ref = EntryRef();
return true;
}
@@ -357,7 +357,7 @@ remove(EntryRef &ref,
kd->setData(i->getData());
}
assert(kd == kPair.data + clusterSize - 1);
- _store.holdElem(ref, clusterSize);
+ _store.hold_entry(ref);
ref = kPair.ref;
return true;
}
@@ -670,7 +670,7 @@ applyCluster(EntryRef &ref,
if (newSizeMin <= clusterLimit) {
uint32_t newSize = getNewClusterSize(ob, oe, a, ae, r, re, comp);
if (newSize == 0) {
- _store.holdElem(ref, clusterSize);
+ _store.hold_entry(ref);
ref = EntryRef();
return true;
}
@@ -678,7 +678,7 @@ applyCluster(EntryRef &ref,
KeyDataTypeRefPair kPair(allocKeyData(newSize));
applyCluster(ob, oe, kPair.data, kPair.data + newSize,
a, ae, r, re, comp);
- _store.holdElem(ref, clusterSize);
+ _store.hold_entry(ref);
ref = kPair.ref;
return true;
}
@@ -735,7 +735,7 @@ normalizeTree(EntryRef &ref,
{
EntryRef root = tree->getRoot();
if (!NodeAllocatorType::isValidRef(root)) {
- _store.holdElem(ref, 1);
+ _store.hold_entry(ref);
ref = EntryRef();
return;
}
@@ -798,10 +798,8 @@ clear(const EntryRef ref)
if (clusterSize == 0) {
BTreeType *tree = getWTreeEntry(iRef);
tree->clear(_allocator);
- _store.holdElem(ref, 1);
- } else {
- _store.holdElem(ref, clusterSize);
}
+ _store.hold_entry(ref);
}
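
In clear(), the two holdElem calls collapse into one because hold_entry() no longer needs to know whether the reference points at a tree object (one element) or a key/data cluster (clusterSize elements); the entry is the unit of hold in both cases. The same reasoning applies to the mechanical substitutions in makeTree, insert, remove, applyCluster and normalizeTree above. A tiny illustrative sketch (hypothetical types, not the BTreeStore API) of why the branch becomes unnecessary:

    #include <cstdint>

    struct EntryRef { uint32_t ref = 0; };
    struct Store {
        uint32_t hold_entries = 0;
        void hold_entry(EntryRef) { ++hold_entries; }  // was: holdElem(ref, len)
    };

    int main() {
        Store store;
        EntryRef tree_ref{1}, cluster_ref{2};
        // These cases used to require different lengths (1 vs clusterSize);
        // now both are the same single-entry hold.
        store.hold_entry(tree_ref);
        store.hold_entry(cluster_ref);
        return store.hold_entries == 2 ? 0 : 1;
    }
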
diff --git a/vespalib/src/vespa/vespalib/datastore/allocator.hpp b/vespalib/src/vespa/vespalib/datastore/allocator.hpp
index 83087a3286c..fa97ef9a5f5 100644
--- a/vespalib/src/vespa/vespalib/datastore/allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/allocator.hpp
@@ -41,14 +41,12 @@ Allocator<EntryT, RefT>::allocArray(ConstArrayRef array)
BufferState &state = _store.getBufferState(buffer_id);
assert(state.isActive());
assert(state.getArraySize() == array.size());
- size_t oldBufferSize = state.size();
- assert((oldBufferSize % array.size()) == 0);
- RefT ref((oldBufferSize / array.size()), buffer_id);
+ RefT ref(state.size(), buffer_id);
EntryT *buf = _store.template getEntryArray<EntryT>(ref, array.size());
for (size_t i = 0; i < array.size(); ++i) {
new (static_cast<void *>(buf + i)) EntryT(array[i]);
}
- state.stats().pushed_back(array.size());
+ state.stats().pushed_back(1);
return HandleType(ref, buf);
}
@@ -60,15 +58,13 @@ Allocator<EntryT, RefT>::allocArray()
uint32_t buffer_id = _store.primary_buffer_id(_typeId);
BufferState &state = _store.getBufferState(buffer_id);
assert(state.isActive());
- size_t oldBufferSize = state.size();
- auto size = state.getArraySize();
- assert((oldBufferSize % size) == 0);
- RefT ref((oldBufferSize / size), buffer_id);
- EntryT *buf = _store.template getEntryArray<EntryT>(ref, size);
- for (size_t i = 0; i < size; ++i) {
+ RefT ref(state.size(), buffer_id);
+ auto array_size = state.getArraySize();
+ EntryT *buf = _store.template getEntryArray<EntryT>(ref, array_size);
+ for (size_t i = 0; i < array_size; ++i) {
new (static_cast<void *>(buf + i)) EntryT();
}
- state.stats().pushed_back(size);
+ state.stats().pushed_back(1);
return HandleType(ref, buf);
}
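
With entry-based accounting, state.size() is already the number of entries in the buffer, so it can be used directly as the EntryRef offset; the old code derived the offset by dividing the element count by the array size, and pushed_back() now advances the used counter by one entry instead of array.size() elements. A condensed model of the before/after offset computation, with made-up values:

    #include <cassert>
    #include <cstddef>

    int main() {
        const std::size_t array_size = 4;   // elements per entry (allocation unit)
        std::size_t used_elements = 12;     // old counter: elements
        std::size_t used_entries  = 3;      // new counter: entries

        std::size_t old_offset = used_elements / array_size;  // derived offset
        std::size_t new_offset = used_entries;                // offset is the count itself

        assert(old_offset == new_offset);   // same reference, simpler bookkeeping
    }
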
diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.hpp b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
index 40d750b399d..8e9fe779ba9 100644
--- a/vespalib/src/vespa/vespalib/datastore/array_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
@@ -148,10 +148,9 @@ ArrayStore<ElemT, RefT, TypeMapperT>::remove(EntryRef ref)
RefT internalRef(ref);
uint32_t typeId = _store.getTypeId(internalRef.bufferId());
if (typeId != _largeArrayTypeId) {
- size_t arraySize = _mapper.get_array_size(typeId);
- _store.holdElem(ref, arraySize);
+ _store.hold_entry(ref);
} else {
- _store.holdElem(ref, 1, sizeof(ElemT) * get(ref).size());
+ _store.hold_entry(ref, sizeof(ElemT) * get(ref).size());
}
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_free_list.cpp b/vespalib/src/vespa/vespalib/datastore/buffer_free_list.cpp
index 224ed4b0c8f..a44959c7811 100644
--- a/vespalib/src/vespa/vespalib/datastore/buffer_free_list.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_free_list.cpp
@@ -20,8 +20,8 @@ BufferFreeList::detach()
_free_list->detach(*this);
}
-BufferFreeList::BufferFreeList(std::atomic<ElemCount>& dead_elems)
- : _dead_elems(dead_elems),
+BufferFreeList::BufferFreeList(std::atomic<EntryCount>& dead_entries)
+ : _dead_entries(dead_entries),
_array_size(0),
_free_list(),
_free_refs()
@@ -66,7 +66,7 @@ BufferFreeList::pop_entry() {
if (empty()) {
detach();
}
- _dead_elems.store(_dead_elems.load(std::memory_order_relaxed) - _array_size, std::memory_order_relaxed);
+ _dead_entries.store(_dead_entries.load(std::memory_order_relaxed) - 1, std::memory_order_relaxed);
return ret;
}
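
pop_entry() reuses one dead entry, so the dead counter now drops by exactly one; previously the counter was in elements and had to drop by _array_size. The free_list_test change above (expecting 17 instead of 18 - array_size) is the same adjustment seen from the test side. A sketch of the accounting under the new scheme, using a plain atomic rather than the real BufferFreeList:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    int main() {
        std::atomic<uint32_t> dead_entries{18};
        // Popping one entry from the free list makes it live again.
        dead_entries.store(dead_entries.load(std::memory_order_relaxed) - 1,
                           std::memory_order_relaxed);
        assert(dead_entries.load(std::memory_order_relaxed) == 17);
    }
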
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_free_list.h b/vespalib/src/vespa/vespalib/datastore/buffer_free_list.h
index 148ddd8db88..c570c8e3103 100644
--- a/vespalib/src/vespa/vespalib/datastore/buffer_free_list.h
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_free_list.h
@@ -19,7 +19,7 @@ class BufferFreeList {
private:
using EntryRefArray = vespalib::Array<EntryRef>;
- std::atomic<ElemCount>& _dead_elems;
+ std::atomic<EntryCount>& _dead_entries;
uint32_t _array_size;
FreeList* _free_list;
EntryRefArray _free_refs;
@@ -28,7 +28,7 @@ private:
void detach();
public:
- BufferFreeList(std::atomic<ElemCount>& dead_elems);
+    BufferFreeList(std::atomic<EntryCount>& dead_entries);
~BufferFreeList();
BufferFreeList(BufferFreeList&&) = default; // Needed for emplace_back() during setup.
BufferFreeList(const BufferFreeList&) = delete;
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp b/vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp
index 8d97414626e..0d96e3f6d47 100644
--- a/vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_stats.cpp
@@ -6,27 +6,27 @@
namespace vespalib::datastore {
BufferStats::BufferStats()
- : _alloc_elems(0),
- _used_elems(0),
- _hold_elems(0),
- _dead_elems(0),
+ : _alloc_entries(0),
+ _used_entries(0),
+ _hold_entries(0),
+ _dead_entries(0),
_extra_used_bytes(0),
_extra_hold_bytes(0)
{
}
void
-BufferStats::add_to_mem_stats(size_t element_size, MemoryStats& stats) const
+BufferStats::add_to_mem_stats(size_t entry_size, MemoryStats& stats) const
{
size_t extra_used = extra_used_bytes();
- stats._allocElems += capacity();
- stats._usedElems += size();
- stats._deadElems += dead_elems();
- stats._holdElems += hold_elems();
- stats._allocBytes += (capacity() * element_size) + extra_used;
- stats._usedBytes += (size() * element_size) + extra_used;
- stats._deadBytes += dead_elems() * element_size;
- stats._holdBytes += (hold_elems() * element_size) + extra_hold_bytes();
+ stats._alloc_entries += capacity();
+ stats._used_entries += size();
+ stats._dead_entries += dead_entries();
+ stats._hold_entries += hold_entries();
+ stats._allocBytes += (capacity() * entry_size) + extra_used;
+ stats._usedBytes += (size() * entry_size) + extra_used;
+ stats._deadBytes += dead_entries() * entry_size;
+ stats._holdBytes += (hold_entries() * entry_size) + extra_hold_bytes();
}
InternalBufferStats::InternalBufferStats()
@@ -37,20 +37,20 @@ InternalBufferStats::InternalBufferStats()
void
InternalBufferStats::clear()
{
- _alloc_elems.store(0, std::memory_order_relaxed);
- _used_elems.store(0, std::memory_order_relaxed);
- _hold_elems.store(0, std::memory_order_relaxed);
- _dead_elems.store(0, std::memory_order_relaxed);
+ _alloc_entries.store(0, std::memory_order_relaxed);
+ _used_entries.store(0, std::memory_order_relaxed);
+ _hold_entries.store(0, std::memory_order_relaxed);
+ _dead_entries.store(0, std::memory_order_relaxed);
_extra_used_bytes.store(0, std::memory_order_relaxed);
_extra_hold_bytes.store(0, std::memory_order_relaxed);
}
void
-InternalBufferStats::dec_hold_elems(size_t value)
+InternalBufferStats::dec_hold_entries(size_t value)
{
- ElemCount elems = hold_elems();
+ EntryCount elems = hold_entries();
assert(elems >= value);
- _hold_elems.store(elems - value, std::memory_order_relaxed);
+ _hold_entries.store(elems - value, std::memory_order_relaxed);
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_stats.h b/vespalib/src/vespa/vespalib/datastore/buffer_stats.h
index 66f8b532c41..1974efa97ec 100644
--- a/vespalib/src/vespa/vespalib/datastore/buffer_stats.h
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_stats.h
@@ -13,16 +13,16 @@ namespace vespalib::datastore {
*/
class BufferStats {
protected:
- // The number of elements that are allocated in the buffer.
- std::atomic<ElemCount> _alloc_elems;
- // The number of elements (of the allocated) that are used: _used_elems <= _alloc_elems.
- std::atomic<ElemCount> _used_elems;
- // The number of elements (of the used) that are on hold: _hold_elems <= _used_elems.
- // "On hold" is a transitionary state used when removing elements.
- std::atomic<ElemCount> _hold_elems;
- // The number of elements (of the used) that are dead: _dead_elems <= _used_elems.
- // A dead element was first on hold, and is now available for reuse in the free list (if enabled).
- std::atomic<ElemCount> _dead_elems;
+ // The number of entries that are allocated in the buffer.
+ std::atomic<EntryCount> _alloc_entries;
+ // The number of entries (of the allocated) that are used: _used_entries <= _alloc_entries.
+ std::atomic<EntryCount> _used_entries;
+ // The number of entries (of the used) that are on hold: _hold_entries <= _used_entries.
+    // "On hold" is a transitional state used when removing entries.
+ std::atomic<EntryCount> _hold_entries;
+ // The number of entries (of the used) that are dead: _dead_entries <= _used_entries.
+ // A dead entry was first on hold, and is now available for reuse in the free list (if enabled).
+ std::atomic<EntryCount> _dead_entries;
// Number of bytes that are heap allocated (and used) by elements that are stored in this buffer.
// For simple types this is always 0.
@@ -34,22 +34,22 @@ protected:
public:
BufferStats();
- size_t size() const { return _used_elems.load(std::memory_order_relaxed); }
- size_t capacity() const { return _alloc_elems.load(std::memory_order_relaxed); }
+ size_t size() const { return _used_entries.load(std::memory_order_relaxed); }
+ size_t capacity() const { return _alloc_entries.load(std::memory_order_relaxed); }
size_t remaining() const { return capacity() - size(); }
- void pushed_back(size_t num_elems) {
- _used_elems.store(size() + num_elems, std::memory_order_relaxed);
+ void pushed_back(size_t num_entries) {
+ _used_entries.store(size() + num_entries, std::memory_order_relaxed);
}
- size_t dead_elems() const { return _dead_elems.load(std::memory_order_relaxed); }
- size_t hold_elems() const { return _hold_elems.load(std::memory_order_relaxed); }
+ size_t dead_entries() const { return _dead_entries.load(std::memory_order_relaxed); }
+ size_t hold_entries() const { return _hold_entries.load(std::memory_order_relaxed); }
size_t extra_used_bytes() const { return _extra_used_bytes.load(std::memory_order_relaxed); }
size_t extra_hold_bytes() const { return _extra_hold_bytes.load(std::memory_order_relaxed); }
void inc_extra_used_bytes(size_t value) { _extra_used_bytes.store(extra_used_bytes() + value, std::memory_order_relaxed); }
- void add_to_mem_stats(size_t element_size, MemoryStats& stats) const;
+ void add_to_mem_stats(size_t entry_size, MemoryStats& stats) const;
};
/**
@@ -59,15 +59,15 @@ class InternalBufferStats : public BufferStats {
public:
InternalBufferStats();
void clear();
- void set_alloc_elems(size_t value) { _alloc_elems.store(value, std::memory_order_relaxed); }
- void set_dead_elems(size_t value) { _dead_elems.store(value, std::memory_order_relaxed); }
- void set_hold_elems(size_t value) { _hold_elems.store(value, std::memory_order_relaxed); }
- void inc_dead_elems(size_t value) { _dead_elems.store(dead_elems() + value, std::memory_order_relaxed); }
- void inc_hold_elems(size_t value) { _hold_elems.store(hold_elems() + value, std::memory_order_relaxed); }
- void dec_hold_elems(size_t value);
+ void set_alloc_entries(size_t value) { _alloc_entries.store(value, std::memory_order_relaxed); }
+ void set_dead_entries(size_t value) { _dead_entries.store(value, std::memory_order_relaxed); }
+ void set_hold_entries(size_t value) { _hold_entries.store(value, std::memory_order_relaxed); }
+ void inc_dead_entries(size_t value) { _dead_entries.store(dead_entries() + value, std::memory_order_relaxed); }
+ void inc_hold_entries(size_t value) { _hold_entries.store(hold_entries() + value, std::memory_order_relaxed); }
+ void dec_hold_entries(size_t value);
void inc_extra_hold_bytes(size_t value) { _extra_hold_bytes.store(extra_hold_bytes() + value, std::memory_order_relaxed); }
- std::atomic<ElemCount>& used_elems_ref() { return _used_elems; }
- std::atomic<ElemCount>& dead_elems_ref() { return _dead_elems; }
+ std::atomic<EntryCount>& used_entries_ref() { return _used_entries; }
+ std::atomic<EntryCount>& dead_entries_ref() { return _dead_entries; }
std::atomic<size_t>& extra_used_bytes_ref() { return _extra_used_bytes; }
std::atomic<size_t>& extra_hold_bytes_ref() { return _extra_hold_bytes; }
};
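
The four counters keep their invariants (used <= alloc, hold <= used, dead <= used) but are now entry counts, and the byte figures in add_to_mem_stats are derived by multiplying with entry_size (the bytes of one whole allocation unit) plus the extra heap bytes. A rough model of that derivation under those assumptions, not the actual MemoryStats type:

    #include <cassert>
    #include <cstddef>

    // Illustrative numbers only; mirrors the entry-based byte accounting.
    int main() {
        const std::size_t entry_size = 24;   // e.g. 6 elements of 4 bytes each
        std::size_t alloc_entries = 100, used_entries = 40, hold_entries = 2, dead_entries = 5;
        std::size_t extra_used_bytes = 64, extra_hold_bytes = 16;

        std::size_t alloc_bytes = alloc_entries * entry_size + extra_used_bytes;
        std::size_t used_bytes  = used_entries  * entry_size + extra_used_bytes;
        std::size_t dead_bytes  = dead_entries  * entry_size;
        std::size_t hold_bytes  = hold_entries  * entry_size + extra_hold_bytes;

        assert(used_bytes <= alloc_bytes && dead_bytes <= used_bytes && hold_bytes <= used_bytes);
    }
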
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp b/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp
index d7abee3f993..0d43ede9e62 100644
--- a/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp
@@ -37,7 +37,7 @@ BufferTypeBase::BufferTypeBase(uint32_t arraySize,
_num_entries_for_new_buffer(std::min(num_entries_for_new_buffer, max_entries)),
_allocGrowFactor(allocGrowFactor),
_holdBuffers(0),
- _holdUsedElems(0),
+ _hold_used_entries(0),
_aggr_counts(),
_active_buffers()
{
@@ -53,59 +53,59 @@ BufferTypeBase::BufferTypeBase(uint32_t arraySize,
BufferTypeBase::~BufferTypeBase()
{
assert(_holdBuffers == 0);
- assert(_holdUsedElems == 0);
+ assert(_hold_used_entries == 0);
assert(_aggr_counts.empty());
assert(_active_buffers.empty());
}
-ElemCount
-BufferTypeBase::getReservedElements(uint32_t bufferId) const
+EntryCount
+BufferTypeBase::get_reserved_entries(uint32_t bufferId) const
{
- return bufferId == 0 ? _arraySize : 0u;
+ return bufferId == 0 ? 1u : 0u;
}
void
-BufferTypeBase::onActive(uint32_t bufferId, std::atomic<ElemCount>* usedElems, std::atomic<ElemCount>* deadElems, void* buffer)
+BufferTypeBase::on_active(uint32_t bufferId, std::atomic<EntryCount>* used_entries, std::atomic<EntryCount>* dead_entries, void* buffer)
{
- _aggr_counts.add_buffer(usedElems, deadElems);
+ _aggr_counts.add_buffer(used_entries, dead_entries);
assert(std::find(_active_buffers.begin(), _active_buffers.end(), bufferId) == _active_buffers.end());
_active_buffers.emplace_back(bufferId);
- size_t reservedElems = getReservedElements(bufferId);
- if (reservedElems != 0u) {
- initializeReservedElements(buffer, reservedElems);
- *usedElems = reservedElems;
- *deadElems = reservedElems;
+ auto reserved_entries = get_reserved_entries(bufferId);
+ if (reserved_entries != 0u) {
+ initialize_reserved_entries(buffer, reserved_entries);
+ *used_entries = reserved_entries;
+ *dead_entries = reserved_entries;
}
}
void
-BufferTypeBase::onHold(uint32_t buffer_id, const std::atomic<ElemCount>* usedElems, const std::atomic<ElemCount>* deadElems)
+BufferTypeBase::on_hold(uint32_t buffer_id, const std::atomic<EntryCount>* used_entries, const std::atomic<EntryCount>* dead_entries)
{
++_holdBuffers;
auto itr = std::find(_active_buffers.begin(), _active_buffers.end(), buffer_id);
assert(itr != _active_buffers.end());
_active_buffers.erase(itr);
- _aggr_counts.remove_buffer(usedElems, deadElems);
- _holdUsedElems += *usedElems;
+ _aggr_counts.remove_buffer(used_entries, dead_entries);
+ _hold_used_entries += *used_entries;
}
void
-BufferTypeBase::onFree(ElemCount usedElems)
+BufferTypeBase::on_free(EntryCount used_entries)
{
--_holdBuffers;
- assert(_holdUsedElems >= usedElems);
- _holdUsedElems -= usedElems;
+ assert(_hold_used_entries >= used_entries);
+ _hold_used_entries -= used_entries;
}
void
-BufferTypeBase::resume_primary_buffer(uint32_t buffer_id, std::atomic<ElemCount>* used_elems, std::atomic<ElemCount>* dead_elems)
+BufferTypeBase::resume_primary_buffer(uint32_t buffer_id, std::atomic<EntryCount>* used_entries, std::atomic<EntryCount>* dead_entries)
{
auto itr = std::find(_active_buffers.begin(), _active_buffers.end(), buffer_id);
assert(itr != _active_buffers.end());
_active_buffers.erase(itr);
_active_buffers.emplace_back(buffer_id);
- _aggr_counts.remove_buffer(used_elems, dead_elems);
- _aggr_counts.add_buffer(used_elems, dead_elems);
+ _aggr_counts.remove_buffer(used_entries, dead_entries);
+ _aggr_counts.add_buffer(used_entries, dead_entries);
}
const alloc::MemoryAllocator*
@@ -115,7 +115,7 @@ BufferTypeBase::get_memory_allocator() const
}
void
-BufferTypeBase::clampMaxArrays(uint32_t max_entries)
+BufferTypeBase::clamp_max_entries(uint32_t max_entries)
{
_max_entries = std::min(_max_entries, max_entries);
_min_entries = std::min(_min_entries, _max_entries);
@@ -123,9 +123,9 @@ BufferTypeBase::clampMaxArrays(uint32_t max_entries)
}
size_t
-BufferTypeBase::calcArraysToAlloc(uint32_t bufferId, ElemCount elemsNeeded, bool resizing) const
+BufferTypeBase::calc_entries_to_alloc(uint32_t bufferId, EntryCount free_entries_needed, bool resizing) const
{
- size_t reservedElems = getReservedElements(bufferId);
+ size_t reserved_entries = get_reserved_entries(bufferId);
BufferCounts last_bc;
BufferCounts bc;
if (resizing) {
@@ -134,40 +134,37 @@ BufferTypeBase::calcArraysToAlloc(uint32_t bufferId, ElemCount elemsNeeded, bool
}
}
bc = _aggr_counts.all_buffers();
- assert((bc.used_elems % _arraySize) == 0);
- assert((bc.dead_elems % _arraySize) == 0);
- assert(bc.used_elems >= bc.dead_elems);
- size_t neededArrays = (elemsNeeded + (resizing ? last_bc.used_elems : reservedElems) + _arraySize - 1) / _arraySize;
-
- size_t liveArrays = (bc.used_elems - bc.dead_elems) / _arraySize;
- size_t growArrays = (liveArrays * _allocGrowFactor);
- size_t usedArrays = last_bc.used_elems / _arraySize;
- size_t wantedArrays = std::max((resizing ? usedArrays : 0u) + growArrays,
- static_cast<size_t>(_min_entries));
-
- size_t result = wantedArrays;
- if (result < neededArrays) {
- result = neededArrays;
+ assert(bc.used_entries >= bc.dead_entries);
+ size_t needed_entries = static_cast<size_t>(free_entries_needed) + (resizing ? last_bc.used_entries : reserved_entries);
+ size_t live_entries = (bc.used_entries - bc.dead_entries);
+ size_t grow_entries = (live_entries * _allocGrowFactor);
+ size_t used_entries = last_bc.used_entries;
+ size_t wanted_entries = std::max((resizing ? used_entries : 0u) + grow_entries,
+ static_cast<size_t>(_min_entries));
+
+ size_t result = wanted_entries;
+ if (result < needed_entries) {
+ result = needed_entries;
}
if (result > _max_entries) {
result = _max_entries;
}
- if (result < neededArrays) {
+ if (result < needed_entries) {
vespalib::asciistream s;
s << "BufferTypeBase::calcArraysToAlloc(" <<
"bufferId=" << bufferId <<
- ",elemsNeeeded=" << elemsNeeded <<
+ ",free_entries_needed=" << free_entries_needed <<
",resizing=" << (resizing ? "true" : "false") << ")" <<
- " wantedArrays=" << wantedArrays <<
+ " wanted_entries=" << wanted_entries <<
", _arraySize=" << _arraySize <<
", _max_entries=" << _max_entries <<
- ", reservedElems=" << reservedElems <<
- ", liveArrays=" << liveArrays <<
- ", growArrays=" << growArrays <<
- ", usedArrays=" << usedArrays <<
+ ", reserved_entries=" << reserved_entries <<
+ ", live_entries=" << live_entries <<
+ ", grow_entries=" << grow_entries <<
+ ", used_entries=" << used_entries <<
", typeid(*this).name=\"" << typeid(*this).name() << "\"" <<
- ", newArrays=" << result <<
- " < neededArrays=" << neededArrays;;
+ ", new_entries=" << result <<
+ " < needed_entries=" << needed_entries;
throw vespalib::OverflowException(s.c_str());
}
return result;
@@ -194,22 +191,22 @@ BufferTypeBase::AggregatedBufferCounts::AggregatedBufferCounts()
}
void
-BufferTypeBase::AggregatedBufferCounts::add_buffer(const std::atomic<ElemCount>* used_elems, const std::atomic<ElemCount>* dead_elems)
+BufferTypeBase::AggregatedBufferCounts::add_buffer(const std::atomic<EntryCount>* used_entries, const std::atomic<EntryCount>* dead_entries)
{
for (const auto& elem : _counts) {
- assert(elem.used_ptr != used_elems);
- assert(elem.dead_ptr != dead_elems);
+ assert(elem.used_ptr != used_entries);
+ assert(elem.dead_ptr != dead_entries);
}
- _counts.emplace_back(used_elems, dead_elems);
+ _counts.emplace_back(used_entries, dead_entries);
}
void
-BufferTypeBase::AggregatedBufferCounts::remove_buffer(const std::atomic<ElemCount>* used_elems, const std::atomic<ElemCount>* dead_elems)
+BufferTypeBase::AggregatedBufferCounts::remove_buffer(const std::atomic<EntryCount>* used_entries, const std::atomic<EntryCount>* dead_entries)
{
auto itr = std::find_if(_counts.begin(), _counts.end(),
- [=](const auto& elem){ return elem.used_ptr == used_elems; });
+ [=](const auto& elem){ return elem.used_ptr == used_entries; });
assert(itr != _counts.end());
- assert(itr->dead_ptr == dead_elems);
+ assert(itr->dead_ptr == dead_entries);
_counts.erase(itr);
}
@@ -219,8 +216,8 @@ BufferTypeBase::AggregatedBufferCounts::last_buffer() const
BufferCounts result;
assert(!_counts.empty());
const auto& last = _counts.back();
- result.used_elems += last.used_ptr->load(std::memory_order_relaxed);
- result.dead_elems += last.dead_ptr->load(std::memory_order_relaxed);
+ result.used_entries += last.used_ptr->load(std::memory_order_relaxed);
+ result.dead_entries += last.dead_ptr->load(std::memory_order_relaxed);
return result;
}
@@ -229,8 +226,8 @@ BufferTypeBase::AggregatedBufferCounts::all_buffers() const
{
BufferCounts result;
for (const auto& elem : _counts) {
- result.used_elems += elem.used_ptr->load(std::memory_order_relaxed);
- result.dead_elems += elem.dead_ptr->load(std::memory_order_relaxed);
+ result.used_entries += elem.used_ptr->load(std::memory_order_relaxed);
+ result.dead_entries += elem.dead_ptr->load(std::memory_order_relaxed);
}
return result;
}
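
calc_entries_to_alloc now works directly in entries: needed is free_entries_needed plus either the reserved entries (new buffer) or the entries already used (resize); wanted grows the live entry count by _allocGrowFactor with _min_entries as a floor; and the result is raised to needed, then clamped to _max_entries. A standalone approximation of that growth rule (constants are illustrative, not the defaults; the real code also distinguishes per-buffer from aggregated counts and throws if the clamp falls below needed):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    std::size_t entries_to_alloc(std::size_t free_needed, std::size_t reserved, std::size_t live,
                                 float grow_factor, std::size_t min_entries, std::size_t max_entries) {
        std::size_t needed = free_needed + reserved;
        std::size_t wanted = std::max(static_cast<std::size_t>(live * grow_factor), min_entries);
        std::size_t result = std::max(wanted, needed);
        return std::min(result, max_entries);
    }

    int main() {
        std::printf("%zu\n", entries_to_alloc(1, 1, 4000, 0.2f, 16, 32768));    // 800
        std::printf("%zu\n", entries_to_alloc(1, 1, 200000, 0.2f, 16, 32768));  // clamped to 32768
    }
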
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_type.h b/vespalib/src/vespa/vespalib/datastore/buffer_type.h
index 40b78d2bb9b..ea52b026228 100644
--- a/vespalib/src/vespa/vespalib/datastore/buffer_type.h
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_type.h
@@ -10,7 +10,7 @@ namespace vespalib::alloc { class MemoryAllocator; }
namespace vespalib::datastore {
-using ElemCount = uint64_t;
+using EntryCount = uint32_t;
/**
* Abstract class used to manage allocation and de-allocation of a specific data type in underlying memory buffers in a data store.
@@ -22,7 +22,7 @@ using ElemCount = uint64_t;
class BufferTypeBase
{
public:
- using ElemCount = vespalib::datastore::ElemCount;
+ using EntryCount = vespalib::datastore::EntryCount;
class CleanContext {
private:
std::atomic<size_t> &_extraUsedBytes;
@@ -43,49 +43,48 @@ public:
BufferTypeBase(uint32_t arraySize, uint32_t min_entries, uint32_t max_entries,
uint32_t num_entries_for_new_buffer, float allocGrowFactor) noexcept;
virtual ~BufferTypeBase();
- virtual void destroyElements(void *buffer, ElemCount numElems) = 0;
- virtual void fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems) = 0;
+ virtual void destroy_entries(void *buffer, EntryCount num_entries) = 0;
+ virtual void fallback_copy(void *newBuffer, const void *oldBuffer, EntryCount num_entries) = 0;
/**
- * Return number of reserved elements at start of buffer, to avoid
- * invalid reference and handle data at negative offset (alignment
- * hacks) as used by dense tensor store.
+ * Return number of reserved entries at start of buffer, to avoid
+ * invalid reference.
*/
- virtual ElemCount getReservedElements(uint32_t bufferId) const;
+ virtual EntryCount get_reserved_entries(uint32_t bufferId) const;
/**
* Initialize reserved elements at start of buffer.
*/
- virtual void initializeReservedElements(void *buffer, ElemCount reservedElements) = 0;
- virtual size_t elementSize() const = 0;
- virtual void cleanHold(void *buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx) = 0;
+ virtual void initialize_reserved_entries(void *buffer, EntryCount reserved_entries) = 0;
+ virtual size_t entry_size() const = 0; // Size of entry measured in bytes
+ virtual void clean_hold(void *buffer, size_t offset, EntryCount num_entries, CleanContext cleanCtx) = 0;
size_t getArraySize() const { return _arraySize; }
- virtual void onActive(uint32_t bufferId, std::atomic<ElemCount>* usedElems, std::atomic<ElemCount>* deadElems, void* buffer);
- void onHold(uint32_t buffer_id, const std::atomic<ElemCount>* usedElems, const std::atomic<ElemCount>* deadElems);
- virtual void onFree(ElemCount usedElems);
- void resume_primary_buffer(uint32_t buffer_id, std::atomic<ElemCount>* used_elems, std::atomic<ElemCount>* dead_elems);
+ virtual void on_active(uint32_t bufferId, std::atomic<EntryCount>* used_entries, std::atomic<EntryCount>* dead_entries, void* buffer);
+ void on_hold(uint32_t buffer_id, const std::atomic<EntryCount>* used_entries, const std::atomic<EntryCount>* dead_entries);
+ virtual void on_free(EntryCount used_entries);
+ void resume_primary_buffer(uint32_t buffer_id, std::atomic<EntryCount>* used_entries, std::atomic<EntryCount>* dead_entries);
virtual const alloc::MemoryAllocator* get_memory_allocator() const;
/**
- * Calculate number of arrays to allocate for new buffer given how many elements are needed.
+ * Calculate number of entries to allocate for new buffer given how many free entries are needed.
*/
- virtual size_t calcArraysToAlloc(uint32_t bufferId, ElemCount elementsNeeded, bool resizing) const;
+ virtual size_t calc_entries_to_alloc(uint32_t bufferId, EntryCount free_entries_needed, bool resizing) const;
- void clampMaxArrays(uint32_t max_entries);
+ void clamp_max_entries(uint32_t max_entries);
uint32_t get_active_buffers_count() const { return _active_buffers.size(); }
const std::vector<uint32_t>& get_active_buffers() const noexcept { return _active_buffers; }
- size_t getMaxArrays() const { return _max_entries; }
+ size_t get_max_entries() const { return _max_entries; }
uint32_t get_scaled_num_entries_for_new_buffer() const;
uint32_t get_num_entries_for_new_buffer() const noexcept { return _num_entries_for_new_buffer; }
protected:
struct BufferCounts {
- ElemCount used_elems;
- ElemCount dead_elems;
- BufferCounts() : used_elems(0), dead_elems(0) {}
- BufferCounts(ElemCount used_elems_in, ElemCount dead_elems_in)
- : used_elems(used_elems_in), dead_elems(dead_elems_in)
+ EntryCount used_entries;
+ EntryCount dead_entries;
+ BufferCounts() : used_entries(0), dead_entries(0) {}
+ BufferCounts(EntryCount used_entries_in, EntryCount dead_entries_in)
+ : used_entries(used_entries_in), dead_entries(dead_entries_in)
{}
};
@@ -94,33 +93,33 @@ protected:
*/
class AggregatedBufferCounts {
private:
- struct Element {
- const std::atomic<ElemCount>* used_ptr;
- const std::atomic<ElemCount>* dead_ptr;
- Element() noexcept : used_ptr(nullptr), dead_ptr(nullptr) {}
- Element(const std::atomic<ElemCount>* used_ptr_in, const std::atomic<ElemCount>* dead_ptr_in) noexcept
+ struct ActiveBufferCounts {
+ const std::atomic<EntryCount>* used_ptr;
+ const std::atomic<EntryCount>* dead_ptr;
+ ActiveBufferCounts() noexcept : used_ptr(nullptr), dead_ptr(nullptr) {}
+ ActiveBufferCounts(const std::atomic<EntryCount>* used_ptr_in, const std::atomic<EntryCount>* dead_ptr_in) noexcept
: used_ptr(used_ptr_in), dead_ptr(dead_ptr_in)
{}
};
- std::vector<Element> _counts;
+ std::vector<ActiveBufferCounts> _counts;
public:
AggregatedBufferCounts();
- void add_buffer(const std::atomic<ElemCount>* used_elems, const std::atomic<ElemCount>* dead_elems);
- void remove_buffer(const std::atomic<ElemCount>* used_elems, const std::atomic<ElemCount>* dead_elems);
+ void add_buffer(const std::atomic<EntryCount>* used_entries, const std::atomic<EntryCount>* dead_entries);
+ void remove_buffer(const std::atomic<EntryCount>* used_entries, const std::atomic<EntryCount>* dead_entries);
BufferCounts last_buffer() const;
BufferCounts all_buffers() const;
bool empty() const { return _counts.empty(); }
};
uint32_t _arraySize; // Number of elements in an allocation unit
- uint32_t _min_entries; // Minimum number of arrays to allocate in a buffer
- uint32_t _max_entries; // Maximum number of arrays to allocate in a buffer
- // Number of arrays needed before allocating a new buffer instead of just resizing the first one
+ uint32_t _min_entries; // Minimum number of entries to allocate in a buffer
+ uint32_t _max_entries; // Maximum number of entries to allocate in a buffer
+ // Number of entries needed before allocating a new buffer instead of just resizing the first one
uint32_t _num_entries_for_new_buffer;
float _allocGrowFactor;
uint32_t _holdBuffers;
- size_t _holdUsedElems; // Number of used elements in all held buffers for this type.
+ size_t _hold_used_entries; // Number of used entries in all held buffers for this type.
AggregatedBufferCounts _aggr_counts;
std::vector<uint32_t> _active_buffers;
};
@@ -147,11 +146,11 @@ public:
BufferType(uint32_t arraySize, uint32_t min_entries, uint32_t max_entries,
uint32_t num_entries_for_new_buffer, float allocGrowFactor) noexcept;
~BufferType() override;
- void destroyElements(void *buffer, ElemCount numElems) override;
- void fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems) override;
- void initializeReservedElements(void *buffer, ElemCount reservedElements) override;
- void cleanHold(void *buffer, size_t offset, ElemCount numElems, CleanContext cleanCxt) override;
- size_t elementSize() const override { return sizeof(ElemType); }
+ void destroy_entries(void *buffer, EntryCount num_entries) override;
+ void fallback_copy(void *newBuffer, const void *oldBuffer, EntryCount num_entries) override;
+ void initialize_reserved_entries(void *buffer, EntryCount reserved_entries) override;
+ void clean_hold(void *buffer, size_t offset, EntryCount num_entries, CleanContext cleanCxt) override;
+ size_t entry_size() const override { return sizeof(ElemType) * _arraySize; }
};
extern template class BufferType<char>;
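
Two API-level points stand out in this header: the count type narrows from 64-bit ElemCount to 32-bit EntryCount (an entry index has to fit in an EntryRef offset anyway), and elementSize() is replaced by entry_size(), which for BufferType<ElemT> is sizeof(ElemType) * _arraySize, i.e. the bytes of one whole allocation unit. A quick check of that relationship with illustrative numbers:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // For a hypothetical BufferType<int32_t> with arraySize == 6:
        constexpr std::size_t array_size = 6;
        constexpr std::size_t element_size = sizeof(int32_t);           // old elementSize()
        constexpr std::size_t entry_size = element_size * array_size;   // new entry_size()
        static_assert(entry_size == 24, "one entry spans the whole allocation unit");
        std::printf("entry_size=%zu\n", entry_size);
    }
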
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_type.hpp b/vespalib/src/vespa/vespalib/datastore/buffer_type.hpp
index 304b558dac2..60acca5ff39 100644
--- a/vespalib/src/vespa/vespalib/datastore/buffer_type.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_type.hpp
@@ -22,10 +22,11 @@ BufferType<ElemT, EmptyT>::~BufferType() = default;
template <typename ElemT, typename EmptyT>
void
-BufferType<ElemT, EmptyT>::destroyElements(void *buffer, ElemCount numElems)
+BufferType<ElemT, EmptyT>::destroy_entries(void *buffer, EntryCount num_entries)
{
+ auto num_elems = num_entries * getArraySize();
ElemType *e = static_cast<ElemType *>(buffer);
- for (size_t j = numElems; j != 0; --j) {
+ for (size_t j = num_elems; j != 0; --j) {
e->~ElemType();
++e;
}
@@ -33,11 +34,12 @@ BufferType<ElemT, EmptyT>::destroyElements(void *buffer, ElemCount numElems)
template <typename ElemT, typename EmptyT>
void
-BufferType<ElemT, EmptyT>::fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems)
+BufferType<ElemT, EmptyT>::fallback_copy(void *newBuffer, const void *oldBuffer, EntryCount num_entries)
{
+ auto num_elems = num_entries * getArraySize();
ElemType *d = static_cast<ElemType *>(newBuffer);
const ElemType *s = static_cast<const ElemType *>(oldBuffer);
- for (size_t j = numElems; j != 0; --j) {
+ for (size_t j = num_elems; j != 0; --j) {
new (static_cast<void *>(d)) ElemType(*s);
++s;
++d;
@@ -46,11 +48,12 @@ BufferType<ElemT, EmptyT>::fallbackCopy(void *newBuffer, const void *oldBuffer,
template <typename ElemT, typename EmptyT>
void
-BufferType<ElemT, EmptyT>::initializeReservedElements(void *buffer, ElemCount reservedElems)
+BufferType<ElemT, EmptyT>::initialize_reserved_entries(void *buffer, EntryCount reserved_entries)
{
+ auto reserved_elems = reserved_entries * getArraySize();
ElemType *e = static_cast<ElemType *>(buffer);
const auto& empty = empty_entry();
- for (size_t j = reservedElems; j != 0; --j) {
+ for (size_t j = reserved_elems; j != 0; --j) {
new (static_cast<void *>(e)) ElemType(empty);
++e;
}
@@ -58,11 +61,12 @@ BufferType<ElemT, EmptyT>::initializeReservedElements(void *buffer, ElemCount re
template <typename ElemT, typename EmptyT>
void
-BufferType<ElemT, EmptyT>::cleanHold(void *buffer, size_t offset, ElemCount numElems, CleanContext)
+BufferType<ElemT, EmptyT>::clean_hold(void *buffer, size_t offset, EntryCount num_entries, CleanContext)
{
- ElemType *e = static_cast<ElemType *>(buffer) + offset;
+ auto num_elems = num_entries * getArraySize();
+ ElemType *e = static_cast<ElemType *>(buffer) + offset * getArraySize();
const auto& empty = empty_entry();
- for (size_t j = numElems; j != 0; --j) {
+ for (size_t j = num_elems; j != 0; --j) {
*e = empty;
++e;
}
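
The element-level loops remain, but their lengths and offsets are now derived from entries: destroy_entries, fallback_copy and initialize_reserved_entries touch num_entries * getArraySize() elements, and clean_hold translates an offset measured in entries to an element pointer by multiplying with getArraySize(). A small self-contained loop showing that translation, using a plain vector instead of the datastore buffers:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
        const std::size_t array_size = 4;                  // elements per entry
        std::vector<int> buffer(8 * array_size, 7);        // 8 entries worth of elements

        std::size_t offset_entries = 2, num_entries = 3;   // clean entries [2, 5)
        int* e = buffer.data() + offset_entries * array_size;
        for (std::size_t j = num_entries * array_size; j != 0; --j) {
            *e = 0;                                        // analogous to *e = empty_entry()
            ++e;
        }
        assert(buffer[2 * array_size] == 0 && buffer[5 * array_size] == 7);
    }
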
diff --git a/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp b/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
index 47fba1ef697..10617289018 100644
--- a/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
@@ -12,13 +12,13 @@ namespace vespalib::datastore {
BufferState::BufferState()
: _stats(),
- _free_list(_stats.dead_elems_ref()),
+ _free_list(_stats.dead_entries_ref()),
_typeHandler(nullptr),
_buffer(Alloc::alloc(0, MemoryAllocator::HUGEPAGE_SIZE)),
_arraySize(0),
_typeId(0),
_state(State::FREE),
- _disableElemHoldList(false),
+ _disable_entry_hold_list(false),
_compacting(false)
{
}
@@ -28,15 +28,15 @@ BufferState::~BufferState()
assert(getState() == State::FREE);
assert(!_free_list.enabled());
assert(_free_list.empty());
- assert(_stats.hold_elems() == 0);
+ assert(_stats.hold_entries() == 0);
}
namespace {
struct AllocResult {
- size_t elements;
+ size_t entries;
size_t bytes;
- AllocResult(size_t elements_, size_t bytes_) : elements(elements_), bytes(bytes_) {}
+ AllocResult(size_t entries_, size_t bytes_) : entries(entries_), bytes(bytes_) {}
};
size_t
@@ -57,30 +57,30 @@ roundUpToMatchAllocator(size_t sz)
}
AllocResult
-calcAllocation(uint32_t bufferId,
- BufferTypeBase &typeHandler,
- size_t elementsNeeded,
- bool resizing)
+calc_allocation(uint32_t bufferId,
+ BufferTypeBase &typeHandler,
+ size_t free_entries_needed,
+ bool resizing)
{
- size_t allocArrays = typeHandler.calcArraysToAlloc(bufferId, elementsNeeded, resizing);
- size_t allocElements = allocArrays * typeHandler.getArraySize();
- size_t allocBytes = roundUpToMatchAllocator(allocElements * typeHandler.elementSize());
- size_t maxAllocBytes = typeHandler.getMaxArrays() * typeHandler.getArraySize() * typeHandler.elementSize();
+ size_t alloc_entries = typeHandler.calc_entries_to_alloc(bufferId, free_entries_needed, resizing);
+ size_t entry_size = typeHandler.entry_size();
+ size_t allocBytes = roundUpToMatchAllocator(alloc_entries * entry_size);
+ size_t maxAllocBytes = typeHandler.get_max_entries() * entry_size;
if (allocBytes > maxAllocBytes) {
// Ensure that allocated bytes does not exceed the maximum handled by this type.
allocBytes = maxAllocBytes;
}
- size_t adjustedAllocElements = (allocBytes / typeHandler.elementSize());
- return AllocResult(adjustedAllocElements, allocBytes);
+ size_t adjusted_alloc_entries = allocBytes / entry_size;
+ return AllocResult(adjusted_alloc_entries, allocBytes);
}
}
void
-BufferState::onActive(uint32_t bufferId, uint32_t typeId,
- BufferTypeBase *typeHandler,
- size_t elementsNeeded,
- std::atomic<void*>& buffer)
+BufferState::on_active(uint32_t bufferId, uint32_t typeId,
+ BufferTypeBase *typeHandler,
+ size_t free_entries_needed,
+ std::atomic<void*>& buffer)
{
assert(buffer.load(std::memory_order_relaxed) == nullptr);
assert(_buffer.get() == nullptr);
@@ -88,30 +88,30 @@ BufferState::onActive(uint32_t bufferId, uint32_t typeId,
assert(_typeHandler == nullptr);
assert(capacity() == 0);
assert(size() == 0);
- assert(_stats.dead_elems() == 0u);
- assert(_stats.hold_elems() == 0);
+ assert(_stats.dead_entries() == 0u);
+ assert(_stats.hold_entries() == 0);
assert(_stats.extra_used_bytes() == 0);
assert(_stats.extra_hold_bytes() == 0);
assert(_free_list.empty());
- size_t reservedElements = typeHandler->getReservedElements(bufferId);
- (void) reservedElements;
- AllocResult alloc = calcAllocation(bufferId, *typeHandler, elementsNeeded, false);
- assert(alloc.elements >= reservedElements + elementsNeeded);
+ size_t reserved_entries = typeHandler->get_reserved_entries(bufferId);
+ (void) reserved_entries;
+ AllocResult alloc = calc_allocation(bufferId, *typeHandler, free_entries_needed, false);
+ assert(alloc.entries >= reserved_entries + free_entries_needed);
auto allocator = typeHandler->get_memory_allocator();
_buffer = (allocator != nullptr) ? Alloc::alloc_with_allocator(allocator) : Alloc::alloc(0, MemoryAllocator::HUGEPAGE_SIZE);
_buffer.create(alloc.bytes).swap(_buffer);
- assert(_buffer.get() != nullptr || alloc.elements == 0u);
+ assert(_buffer.get() != nullptr || alloc.entries == 0u);
buffer.store(_buffer.get(), std::memory_order_release);
- _stats.set_alloc_elems(alloc.elements);
+ _stats.set_alloc_entries(alloc.entries);
_typeHandler.store(typeHandler, std::memory_order_release);
assert(typeId <= std::numeric_limits<uint16_t>::max());
_typeId = typeId;
_arraySize = typeHandler->getArraySize();
_free_list.set_array_size(_arraySize);
_state.store(State::ACTIVE, std::memory_order_release);
- typeHandler->onActive(bufferId, &_stats.used_elems_ref(), &_stats.dead_elems_ref(),
- buffer.load(std::memory_order::relaxed));
+ typeHandler->on_active(bufferId, &_stats.used_entries_ref(), &_stats.dead_entries_ref(),
+ buffer.load(std::memory_order::relaxed));
}
void
@@ -121,11 +121,11 @@ BufferState::onHold(uint32_t buffer_id)
assert(getTypeHandler() != nullptr);
_state.store(State::HOLD, std::memory_order_release);
_compacting = false;
- assert(_stats.dead_elems() <= size());
- assert(_stats.hold_elems() <= (size() - _stats.dead_elems()));
- _stats.set_dead_elems(0);
- _stats.set_hold_elems(size());
- getTypeHandler()->onHold(buffer_id, &_stats.used_elems_ref(), &_stats.dead_elems_ref());
+ assert(_stats.dead_entries() <= size());
+ assert(_stats.hold_entries() <= (size() - _stats.dead_entries()));
+ _stats.set_dead_entries(0);
+ _stats.set_hold_entries(size());
+ getTypeHandler()->on_hold(buffer_id, &_stats.used_entries_ref(), &_stats.dead_entries_ref());
_free_list.disable();
}
@@ -135,11 +135,11 @@ BufferState::onFree(std::atomic<void*>& buffer)
assert(buffer.load(std::memory_order_relaxed) == _buffer.get());
assert(getState() == State::HOLD);
assert(_typeHandler != nullptr);
- assert(_stats.dead_elems() <= size());
- assert(_stats.hold_elems() == (size() - _stats.dead_elems()));
- getTypeHandler()->destroyElements(buffer, size());
+ assert(_stats.dead_entries() <= size());
+ assert(_stats.hold_entries() == (size() - _stats.dead_entries()));
+ getTypeHandler()->destroy_entries(buffer, size());
Alloc::alloc().swap(_buffer);
- getTypeHandler()->onFree(size());
+ getTypeHandler()->on_free(size());
buffer.store(nullptr, std::memory_order_release);
_stats.clear();
_state.store(State::FREE, std::memory_order_release);
@@ -148,7 +148,7 @@ BufferState::onFree(std::atomic<void*>& buffer)
_free_list.set_array_size(_arraySize);
assert(!_free_list.enabled());
assert(_free_list.empty());
- _disableElemHoldList = false;
+ _disable_entry_hold_list = false;
}
@@ -171,67 +171,67 @@ BufferState::dropBuffer(uint32_t buffer_id, std::atomic<void*>& buffer)
}
void
-BufferState::disable_elem_hold_list()
+BufferState::disable_entry_hold_list()
{
- _disableElemHoldList = true;
+ _disable_entry_hold_list = true;
}
bool
-BufferState::hold_elems(size_t num_elems, size_t extra_bytes)
+BufferState::hold_entries(size_t num_entries, size_t extra_bytes)
{
assert(isActive());
- if (_disableElemHoldList) {
+ if (_disable_entry_hold_list) {
// The elements are directly marked as dead as they are not put on hold.
- _stats.inc_dead_elems(num_elems);
+ _stats.inc_dead_entries(num_entries);
return true;
}
- _stats.inc_hold_elems(num_elems);
+ _stats.inc_hold_entries(num_entries);
_stats.inc_extra_hold_bytes(extra_bytes);
return false;
}
void
-BufferState::free_elems(EntryRef ref, size_t num_elems, size_t ref_offset)
+BufferState::free_entries(EntryRef ref, size_t num_entries, size_t ref_offset)
{
if (isActive()) {
- if (_free_list.enabled() && (num_elems == getArraySize())) {
+ if (_free_list.enabled() && (num_entries == 1)) {
_free_list.push_entry(ref);
}
} else {
assert(isOnHold());
}
- _stats.inc_dead_elems(num_elems);
- _stats.dec_hold_elems(num_elems);
- getTypeHandler()->cleanHold(_buffer.get(), (ref_offset * _arraySize), num_elems,
- BufferTypeBase::CleanContext(_stats.extra_used_bytes_ref(),
- _stats.extra_hold_bytes_ref()));
+ _stats.inc_dead_entries(num_entries);
+ _stats.dec_hold_entries(num_entries);
+ getTypeHandler()->clean_hold(_buffer.get(), ref_offset, num_entries,
+ BufferTypeBase::CleanContext(_stats.extra_used_bytes_ref(),
+ _stats.extra_hold_bytes_ref()));
}
void
-BufferState::fallbackResize(uint32_t bufferId,
- size_t elementsNeeded,
+BufferState::fallback_resize(uint32_t bufferId,
+ size_t free_entries_needed,
std::atomic<void*>& buffer,
Alloc &holdBuffer)
{
assert(getState() == State::ACTIVE);
assert(_typeHandler != nullptr);
assert(holdBuffer.get() == nullptr);
- AllocResult alloc = calcAllocation(bufferId, *_typeHandler, elementsNeeded, true);
- assert(alloc.elements >= size() + elementsNeeded);
- assert(alloc.elements > capacity());
+ AllocResult alloc = calc_allocation(bufferId, *_typeHandler, free_entries_needed, true);
+ assert(alloc.entries >= size() + free_entries_needed);
+ assert(alloc.entries > capacity());
Alloc newBuffer = _buffer.create(alloc.bytes);
- getTypeHandler()->fallbackCopy(newBuffer.get(), buffer.load(std::memory_order_relaxed), size());
+ getTypeHandler()->fallback_copy(newBuffer.get(), buffer.load(std::memory_order_relaxed), size());
holdBuffer.swap(_buffer);
std::atomic_thread_fence(std::memory_order_release);
_buffer = std::move(newBuffer);
buffer.store(_buffer.get(), std::memory_order_release);
- _stats.set_alloc_elems(alloc.elements);
+ _stats.set_alloc_entries(alloc.entries);
}
void
BufferState::resume_primary_buffer(uint32_t buffer_id)
{
- getTypeHandler()->resume_primary_buffer(buffer_id, &_stats.used_elems_ref(), &_stats.dead_elems_ref());
+ getTypeHandler()->resume_primary_buffer(buffer_id, &_stats.used_entries_ref(), &_stats.dead_entries_ref());
}
}
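For orientation, calc_allocation above now works entirely in entries: the requested entry count is turned into bytes via entry_size(), rounded up to the allocator's granularity, capped at get_max_entries() * entry_size(), and converted back to whole entries. A rough standalone rendering of that flow; the growth policy (calc_entries_to_alloc) is omitted and the 4 KiB rounding below is only a stand-in for roundUpToMatchAllocator:

#include <algorithm>
#include <cassert>
#include <cstddef>

// Stand-in rounding rule (4 KiB pages); the real allocator matching differs.
size_t round_up_4k(size_t sz) { return (sz + 4095) & ~size_t(4095); }

struct ToyAlloc { size_t entries; size_t bytes; };

ToyAlloc calc_allocation_sketch(size_t wanted_entries, size_t entry_size, size_t max_entries) {
    size_t bytes = round_up_4k(wanted_entries * entry_size);
    bytes = std::min(bytes, max_entries * entry_size);   // never exceed the type's maximum
    return {bytes / entry_size, bytes};                  // rounding slack becomes extra whole entries
}

int main() {
    ToyAlloc a = calc_allocation_sketch(1000, 8, 1u << 20);
    assert(a.bytes == 8192);      // 8000 bytes rounded up to one extra page
    assert(a.entries == 1024);    // the slack yields 24 additional entries
    return 0;
}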
diff --git a/vespalib/src/vespa/vespalib/datastore/bufferstate.h b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
index 5b98099ed69..299219c168b 100644
--- a/vespalib/src/vespa/vespalib/datastore/bufferstate.h
+++ b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
@@ -23,8 +23,8 @@ namespace vespalib::datastore {
* It is kept in this state until all reader threads are no longer accessing the buffer.
* Finally, it transitions back to FREE via onFree() and memory is de-allocated.
*
- * This class also supports use of free lists, where previously allocated elements in the buffer can be re-used.
- * First the element is put on hold, then on the free list (counted as dead) to be re-used.
+ * This class also supports use of free lists, where previously allocated entries in the buffer can be re-used.
+ * First the entry is put on hold, then on the free list (counted as dead) to be re-used.
*/
class BufferState
{
@@ -45,7 +45,7 @@ private:
uint32_t _arraySize;
uint16_t _typeId;
std::atomic<State> _state;
- bool _disableElemHoldList : 1;
+ bool _disable_entry_hold_list : 1;
bool _compacting : 1;
public:
@@ -62,14 +62,14 @@ public:
/**
* Transition from FREE to ACTIVE state.
*
- * @param bufferId Id of buffer to be active.
- * @param typeId Registered data type id for buffer.
- * @param typeHandler Type handler for registered data type.
- * @param elementsNeeded Number of elements needed to be free in the memory allocated.
- * @param buffer Start of allocated buffer return value.
+ * @param bufferId Id of buffer to be active.
+ * @param typeId Registered data type id for buffer.
+ * @param typeHandler Type handler for registered data type.
+     * @param free_entries_needed Number of entries needed to be free in the memory allocated.
+ * @param buffer Start of allocated buffer return value.
*/
- void onActive(uint32_t bufferId, uint32_t typeId, BufferTypeBase *typeHandler,
- size_t elementsNeeded, std::atomic<void*>& buffer);
+ void on_active(uint32_t bufferId, uint32_t typeId, BufferTypeBase *typeHandler,
+ size_t free_entries_needed, std::atomic<void*>& buffer);
/**
* Transition from ACTIVE to HOLD state.
@@ -82,24 +82,24 @@ public:
void onFree(std::atomic<void*>& buffer);
/**
- * Disable hold of elements, just mark elements as dead without cleanup.
+ * Disable hold of entries, just mark entries as dead without cleanup.
* Typically used when tearing down data structure in a controlled manner.
*/
- void disable_elem_hold_list();
+ void disable_entry_hold_list();
/**
* Update stats to reflect that the given elements are put on hold.
- * Returns true if element hold list is disabled for this buffer.
+ * Returns true if entry hold list is disabled for this buffer.
*/
- bool hold_elems(size_t num_elems, size_t extra_bytes);
+ bool hold_entries(size_t num_entries, size_t extra_bytes);
/**
- * Free the given elements and update stats accordingly.
+ * Free the given entries and update stats accordingly.
*
* The given entry ref is put on the free list (if enabled).
- * Hold cleaning of elements is executed on the buffer type.
+ * Hold cleaning of entries is executed on the buffer type.
*/
- void free_elems(EntryRef ref, size_t num_elems, size_t ref_offset);
+ void free_entries(EntryRef ref, size_t num_entries, size_t ref_offset);
BufferStats& stats() { return _stats; }
const BufferStats& stats() const { return _stats; }
@@ -115,8 +115,7 @@ public:
uint32_t getArraySize() const { return _arraySize; }
bool getCompacting() const { return _compacting; }
void setCompacting() { _compacting = true; }
- uint32_t get_used_arrays() const noexcept { return size() / _arraySize; }
- void fallbackResize(uint32_t bufferId, size_t elementsNeeded, std::atomic<void*>& buffer, Alloc &holdBuffer);
+ void fallback_resize(uint32_t bufferId, size_t free_entries_needed, std::atomic<void*>& buffer, Alloc &holdBuffer);
bool isActive(uint32_t typeId) const {
return (isActive() && (_typeId == typeId));
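The documentation hunks above describe entry-level hold bookkeeping: hold_entries() marks entries dead immediately and returns true when the hold list is disabled, otherwise it counts them as held and returns false, and free_entries() later moves held entries to dead (single-entry frees can also go on the free list). A toy model of just that accounting, assuming the contract stated in the comments and not the vespalib BufferState implementation:

#include <cassert>
#include <cstddef>

struct ToyBufferState {
    size_t dead_entries = 0;
    size_t held_entries = 0;
    bool disable_entry_hold_list = false;

    // Returns true when the hold list is disabled and the entries are
    // immediately counted as dead, mirroring the documented behaviour.
    bool hold_entries(size_t num_entries) {
        if (disable_entry_hold_list) {
            dead_entries += num_entries;
            return true;
        }
        held_entries += num_entries;
        return false;
    }

    // Freeing held entries moves them from "held" to "dead".
    void free_entries(size_t num_entries) {
        held_entries -= num_entries;
        dead_entries += num_entries;
    }
};

int main() {
    ToyBufferState s;
    assert(!s.hold_entries(3) && s.held_entries == 3);
    s.free_entries(3);
    assert(s.held_entries == 0 && s.dead_entries == 3);
    s.disable_entry_hold_list = true;
    assert(s.hold_entries(2) && s.dead_entries == 5);
    return 0;
}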
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.h b/vespalib/src/vespa/vespalib/datastore/datastore.h
index 4fcc154f944..f81348ce287 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.h
@@ -27,7 +27,7 @@ template <typename RefT = EntryRefT<22> >
class DataStoreT : public DataStoreBase
{
private:
- void free_elem_internal(EntryRef ref, size_t numElems);
+ void free_entry_internal(EntryRef ref, size_t num_entries);
public:
using RefType = RefT;
@@ -38,12 +38,12 @@ public:
~DataStoreT() override;
/**
- * Hold element(s).
+ * Hold entries.
*/
- void holdElem(EntryRef ref, size_t numElems) {
- holdElem(ref, numElems, 0);
- }
- void holdElem(EntryRef ref, size_t numElems, size_t extraBytes);
+ void hold_entry(EntryRef ref) { hold_entries(ref, 1, 0); }
+ void hold_entry(EntryRef ref, size_t extra_bytes) { hold_entries(ref, 1, extra_bytes); }
+ void hold_entries(EntryRef ref, size_t num_entries) { hold_entries(ref, num_entries, 0); }
+ void hold_entries(EntryRef ref, size_t num_entries, size_t extraBytes);
void reclaim_entry_refs(generation_t oldest_used_gen) override;
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.hpp b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
index bfb63954875..b21a5954eee 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
@@ -22,21 +22,21 @@ DataStoreT<RefT>::~DataStoreT() = default;
template <typename RefT>
void
-DataStoreT<RefT>::free_elem_internal(EntryRef ref, size_t numElems)
+DataStoreT<RefT>::free_entry_internal(EntryRef ref, size_t num_entries)
{
RefType intRef(ref);
BufferState &state = getBufferState(intRef.bufferId());
- state.free_elems(ref, numElems, intRef.offset());
+ state.free_entries(ref, num_entries, intRef.offset());
}
template <typename RefT>
void
-DataStoreT<RefT>::holdElem(EntryRef ref, size_t numElems, size_t extraBytes)
+DataStoreT<RefT>::hold_entries(EntryRef ref, size_t num_entries, size_t extraBytes)
{
RefType intRef(ref);
BufferState &state = getBufferState(intRef.bufferId());
- if (!state.hold_elems(numElems, extraBytes)) {
- _entry_ref_hold_list.insert({ref, numElems});
+ if (!state.hold_entries(num_entries, extraBytes)) {
+ _entry_ref_hold_list.insert({ref, num_entries});
}
}
@@ -45,7 +45,7 @@ void
DataStoreT<RefT>::reclaim_entry_refs(generation_t oldest_used_gen)
{
_entry_ref_hold_list.reclaim(oldest_used_gen, [this](const auto& elem) {
- free_elem_internal(elem.ref, elem.num_elems);
+ free_entry_internal(elem.ref, elem.num_entries);
});
}
@@ -54,7 +54,7 @@ void
DataStoreT<RefT>::reclaim_all_entry_refs()
{
_entry_ref_hold_list.reclaim_all([this](const auto& elem) {
- free_elem_internal(elem.ref, elem.num_elems);
+ free_entry_internal(elem.ref, elem.num_entries);
});
}
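DataStoreT::hold_entries() above parks {ref, num_entries} on _entry_ref_hold_list when the buffer does not mark the entries dead right away, and reclaim_entry_refs(oldest_used_gen) frees them once no reader generation can still observe them. A toy generation-tracked hold list, assuming that reading of the semantics (the real EntryRefHoldList is not reproduced here):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <vector>

struct ToyHoldList {
    struct Held { uint32_t ref; size_t num_entries; uint64_t held_gen; };
    std::deque<Held> held;
    uint64_t current_gen = 0;

    void insert(uint32_t ref, size_t num_entries) {
        held.push_back({ref, num_entries, current_gen});
    }

    // Free everything held in a generation older than the oldest one still in use.
    template <typename FreeFn>
    void reclaim(uint64_t oldest_used_gen, FreeFn free_fn) {
        while (!held.empty() && held.front().held_gen < oldest_used_gen) {
            free_fn(held.front().ref, held.front().num_entries);
            held.pop_front();
        }
    }
};

int main() {
    ToyHoldList list;
    std::vector<uint32_t> freed;
    list.insert(7, 1);
    list.current_gen = 1;
    list.insert(9, 2);
    list.reclaim(1, [&](uint32_t ref, size_t) { freed.push_back(ref); });
    assert(freed == std::vector<uint32_t>{7});    // only the gen-0 hold is reclaimable
    list.reclaim(2, [&](uint32_t ref, size_t) { freed.push_back(ref); });
    assert(freed.size() == 2);
    return 0;
}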
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
index efb7645412e..75ffe855a32 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
@@ -40,18 +40,18 @@ constexpr size_t TOO_DEAD_SLACK = 0x4000u;
bool
primary_buffer_too_dead(const BufferState &state)
{
- size_t deadElems = state.stats().dead_elems();
- size_t deadBytes = deadElems * state.getArraySize();
- return ((deadBytes >= TOO_DEAD_SLACK) && (deadElems * 2 >= state.size()));
+ size_t dead_entries = state.stats().dead_entries();
+ size_t deadBytes = dead_entries * state.getTypeHandler()->entry_size();
+ return ((deadBytes >= TOO_DEAD_SLACK) && (dead_entries * 2 >= state.size()));
}
}
-DataStoreBase::FallbackHold::FallbackHold(size_t bytesSize, BufferState::Alloc &&buffer, size_t usedElems,
+DataStoreBase::FallbackHold::FallbackHold(size_t bytesSize, BufferState::Alloc &&buffer, size_t used_entries,
BufferTypeBase *typeHandler, uint32_t typeId)
: GenerationHeldBase(bytesSize),
_buffer(std::move(buffer)),
- _usedElems(usedElems),
+ _used_entries(used_entries),
_typeHandler(typeHandler),
_typeId(typeId)
{
@@ -59,7 +59,7 @@ DataStoreBase::FallbackHold::FallbackHold(size_t bytesSize, BufferState::Alloc &
DataStoreBase::FallbackHold::~FallbackHold()
{
- _typeHandler->destroyElements(_buffer.get(), _usedElems);
+ _typeHandler->destroy_entries(_buffer.get(), _used_entries);
}
class DataStoreBase::BufferHold : public GenerationHeldBase {
@@ -94,7 +94,7 @@ DataStoreBase::DataStoreBase(uint32_t numBuffers, uint32_t offset_bits, size_t m
_hold_buffer_count(0u),
_offset_bits(offset_bits),
_freeListsEnabled(false),
- _disableElemHoldList(false),
+ _disable_entry_hold_list(false),
_initializing(false)
{
}
@@ -146,8 +146,7 @@ DataStoreBase::consider_grow_active_buffer(uint32_t type_id, size_t entries_need
if (checked_active_buffers < min_active_buffers) {
return false;
}
- auto array_size = type_handler->getArraySize();
- if (entries_needed * array_size + min_used > type_handler->getMaxArrays() * array_size) {
+ if (entries_needed + min_used > type_handler->get_max_entries()) {
return false;
}
if (min_buffer_id != buffer_id) {
@@ -184,14 +183,12 @@ void
DataStoreBase::switch_or_grow_primary_buffer(uint32_t typeId, size_t entries_needed)
{
auto typeHandler = _typeHandlers[typeId];
- uint32_t arraySize = typeHandler->getArraySize();
size_t num_entries_for_new_buffer = typeHandler->get_scaled_num_entries_for_new_buffer();
- size_t numElemsForNewBuffer = num_entries_for_new_buffer * arraySize;
uint32_t bufferId = primary_buffer_id(typeId);
- if (entries_needed * arraySize + getBufferState(bufferId).size() >= numElemsForNewBuffer) {
+ if (entries_needed + getBufferState(bufferId).size() >= num_entries_for_new_buffer) {
if (consider_grow_active_buffer(typeId, entries_needed)) {
bufferId = primary_buffer_id(typeId);
- if (entries_needed * arraySize > getBufferState(bufferId).remaining()) {
+ if (entries_needed > getBufferState(bufferId).remaining()) {
fallback_resize(bufferId, entries_needed);
}
} else {
@@ -219,7 +216,7 @@ DataStoreBase::addType(BufferTypeBase *typeHandler)
{
uint32_t typeId = _primary_buffer_ids.size();
assert(typeId == _typeHandlers.size());
- typeHandler->clampMaxArrays(_max_entries);
+ typeHandler->clamp_max_entries(_max_entries);
_primary_buffer_ids.push_back(0);
_typeHandlers.push_back(typeHandler);
_free_lists.emplace_back();
@@ -328,12 +325,12 @@ DataStoreBase::disableFreeLists()
}
void
-DataStoreBase::disableElemHoldList()
+DataStoreBase::disable_entry_hold_list()
{
for_each_buffer([](BufferState & state) {
- if (!state.isFree()) state.disable_elem_hold_list();
+ if (!state.isFree()) state.disable_entry_hold_list();
});
- _disableElemHoldList = true;
+ _disable_entry_hold_list = true;
}
MemoryStats
@@ -351,13 +348,13 @@ DataStoreBase::getMemStats() const
if ((state == BufferState::State::FREE) || (typeHandler == nullptr)) {
++stats._freeBuffers;
} else if (state == BufferState::State::ACTIVE) {
- size_t elementSize = typeHandler->elementSize();
+ size_t entry_size = typeHandler->entry_size();
++stats._activeBuffers;
- bState->stats().add_to_mem_stats(elementSize, stats);
+ bState->stats().add_to_mem_stats(entry_size, stats);
} else if (state == BufferState::State::HOLD) {
- size_t elementSize = typeHandler->elementSize();
+ size_t entry_size = typeHandler->entry_size();
++stats._holdBuffers;
- bState->stats().add_to_mem_stats(elementSize, stats);
+ bState->stats().add_to_mem_stats(entry_size, stats);
} else {
LOG_ABORT("should not be reached");
}
@@ -373,28 +370,26 @@ vespalib::AddressSpace
DataStoreBase::getAddressSpaceUsage() const
{
uint32_t buffer_id_limit = get_bufferid_limit_acquire();
- size_t usedArrays = 0;
- size_t deadArrays = 0;
- size_t limitArrays = size_t(_max_entries) * (getMaxNumBuffers() - buffer_id_limit);
+ size_t used_entries = 0;
+ size_t dead_entries = 0;
+ size_t limit_entries = size_t(_max_entries) * (getMaxNumBuffers() - buffer_id_limit);
for (uint32_t bufferId = 0; bufferId < buffer_id_limit; ++bufferId) {
const BufferState * bState = _buffers[bufferId].get_state_acquire();
assert(bState != nullptr);
if (bState->isFree()) {
- limitArrays += _max_entries;
+ limit_entries += _max_entries;
} else if (bState->isActive()) {
- uint32_t arraySize = bState->getArraySize();
- usedArrays += bState->size() / arraySize;
- deadArrays += bState->stats().dead_elems() / arraySize;
- limitArrays += bState->capacity() / arraySize;
+ used_entries += bState->size();
+ dead_entries += bState->stats().dead_entries();
+ limit_entries += bState->capacity();
} else if (bState->isOnHold()) {
- uint32_t arraySize = bState->getArraySize();
- usedArrays += bState->size() / arraySize;
- limitArrays += bState->capacity() / arraySize;
+ used_entries += bState->size();
+ limit_entries += bState->capacity();
} else {
LOG_ABORT("should not be reached");
}
}
- return {usedArrays, deadArrays, limitArrays};
+ return {used_entries, dead_entries, limit_entries};
}
void
@@ -407,8 +402,8 @@ DataStoreBase::on_active(uint32_t bufferId, uint32_t typeId, size_t entries_need
BufferState *state = bufferMeta.get_state_relaxed();
if (state == nullptr) {
BufferState & newState = _stash.create<BufferState>();
- if (_disableElemHoldList) {
- newState.disable_elem_hold_list();
+ if (_disable_entry_hold_list) {
+ newState.disable_entry_hold_list();
}
if ( ! _freeListsEnabled) {
newState.disable_free_list();
@@ -418,9 +413,7 @@ DataStoreBase::on_active(uint32_t bufferId, uint32_t typeId, size_t entries_need
_bufferIdLimit.store(bufferId + 1, std::memory_order_release);
}
assert(state->isFree());
- auto type_handler = _typeHandlers[typeId];
- size_t array_size = type_handler->getArraySize();
- state->onActive(bufferId, typeId, type_handler, entries_needed * array_size, bufferMeta.get_atomic_buffer());
+ state->on_active(bufferId, typeId, _typeHandlers[typeId], entries_needed, bufferMeta.get_atomic_buffer());
bufferMeta.setTypeId(typeId);
bufferMeta.setArraySize(state->getArraySize());
if (_freeListsEnabled && state->isActive() && !state->getCompacting()) {
@@ -442,14 +435,13 @@ DataStoreBase::fallback_resize(uint32_t bufferId, size_t entries_needed)
{
BufferState &state = getBufferState(bufferId);
BufferState::Alloc toHoldBuffer;
- size_t oldUsedElems = state.size();
- size_t oldAllocElems = state.capacity();
- size_t elementSize = state.getTypeHandler()->elementSize();
- size_t array_size = state.getTypeHandler()->getArraySize();
- state.fallbackResize(bufferId, entries_needed * array_size, _buffers[bufferId].get_atomic_buffer(), toHoldBuffer);
- auto hold = std::make_unique<FallbackHold>(oldAllocElems * elementSize,
+ size_t old_used_entries = state.size();
+ size_t old_alloc_entries = state.capacity();
+ size_t entry_size = state.getTypeHandler()->entry_size();
+ state.fallback_resize(bufferId, entries_needed, _buffers[bufferId].get_atomic_buffer(), toHoldBuffer);
+ auto hold = std::make_unique<FallbackHold>(old_alloc_entries * entry_size,
std::move(toHoldBuffer),
- oldUsedElems,
+ old_used_entries,
state.getTypeHandler(),
state.getTypeId());
if (!_initializing) {
@@ -468,7 +460,7 @@ DataStoreBase::markCompacting(uint32_t bufferId)
}
assert(!state.getCompacting());
state.setCompacting();
- state.disable_elem_hold_list();
+ state.disable_entry_hold_list();
state.disable_free_list();
inc_compaction_count();
}
@@ -495,15 +487,15 @@ DataStoreBase::start_compact_worst_buffers(CompactionSpec compaction_spec, const
free_buffers++;
} else if (state->isActive()) {
auto typeHandler = state->getTypeHandler();
- uint32_t arraySize = typeHandler->getArraySize();
- uint32_t reservedElements = typeHandler->getReservedElements(bufferId);
- size_t used_elems = state->size();
- size_t deadElems = state->stats().dead_elems() - reservedElements;
+ uint32_t reserved_entries = typeHandler->get_reserved_entries(bufferId);
+ size_t used_entries = state->size();
+ size_t dead_entries = state->stats().dead_entries() - reserved_entries;
+ size_t entry_size = typeHandler->entry_size();
if (compaction_spec.compact_memory()) {
- elem_buffers.add(bufferId, used_elems, deadElems);
+ elem_buffers.add(bufferId, used_entries * entry_size, dead_entries * entry_size);
}
if (compaction_spec.compact_address_space()) {
- array_buffers.add(bufferId, used_elems / arraySize, deadElems / arraySize);
+ array_buffers.add(bufferId, used_entries, dead_entries);
}
}
}
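Two heuristics above now use entry units directly: primary_buffer_too_dead() compares dead bytes (dead entries times entry_size()) against the 16 KiB TOO_DEAD_SLACK and requires at least half of the used entries to be dead, and getAddressSpaceUsage() reports used/dead/limit counts in entries rather than arrays. A small sketch of the dead-buffer predicate with worked numbers, same shape as the hunk but not the vespalib function itself:

#include <cassert>
#include <cstddef>

constexpr size_t TOO_DEAD_SLACK = 0x4000u;   // 16 KiB, as in the hunk above

// Dead memory must exceed the slack and at least half of the entries must be dead.
bool too_dead(size_t dead_entries, size_t used_entries, size_t entry_size) {
    size_t dead_bytes = dead_entries * entry_size;
    return dead_bytes >= TOO_DEAD_SLACK && dead_entries * 2 >= used_entries;
}

int main() {
    assert(too_dead(3000, 5000, 8));     // 24000 dead bytes and 60% dead
    assert(!too_dead(3000, 8000, 8));    // enough dead bytes, but under half dead
    assert(!too_dead(1000, 1500, 8));    // majority dead, but only 8000 dead bytes
    return 0;
}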
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
index 8c5d5b1aee4..e5a38e3fd41 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
@@ -43,7 +43,7 @@ public:
*/
void ensure_buffer_capacity(uint32_t typeId, size_t entries_needed) {
auto &state = getBufferState(primary_buffer_id(typeId));
- if (entries_needed * state.getArraySize() > state.remaining()) [[unlikely]] {
+ if (entries_needed > state.remaining()) [[unlikely]] {
switch_or_grow_primary_buffer(typeId, entries_needed);
}
}
@@ -128,7 +128,7 @@ public:
/**
* Enable free list management.
- * This only works for fixed size elements.
+ * This only works for fixed size entries.
*/
void enableFreeLists();
@@ -136,7 +136,7 @@ public:
* Disable free list management.
*/
void disableFreeLists();
- void disableElemHoldList();
+ void disable_entry_hold_list();
bool has_free_lists_enabled() const { return _freeListsEnabled; }
@@ -178,7 +178,7 @@ public:
bool has_held_buffers() const noexcept { return _hold_buffer_count != 0u; }
/**
- * Trim elem hold list, freeing elements that no longer needs to be held.
+     * Trim entry hold list, freeing entries that no longer need to be held.
*
* @param oldest_used_gen the oldest generation that is still used.
*/
@@ -192,11 +192,11 @@ protected:
struct EntryRefHoldElem {
EntryRef ref;
- size_t num_elems;
+ size_t num_entries;
- EntryRefHoldElem(EntryRef ref_in, size_t num_elems_in)
+ EntryRefHoldElem(EntryRef ref_in, size_t num_entries_in)
: ref(ref_in),
- num_elems(num_elems_in)
+ num_entries(num_entries_in)
{}
};
@@ -216,11 +216,11 @@ private:
{
public:
BufferState::Alloc _buffer;
- size_t _usedElems;
+ size_t _used_entries;
BufferTypeBase *_typeHandler;
uint32_t _typeId;
- FallbackHold(size_t bytesSize, BufferState::Alloc &&buffer, size_t usedElems,
+ FallbackHold(size_t bytesSize, BufferState::Alloc &&buffer, size_t used_entries,
BufferTypeBase *typeHandler, uint32_t typeId);
~FallbackHold() override;
@@ -275,7 +275,7 @@ private:
uint32_t _hold_buffer_count;
const uint8_t _offset_bits;
bool _freeListsEnabled;
- bool _disableElemHoldList;
+ bool _disable_entry_hold_list;
bool _initializing;
};
diff --git a/vespalib/src/vespa/vespalib/datastore/large_array_buffer_type.h b/vespalib/src/vespa/vespalib/datastore/large_array_buffer_type.h
index 6fd5d694fb1..e2718b94cd2 100644
--- a/vespalib/src/vespa/vespalib/datastore/large_array_buffer_type.h
+++ b/vespalib/src/vespa/vespalib/datastore/large_array_buffer_type.h
@@ -31,7 +31,7 @@ public:
{
}
~LargeArrayBufferType() override;
- void cleanHold(void* buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx) override;
+ void clean_hold(void* buffer, size_t offset, EntryCount num_entries, CleanContext cleanCtx) override;
const vespalib::alloc::MemoryAllocator* get_memory_allocator() const override;
};
diff --git a/vespalib/src/vespa/vespalib/datastore/large_array_buffer_type.hpp b/vespalib/src/vespa/vespalib/datastore/large_array_buffer_type.hpp
index 0165efd8f32..72a2662991b 100644
--- a/vespalib/src/vespa/vespalib/datastore/large_array_buffer_type.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/large_array_buffer_type.hpp
@@ -19,11 +19,11 @@ LargeArrayBufferType<ElemT>::~LargeArrayBufferType() = default;
template <typename ElemT>
void
-LargeArrayBufferType<ElemT>::cleanHold(void* buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx)
+LargeArrayBufferType<ElemT>::clean_hold(void* buffer, size_t offset, EntryCount num_entries, CleanContext cleanCtx)
{
ArrayType* elem = static_cast<ArrayType*>(buffer) + offset;
const auto& empty = empty_entry();
- for (size_t i = 0; i < numElems; ++i) {
+ for (size_t i = 0; i < num_entries; ++i) {
cleanCtx.extraBytesCleaned(sizeof(ElemT) * elem->size());
*elem = empty;
++elem;
diff --git a/vespalib/src/vespa/vespalib/datastore/memory_stats.cpp b/vespalib/src/vespa/vespalib/datastore/memory_stats.cpp
index 8e060b4cfb4..5cb04796c5b 100644
--- a/vespalib/src/vespa/vespalib/datastore/memory_stats.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/memory_stats.cpp
@@ -5,10 +5,10 @@
namespace vespalib::datastore {
MemoryStats::MemoryStats()
- : _allocElems(0),
- _usedElems(0),
- _deadElems(0),
- _holdElems(0),
+ : _alloc_entries(0),
+ _used_entries(0),
+ _dead_entries(0),
+ _hold_entries(0),
_allocBytes(0),
_usedBytes(0),
_deadBytes(0),
@@ -22,10 +22,10 @@ MemoryStats::MemoryStats()
MemoryStats&
MemoryStats::operator+=(const MemoryStats& rhs)
{
- _allocElems += rhs._allocElems;
- _usedElems += rhs._usedElems;
- _deadElems += rhs._deadElems;
- _holdElems += rhs._holdElems;
+ _alloc_entries += rhs._alloc_entries;
+ _used_entries += rhs._used_entries;
+ _dead_entries += rhs._dead_entries;
+ _hold_entries += rhs._hold_entries;
_allocBytes += rhs._allocBytes;
_usedBytes += rhs._usedBytes;
_deadBytes += rhs._deadBytes;
diff --git a/vespalib/src/vespa/vespalib/datastore/memory_stats.h b/vespalib/src/vespa/vespalib/datastore/memory_stats.h
index 18d7dd77559..72a570dd625 100644
--- a/vespalib/src/vespa/vespalib/datastore/memory_stats.h
+++ b/vespalib/src/vespa/vespalib/datastore/memory_stats.h
@@ -13,10 +13,10 @@ namespace vespalib::datastore {
class MemoryStats
{
public:
- size_t _allocElems;
- size_t _usedElems;
- size_t _deadElems;
- size_t _holdElems;
+ size_t _alloc_entries;
+ size_t _used_entries;
+ size_t _dead_entries;
+ size_t _hold_entries;
size_t _allocBytes;
size_t _usedBytes;
size_t _deadBytes;
diff --git a/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
index a086423747a..9de361a8b19 100644
--- a/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
@@ -22,12 +22,9 @@ RawAllocator<EntryT, RefT>::alloc(size_t num_entries, size_t extra_entries)
uint32_t buffer_id = _store.primary_buffer_id(_typeId);
BufferState &state = _store.getBufferState(buffer_id);
assert(state.isActive());
- size_t oldBufferSize = state.size();
- // Must perform scaling ourselves, according to array size
- size_t arraySize = state.getArraySize();
- RefT ref((oldBufferSize / arraySize), buffer_id);
- EntryT *buffer = _store.getEntryArray<EntryT>(ref, arraySize);
- state.stats().pushed_back(num_entries * arraySize);
+ RefT ref(state.size(), buffer_id);
+ EntryT *buffer = _store.getEntryArray<EntryT>(ref, state.getArraySize());
+ state.stats().pushed_back(num_entries);
return HandleType(ref, buffer);
}
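With buffer sizes now tracked in entries, the allocator above can use state.size() directly as the offset of the new allocation and advance the used count with pushed_back(num_entries), instead of dividing and multiplying by the array size. A toy bump allocator in entry units illustrating that bookkeeping (illustrative only):

#include <cassert>
#include <cstddef>

struct ToyEntryBuffer {
    size_t used_entries = 0;

    // The next free slot is simply the current used-entries count;
    // allocating advances it by num_entries.
    size_t alloc(size_t num_entries) {
        size_t first = used_entries;
        used_entries += num_entries;
        return first;
    }
};

int main() {
    ToyEntryBuffer b;
    assert(b.alloc(1) == 0);
    assert(b.alloc(3) == 1);        // multi-entry allocation (extra_entries case)
    assert(b.used_entries == 4);
    return 0;
}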
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
index 52b0798543f..0efaf04b26e 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
@@ -106,7 +106,7 @@ private:
_mapping.resize(data_store.get_bufferid_limit_relaxed());
for (const auto bufferId : _compacting_buffers->get_buffer_ids()) {
BufferState &state = data_store.getBufferState(bufferId);
- _mapping[bufferId].resize(state.get_used_arrays());
+ _mapping[bufferId].resize(state.size());
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp
index 8ad11b18218..49d2631e73f 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp
@@ -43,7 +43,7 @@ template <typename EntryT, typename RefT>
void
UniqueStoreAllocator<EntryT, RefT>::hold(EntryRef ref)
{
- _store.holdElem(ref, 1);
+ _store.hold_entry(ref);
}
template <typename EntryT, typename RefT>
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp
index 32513d09c72..4a517521d77 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp
@@ -44,7 +44,7 @@ UniqueStoreEnumerator<RefT>::allocate_enum_values(DataStoreBase & store)
{
_enumValues.resize(store.get_bufferid_limit_relaxed());
store.for_each_active_buffer([this](uint32_t buffer_id, const BufferState & state) {
- _enumValues[buffer_id].resize(state.get_used_arrays());
+ _enumValues[buffer_id].resize(state.size());
});
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.cpp b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.cpp
index 1d3ba27d6bf..9f2105b7f08 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.cpp
@@ -42,28 +42,28 @@ UniqueStoreSmallStringBufferType::UniqueStoreSmallStringBufferType(uint32_t arra
UniqueStoreSmallStringBufferType::~UniqueStoreSmallStringBufferType() = default;
void
-UniqueStoreSmallStringBufferType::destroyElements(void *, ElemCount)
+UniqueStoreSmallStringBufferType::destroy_entries(void *, EntryCount)
{
static_assert(std::is_trivially_destructible<UniqueStoreSmallStringEntry>::value,
"UniqueStoreSmallStringEntry must be trivially destructable");
}
void
-UniqueStoreSmallStringBufferType::fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems)
+UniqueStoreSmallStringBufferType::fallback_copy(void *newBuffer, const void *oldBuffer, EntryCount num_entries)
{
static_assert(std::is_trivially_copyable<UniqueStoreSmallStringEntry>::value,
"UniqueStoreSmallStringEntry must be trivially copyable");
- if (numElems > 0) {
- memcpy(newBuffer, oldBuffer, numElems);
+ if (num_entries > 0) {
+ memcpy(newBuffer, oldBuffer, num_entries * getArraySize());
}
}
void
-UniqueStoreSmallStringBufferType::cleanHold(void *buffer, size_t offset, ElemCount numElems, CleanContext)
+UniqueStoreSmallStringBufferType::clean_hold(void *buffer, size_t offset, EntryCount num_entries, CleanContext)
{
- void *e = static_cast<char *>(buffer) + offset;
- void *e_end = static_cast<char *>(e) + numElems;
size_t array_size = getArraySize();
+ void *e = static_cast<char *>(buffer) + offset * array_size;
+ void *e_end = static_cast<char *>(e) + num_entries * array_size;
while (e < e_end) {
static_cast<UniqueStoreSmallStringEntry *>(e)->clean_hold(array_size);
e = static_cast<char *>(e) + array_size;
@@ -86,10 +86,10 @@ UniqueStoreExternalStringBufferType::UniqueStoreExternalStringBufferType(uint32_
UniqueStoreExternalStringBufferType::~UniqueStoreExternalStringBufferType() = default;
void
-UniqueStoreExternalStringBufferType::cleanHold(void *buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx)
+UniqueStoreExternalStringBufferType::clean_hold(void *buffer, size_t offset, EntryCount num_entries, CleanContext cleanCtx)
{
UniqueStoreEntry<std::string> *elem = static_cast<UniqueStoreEntry<std::string> *>(buffer) + offset;
- for (size_t i = 0; i < numElems; ++i) {
+ for (size_t i = 0; i < num_entries; ++i) {
cleanCtx.extraBytesCleaned(elem->value().size() + 1);
std::string().swap(elem->value());
++elem;
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h
index a85b73f423d..d3348950891 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h
@@ -62,9 +62,9 @@ class UniqueStoreSmallStringBufferType : public BufferType<char> {
public:
UniqueStoreSmallStringBufferType(uint32_t array_size, uint32_t max_arrays, std::shared_ptr<vespalib::alloc::MemoryAllocator> memory_allocator);
~UniqueStoreSmallStringBufferType() override;
- void destroyElements(void *, ElemCount) override;
- void fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems) override;
- void cleanHold(void *buffer, size_t offset, ElemCount numElems, CleanContext) override;
+ void destroy_entries(void *, EntryCount) override;
+    void fallback_copy(void *newBuffer, const void *oldBuffer, EntryCount num_entries) override;
+ void clean_hold(void *buffer, size_t offset, EntryCount num_entries, CleanContext) override;
const vespalib::alloc::MemoryAllocator* get_memory_allocator() const override;
};
@@ -76,7 +76,7 @@ class UniqueStoreExternalStringBufferType : public BufferType<UniqueStoreEntry<s
public:
UniqueStoreExternalStringBufferType(uint32_t array_size, uint32_t max_arrays, std::shared_ptr<vespalib::alloc::MemoryAllocator> memory_allocator);
~UniqueStoreExternalStringBufferType() override;
- void cleanHold(void *buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx) override;
+ void clean_hold(void *buffer, size_t offset, EntryCount num_entries, CleanContext cleanCtx) override;
const vespalib::alloc::MemoryAllocator* get_memory_allocator() const override;
};
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
index c7cf8208615..4ff8e1e1ab4 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
@@ -61,11 +61,10 @@ UniqueStoreStringAllocator<RefT>::hold(EntryRef ref)
RefT iRef(ref);
uint32_t type_id = _store.getTypeId(iRef.bufferId());
if (type_id != 0) {
- size_t array_size = string_allocator::array_sizes[type_id - 1];
- _store.holdElem(ref, array_size);
+ _store.hold_entry(ref);
} else {
auto &value = _store.template getEntry<WrappedExternalEntryType>(iRef)->value();
- _store.holdElem(ref, 1, value.size() + 1);
+ _store.hold_entry(ref, value.size() + 1);
}
}