about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--searchlib/src/tests/attribute/postinglist/postinglist.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postingstore.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/compact_words_store.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/word_store.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/tensor/serialized_tensor_store.cpp2
-rw-r--r--vespalib/src/tests/datastore/datastore/datastore_test.cpp32
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreenodestore.hpp2
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreestore.hpp2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/allocator.hpp18
-rw-r--r--vespalib/src/vespa/vespalib/datastore/array_store.hpp2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastore.h4
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastore.hpp2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.cpp86
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.h44
-rw-r--r--vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp8
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp2
20 files changed, 112 insertions, 108 deletions
diff --git a/searchlib/src/tests/attribute/postinglist/postinglist.cpp b/searchlib/src/tests/attribute/postinglist/postinglist.cpp
index 751fa72c349..d5bcad56495 100644
--- a/searchlib/src/tests/attribute/postinglist/postinglist.cpp
+++ b/searchlib/src/tests/attribute/postinglist/postinglist.cpp
@@ -597,7 +597,7 @@ AttributePostingListTest::doCompactEnumStore(Tree &tree,
// Freelists already disabled due to variable sized data
}
}
- valueHandle.switchActiveBuffer(0, 0u);
+ valueHandle.switch_primary_buffer(0, 0u);
for (; i.valid(); ++i)
{
diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
index cbe2233dedc..0473a7b2915 100644
--- a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
@@ -70,7 +70,7 @@ PostingStore<DataT>::PostingStore(EnumPostingTree &dict, Status &status,
{
// TODO: Add type for bitvector
_store.addType(&_bvType);
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
_store.enableFreeLists();
}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/compact_words_store.cpp b/searchlib/src/vespa/searchlib/memoryindex/compact_words_store.cpp
index ef5c2ea6773..1cda712dd86 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/compact_words_store.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/compact_words_store.cpp
@@ -98,7 +98,7 @@ CompactWordsStore::Store::Store()
_typeId(0)
{
_store.addType(&_type);
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
}
CompactWordsStore::Store::~Store()
diff --git a/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp b/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp
index 73c26ef3fbd..1a24615fdf0 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp
@@ -88,7 +88,7 @@ FeatureStore::FeatureStore(const Schema &schema)
_fieldsParams[it.getIndex()].setSchemaParams(_schema, it.getIndex());
}
_store.addType(&_type);
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
}
FeatureStore::~FeatureStore()
diff --git a/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp b/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp
index 1e644ce2533..e22ce88ebbf 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp
@@ -16,7 +16,7 @@ WordStore::WordStore()
_typeId(0)
{
_store.addType(&_type);
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
}
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp
index 5c16199dffd..28c82cb7a97 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp
@@ -26,7 +26,7 @@ PredicateIntervalStore::PredicateIntervalStore()
// This order determines type ids.
_store.addType(&_size1Type);
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
}
PredicateIntervalStore::~PredicateIntervalStore() {
diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
index 36a803aa806..e99ba196224 100644
--- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
@@ -77,7 +77,7 @@ DenseTensorStore::DenseTensorStore(const ValueType &type, std::unique_ptr<vespal
{
_emptySpace.resize(getBufSize(), 0);
_store.addType(&_bufferType);
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
_store.enableFreeLists();
}
diff --git a/searchlib/src/vespa/searchlib/tensor/serialized_tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/serialized_tensor_store.cpp
index 7045c82935c..f55b51875b2 100644
--- a/searchlib/src/vespa/searchlib/tensor/serialized_tensor_store.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/serialized_tensor_store.cpp
@@ -23,7 +23,7 @@ SerializedTensorStore::SerializedTensorStore()
RefType::offsetSize() / RefType::align(1))
{
_store.addType(&_bufferType);
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
}
SerializedTensorStore::~SerializedTensorStore()
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
index 2024a440627..f3f43fb575b 100644
--- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -17,7 +17,7 @@ using vespalib::alloc::MemoryAllocator;
class MyStore : public DataStore<int, EntryRefT<3, 2> > {
private:
using ParentType = DataStore<int, EntryRefT<3, 2> >;
- using ParentType::_activeBufferIds;
+ using ParentType::_primary_buffer_ids;
public:
MyStore() {}
explicit MyStore(std::unique_ptr<BufferType<int>> type)
@@ -44,12 +44,12 @@ public:
void enableFreeLists() {
ParentType::enableFreeLists();
}
- void switchActiveBuffer() {
- ParentType::switchActiveBuffer(0, 0u);
+ void switch_primary_buffer() {
+ ParentType::switch_primary_buffer(0, 0u);
}
- size_t activeBufferId() const { return _activeBufferIds[0]; }
+ size_t primary_buffer_id() const { return _primary_buffer_ids[0]; }
BufferState& get_active_buffer_state() {
- return ParentType::getBufferState(activeBufferId());
+ return ParentType::getBufferState(primary_buffer_id());
}
};
@@ -76,7 +76,7 @@ public:
{
(void) _store.addType(&_firstType);
_typeId = _store.addType(&_type);
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
}
~GrowStore() { _store.dropBuffers(); }
@@ -244,20 +244,20 @@ TEST(DataStoreTest, require_that_we_can_hold_and_trim_buffers)
{
MyStore s;
EXPECT_EQ(0u, MyRef(s.addEntry(1)).bufferId());
- s.switchActiveBuffer();
- EXPECT_EQ(1u, s.activeBufferId());
+ s.switch_primary_buffer();
+ EXPECT_EQ(1u, s.primary_buffer_id());
s.holdBuffer(0); // hold last buffer
s.transferHoldLists(10);
EXPECT_EQ(1u, MyRef(s.addEntry(2)).bufferId());
- s.switchActiveBuffer();
- EXPECT_EQ(2u, s.activeBufferId());
+ s.switch_primary_buffer();
+ EXPECT_EQ(2u, s.primary_buffer_id());
s.holdBuffer(1); // hold last buffer
s.transferHoldLists(20);
EXPECT_EQ(2u, MyRef(s.addEntry(3)).bufferId());
- s.switchActiveBuffer();
- EXPECT_EQ(3u, s.activeBufferId());
+ s.switch_primary_buffer();
+ EXPECT_EQ(3u, s.primary_buffer_id());
s.holdBuffer(2); // hold last buffer
s.transferHoldLists(30);
@@ -275,8 +275,8 @@ TEST(DataStoreTest, require_that_we_can_hold_and_trim_buffers)
EXPECT_TRUE(s.getBufferState(2).size() != 0);
EXPECT_TRUE(s.getBufferState(3).size() != 0);
- s.switchActiveBuffer();
- EXPECT_EQ(0u, s.activeBufferId());
+ s.switch_primary_buffer();
+ EXPECT_EQ(0u, s.primary_buffer_id());
EXPECT_EQ(0u, MyRef(s.addEntry(5)).bufferId());
s.trimHoldLists(41);
EXPECT_TRUE(s.getBufferState(0).size() != 0);
@@ -429,7 +429,7 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
assertMemStats(m, s.getMemStats());
// new active buffer
- s.switchActiveBuffer();
+ s.switch_primary_buffer();
s.addEntry(40);
m._allocElems += MyRef::offsetSize();
m._usedElems++;
@@ -623,7 +623,7 @@ TEST(DataStoreTest, can_set_memory_allocator)
auto ref2 = s.addEntry(43);
EXPECT_EQ(0u, MyRef(ref2).bufferId());
EXPECT_EQ(AllocStats(2, 0), stats);
- s.switchActiveBuffer();
+ s.switch_primary_buffer();
EXPECT_EQ(AllocStats(3, 0), stats);
s.holdBuffer(0);
s.transferHoldLists(10);
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
index b4ad927b618..747c1108b32 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
@@ -41,7 +41,7 @@ BTreeNodeStore()
{
_store.addType(&_internalNodeType);
_store.addType(&_leafNodeType);
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
_store.enableFreeLists();
}
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.hpp b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
index 8c8c73ba87a..bd7331bc996 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
@@ -48,7 +48,7 @@ BTreeStore(bool init)
_store.addType(&_small8Type);
_store.addType(&_treeType);
if (init) {
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
_store.enableFreeLists();
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/allocator.hpp b/vespalib/src/vespa/vespalib/datastore/allocator.hpp
index e038f4a39d5..a5ebed3f3fc 100644
--- a/vespalib/src/vespa/vespalib/datastore/allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/allocator.hpp
@@ -21,11 +21,11 @@ typename Allocator<EntryT, RefT>::HandleType
Allocator<EntryT, RefT>::alloc(Args && ... args)
{
_store.ensureBufferCapacity(_typeId, 1);
- uint32_t activeBufferId = _store.getActiveBufferId(_typeId);
- BufferState &state = _store.getBufferState(activeBufferId);
+ uint32_t buffer_id = _store.get_primary_buffer_id(_typeId);
+ BufferState &state = _store.getBufferState(buffer_id);
assert(state.isActive());
size_t oldBufferSize = state.size();
- RefT ref(oldBufferSize, activeBufferId);
+ RefT ref(oldBufferSize, buffer_id);
EntryT *entry = _store.getEntry<EntryT>(ref);
new (static_cast<void *>(entry)) EntryT(std::forward<Args>(args)...);
state.pushed_back(1);
@@ -37,13 +37,13 @@ typename Allocator<EntryT, RefT>::HandleType
Allocator<EntryT, RefT>::allocArray(ConstArrayRef array)
{
_store.ensureBufferCapacity(_typeId, array.size());
- uint32_t activeBufferId = _store.getActiveBufferId(_typeId);
- BufferState &state = _store.getBufferState(activeBufferId);
+ uint32_t buffer_id = _store.get_primary_buffer_id(_typeId);
+ BufferState &state = _store.getBufferState(buffer_id);
assert(state.isActive());
assert(state.getArraySize() == array.size());
size_t oldBufferSize = state.size();
assert((oldBufferSize % array.size()) == 0);
- RefT ref((oldBufferSize / array.size()), activeBufferId);
+ RefT ref((oldBufferSize / array.size()), buffer_id);
EntryT *buf = _store.template getEntryArray<EntryT>(ref, array.size());
for (size_t i = 0; i < array.size(); ++i) {
new (static_cast<void *>(buf + i)) EntryT(array[i]);
@@ -57,13 +57,13 @@ typename Allocator<EntryT, RefT>::HandleType
Allocator<EntryT, RefT>::allocArray(size_t size)
{
_store.ensureBufferCapacity(_typeId, size);
- uint32_t activeBufferId = _store.getActiveBufferId(_typeId);
- BufferState &state = _store.getBufferState(activeBufferId);
+ uint32_t buffer_id = _store.get_primary_buffer_id(_typeId);
+ BufferState &state = _store.getBufferState(buffer_id);
assert(state.isActive());
assert(state.getArraySize() == size);
size_t oldBufferSize = state.size();
assert((oldBufferSize % size) == 0);
- RefT ref((oldBufferSize / size), activeBufferId);
+ RefT ref((oldBufferSize / size), buffer_id);
EntryT *buf = _store.template getEntryArray<EntryT>(ref, size);
for (size_t i = 0; i < size; ++i) {
new (static_cast<void *>(buf + i)) EntryT();
diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.hpp b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
index c1fa14cac3c..5409c21594c 100644
--- a/vespalib/src/vespa/vespalib/datastore/array_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
@@ -54,7 +54,7 @@ ArrayStore<EntryT, RefT>::ArrayStore(const ArrayStoreConfig &cfg)
_largeArrayType(cfg.specForSize(0))
{
initArrayTypes(cfg);
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
if (cfg.enable_free_lists()) {
_store.enableFreeLists();
}
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.h b/vespalib/src/vespa/vespalib/datastore/datastore.h
index c2cfca520af..fa8e734b005 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.h
@@ -95,11 +95,11 @@ class DataStore : public DataStoreT<RefT>
protected:
typedef DataStoreT<RefT> ParentType;
using ParentType::ensureBufferCapacity;
- using ParentType::_activeBufferIds;
+ using ParentType::_primary_buffer_ids;
using ParentType::_freeListLists;
using ParentType::getEntry;
using ParentType::dropBuffers;
- using ParentType::initActiveBuffers;
+ using ParentType::init_primary_buffers;
using ParentType::addType;
using BufferTypeUP = std::unique_ptr<BufferType<EntryType>>;
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.hpp b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
index 42146eab9aa..dd99b025907 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
@@ -149,7 +149,7 @@ DataStore<EntryType, RefT>::DataStore(BufferTypeUP type)
_type(std::move(type))
{
addType(_type.get());
- initActiveBuffers();
+ init_primary_buffers();
}
template <typename EntryType, typename RefT>
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
index 4b1349d8801..2ec8cb3bda3 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
@@ -2,6 +2,7 @@
#include "datastore.h"
#include <vespa/vespalib/util/array.hpp>
+#include <vespa/vespalib/util/stringfmt.h>
#include <limits>
#include <cassert>
@@ -14,25 +15,25 @@ namespace vespalib::datastore {
namespace {
-/*
- * Minimum dead bytes in active write buffer before switching to new
- * active write buffer even if another active buffer has more dead
- * bytes due to considering the active write buffer as too dead.
+/**
+ * Minimum dead bytes in primary write buffer before switching to new
+ * primary write buffer even if another active buffer has more dead
+ * bytes due to considering the primary write buffer as too dead.
*/
-constexpr size_t TOODEAD_SLACK = 0x4000u;
+constexpr size_t TOO_DEAD_SLACK = 0x4000u;
-/*
- * Check if active write buffer is too dead for further use, i.e. if it
+/**
+ * Check if primary write buffer is too dead for further use, i.e. if it
* is likely to be the worst buffer at next compaction. If so, filling it
* up completely will be wasted work, as data will have to be moved again
* rather soon.
*/
bool
-activeWriteBufferTooDead(const BufferState &state)
+primary_buffer_too_dead(const BufferState &state)
{
size_t deadElems = state.getDeadElems();
size_t deadBytes = deadElems * state.getArraySize();
- return ((deadBytes >= TOODEAD_SLACK) && (deadElems * 2 >= state.size()));
+ return ((deadBytes >= TOO_DEAD_SLACK) && (deadElems * 2 >= state.size()));
}
}
@@ -75,7 +76,7 @@ public:
DataStoreBase::DataStoreBase(uint32_t numBuffers, size_t maxArrays)
: _buffers(numBuffers),
- _activeBufferIds(),
+ _primary_buffer_ids(),
_states(numBuffers),
_typeHandlers(),
_freeListLists(),
@@ -99,65 +100,66 @@ DataStoreBase::~DataStoreBase()
}
void
-DataStoreBase::switchActiveBuffer(uint32_t typeId, size_t elemsNeeded)
+DataStoreBase::switch_primary_buffer(uint32_t typeId, size_t elemsNeeded)
{
- size_t activeBufferId = _activeBufferIds[typeId];
+ size_t buffer_id = _primary_buffer_ids[typeId];
for (size_t i = 0; i < getNumBuffers(); ++i) {
// start using next buffer
- activeBufferId = nextBufferId(activeBufferId);
- if (_states[activeBufferId].isFree()) {
+ buffer_id = nextBufferId(buffer_id);
+ if (_states[buffer_id].isFree()) {
break;
}
}
- if (!_states[activeBufferId].isFree()) {
- LOG_ABORT("did not find free buffer");
+ if (!_states[buffer_id].isFree()) {
+ LOG_ABORT(vespalib::make_string("switch_primary_buffer(%u, %zu): did not find a free buffer",
+ typeId, elemsNeeded).c_str());
}
- onActive(activeBufferId, typeId, elemsNeeded);
- _activeBufferIds[typeId] = activeBufferId;
+ onActive(buffer_id, typeId, elemsNeeded);
+ _primary_buffer_ids[typeId] = buffer_id;
}
void
-DataStoreBase::switchOrGrowActiveBuffer(uint32_t typeId, size_t elemsNeeded)
+DataStoreBase::switch_or_grow_primary_buffer(uint32_t typeId, size_t elemsNeeded)
{
auto typeHandler = _typeHandlers[typeId];
uint32_t arraySize = typeHandler->getArraySize();
size_t numArraysForNewBuffer = typeHandler->getNumArraysForNewBuffer();
size_t numEntriesForNewBuffer = numArraysForNewBuffer * arraySize;
- uint32_t bufferId = _activeBufferIds[typeId];
+ uint32_t bufferId = _primary_buffer_ids[typeId];
if (elemsNeeded + _states[bufferId].size() >= numEntriesForNewBuffer) {
// Don't try to resize existing buffer, new buffer will be large enough
- switchActiveBuffer(typeId, elemsNeeded);
+ switch_primary_buffer(typeId, elemsNeeded);
} else {
fallbackResize(bufferId, elemsNeeded);
}
}
void
-DataStoreBase::initActiveBuffers()
+DataStoreBase::init_primary_buffers()
{
- uint32_t numTypes = _activeBufferIds.size();
+ uint32_t numTypes = _primary_buffer_ids.size();
for (uint32_t typeId = 0; typeId < numTypes; ++typeId) {
- size_t activeBufferId = 0;
+ size_t buffer_id = 0;
for (size_t i = 0; i < getNumBuffers(); ++i) {
- if (_states[activeBufferId].isFree()) {
+ if (_states[buffer_id].isFree()) {
break;
}
// start using next buffer
- activeBufferId = nextBufferId(activeBufferId);
+ buffer_id = nextBufferId(buffer_id);
}
- assert(_states[activeBufferId].isFree());
- onActive(activeBufferId, typeId, 0u);
- _activeBufferIds[typeId] = activeBufferId;
+ assert(_states[buffer_id].isFree());
+ onActive(buffer_id, typeId, 0u);
+ _primary_buffer_ids[typeId] = buffer_id;
}
}
uint32_t
DataStoreBase::addType(BufferTypeBase *typeHandler)
{
- uint32_t typeId = _activeBufferIds.size();
+ uint32_t typeId = _primary_buffer_ids.size();
assert(typeId == _typeHandlers.size());
typeHandler->clampMaxArrays(_maxArrays);
- _activeBufferIds.push_back(0);
+ _primary_buffer_ids.push_back(0);
_typeHandlers.push_back(typeHandler);
_freeListLists.push_back(BufferState::FreeListList());
return typeId;
@@ -383,7 +385,7 @@ DataStoreBase::startCompact(uint32_t typeId)
disableFreeList(bufferId);
}
}
- switchActiveBuffer(typeId, 0u);
+ switch_primary_buffer(typeId, 0u);
inc_compaction_count();
return toHold;
}
@@ -421,19 +423,19 @@ DataStoreBase::fallbackResize(uint32_t bufferId, size_t elemsNeeded)
uint32_t
DataStoreBase::startCompactWorstBuffer(uint32_t typeId)
{
- uint32_t activeBufferId = getActiveBufferId(typeId);
+ uint32_t buffer_id = get_primary_buffer_id(typeId);
const BufferTypeBase *typeHandler = _typeHandlers[typeId];
assert(typeHandler->getActiveBuffers() >= 1u);
if (typeHandler->getActiveBuffers() == 1u) {
// Single active buffer for type, no need for scan
- _states[activeBufferId].setCompacting();
- _states[activeBufferId].disableElemHoldList();
- disableFreeList(activeBufferId);
- switchActiveBuffer(typeId, 0u);
- return activeBufferId;
+ _states[buffer_id].setCompacting();
+ _states[buffer_id].disableElemHoldList();
+ disableFreeList(buffer_id);
+ switch_primary_buffer(typeId, 0u);
+ return buffer_id;
}
// Multiple active buffers for type, must perform full scan
- return startCompactWorstBuffer(activeBufferId,
+ return startCompactWorstBuffer(buffer_id,
[=](const BufferState &state) { return state.isActive(typeId); });
}
@@ -462,9 +464,9 @@ DataStoreBase::markCompacting(uint32_t bufferId)
{
auto &state = getBufferState(bufferId);
uint32_t typeId = state.getTypeId();
- uint32_t activeBufferId = getActiveBufferId(typeId);
- if ((bufferId == activeBufferId) || activeWriteBufferTooDead(getBufferState(activeBufferId))) {
- switchActiveBuffer(typeId, 0u);
+ uint32_t buffer_id = get_primary_buffer_id(typeId);
+ if ((bufferId == buffer_id) || primary_buffer_too_dead(getBufferState(buffer_id))) {
+ switch_primary_buffer(typeId, 0u);
}
state.setCompacting();
state.disableElemHoldList();
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
index 77eb3857f27..2617973d239 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
@@ -55,7 +55,9 @@ private:
};
std::vector<BufferAndTypeId> _buffers; // For fast mapping with known types
protected:
- std::vector<uint32_t> _activeBufferIds; // typeId -> active buffer
+ // Provides a mapping from typeId -> primary buffer for that type.
+ // The primary buffer is used for allocations of new element(s) if no available slots are found in free lists.
+ std::vector<uint32_t> _primary_buffer_ids;
void * getBuffer(uint32_t bufferId) { return _buffers[bufferId].getBuffer(); }
@@ -176,10 +178,10 @@ protected:
}
/**
- * Get the active buffer for the given type id.
+ * Get the primary buffer for the given type id.
*/
- void *activeBuffer(uint32_t typeId) {
- return _buffers[_activeBufferIds[typeId]].getBuffer();
+ void* primary_buffer(uint32_t typeId) {
+ return _buffers[_primary_buffer_ids[typeId]].getBuffer();
}
/**
@@ -196,20 +198,20 @@ protected:
void markCompacting(uint32_t bufferId);
public:
uint32_t addType(BufferTypeBase *typeHandler);
- void initActiveBuffers();
+ void init_primary_buffers();
/**
- * Ensure that active buffer has a given number of elements free at end.
+ * Ensure that the primary buffer for the given type has a given number of elements free at end.
* Switch to new buffer if current buffer is too full.
*
- * @param typeId registered data type for buffer.
- * @param elemsNeeded Number of elements needed to be free
+ * @param typeId Registered data type for buffer.
+ * @param elemsNeeded Number of elements needed to be free.
*/
void ensureBufferCapacity(uint32_t typeId, size_t elemsNeeded) {
if (__builtin_expect(elemsNeeded >
- _states[_activeBufferIds[typeId]].remaining(),
+ _states[_primary_buffer_ids[typeId]].remaining(),
false)) {
- switchOrGrowActiveBuffer(typeId, elemsNeeded);
+ switch_or_grow_primary_buffer(typeId, elemsNeeded);
}
}
@@ -221,24 +223,24 @@ public:
void holdBuffer(uint32_t bufferId);
/**
- * Switch to new active buffer, typically in preparation for compaction
- * or when current active buffer no longer has free space.
+ * Switch to a new primary buffer, typically in preparation for compaction
+ * or when the current primary buffer no longer has free space.
*
- * @param typeId registered data type for buffer.
- * @param elemsNeeded Number of elements needed to be free
+ * @param typeId Registered data type for buffer.
+ * @param elemsNeeded Number of elements needed to be free.
*/
- void switchActiveBuffer(uint32_t typeId, size_t elemsNeeded);
+ void switch_primary_buffer(uint32_t typeId, size_t elemsNeeded);
- void switchOrGrowActiveBuffer(uint32_t typeId, size_t elemsNeeded);
+ void switch_or_grow_primary_buffer(uint32_t typeId, size_t elemsNeeded);
vespalib::MemoryUsage getMemoryUsage() const;
vespalib::AddressSpace getAddressSpaceUsage() const;
/**
- * Get active buffer id for the given type id.
+ * Get the primary buffer id for the given type id.
*/
- uint32_t getActiveBufferId(uint32_t typeId) const { return _activeBufferIds[typeId]; }
+ uint32_t get_primary_buffer_id(uint32_t typeId) const { return _primary_buffer_ids[typeId]; }
const BufferState &getBufferState(uint32_t bufferId) const { return _states[bufferId]; }
BufferState &getBufferState(uint32_t bufferId) { return _states[bufferId]; }
uint32_t getNumBuffers() const { return _numBuffers; }
@@ -340,11 +342,11 @@ public:
private:
/**
- * Switch buffer state to active.
+ * Switch buffer state to active for the given buffer.
*
* @param bufferId Id of buffer to be active.
- * @param typeId registered data type for buffer.
- * @param elemsNeeded Number of elements needed to be free
+ * @param typeId Registered data type for buffer.
+ * @param elemsNeeded Number of elements needed to be free.
*/
void onActive(uint32_t bufferId, uint32_t typeId, size_t elemsNeeded);
diff --git a/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
index 43a793d69f7..797566f923d 100644
--- a/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
@@ -19,13 +19,13 @@ typename RawAllocator<EntryT, RefT>::HandleType
RawAllocator<EntryT, RefT>::alloc(size_t numElems, size_t extraElems)
{
_store.ensureBufferCapacity(_typeId, numElems + extraElems);
- uint32_t activeBufferId = _store.getActiveBufferId(_typeId);
- BufferState &state = _store.getBufferState(activeBufferId);
+ uint32_t buffer_id = _store.get_primary_buffer_id(_typeId);
+ BufferState &state = _store.getBufferState(buffer_id);
assert(state.isActive());
size_t oldBufferSize = state.size();
if (RefT::isAlignedType) {
// AlignedEntryRef constructor scales down offset by alignment
- RefT ref(oldBufferSize, activeBufferId);
+ RefT ref(oldBufferSize, buffer_id);
EntryT *buffer = _store.getEntry<EntryT>(ref);
state.pushed_back(numElems);
return HandleType(ref, buffer);
@@ -33,7 +33,7 @@ RawAllocator<EntryT, RefT>::alloc(size_t numElems, size_t extraElems)
// Must perform scaling ourselves, according to array size
size_t arraySize = state.getArraySize();
assert((numElems % arraySize) == 0u);
- RefT ref((oldBufferSize / arraySize), activeBufferId);
+ RefT ref((oldBufferSize / arraySize), buffer_id);
EntryT *buffer = _store.getEntryArray<EntryT>(ref, arraySize);
state.pushed_back(numElems);
return HandleType(ref, buffer);
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp
index 7260a50609f..6eb9315cdf5 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.hpp
@@ -20,7 +20,7 @@ UniqueStoreAllocator<EntryT, RefT>::UniqueStoreAllocator()
{
auto typeId = _store.addType(&_typeHandler);
assert(typeId == 0u);
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
_store.enableFreeLists();
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
index 2fe12238342..b41e3187144 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
@@ -23,7 +23,7 @@ UniqueStoreStringAllocator<RefT>::UniqueStoreStringAllocator()
assert(type_id == exp_type_id);
++exp_type_id;
}
- _store.initActiveBuffers();
+ _store.init_primary_buffers();
_store.enableFreeLists();
}