about summary refs log tree commit diff stats
path: root/vespalib
diff options
context:
space:
mode:
authorTor Egge <Tor.Egge@online.no>2021-08-13 11:15:10 +0200
committerTor Egge <Tor.Egge@online.no>2021-08-13 11:15:10 +0200
commita6d6f48d21b63f9a3aa8af2f04a23c71692b879a (patch)
treed2cd13716c6883f145276a0ab71c89e91716442f /vespalib
parent1f49eabb261f4db4a47ec47d1971d4927cab867f (diff)
Consider reusing active buffer.
Diffstat (limited to 'vespalib')
-rw-r--r--vespalib/src/tests/datastore/array_store/array_store_test.cpp6
-rw-r--r--vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp14
-rw-r--r--vespalib/src/tests/datastore/datastore/datastore_test.cpp40
-rw-r--r--vespalib/src/vespa/vespalib/datastore/buffer_type.cpp31
-rw-r--r--vespalib/src/vespa/vespalib/datastore/buffer_type.h8
-rw-r--r--vespalib/src/vespa/vespalib/datastore/bufferstate.cpp14
-rw-r--r--vespalib/src/vespa/vespalib/datastore/bufferstate.h5
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.cpp56
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.h1
9 files changed, 138 insertions, 37 deletions
diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
index 417d8b80d87..562ecaaecfa 100644
--- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp
+++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
@@ -150,13 +150,13 @@ TEST("require that we test with trivial and non-trivial types")
TEST_F("control static sizes", NumberFixture(3)) {
#ifdef _LIBCPP_VERSION
- EXPECT_EQUAL(400u, sizeof(f.store));
+ EXPECT_EQUAL(424u, sizeof(f.store));
EXPECT_EQUAL(296u, sizeof(NumberFixture::ArrayStoreType::DataStoreType));
#else
- EXPECT_EQUAL(432u, sizeof(f.store));
+ EXPECT_EQUAL(456u, sizeof(f.store));
EXPECT_EQUAL(328u, sizeof(NumberFixture::ArrayStoreType::DataStoreType));
#endif
- EXPECT_EQUAL(72u, sizeof(NumberFixture::ArrayStoreType::SmallArrayType));
+ EXPECT_EQUAL(96u, sizeof(NumberFixture::ArrayStoreType::SmallArrayType));
MemoryUsage usage = f.store.getMemoryUsage();
EXPECT_EQUAL(960u, usage.allocatedBytes());
EXPECT_EQUAL(32u, usage.usedBytes());
diff --git a/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
index 414c35864ac..4cd192f602f 100644
--- a/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
+++ b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
@@ -49,7 +49,7 @@ struct Fixture {
}
~Fixture() {
for (auto& setup : setups) {
- bufferType.onHold(&setup._usedElems, &setup._deadElems);
+ bufferType.onHold(setup._bufferId, &setup._usedElems, &setup._deadElems);
bufferType.onFree(setup._usedElems);
}
}
@@ -134,9 +134,9 @@ TEST("arrays to alloc considers used elements across all active buffers of same
{
Fixture f(Setup().used(6 * 4));
f.assertArraysToAlloc(6 * 0.5);
- f.add_setup(Setup().used(8 * 4));
+ f.add_setup(Setup().used(8 * 4).bufferId(2));
f.assertArraysToAlloc((6 + 8) * 0.5);
- f.add_setup(Setup().used(10 * 4));
+ f.add_setup(Setup().used(10 * 4).bufferId(3));
f.assertArraysToAlloc((6 + 8 + 10) * 0.5);
}
@@ -144,7 +144,7 @@ TEST("arrays to alloc considers used elements across all active buffers of same
{
Fixture f(Setup().used(6 * 4));
f.assertArraysToAlloc(6 * 0.5);
- f.add_setup(Setup().used(8 * 4).resizing(true));
+ f.add_setup(Setup().used(8 * 4).resizing(true).bufferId(2));
f.assertArraysToAlloc(8 + (6 + 8) * 0.5);
}
@@ -152,9 +152,9 @@ TEST("arrays to alloc considers (and subtracts) dead elements across all active
{
Fixture f(Setup().used(6 * 4).dead(2 * 4));
f.assertArraysToAlloc((6 - 2) * 0.5);
- f.add_setup(Setup().used(12 * 4).dead(4 * 4));
+ f.add_setup(Setup().used(12 * 4).dead(4 * 4).bufferId(2));
f.assertArraysToAlloc((6 - 2 + 12 - 4) * 0.5);
- f.add_setup(Setup().used(20 * 4).dead(6 * 4));
+ f.add_setup(Setup().used(20 * 4).dead(6 * 4).bufferId(3));
f.assertArraysToAlloc((6 - 2 + 12 - 4 + 20 - 6) * 0.5);
}
@@ -162,7 +162,7 @@ TEST("arrays to alloc considers (and subtracts) dead elements across all active
{
Fixture f(Setup().used(6 * 4).dead(2 * 4));
f.assertArraysToAlloc((6 - 2) * 0.5);
- f.add_setup(Setup().used(12 * 4).dead(4 * 4).resizing(true));
+ f.add_setup(Setup().used(12 * 4).dead(4 * 4).resizing(true).bufferId(2));
f.assertArraysToAlloc(12 + (6 - 2 + 12 - 4) * 0.5);
}
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
index 548ab9199da..1c4817ea35f 100644
--- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -55,6 +55,7 @@ public:
using GrowthStats = std::vector<int>;
+using BufferStats = std::vector<int>;
constexpr float ALLOC_GROW_FACTOR = 0.4;
constexpr size_t HUGE_PAGE_ARRAY_SIZE = (MemoryAllocator::HUGEPAGE_SIZE / sizeof(int));
@@ -123,6 +124,19 @@ public:
++i;
}
}
+ BufferStats getBuffers(size_t bufs) {
+ BufferStats buffers;
+ while (buffers.size() < bufs) {
+ RefType iRef = (_type.getArraySize() == 1) ?
+ (_store.template allocator<DataType>(_typeId).alloc().ref) :
+ (_store.template allocator<DataType>(_typeId).allocArray(_type.getArraySize()).ref);
+ int buffer_id = iRef.bufferId();
+ if (buffers.empty() || buffers.back() != buffer_id) {
+ buffers.push_back(buffer_id);
+ }
+ }
+ return buffers;
+ }
vespalib::MemoryUsage getMemoryUsage() const { return _store.getMemoryUsage(); }
};
@@ -563,7 +577,7 @@ TEST(DataStoreTest, require_that_buffer_growth_works)
assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
{ 4 }, 20, 4, 0);
// Resize if buffer size is less than 4, min size 0
- assertGrowStats({ 4, 4, 8, 32, 32, 32, 64, 128, 128, 128 },
+ assertGrowStats({ 4, 4, 8, 32, 32, 64, 64, 128, 128, 128 },
{ 0, 1, 2, 4 }, 4, 0, 4);
// Always switch to new buffer, min size 16
assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
@@ -580,7 +594,7 @@ TEST(DataStoreTest, require_that_buffer_growth_works)
// Buffers with sizes larger than the huge page size of the mmap allocator.
ASSERT_EQ(524288u, HUGE_PAGE_ARRAY_SIZE);
- assertGrowStats({ 262144, 524288, 524288, 524288 * 3, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5, 524288 * 5, 524288 * 5 },
+ assertGrowStats({ 262144, 524288, 524288, 524288 * 3, 524288 * 3, 524288 * 5, 524288 * 5, 524288 * 5, 524288 * 5, 524288 * 5 },
{ 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
4, 0, HUGE_PAGE_ARRAY_SIZE / 2, HUGE_PAGE_ARRAY_SIZE * 5);
}
@@ -615,10 +629,10 @@ TEST(DataStoreTest, require_that_offset_in_EntryRefT_is_within_bounds_when_alloc
* The max bytes to alloc is: maxArrays * arraySize * elementSize.
*/
assertGrowStats<uint8_t>({8192,16384,16384,65536,65536,98304,98304,98304,98304,98304,98304,98304}, 3);
- assertGrowStats<uint8_t>({16384,16384,65536,65536,65536,131072,163840,163840,163840,163840,163840,163840}, 5);
+ assertGrowStats<uint8_t>({16384,16384,65536,65536,131072,131072,163840,163840,163840,163840,163840,163840}, 5);
assertGrowStats<uint8_t>({16384,32768,32768,131072,131072,229376,229376,229376,229376,229376,229376,229376}, 7);
assertGrowStats<uint32_t>({8192,16384,16384,65536,65536,98304,98304,98304,98304,98304,98304,98304}, 3);
- assertGrowStats<uint32_t>({16384,16384,65536,65536,65536,131072,163840,163840,163840,163840,163840,163840}, 5);
+ assertGrowStats<uint32_t>({16384,16384,65536,65536,131072,131072,163840,163840,163840,163840,163840,163840}, 5);
assertGrowStats<uint32_t>({16384,32768,32768,131072,131072,229376,229376,229376,229376,229376,229376,229376}, 7);
}
@@ -666,8 +680,24 @@ TEST(DataStoreTest, can_set_memory_allocator)
EXPECT_EQ(AllocStats(3, 3), stats);
}
+namespace {
+
+void
+assertBuffers(BufferStats exp_buffers, size_t num_arrays_for_new_buffer)
+{
+ EXPECT_EQ(exp_buffers, IntGrowStore(1, 1, 1024, num_arrays_for_new_buffer).getBuffers(exp_buffers.size()));
+}
+
+}
+
+TEST(DataStoreTest, can_reuse_active_buffer_as_primary_buffer)
+{
+ assertBuffers({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 0);
+ assertBuffers({ 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3}, 16);
+}
+
TEST(DataStoreTest, control_static_sizes) {
- EXPECT_EQ(72, sizeof(BufferTypeBase));
+ EXPECT_EQ(96, sizeof(BufferTypeBase));
EXPECT_EQ(32, sizeof(BufferState::FreeList));
EXPECT_EQ(1, sizeof(BufferState::State));
EXPECT_EQ(144, sizeof(BufferState));
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp b/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp
index eb5865cd68c..068c58a163c 100644
--- a/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp
@@ -32,10 +32,10 @@ BufferTypeBase::BufferTypeBase(uint32_t arraySize,
_maxArrays(maxArrays),
_numArraysForNewBuffer(std::min(numArraysForNewBuffer, maxArrays)),
_allocGrowFactor(allocGrowFactor),
- _activeBuffers(0),
_holdBuffers(0),
_holdUsedElems(0),
- _aggr_counts()
+ _aggr_counts(),
+ _active_buffers()
{
}
@@ -48,10 +48,10 @@ BufferTypeBase::BufferTypeBase(uint32_t arraySize,
BufferTypeBase::~BufferTypeBase()
{
- assert(_activeBuffers == 0);
assert(_holdBuffers == 0);
assert(_holdUsedElems == 0);
assert(_aggr_counts.empty());
+ assert(_active_buffers.empty());
}
ElemCount
@@ -63,8 +63,9 @@ BufferTypeBase::getReservedElements(uint32_t bufferId) const
void
BufferTypeBase::onActive(uint32_t bufferId, ElemCount* usedElems, ElemCount* deadElems, void* buffer)
{
- ++_activeBuffers;
_aggr_counts.add_buffer(usedElems, deadElems);
+ assert(std::find(_active_buffers.begin(), _active_buffers.end(), bufferId) == _active_buffers.end());
+ _active_buffers.emplace_back(bufferId);
size_t reservedElems = getReservedElements(bufferId);
if (reservedElems != 0u) {
initializeReservedElements(buffer, reservedElems);
@@ -74,10 +75,12 @@ BufferTypeBase::onActive(uint32_t bufferId, ElemCount* usedElems, ElemCount* dea
}
void
-BufferTypeBase::onHold(const ElemCount* usedElems, const ElemCount* deadElems)
+BufferTypeBase::onHold(uint32_t buffer_id, const ElemCount* usedElems, const ElemCount* deadElems)
{
- --_activeBuffers;
++_holdBuffers;
+ auto itr = std::find(_active_buffers.begin(), _active_buffers.end(), buffer_id);
+ assert(itr != _active_buffers.end());
+ _active_buffers.erase(itr);
_aggr_counts.remove_buffer(usedElems, deadElems);
_holdUsedElems += *usedElems;
}
@@ -90,6 +93,17 @@ BufferTypeBase::onFree(ElemCount usedElems)
_holdUsedElems -= usedElems;
}
+void
+BufferTypeBase::resume_primary_buffer(uint32_t buffer_id, ElemCount* used_elems, ElemCount* dead_elems)
+{
+ auto itr = std::find(_active_buffers.begin(), _active_buffers.end(), buffer_id);
+ assert(itr != _active_buffers.end());
+ _active_buffers.erase(itr);
+ _active_buffers.emplace_back(buffer_id);
+ _aggr_counts.remove_buffer(used_elems, dead_elems);
+ _aggr_counts.add_buffer(used_elems, dead_elems);
+}
+
const alloc::MemoryAllocator*
BufferTypeBase::get_memory_allocator() const
{
@@ -141,10 +155,11 @@ BufferTypeBase::calcArraysToAlloc(uint32_t bufferId, ElemCount elemsNeeded, bool
uint32_t
BufferTypeBase::get_scaled_num_arrays_for_new_buffer() const
{
- if (_activeBuffers <= 1u || _numArraysForNewBuffer == 0u) {
+ uint32_t active_buffers_count = get_active_buffers_count();
+ if (active_buffers_count <= 1u || _numArraysForNewBuffer == 0u) {
return _numArraysForNewBuffer;
}
- double scale_factor = std::pow(1.0 + _allocGrowFactor, _activeBuffers - 1);
+ double scale_factor = std::pow(1.0 + _allocGrowFactor, active_buffers_count - 1);
double scaled_result = _numArraysForNewBuffer * scale_factor;
if (scaled_result >= _maxArrays) {
return _maxArrays;
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_type.h b/vespalib/src/vespa/vespalib/datastore/buffer_type.h
index 1f024743c8b..62d9282aae0 100644
--- a/vespalib/src/vespa/vespalib/datastore/buffer_type.h
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_type.h
@@ -61,8 +61,9 @@ public:
virtual void cleanHold(void *buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx) = 0;
size_t getArraySize() const { return _arraySize; }
virtual void onActive(uint32_t bufferId, ElemCount* usedElems, ElemCount* deadElems, void* buffer);
- void onHold(const ElemCount* usedElems, const ElemCount* deadElems);
+ void onHold(uint32_t buffer_id, const ElemCount* usedElems, const ElemCount* deadElems);
virtual void onFree(ElemCount usedElems);
+ void resume_primary_buffer(uint32_t buffer_id, ElemCount* used_elems, ElemCount* dead_elems);
virtual const alloc::MemoryAllocator* get_memory_allocator() const;
/**
@@ -72,9 +73,11 @@ public:
void clampMaxArrays(uint32_t maxArrays);
- uint32_t getActiveBuffers() const { return _activeBuffers; }
+ uint32_t get_active_buffers_count() const { return _active_buffers.size(); }
+ const std::vector<uint32_t>& get_active_buffers() const noexcept { return _active_buffers; }
size_t getMaxArrays() const { return _maxArrays; }
uint32_t get_scaled_num_arrays_for_new_buffer() const;
+ uint32_t get_num_arrays_for_new_buffer() const noexcept { return _numArraysForNewBuffer; }
protected:
struct BufferCounts {
@@ -120,6 +123,7 @@ protected:
uint32_t _holdBuffers;
size_t _holdUsedElems; // Number of used elements in all held buffers for this type.
AggregatedBufferCounts _aggr_counts;
+ std::vector<uint32_t> _active_buffers;
};
/**
diff --git a/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp b/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
index 68606c5ea19..704fabffe25 100644
--- a/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
@@ -138,7 +138,7 @@ BufferState::onActive(uint32_t bufferId, uint32_t typeId,
void
-BufferState::onHold()
+BufferState::onHold(uint32_t buffer_id)
{
assert(_state == ACTIVE);
assert(_typeHandler != nullptr);
@@ -148,7 +148,7 @@ BufferState::onHold()
assert(_holdElems <= (_usedElems - _deadElems));
_deadElems = 0;
_holdElems = _usedElems; // Put everyting on hold
- _typeHandler->onHold(&_usedElems, &_deadElems);
+ _typeHandler->onHold(buffer_id, &_usedElems, &_deadElems);
if ( ! isFreeListEmpty()) {
removeFromFreeListList();
FreeList().swap(_freeList);
@@ -191,7 +191,7 @@ BufferState::onFree(void *&buffer)
void
-BufferState::dropBuffer(void *&buffer)
+BufferState::dropBuffer(uint32_t buffer_id, void *&buffer)
{
if (_state == FREE) {
assert(buffer == nullptr);
@@ -199,7 +199,7 @@ BufferState::dropBuffer(void *&buffer)
}
assert(buffer != nullptr || _allocElems == 0);
if (_state == ACTIVE) {
- onHold();
+ onHold(buffer_id);
}
if (_state == HOLD) {
onFree(buffer);
@@ -301,5 +301,11 @@ BufferState::fallbackResize(uint32_t bufferId,
std::atomic_thread_fence(std::memory_order_release);
}
+void
+BufferState::resume_primary_buffer(uint32_t buffer_id)
+{
+ _typeHandler->resume_primary_buffer(buffer_id, &_usedElems, &_deadElems);
+}
+
}
diff --git a/vespalib/src/vespa/vespalib/datastore/bufferstate.h b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
index f8738f17daa..e95bef6f459 100644
--- a/vespalib/src/vespa/vespalib/datastore/bufferstate.h
+++ b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
@@ -99,7 +99,7 @@ public:
/**
* Transition from ACTIVE to HOLD state.
*/
- void onHold();
+ void onHold(uint32_t buffer_id);
/**
* Transition from HOLD to FREE state.
@@ -157,7 +157,7 @@ public:
void cleanHold(void *buffer, size_t offset, ElemCount numElems) {
_typeHandler->cleanHold(buffer, offset, numElems, BufferTypeBase::CleanContext(_extraUsedBytes, _extraHoldBytes));
}
- void dropBuffer(void *&buffer);
+ void dropBuffer(uint32_t buffer_id, void *&buffer);
uint32_t getTypeId() const { return _typeId; }
uint32_t getArraySize() const { return _arraySize; }
size_t getDeadElems() const { return _deadElems; }
@@ -192,6 +192,7 @@ public:
FreeList &freeList() { return _freeList; }
const FreeListList *freeListList() const { return _freeListList; }
FreeListList *freeListList() { return _freeListList; }
+ void resume_primary_buffer(uint32_t buffer_id);
};
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
index aff26fc7b84..008ab0f0af9 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
@@ -120,6 +120,47 @@ DataStoreBase::switch_primary_buffer(uint32_t typeId, size_t elemsNeeded)
_primary_buffer_ids[typeId] = buffer_id;
}
+bool
+DataStoreBase::consider_grow_active_buffer(uint32_t type_id, size_t elems_needed)
+{
+ auto type_handler = _typeHandlers[type_id];
+ uint32_t buffer_id = _primary_buffer_ids[type_id];
+ uint32_t active_buffers_count = type_handler->get_active_buffers_count();
+ constexpr uint32_t min_active_buffers = 4u;
+ if (active_buffers_count < min_active_buffers) {
+ return false;
+ }
+ if (type_handler->get_num_arrays_for_new_buffer() == 0u) {
+ return false;
+ }
+ assert(!_states[buffer_id].getCompacting());
+ uint32_t min_buffer_id = buffer_id;
+ size_t min_used = _states[buffer_id].size();
+ uint32_t checked_active_buffers = 1u;
+ for (auto &alt_buffer_id : type_handler->get_active_buffers()) {
+ if (alt_buffer_id != buffer_id && !_states[alt_buffer_id].getCompacting()) {
+ ++checked_active_buffers;
+ if (_states[alt_buffer_id].size() < min_used) {
+ min_buffer_id = alt_buffer_id;
+ min_used = _states[alt_buffer_id].size();
+ }
+ }
+ }
+ if (checked_active_buffers < min_active_buffers) {
+ return false;
+ }
+ auto array_size = type_handler->getArraySize();
+ if (elems_needed + min_used > type_handler->getMaxArrays() * array_size) {
+ return false;
+ }
+ if (min_buffer_id != buffer_id) {
+ // Resume another active buffer for same type as primary buffer
+ _primary_buffer_ids[type_id] = min_buffer_id;
+ _states[min_buffer_id].resume_primary_buffer(min_buffer_id);
+ }
+ return true;
+}
+
void
DataStoreBase::switch_or_grow_primary_buffer(uint32_t typeId, size_t elemsNeeded)
{
@@ -129,8 +170,11 @@ DataStoreBase::switch_or_grow_primary_buffer(uint32_t typeId, size_t elemsNeeded
size_t numEntriesForNewBuffer = numArraysForNewBuffer * arraySize;
uint32_t bufferId = _primary_buffer_ids[typeId];
if (elemsNeeded + _states[bufferId].size() >= numEntriesForNewBuffer) {
- // Don't try to resize existing buffer, new buffer will be large enough
- switch_primary_buffer(typeId, elemsNeeded);
+ if (consider_grow_active_buffer(typeId, elemsNeeded)) {
+ fallbackResize(_primary_buffer_ids[typeId], elemsNeeded);
+ } else {
+ switch_primary_buffer(typeId, elemsNeeded);
+ }
} else {
fallbackResize(bufferId, elemsNeeded);
}
@@ -214,7 +258,7 @@ DataStoreBase::dropBuffers()
{
uint32_t numBuffers = _buffers.size();
for (uint32_t bufferId = 0; bufferId < numBuffers; ++bufferId) {
- _states[bufferId].dropBuffer(_buffers[bufferId].getBuffer());
+ _states[bufferId].dropBuffer(bufferId, _buffers[bufferId].getBuffer());
}
_genHolder.clearHoldLists();
}
@@ -234,7 +278,7 @@ DataStoreBase::getMemoryUsage() const
void
DataStoreBase::holdBuffer(uint32_t bufferId)
{
- _states[bufferId].onHold();
+ _states[bufferId].onHold(bufferId);
size_t holdBytes = 0u; // getMemStats() still accounts held buffers
GenerationHeldBase::UP hold(new BufferHold(holdBytes, *this, bufferId));
_genHolder.hold(std::move(hold));
@@ -429,8 +473,8 @@ DataStoreBase::startCompactWorstBuffer(uint32_t typeId)
{
uint32_t buffer_id = get_primary_buffer_id(typeId);
const BufferTypeBase *typeHandler = _typeHandlers[typeId];
- assert(typeHandler->getActiveBuffers() >= 1u);
- if (typeHandler->getActiveBuffers() == 1u) {
+ assert(typeHandler->get_active_buffers_count() >= 1u);
+ if (typeHandler->get_active_buffers_count() == 1u) {
// Single active buffer for type, no need for scan
_states[buffer_id].setCompacting();
_states[buffer_id].disableElemHoldList();
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
index 5a50dba6a3c..97ead502408 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
@@ -232,6 +232,7 @@ public:
*/
void switch_primary_buffer(uint32_t typeId, size_t elemsNeeded);
+ bool consider_grow_active_buffer(uint32_t type_id, size_t elems_needed);
void switch_or_grow_primary_buffer(uint32_t typeId, size_t elemsNeeded);
vespalib::MemoryUsage getMemoryUsage() const;