author    Henning Baldersheim <balder@yahoo-inc.com>  2021-08-15 17:15:24 +0200
committer GitHub <noreply@github.com>                 2021-08-15 17:15:24 +0200
commit    e536003faa53fd4df212010357d2327946122c14
tree      1d86d8513b81b8406c13cb6bbce32183fc79b938
parent    a97c478777f7c254d772df406b2afa0b6735261c
parent    64070ae21de63b73f936dca24719c305e52265f6

Merge pull request #18742 from vespa-engine/revert-18735-toregge/consider-reusing-active-buffer

Revert "Consider reusing active buffer."
 vespalib/src/tests/datastore/array_store/array_store_test.cpp |   6
 vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp |  14
 vespalib/src/tests/datastore/datastore/datastore_test.cpp     |  40
 vespalib/src/vespa/vespalib/datastore/buffer_type.cpp         |  31
 vespalib/src/vespa/vespalib/datastore/buffer_type.h           |   8
 vespalib/src/vespa/vespalib/datastore/bufferstate.cpp         |  14
 vespalib/src/vespa/vespalib/datastore/bufferstate.h           |   5
 vespalib/src/vespa/vespalib/datastore/datastorebase.cpp       |  56
 vespalib/src/vespa/vespalib/datastore/datastorebase.h         |   1
 9 files changed, 37 insertions(+), 138 deletions(-)
diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
index 562ecaaecfa..417d8b80d87 100644
--- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp
+++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
@@ -150,13 +150,13 @@ TEST("require that we test with trivial and non-trivial types")
TEST_F("control static sizes", NumberFixture(3)) {
#ifdef _LIBCPP_VERSION
- EXPECT_EQUAL(424u, sizeof(f.store));
+ EXPECT_EQUAL(400u, sizeof(f.store));
EXPECT_EQUAL(296u, sizeof(NumberFixture::ArrayStoreType::DataStoreType));
#else
- EXPECT_EQUAL(456u, sizeof(f.store));
+ EXPECT_EQUAL(432u, sizeof(f.store));
EXPECT_EQUAL(328u, sizeof(NumberFixture::ArrayStoreType::DataStoreType));
#endif
- EXPECT_EQUAL(96u, sizeof(NumberFixture::ArrayStoreType::SmallArrayType));
+ EXPECT_EQUAL(72u, sizeof(NumberFixture::ArrayStoreType::SmallArrayType));
MemoryUsage usage = f.store.getMemoryUsage();
EXPECT_EQUAL(960u, usage.allocatedBytes());
EXPECT_EQUAL(32u, usage.usedBytes());
diff --git a/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
index 4cd192f602f..414c35864ac 100644
--- a/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
+++ b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
@@ -49,7 +49,7 @@ struct Fixture {
}
~Fixture() {
for (auto& setup : setups) {
- bufferType.onHold(setup._bufferId, &setup._usedElems, &setup._deadElems);
+ bufferType.onHold(&setup._usedElems, &setup._deadElems);
bufferType.onFree(setup._usedElems);
}
}
@@ -134,9 +134,9 @@ TEST("arrays to alloc considers used elements across all active buffers of same
{
Fixture f(Setup().used(6 * 4));
f.assertArraysToAlloc(6 * 0.5);
- f.add_setup(Setup().used(8 * 4).bufferId(2));
+ f.add_setup(Setup().used(8 * 4));
f.assertArraysToAlloc((6 + 8) * 0.5);
- f.add_setup(Setup().used(10 * 4).bufferId(3));
+ f.add_setup(Setup().used(10 * 4));
f.assertArraysToAlloc((6 + 8 + 10) * 0.5);
}
@@ -144,7 +144,7 @@ TEST("arrays to alloc considers used elements across all active buffers of same
{
Fixture f(Setup().used(6 * 4));
f.assertArraysToAlloc(6 * 0.5);
- f.add_setup(Setup().used(8 * 4).resizing(true).bufferId(2));
+ f.add_setup(Setup().used(8 * 4).resizing(true));
f.assertArraysToAlloc(8 + (6 + 8) * 0.5);
}
@@ -152,9 +152,9 @@ TEST("arrays to alloc considers (and subtracts) dead elements across all active
{
Fixture f(Setup().used(6 * 4).dead(2 * 4));
f.assertArraysToAlloc((6 - 2) * 0.5);
- f.add_setup(Setup().used(12 * 4).dead(4 * 4).bufferId(2));
+ f.add_setup(Setup().used(12 * 4).dead(4 * 4));
f.assertArraysToAlloc((6 - 2 + 12 - 4) * 0.5);
- f.add_setup(Setup().used(20 * 4).dead(6 * 4).bufferId(3));
+ f.add_setup(Setup().used(20 * 4).dead(6 * 4));
f.assertArraysToAlloc((6 - 2 + 12 - 4 + 20 - 6) * 0.5);
}
@@ -162,7 +162,7 @@ TEST("arrays to alloc considers (and subtracts) dead elements across all active
{
Fixture f(Setup().used(6 * 4).dead(2 * 4));
f.assertArraysToAlloc((6 - 2) * 0.5);
- f.add_setup(Setup().used(12 * 4).dead(4 * 4).resizing(true).bufferId(2));
+ f.add_setup(Setup().used(12 * 4).dead(4 * 4).resizing(true));
f.assertArraysToAlloc(12 + (6 - 2 + 12 - 4) * 0.5);
}
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
index 1c4817ea35f..548ab9199da 100644
--- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -55,7 +55,6 @@ public:
using GrowthStats = std::vector<int>;
-using BufferStats = std::vector<int>;
constexpr float ALLOC_GROW_FACTOR = 0.4;
constexpr size_t HUGE_PAGE_ARRAY_SIZE = (MemoryAllocator::HUGEPAGE_SIZE / sizeof(int));
@@ -124,19 +123,6 @@ public:
++i;
}
}
- BufferStats getBuffers(size_t bufs) {
- BufferStats buffers;
- while (buffers.size() < bufs) {
- RefType iRef = (_type.getArraySize() == 1) ?
- (_store.template allocator<DataType>(_typeId).alloc().ref) :
- (_store.template allocator<DataType>(_typeId).allocArray(_type.getArraySize()).ref);
- int buffer_id = iRef.bufferId();
- if (buffers.empty() || buffers.back() != buffer_id) {
- buffers.push_back(buffer_id);
- }
- }
- return buffers;
- }
vespalib::MemoryUsage getMemoryUsage() const { return _store.getMemoryUsage(); }
};
@@ -577,7 +563,7 @@ TEST(DataStoreTest, require_that_buffer_growth_works)
assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
{ 4 }, 20, 4, 0);
// Resize if buffer size is less than 4, min size 0
- assertGrowStats({ 4, 4, 8, 32, 32, 64, 64, 128, 128, 128 },
+ assertGrowStats({ 4, 4, 8, 32, 32, 32, 64, 128, 128, 128 },
{ 0, 1, 2, 4 }, 4, 0, 4);
// Always switch to new buffer, min size 16
assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
@@ -594,7 +580,7 @@ TEST(DataStoreTest, require_that_buffer_growth_works)
// Buffers with sizes larger than the huge page size of the mmap allocator.
ASSERT_EQ(524288u, HUGE_PAGE_ARRAY_SIZE);
- assertGrowStats({ 262144, 524288, 524288, 524288 * 3, 524288 * 3, 524288 * 5, 524288 * 5, 524288 * 5, 524288 * 5, 524288 * 5 },
+ assertGrowStats({ 262144, 524288, 524288, 524288 * 3, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5, 524288 * 5, 524288 * 5 },
{ 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
4, 0, HUGE_PAGE_ARRAY_SIZE / 2, HUGE_PAGE_ARRAY_SIZE * 5);
}
@@ -629,10 +615,10 @@ TEST(DataStoreTest, require_that_offset_in_EntryRefT_is_within_bounds_when_alloc
* The max bytes to alloc is: maxArrays * arraySize * elementSize.
*/
assertGrowStats<uint8_t>({8192,16384,16384,65536,65536,98304,98304,98304,98304,98304,98304,98304}, 3);
- assertGrowStats<uint8_t>({16384,16384,65536,65536,131072,131072,163840,163840,163840,163840,163840,163840}, 5);
+ assertGrowStats<uint8_t>({16384,16384,65536,65536,65536,131072,163840,163840,163840,163840,163840,163840}, 5);
assertGrowStats<uint8_t>({16384,32768,32768,131072,131072,229376,229376,229376,229376,229376,229376,229376}, 7);
assertGrowStats<uint32_t>({8192,16384,16384,65536,65536,98304,98304,98304,98304,98304,98304,98304}, 3);
- assertGrowStats<uint32_t>({16384,16384,65536,65536,131072,131072,163840,163840,163840,163840,163840,163840}, 5);
+ assertGrowStats<uint32_t>({16384,16384,65536,65536,65536,131072,163840,163840,163840,163840,163840,163840}, 5);
assertGrowStats<uint32_t>({16384,32768,32768,131072,131072,229376,229376,229376,229376,229376,229376,229376}, 7);
}
@@ -680,24 +666,8 @@ TEST(DataStoreTest, can_set_memory_allocator)
EXPECT_EQ(AllocStats(3, 3), stats);
}
-namespace {
-
-void
-assertBuffers(BufferStats exp_buffers, size_t num_arrays_for_new_buffer)
-{
- EXPECT_EQ(exp_buffers, IntGrowStore(1, 1, 1024, num_arrays_for_new_buffer).getBuffers(exp_buffers.size()));
-}
-
-}
-
-TEST(DataStoreTest, can_reuse_active_buffer_as_primary_buffer)
-{
- assertBuffers({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 0);
- assertBuffers({ 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3}, 16);
-}
-
TEST(DataStoreTest, control_static_sizes) {
- EXPECT_EQ(96, sizeof(BufferTypeBase));
+ EXPECT_EQ(72, sizeof(BufferTypeBase));
EXPECT_EQ(32, sizeof(BufferState::FreeList));
EXPECT_EQ(1, sizeof(BufferState::State));
EXPECT_EQ(144, sizeof(BufferState));
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp b/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp
index 068c58a163c..eb5865cd68c 100644
--- a/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp
@@ -32,10 +32,10 @@ BufferTypeBase::BufferTypeBase(uint32_t arraySize,
_maxArrays(maxArrays),
_numArraysForNewBuffer(std::min(numArraysForNewBuffer, maxArrays)),
_allocGrowFactor(allocGrowFactor),
+ _activeBuffers(0),
_holdBuffers(0),
_holdUsedElems(0),
- _aggr_counts(),
- _active_buffers()
+ _aggr_counts()
{
}
@@ -48,10 +48,10 @@ BufferTypeBase::BufferTypeBase(uint32_t arraySize,
BufferTypeBase::~BufferTypeBase()
{
+ assert(_activeBuffers == 0);
assert(_holdBuffers == 0);
assert(_holdUsedElems == 0);
assert(_aggr_counts.empty());
- assert(_active_buffers.empty());
}
ElemCount
@@ -63,9 +63,8 @@ BufferTypeBase::getReservedElements(uint32_t bufferId) const
void
BufferTypeBase::onActive(uint32_t bufferId, ElemCount* usedElems, ElemCount* deadElems, void* buffer)
{
+ ++_activeBuffers;
_aggr_counts.add_buffer(usedElems, deadElems);
- assert(std::find(_active_buffers.begin(), _active_buffers.end(), bufferId) == _active_buffers.end());
- _active_buffers.emplace_back(bufferId);
size_t reservedElems = getReservedElements(bufferId);
if (reservedElems != 0u) {
initializeReservedElements(buffer, reservedElems);
@@ -75,12 +74,10 @@ BufferTypeBase::onActive(uint32_t bufferId, ElemCount* usedElems, ElemCount* dea
}
void
-BufferTypeBase::onHold(uint32_t buffer_id, const ElemCount* usedElems, const ElemCount* deadElems)
+BufferTypeBase::onHold(const ElemCount* usedElems, const ElemCount* deadElems)
{
+ --_activeBuffers;
++_holdBuffers;
- auto itr = std::find(_active_buffers.begin(), _active_buffers.end(), buffer_id);
- assert(itr != _active_buffers.end());
- _active_buffers.erase(itr);
_aggr_counts.remove_buffer(usedElems, deadElems);
_holdUsedElems += *usedElems;
}
@@ -93,17 +90,6 @@ BufferTypeBase::onFree(ElemCount usedElems)
_holdUsedElems -= usedElems;
}
-void
-BufferTypeBase::resume_primary_buffer(uint32_t buffer_id, ElemCount* used_elems, ElemCount* dead_elems)
-{
- auto itr = std::find(_active_buffers.begin(), _active_buffers.end(), buffer_id);
- assert(itr != _active_buffers.end());
- _active_buffers.erase(itr);
- _active_buffers.emplace_back(buffer_id);
- _aggr_counts.remove_buffer(used_elems, dead_elems);
- _aggr_counts.add_buffer(used_elems, dead_elems);
-}
-
const alloc::MemoryAllocator*
BufferTypeBase::get_memory_allocator() const
{
@@ -155,11 +141,10 @@ BufferTypeBase::calcArraysToAlloc(uint32_t bufferId, ElemCount elemsNeeded, bool
uint32_t
BufferTypeBase::get_scaled_num_arrays_for_new_buffer() const
{
- uint32_t active_buffers_count = get_active_buffers_count();
- if (active_buffers_count <= 1u || _numArraysForNewBuffer == 0u) {
+ if (_activeBuffers <= 1u || _numArraysForNewBuffer == 0u) {
return _numArraysForNewBuffer;
}
- double scale_factor = std::pow(1.0 + _allocGrowFactor, active_buffers_count - 1);
+ double scale_factor = std::pow(1.0 + _allocGrowFactor, _activeBuffers - 1);
double scaled_result = _numArraysForNewBuffer * scale_factor;
if (scaled_result >= _maxArrays) {
return _maxArrays;
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_type.h b/vespalib/src/vespa/vespalib/datastore/buffer_type.h
index 62d9282aae0..1f024743c8b 100644
--- a/vespalib/src/vespa/vespalib/datastore/buffer_type.h
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_type.h
@@ -61,9 +61,8 @@ public:
virtual void cleanHold(void *buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx) = 0;
size_t getArraySize() const { return _arraySize; }
virtual void onActive(uint32_t bufferId, ElemCount* usedElems, ElemCount* deadElems, void* buffer);
- void onHold(uint32_t buffer_id, const ElemCount* usedElems, const ElemCount* deadElems);
+ void onHold(const ElemCount* usedElems, const ElemCount* deadElems);
virtual void onFree(ElemCount usedElems);
- void resume_primary_buffer(uint32_t buffer_id, ElemCount* used_elems, ElemCount* dead_elems);
virtual const alloc::MemoryAllocator* get_memory_allocator() const;
/**
@@ -73,11 +72,9 @@ public:
void clampMaxArrays(uint32_t maxArrays);
- uint32_t get_active_buffers_count() const { return _active_buffers.size(); }
- const std::vector<uint32_t>& get_active_buffers() const noexcept { return _active_buffers; }
+ uint32_t getActiveBuffers() const { return _activeBuffers; }
size_t getMaxArrays() const { return _maxArrays; }
uint32_t get_scaled_num_arrays_for_new_buffer() const;
- uint32_t get_num_arrays_for_new_buffer() const noexcept { return _numArraysForNewBuffer; }
protected:
struct BufferCounts {
@@ -123,7 +120,6 @@ protected:
uint32_t _holdBuffers;
size_t _holdUsedElems; // Number of used elements in all held buffers for this type.
AggregatedBufferCounts _aggr_counts;
- std::vector<uint32_t> _active_buffers;
};
/**
diff --git a/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp b/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
index 704fabffe25..68606c5ea19 100644
--- a/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/bufferstate.cpp
@@ -138,7 +138,7 @@ BufferState::onActive(uint32_t bufferId, uint32_t typeId,
void
-BufferState::onHold(uint32_t buffer_id)
+BufferState::onHold()
{
assert(_state == ACTIVE);
assert(_typeHandler != nullptr);
@@ -148,7 +148,7 @@ BufferState::onHold(uint32_t buffer_id)
assert(_holdElems <= (_usedElems - _deadElems));
_deadElems = 0;
_holdElems = _usedElems; // Put everyting on hold
- _typeHandler->onHold(buffer_id, &_usedElems, &_deadElems);
+ _typeHandler->onHold(&_usedElems, &_deadElems);
if ( ! isFreeListEmpty()) {
removeFromFreeListList();
FreeList().swap(_freeList);
@@ -191,7 +191,7 @@ BufferState::onFree(void *&buffer)
void
-BufferState::dropBuffer(uint32_t buffer_id, void *&buffer)
+BufferState::dropBuffer(void *&buffer)
{
if (_state == FREE) {
assert(buffer == nullptr);
@@ -199,7 +199,7 @@ BufferState::dropBuffer(uint32_t buffer_id, void *&buffer)
}
assert(buffer != nullptr || _allocElems == 0);
if (_state == ACTIVE) {
- onHold(buffer_id);
+ onHold();
}
if (_state == HOLD) {
onFree(buffer);
@@ -301,11 +301,5 @@ BufferState::fallbackResize(uint32_t bufferId,
std::atomic_thread_fence(std::memory_order_release);
}
-void
-BufferState::resume_primary_buffer(uint32_t buffer_id)
-{
- _typeHandler->resume_primary_buffer(buffer_id, &_usedElems, &_deadElems);
-}
-
}
diff --git a/vespalib/src/vespa/vespalib/datastore/bufferstate.h b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
index e95bef6f459..f8738f17daa 100644
--- a/vespalib/src/vespa/vespalib/datastore/bufferstate.h
+++ b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
@@ -99,7 +99,7 @@ public:
/**
* Transition from ACTIVE to HOLD state.
*/
- void onHold(uint32_t buffer_id);
+ void onHold();
/**
* Transition from HOLD to FREE state.
@@ -157,7 +157,7 @@ public:
void cleanHold(void *buffer, size_t offset, ElemCount numElems) {
_typeHandler->cleanHold(buffer, offset, numElems, BufferTypeBase::CleanContext(_extraUsedBytes, _extraHoldBytes));
}
- void dropBuffer(uint32_t buffer_id, void *&buffer);
+ void dropBuffer(void *&buffer);
uint32_t getTypeId() const { return _typeId; }
uint32_t getArraySize() const { return _arraySize; }
size_t getDeadElems() const { return _deadElems; }
@@ -192,7 +192,6 @@ public:
FreeList &freeList() { return _freeList; }
const FreeListList *freeListList() const { return _freeListList; }
FreeListList *freeListList() { return _freeListList; }
- void resume_primary_buffer(uint32_t buffer_id);
};
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
index 008ab0f0af9..aff26fc7b84 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
@@ -120,47 +120,6 @@ DataStoreBase::switch_primary_buffer(uint32_t typeId, size_t elemsNeeded)
_primary_buffer_ids[typeId] = buffer_id;
}
-bool
-DataStoreBase::consider_grow_active_buffer(uint32_t type_id, size_t elems_needed)
-{
- auto type_handler = _typeHandlers[type_id];
- uint32_t buffer_id = _primary_buffer_ids[type_id];
- uint32_t active_buffers_count = type_handler->get_active_buffers_count();
- constexpr uint32_t min_active_buffers = 4u;
- if (active_buffers_count < min_active_buffers) {
- return false;
- }
- if (type_handler->get_num_arrays_for_new_buffer() == 0u) {
- return false;
- }
- assert(!_states[buffer_id].getCompacting());
- uint32_t min_buffer_id = buffer_id;
- size_t min_used = _states[buffer_id].size();
- uint32_t checked_active_buffers = 1u;
- for (auto &alt_buffer_id : type_handler->get_active_buffers()) {
- if (alt_buffer_id != buffer_id && !_states[alt_buffer_id].getCompacting()) {
- ++checked_active_buffers;
- if (_states[alt_buffer_id].size() < min_used) {
- min_buffer_id = alt_buffer_id;
- min_used = _states[alt_buffer_id].size();
- }
- }
- }
- if (checked_active_buffers < min_active_buffers) {
- return false;
- }
- auto array_size = type_handler->getArraySize();
- if (elems_needed + min_used > type_handler->getMaxArrays() * array_size) {
- return false;
- }
- if (min_buffer_id != buffer_id) {
- // Resume another active buffer for same type as primary buffer
- _primary_buffer_ids[type_id] = min_buffer_id;
- _states[min_buffer_id].resume_primary_buffer(min_buffer_id);
- }
- return true;
-}
-
void
DataStoreBase::switch_or_grow_primary_buffer(uint32_t typeId, size_t elemsNeeded)
{
@@ -170,11 +129,8 @@ DataStoreBase::switch_or_grow_primary_buffer(uint32_t typeId, size_t elemsNeeded
size_t numEntriesForNewBuffer = numArraysForNewBuffer * arraySize;
uint32_t bufferId = _primary_buffer_ids[typeId];
if (elemsNeeded + _states[bufferId].size() >= numEntriesForNewBuffer) {
- if (consider_grow_active_buffer(typeId, elemsNeeded)) {
- fallbackResize(_primary_buffer_ids[typeId], elemsNeeded);
- } else {
- switch_primary_buffer(typeId, elemsNeeded);
- }
+ // Don't try to resize existing buffer, new buffer will be large enough
+ switch_primary_buffer(typeId, elemsNeeded);
} else {
fallbackResize(bufferId, elemsNeeded);
}
@@ -258,7 +214,7 @@ DataStoreBase::dropBuffers()
{
uint32_t numBuffers = _buffers.size();
for (uint32_t bufferId = 0; bufferId < numBuffers; ++bufferId) {
- _states[bufferId].dropBuffer(bufferId, _buffers[bufferId].getBuffer());
+ _states[bufferId].dropBuffer(_buffers[bufferId].getBuffer());
}
_genHolder.clearHoldLists();
}
@@ -278,7 +234,7 @@ DataStoreBase::getMemoryUsage() const
void
DataStoreBase::holdBuffer(uint32_t bufferId)
{
- _states[bufferId].onHold(bufferId);
+ _states[bufferId].onHold();
size_t holdBytes = 0u; // getMemStats() still accounts held buffers
GenerationHeldBase::UP hold(new BufferHold(holdBytes, *this, bufferId));
_genHolder.hold(std::move(hold));
@@ -473,8 +429,8 @@ DataStoreBase::startCompactWorstBuffer(uint32_t typeId)
{
uint32_t buffer_id = get_primary_buffer_id(typeId);
const BufferTypeBase *typeHandler = _typeHandlers[typeId];
- assert(typeHandler->get_active_buffers_count() >= 1u);
- if (typeHandler->get_active_buffers_count() == 1u) {
+ assert(typeHandler->getActiveBuffers() >= 1u);
+ if (typeHandler->getActiveBuffers() == 1u) {
// Single active buffer for type, no need for scan
_states[buffer_id].setCompacting();
_states[buffer_id].disableElemHoldList();
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
index 97ead502408..5a50dba6a3c 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
@@ -232,7 +232,6 @@ public:
*/
void switch_primary_buffer(uint32_t typeId, size_t elemsNeeded);
- bool consider_grow_active_buffer(uint32_t type_id, size_t elems_needed);
void switch_or_grow_primary_buffer(uint32_t typeId, size_t elemsNeeded);
vespalib::MemoryUsage getMemoryUsage() const;
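
A minimal stand-alone sketch, not part of this diff, of the scaling logic that survives the revert in BufferTypeBase::get_scaled_num_arrays_for_new_buffer(): after the revert it is driven by the plain _activeBuffers counter instead of the removed _active_buffers vector. The free function, its parameter names, and the example values in main() are hypothetical; the formula mirrors the retained lines in buffer_type.cpp above.

// sketch: scaling of the new-buffer allocation size with the number of
// active buffers, clamped to the configured maximum (assumed names/values)
#include <cmath>
#include <cstdint>
#include <cstdio>

uint32_t scaled_num_arrays_for_new_buffer(uint32_t active_buffers,
                                          uint32_t num_arrays_for_new_buffer,
                                          uint32_t max_arrays,
                                          double alloc_grow_factor)
{
    // With at most one active buffer, or no growth configured, use the base value.
    if (active_buffers <= 1u || num_arrays_for_new_buffer == 0u) {
        return num_arrays_for_new_buffer;
    }
    // Each additional active buffer scales the allocation by (1 + grow factor).
    double scale_factor = std::pow(1.0 + alloc_grow_factor, active_buffers - 1);
    double scaled_result = num_arrays_for_new_buffer * scale_factor;
    if (scaled_result >= max_arrays) {
        return max_arrays;
    }
    return static_cast<uint32_t>(scaled_result);
}

int main() {
    // Example: grow factor 0.4 (as ALLOC_GROW_FACTOR in datastore_test.cpp),
    // 4 active buffers, base of 16 arrays, cap of 1024 arrays -> prints 43.
    std::printf("%u\n", scaled_num_arrays_for_new_buffer(4, 16, 1024, 0.4));
    return 0;
}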