author     Tor Egge <Tor.Egge@online.no>    2021-08-12 10:58:06 +0200
committer  Tor Egge <Tor.Egge@online.no>    2021-08-12 10:58:06 +0200
commit     2c4b8b74608f1c7bdf7599f145f4a46233e70f21 (patch)
tree       20c65417831b677b78f468d8cae57692b92e8509 /vespalib
parent     d9751d7fe802841eea571956c946e4febf80d0c1 (diff)
Use stats from all active buffers of same type when resizing a buffer.
Diffstat (limited to 'vespalib')
-rw-r--r--  vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp  | 12
-rw-r--r--  vespalib/src/tests/datastore/datastore/datastore_test.cpp      | 20
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/buffer_type.cpp          | 10
3 files changed, 21 insertions, 21 deletions
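Summary of the behavioral change visible in the diffs below: when a buffer is resized, the growth term in BufferTypeBase::calcArraysToAlloc() is now derived from the live (used minus dead) elements aggregated over all active buffers of the same type, instead of only the buffer being resized; the resized buffer's own used elements still form the base that is kept. The non-resizing path already aggregated over all buffers and is unchanged. With grow factor g, the wanted size for a resize thus moves, roughly, from used(last) + g * live(last) to used(last) + g * sum of live elements over all active buffers of the type. A minimal standalone sketch of the revised calculation follows the buffer_type.cpp diff at the bottom of this page.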
diff --git a/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
index d647a659eb6..414c35864ac 100644
--- a/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
+++ b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
@@ -130,7 +130,7 @@ TEST("require that arrays to alloc is capped to min arrays")
TEST_DO(assertArraysToAlloc(17, Setup().used(34 * 4).needed(4).minArrays(16)));
}
-TEST("arrays to alloc considers used elements across all active buffers (no resizing)")
+TEST("arrays to alloc considers used elements across all active buffers of same type (no resizing)")
{
Fixture f(Setup().used(6 * 4));
f.assertArraysToAlloc(6 * 0.5);
@@ -140,15 +140,15 @@ TEST("arrays to alloc considers used elements across all active buffers (no resi
f.assertArraysToAlloc((6 + 8 + 10) * 0.5);
}
-TEST("arrays to alloc only considers used elements in current buffer when resizing")
+TEST("arrays to alloc considers used elements across all active buffers of same type when resizing")
{
Fixture f(Setup().used(6 * 4));
f.assertArraysToAlloc(6 * 0.5);
f.add_setup(Setup().used(8 * 4).resizing(true));
- f.assertArraysToAlloc(8 + 8 * 0.5);
+ f.assertArraysToAlloc(8 + (6 + 8) * 0.5);
}
-TEST("arrays to alloc considers (and subtracts) dead elements across all active buffers (no resizing)")
+TEST("arrays to alloc considers (and subtracts) dead elements across all active buffers of same type (no resizing)")
{
Fixture f(Setup().used(6 * 4).dead(2 * 4));
f.assertArraysToAlloc((6 - 2) * 0.5);
@@ -158,12 +158,12 @@ TEST("arrays to alloc considers (and subtracts) dead elements across all active
f.assertArraysToAlloc((6 - 2 + 12 - 4 + 20 - 6) * 0.5);
}
-TEST("arrays to alloc only considers (and subtracts) dead elements in current buffer when resizing")
+TEST("arrays to alloc considers (and subtracts) dead elements across all active buffers of same type when resizing")
{
Fixture f(Setup().used(6 * 4).dead(2 * 4));
f.assertArraysToAlloc((6 - 2) * 0.5);
f.add_setup(Setup().used(12 * 4).dead(4 * 4).resizing(true));
- f.assertArraysToAlloc(12 + (12 - 4) * 0.5);
+ f.assertArraysToAlloc(12 + (6 - 2 + 12 - 4) * 0.5);
}
TEST_MAIN() { TEST_RUN_ALL(); }
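Worked example for the updated resizing expectations above, assuming the fixture's array size of 4 and the 0.5 grow factor implied by the assertions: in the dead-elements case the buffer being resized still contributes its 12 used arrays as the base, but the growth term now counts the live arrays of both buffers, (6 - 2) + (12 - 4) = 12, so the expectation moves from 12 + (12 - 4) * 0.5 = 16 to 12 + (6 - 2 + 12 - 4) * 0.5 = 18 arrays. The used-only case moves from 8 + 8 * 0.5 = 12 to 8 + (6 + 8) * 0.5 = 15 arrays in the same way.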
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
index c4c61250ebe..548ab9199da 100644
--- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -563,16 +563,16 @@ TEST(DataStoreTest, require_that_buffer_growth_works)
assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
{ 4 }, 20, 4, 0);
// Resize if buffer size is less than 4, min size 0
- assertGrowStats({ 4, 4, 8, 16, 16, 32, 32, 64, 128, 128 },
+ assertGrowStats({ 4, 4, 8, 32, 32, 32, 64, 128, 128, 128 },
{ 0, 1, 2, 4 }, 4, 0, 4);
// Always switch to new buffer, min size 16
assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
{ 16 }, 68, 16, 0);
// Resize if buffer size is less than 16, min size 0
- assertGrowStats({ 16, 32, 32, 64, 64, 128, 128, 128, 128 },
+ assertGrowStats({ 16, 32, 32, 128, 128, 128, 128, 128, 128 },
{ 0, 1, 2, 4, 8, 16 }, 4, 0, 16);
// Resize if buffer size is less than 16, min size 4
- assertGrowStats({ 16, 32, 32, 64, 64, 128, 128, 128, 128 },
+ assertGrowStats({ 16, 32, 32, 128, 128, 128, 128, 128, 128 },
{ 4, 8, 16 }, 20, 4, 16);
// Always switch to new buffer, min size 0
assertGrowStats({ 1, 1, 1, 1, 1, 2, 2, 4, 8, 8, 16, 32 },
@@ -580,7 +580,7 @@ TEST(DataStoreTest, require_that_buffer_growth_works)
// Buffers with sizes larger than the huge page size of the mmap allocator.
ASSERT_EQ(524288u, HUGE_PAGE_ARRAY_SIZE);
- assertGrowStats({ 262144, 524288, 524288, 524288 * 2, 524288 * 2, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5, 524288 * 5 },
+ assertGrowStats({ 262144, 524288, 524288, 524288 * 3, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5, 524288 * 5, 524288 * 5 },
{ 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
4, 0, HUGE_PAGE_ARRAY_SIZE / 2, HUGE_PAGE_ARRAY_SIZE * 5);
}
@@ -614,12 +614,12 @@ TEST(DataStoreTest, require_that_offset_in_EntryRefT_is_within_bounds_when_alloc
* 4) Cap bytes to alloc to the max offset EntryRef can handle.
* The max bytes to alloc is: maxArrays * arraySize * elementSize.
*/
- assertGrowStats<uint8_t>({8192,16384,16384,32768,32768,65536,98304,98304,98304,98304,98304,98304}, 3);
- assertGrowStats<uint8_t>({16384,16384,32768,32768,65536,65536,131072,163840,163840,163840,163840,163840}, 5);
- assertGrowStats<uint8_t>({16384,32768,32768,65536,65536,131072,229376,229376,229376,229376,229376,229376}, 7);
- assertGrowStats<uint32_t>({8192,16384,16384,32768,32768,65536,98304,98304,98304,98304,98304,98304}, 3);
- assertGrowStats<uint32_t>({16384,16384,32768,32768,65536,65536,131072,163840,163840,163840,163840,163840}, 5);
- assertGrowStats<uint32_t>({16384,32768,32768,65536,65536,131072,229376,229376,229376,229376,229376,229376}, 7);
+ assertGrowStats<uint8_t>({8192,16384,16384,65536,65536,98304,98304,98304,98304,98304,98304,98304}, 3);
+ assertGrowStats<uint8_t>({16384,16384,65536,65536,65536,131072,163840,163840,163840,163840,163840,163840}, 5);
+ assertGrowStats<uint8_t>({16384,32768,32768,131072,131072,229376,229376,229376,229376,229376,229376,229376}, 7);
+ assertGrowStats<uint32_t>({8192,16384,16384,65536,65536,98304,98304,98304,98304,98304,98304,98304}, 3);
+ assertGrowStats<uint32_t>({16384,16384,65536,65536,65536,131072,163840,163840,163840,163840,163840,163840}, 5);
+ assertGrowStats<uint32_t>({16384,32768,32768,131072,131072,229376,229376,229376,229376,229376,229376,229376}, 7);
}
namespace {
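The expected grow sequences in the "resize" cases above shift to larger values because a resize now sizes the new allocation from the live elements of all active buffers of the type rather than from the buffer being resized alone; the "always switch to new buffer" cases are untouched by this diff.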
diff --git a/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp b/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp
index b04547226c0..eb5865cd68c 100644
--- a/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/buffer_type.cpp
@@ -108,22 +108,22 @@ size_t
BufferTypeBase::calcArraysToAlloc(uint32_t bufferId, ElemCount elemsNeeded, bool resizing) const
{
size_t reservedElems = getReservedElements(bufferId);
+ BufferCounts last_bc;
BufferCounts bc;
if (resizing) {
if (!_aggr_counts.empty()) {
- bc = _aggr_counts.last_buffer();
+ last_bc = _aggr_counts.last_buffer();
}
- } else {
- bc = _aggr_counts.all_buffers();
}
+ bc = _aggr_counts.all_buffers();
assert((bc.used_elems % _arraySize) == 0);
assert((bc.dead_elems % _arraySize) == 0);
assert(bc.used_elems >= bc.dead_elems);
- size_t neededArrays = (elemsNeeded + (resizing ? bc.used_elems : reservedElems) + _arraySize - 1) / _arraySize;
+ size_t neededArrays = (elemsNeeded + (resizing ? last_bc.used_elems : reservedElems) + _arraySize - 1) / _arraySize;
size_t liveArrays = (bc.used_elems - bc.dead_elems) / _arraySize;
size_t growArrays = (liveArrays * _allocGrowFactor);
- size_t usedArrays = bc.used_elems / _arraySize;
+ size_t usedArrays = last_bc.used_elems / _arraySize;
size_t wantedArrays = std::max((resizing ? usedArrays : 0u) + growArrays,
static_cast<size_t>(_minArrays));
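Below is a minimal, self-contained sketch of the revised sizing logic, for illustration only; it is not the Vespa implementation. The free-function form, the helper name calc_arrays_to_alloc and the plain BufferCounts struct are assumptions, the return value is simply max(needed, wanted), and the capping against the type's maximum array count performed by the real calcArraysToAlloc() is omitted. The array size of 4 and grow factor of 0.5 in main() mirror the buffer_type unit test above.

// Standalone sketch of the revised calcArraysToAlloc() sizing (illustration only,
// not the Vespa code). The maxArrays cap applied by the real implementation is
// omitted; the result here is simply max(needed, wanted).
#include <algorithm>
#include <cstddef>
#include <iostream>

struct BufferCounts {
    std::size_t used_elems = 0;
    std::size_t dead_elems = 0;
};

std::size_t calc_arrays_to_alloc(std::size_t elems_needed, std::size_t reserved_elems,
                                 const BufferCounts& all_buffers,  // aggregated over all active buffers of this type
                                 const BufferCounts& last_buffer,  // the buffer being resized (ignored when !resizing)
                                 std::size_t array_size, double grow_factor,
                                 std::size_t min_arrays, bool resizing)
{
    // Base: elements needed plus either the resized buffer's own used elements
    // (its content is kept) or the reserved elements of a fresh buffer.
    std::size_t needed_arrays =
        (elems_needed + (resizing ? last_buffer.used_elems : reserved_elems) + array_size - 1) / array_size;
    // Growth is proportional to live elements across all active buffers of the
    // type; this is the part the commit changes for the resizing path.
    std::size_t live_arrays = (all_buffers.used_elems - all_buffers.dead_elems) / array_size;
    std::size_t grow_arrays = static_cast<std::size_t>(live_arrays * grow_factor);
    std::size_t used_arrays = last_buffer.used_elems / array_size;
    std::size_t wanted_arrays = std::max((resizing ? used_arrays : std::size_t(0)) + grow_arrays, min_arrays);
    return std::max(needed_arrays, wanted_arrays);
}

int main()
{
    // Mirrors the updated resizing test: one buffer with 6*4 used / 2*4 dead elements,
    // a second (resizing) buffer with 12*4 used / 4*4 dead, array size 4, grow factor 0.5.
    BufferCounts all{(6 + 12) * 4, (2 + 4) * 4};
    BufferCounts last{12 * 4, 4 * 4};
    std::cout << calc_arrays_to_alloc(0, 0, all, last, 4, 0.5, 0, true) << '\n';  // prints 18
}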