author    Tor Egge <Tor.Egge@online.no>    2021-08-16 10:17:41 +0200
committer Tor Egge <Tor.Egge@online.no>    2021-08-16 10:17:41 +0200
commit    18f047e27d5df8a8fc3cfb4b759d4495e2859860 (patch)
tree      6140f6e503be843409e37723e020cde815635021 /vespalib/src/tests/datastore
parent    e536003faa53fd4df212010357d2327946122c14 (diff)
Consider reusing active buffer.
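
The intent of the change, as reflected in the new datastore test further down, is that when a type's "arrays needed for a new buffer" threshold is non-zero, an already active buffer with enough free capacity can be taken back into use as the primary buffer instead of always allocating a fresh one. A minimal sketch of that decision, assuming hypothetical names (BufferInfo, pick_primary_buffer) that are not part of the vespalib::datastore API:

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative sketch only; BufferInfo and pick_primary_buffer are invented
// names, not the actual vespalib::datastore implementation.
struct BufferInfo {
    uint32_t id;
    size_t   free_arrays;   // unused capacity of the buffer, in arrays
};

// Returns the id of an active buffer to reuse as primary buffer, or -1 if a
// new buffer should be allocated. Reuse is only considered when the request
// is below the "arrays needed for a new buffer" threshold.
int pick_primary_buffer(const std::vector<BufferInfo>& active,
                        size_t needed_arrays,
                        size_t num_arrays_for_new_buffer)
{
    if (needed_arrays < num_arrays_for_new_buffer) {
        for (const auto& buf : active) {
            if (buf.free_arrays >= needed_arrays) {
                return static_cast<int>(buf.id);   // reuse active buffer
            }
        }
    }
    return -1;   // fall back to allocating and switching to a new buffer
}

This matches the new test added in datastore_test.cpp below: with a threshold of 0 the buffer ids keep increasing (1, 2, 3, ...), while with a threshold of 16 the ids cycle through the already active buffers (1, 2, 3, 4, 1, 2, ...).
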
Diffstat (limited to 'vespalib/src/tests/datastore')
-rw-r--r--  vespalib/src/tests/datastore/array_store/array_store_test.cpp   6
-rw-r--r--  vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp  14
-rw-r--r--  vespalib/src/tests/datastore/datastore/datastore_test.cpp      40
3 files changed, 45 insertions(+), 15 deletions(-)
diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
index 417d8b80d87..562ecaaecfa 100644
--- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp
+++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
@@ -150,13 +150,13 @@ TEST("require that we test with trivial and non-trivial types")
TEST_F("control static sizes", NumberFixture(3)) {
#ifdef _LIBCPP_VERSION
- EXPECT_EQUAL(400u, sizeof(f.store));
+ EXPECT_EQUAL(424u, sizeof(f.store));
EXPECT_EQUAL(296u, sizeof(NumberFixture::ArrayStoreType::DataStoreType));
#else
- EXPECT_EQUAL(432u, sizeof(f.store));
+ EXPECT_EQUAL(456u, sizeof(f.store));
EXPECT_EQUAL(328u, sizeof(NumberFixture::ArrayStoreType::DataStoreType));
#endif
- EXPECT_EQUAL(72u, sizeof(NumberFixture::ArrayStoreType::SmallArrayType));
+ EXPECT_EQUAL(96u, sizeof(NumberFixture::ArrayStoreType::SmallArrayType));
MemoryUsage usage = f.store.getMemoryUsage();
EXPECT_EQUAL(960u, usage.allocatedBytes());
EXPECT_EQUAL(32u, usage.usedBytes());
diff --git a/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
index 414c35864ac..4cd192f602f 100644
--- a/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
+++ b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
@@ -49,7 +49,7 @@ struct Fixture {
}
~Fixture() {
for (auto& setup : setups) {
- bufferType.onHold(&setup._usedElems, &setup._deadElems);
+ bufferType.onHold(setup._bufferId, &setup._usedElems, &setup._deadElems);
bufferType.onFree(setup._usedElems);
}
}
@@ -134,9 +134,9 @@ TEST("arrays to alloc considers used elements across all active buffers of same
{
Fixture f(Setup().used(6 * 4));
f.assertArraysToAlloc(6 * 0.5);
- f.add_setup(Setup().used(8 * 4));
+ f.add_setup(Setup().used(8 * 4).bufferId(2));
f.assertArraysToAlloc((6 + 8) * 0.5);
- f.add_setup(Setup().used(10 * 4));
+ f.add_setup(Setup().used(10 * 4).bufferId(3));
f.assertArraysToAlloc((6 + 8 + 10) * 0.5);
}
@@ -144,7 +144,7 @@ TEST("arrays to alloc considers used elements across all active buffers of same
{
Fixture f(Setup().used(6 * 4));
f.assertArraysToAlloc(6 * 0.5);
- f.add_setup(Setup().used(8 * 4).resizing(true));
+ f.add_setup(Setup().used(8 * 4).resizing(true).bufferId(2));
f.assertArraysToAlloc(8 + (6 + 8) * 0.5);
}
@@ -152,9 +152,9 @@ TEST("arrays to alloc considers (and subtracts) dead elements across all active
{
Fixture f(Setup().used(6 * 4).dead(2 * 4));
f.assertArraysToAlloc((6 - 2) * 0.5);
- f.add_setup(Setup().used(12 * 4).dead(4 * 4));
+ f.add_setup(Setup().used(12 * 4).dead(4 * 4).bufferId(2));
f.assertArraysToAlloc((6 - 2 + 12 - 4) * 0.5);
- f.add_setup(Setup().used(20 * 4).dead(6 * 4));
+ f.add_setup(Setup().used(20 * 4).dead(6 * 4).bufferId(3));
f.assertArraysToAlloc((6 - 2 + 12 - 4 + 20 - 6) * 0.5);
}
@@ -162,7 +162,7 @@ TEST("arrays to alloc considers (and subtracts) dead elements across all active
{
Fixture f(Setup().used(6 * 4).dead(2 * 4));
f.assertArraysToAlloc((6 - 2) * 0.5);
- f.add_setup(Setup().used(12 * 4).dead(4 * 4).resizing(true));
+ f.add_setup(Setup().used(12 * 4).dead(4 * 4).resizing(true).bufferId(2));
f.assertArraysToAlloc(12 + (6 - 2 + 12 - 4) * 0.5);
}
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
index 548ab9199da..1c4817ea35f 100644
--- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -55,6 +55,7 @@ public:
using GrowthStats = std::vector<int>;
+using BufferStats = std::vector<int>;
constexpr float ALLOC_GROW_FACTOR = 0.4;
constexpr size_t HUGE_PAGE_ARRAY_SIZE = (MemoryAllocator::HUGEPAGE_SIZE / sizeof(int));
@@ -123,6 +124,19 @@ public:
++i;
}
}
+ BufferStats getBuffers(size_t bufs) {
+ BufferStats buffers;
+ while (buffers.size() < bufs) {
+ RefType iRef = (_type.getArraySize() == 1) ?
+ (_store.template allocator<DataType>(_typeId).alloc().ref) :
+ (_store.template allocator<DataType>(_typeId).allocArray(_type.getArraySize()).ref);
+ int buffer_id = iRef.bufferId();
+ if (buffers.empty() || buffers.back() != buffer_id) {
+ buffers.push_back(buffer_id);
+ }
+ }
+ return buffers;
+ }
vespalib::MemoryUsage getMemoryUsage() const { return _store.getMemoryUsage(); }
};
@@ -563,7 +577,7 @@ TEST(DataStoreTest, require_that_buffer_growth_works)
assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
{ 4 }, 20, 4, 0);
// Resize if buffer size is less than 4, min size 0
- assertGrowStats({ 4, 4, 8, 32, 32, 32, 64, 128, 128, 128 },
+ assertGrowStats({ 4, 4, 8, 32, 32, 64, 64, 128, 128, 128 },
{ 0, 1, 2, 4 }, 4, 0, 4);
// Always switch to new buffer, min size 16
assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
@@ -580,7 +594,7 @@ TEST(DataStoreTest, require_that_buffer_growth_works)
// Buffers with sizes larger than the huge page size of the mmap allocator.
ASSERT_EQ(524288u, HUGE_PAGE_ARRAY_SIZE);
- assertGrowStats({ 262144, 524288, 524288, 524288 * 3, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5, 524288 * 5, 524288 * 5 },
+ assertGrowStats({ 262144, 524288, 524288, 524288 * 3, 524288 * 3, 524288 * 5, 524288 * 5, 524288 * 5, 524288 * 5, 524288 * 5 },
{ 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
4, 0, HUGE_PAGE_ARRAY_SIZE / 2, HUGE_PAGE_ARRAY_SIZE * 5);
}
@@ -615,10 +629,10 @@ TEST(DataStoreTest, require_that_offset_in_EntryRefT_is_within_bounds_when_alloc
* The max bytes to alloc is: maxArrays * arraySize * elementSize.
*/
assertGrowStats<uint8_t>({8192,16384,16384,65536,65536,98304,98304,98304,98304,98304,98304,98304}, 3);
- assertGrowStats<uint8_t>({16384,16384,65536,65536,65536,131072,163840,163840,163840,163840,163840,163840}, 5);
+ assertGrowStats<uint8_t>({16384,16384,65536,65536,131072,131072,163840,163840,163840,163840,163840,163840}, 5);
assertGrowStats<uint8_t>({16384,32768,32768,131072,131072,229376,229376,229376,229376,229376,229376,229376}, 7);
assertGrowStats<uint32_t>({8192,16384,16384,65536,65536,98304,98304,98304,98304,98304,98304,98304}, 3);
- assertGrowStats<uint32_t>({16384,16384,65536,65536,65536,131072,163840,163840,163840,163840,163840,163840}, 5);
+ assertGrowStats<uint32_t>({16384,16384,65536,65536,131072,131072,163840,163840,163840,163840,163840,163840}, 5);
assertGrowStats<uint32_t>({16384,32768,32768,131072,131072,229376,229376,229376,229376,229376,229376,229376}, 7);
}
@@ -666,8 +680,24 @@ TEST(DataStoreTest, can_set_memory_allocator)
EXPECT_EQ(AllocStats(3, 3), stats);
}
+namespace {
+
+void
+assertBuffers(BufferStats exp_buffers, size_t num_arrays_for_new_buffer)
+{
+ EXPECT_EQ(exp_buffers, IntGrowStore(1, 1, 1024, num_arrays_for_new_buffer).getBuffers(exp_buffers.size()));
+}
+
+}
+
+TEST(DataStoreTest, can_reuse_active_buffer_as_primary_buffer)
+{
+ assertBuffers({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 0);
+ assertBuffers({ 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3}, 16);
+}
+
TEST(DataStoreTest, control_static_sizes) {
- EXPECT_EQ(72, sizeof(BufferTypeBase));
+ EXPECT_EQ(96, sizeof(BufferTypeBase));
EXPECT_EQ(32, sizeof(BufferState::FreeList));
EXPECT_EQ(1, sizeof(BufferState::State));
EXPECT_EQ(144, sizeof(BufferState));